From fb94b40c2333db585e081f6ba8ff94ce7429429b Mon Sep 17 00:00:00 2001 From: EC2 Default User Date: Wed, 14 Aug 2024 01:05:06 +0000 Subject: [PATCH] Refactor to cache AWS resources --- bp-base.json | 17 +- main.py | 12 +- models.py | 10 + services/__init__.py | 1 - services/_lambda.py | 171 ++++++----- services/alb.py | 239 +++++++++------- services/api_gw.py | 356 ++++++++++++----------- services/asg.py | 98 ++++--- services/cloudfront.py | 274 +++++++++--------- services/cloudwatch.py | 107 +++---- services/codeseries.py | 132 +++++---- services/dynamodb.py | 292 +++++++++---------- services/ec2.py | 236 +++++++--------- services/ecr.py | 151 +++++----- services/ecs.py | 341 +++++++++++----------- services/efs.py | 220 ++++++++------- services/eks.py | 131 ++++----- services/elasticache.py | 218 +++++++------- services/iam.py | 163 ++++++----- services/kms.py | 44 +-- services/rds.py | 550 +++++++++++++++++++----------------- services/s3.py | 402 +++++++++++++------------- services/secrets_manager.py | 144 +++++----- services/security_hub.py | 47 +-- services/sns.py | 87 +++--- services/tags.py | 11 - services/vpc.py | 456 +++++++++++++++--------------- services/wafv2.py | 240 +++++++++------- 28 files changed, 2676 insertions(+), 2474 deletions(-) delete mode 100644 services/tags.py diff --git a/bp-base.json b/bp-base.json index bd040a9..48f4056 100644 --- a/bp-base.json +++ b/bp-base.json @@ -124,16 +124,16 @@ "autoscaling-multiple-az": { "enabled": true, "level": 2 + }, + "autoscaling-launch-template": { + "enabled": true, + "level": 2 } } }, "EC2": { "enabled": true, "rules": { - "autoscaling-launch-template": { - "enabled": true, - "level": 2 - }, "ec2-ebs-encryption-by-default": { "enabled": true, "level": 2 @@ -432,15 +432,6 @@ } } }, - "Tags": { - "enabled": true, - "rules": { - "required-tags": { - "enabled": true, - "level": 2 - } - } - }, "S3": { "enabled": true, "rules": { diff --git a/main.py b/main.py index 602233b..e049590 100644 --- a/main.py +++ b/main.py @@ -2,6 +2,8 @@ from InquirerLib import prompt from InquirerLib.InquirerPy.utils import InquirerPyKeybindings from InquirerLib.InquirerPy.base import Choice from colorama import Style, Fore +from datetime import datetime +from importlib import import_module from utils import * import services @@ -40,12 +42,18 @@ def perform_bp_rules_check(bp): if service_name == "Lambda": service_name = "_lambda" - module = getattr(services, convert_snake_case(service_name)) + now = datetime.now() + rule_checker = getattr( + services, convert_snake_case(service_name) + ).rule_checker() + for rule_name, rule in service["rules"].items(): if not rule["enabled"]: continue + rule["result"] = rule_checker.check_rule(convert_snake_case(rule_name)) - rule["result"] = getattr(module, convert_snake_case(rule_name))() + elapsed_time = datetime.now() - now + print(convert_snake_case(service_name), elapsed_time.total_seconds()) return bp diff --git a/models.py b/models.py index 0af4edb..26ca79e 100644 --- a/models.py +++ b/models.py @@ -1,4 +1,5 @@ from pydantic import BaseModel +from utils import convert_snake_case from typing import List @@ -6,3 +7,12 @@ class RuleCheckResult(BaseModel): passed: bool compliant_resources: List[str] non_compliant_resources: List[str] + + +class RuleChecker: + def __init__(self): + pass + + def check_rule(self, rule_name) -> RuleCheckResult: + check_func = getattr(self, convert_snake_case(rule_name)) + return check_func() diff --git a/services/__init__.py b/services/__init__.py index 940935f..60201b4 100644 --- 
a/services/__init__.py +++ b/services/__init__.py @@ -16,7 +16,6 @@ from . import ( elasticache, iam, _lambda, - tags, s3, secrets_manager, security_hub, diff --git a/services/_lambda.py b/services/_lambda.py index bf7248a..21f51a3 100644 --- a/services/_lambda.py +++ b/services/_lambda.py @@ -1,91 +1,106 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property + import boto3 import json -client = boto3.client("lambda") -iam_client = boto3.client("iam") +class LambdaRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("lambda") + self.iam_client = boto3.client("iam") + @cached_property + def functions(self): + return self.client.list_functions()["Functions"] -def lambda_dlq_check(): - compliant_resource = [] - non_compliant_resources = [] - functions = client.list_functions()["Functions"] + def lambda_dlq_check(self): + compliant_resource = [] + non_compliant_resources = [] - for function in functions: - if "DeadLetterConfig" in function: - compliant_resource.append(function["FunctionArn"]) - else: - non_compliant_resources.append(function["FunctionArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def lambda_function_public_access_prohibited(): - compliant_resource = [] - non_compliant_resources = [] - functions = client.list_functions()["Functions"] - - for function in functions: - try: - policy = json.loads(client.get_policy(FunctionName=function["FunctionName"])["Policy"]) - for statement in policy["Statement"]: - if statement["Principal"] in ["*", "", '{"AWS": ""}', '{"AWS": "*"}']: - non_compliant_resources.append(function["FunctionArn"]) - break - else: + for function in self.functions: + if "DeadLetterConfig" in function: compliant_resource.append(function["FunctionArn"]) - except Exception as e: - if e.__class__.__name__ == "ResourceNotFoundException": + else: + non_compliant_resources.append(function["FunctionArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def lambda_function_public_access_prohibited(self): + compliant_resource = [] + non_compliant_resources = [] + + for function in self.functions: + try: + policy = json.loads( + self.client.get_policy(FunctionName=function["FunctionName"])[ + "Policy" + ] + ) + for statement in policy["Statement"]: + if statement["Principal"] in [ + "*", + "", + '{"AWS": ""}', + '{"AWS": "*"}', + ]: + non_compliant_resources.append(function["FunctionArn"]) + break + else: + compliant_resource.append(function["FunctionArn"]) + except Exception as e: + if e.__class__.__name__ == "ResourceNotFoundException": + non_compliant_resources.append(function["FunctionArn"]) + else: + raise e + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def lambda_function_settings_check(self): + compliant_resource = [] + non_compliant_resources = [] + + default_timeout = 3 + default_memory_size = 128 + + for function in self.functions: + if ( + function["Timeout"] == default_timeout + or function["MemorySize"] == default_memory_size + ): non_compliant_resources.append(function["FunctionArn"]) else: - raise e + compliant_resource.append(function["FunctionArn"]) - return RuleCheckResult( - passed=not 
non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def lambda_inside_vpc(self): + compliant_resource = [] + non_compliant_resources = [] + + for function in self.functions: + if "VpcConfig" in function: + compliant_resource.append(function["FunctionArn"]) + else: + non_compliant_resources.append(function["FunctionArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) -def lambda_function_settings_check(): - compliant_resource = [] - non_compliant_resources = [] - functions = client.list_functions()["Functions"] - - default_timeout = 3 - default_memory_size = 128 - - for function in functions: - if function["Timeout"] == default_timeout or function["MemorySize"] == default_memory_size: - non_compliant_resources.append(function["FunctionArn"]) - else: - compliant_resource.append(function["FunctionArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def lambda_inside_vpc(): - compliant_resource = [] - non_compliant_resources = [] - functions = client.list_functions()["Functions"] - - for function in functions: - if "VpcConfig" in function: - compliant_resource.append(function["FunctionArn"]) - else: - non_compliant_resources.append(function["FunctionArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = LambdaRuleChecker diff --git a/services/alb.py b/services/alb.py index b6763fe..b8268ef 100644 --- a/services/alb.py +++ b/services/alb.py @@ -1,123 +1,150 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -client = boto3.client("elbv2") -wafv2_client = boto3.client("wafv2") +class ALBRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("elbv2") + self.wafv2_client = boto3.client("wafv2") -def alb_http_drop_invalid_header_enabled(): - load_balancers = client.describe_load_balancers() - compliant_resource = [] - non_compliant_resources = [] - for load_balancer in load_balancers['LoadBalancers']: - response = client.describe_load_balancer_attributes( - LoadBalancerArn=load_balancer['LoadBalancerArn'] - ) - result = [ - attribute - for attribute in filter( - lambda x: x['Key'] == "routing.http.drop_invalid_header_fields.enabled" - and x['Value'] == "true", - response['Attributes'], + @cached_property + def load_balancers(self): + return self.client.describe_load_balancers()["LoadBalancers"] + + @cached_property + def load_balancer_attributes(self): + responses = [ + self.client.describe_load_balancer_attributes( + LoadBalancerArn=load_balancer["LoadBalancerArn"] ) + for load_balancer in self.load_balancers ] - if result: compliant_resource.append(load_balancer['LoadBalancerArn']) - else: non_compliant_resources.append(load_balancer['LoadBalancerArn']) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) + return { + load_balancer["LoadBalancerArn"]: response + for load_balancer, 
response in zip(self.load_balancers, responses) + } + def alb_http_drop_invalid_header_enabled(self): + compliant_resource = [] + non_compliant_resources = [] -def alb_waf_enabled(): - load_balancers = client.describe_load_balancers() - compliant_resource = [] - non_compliant_resources = [] - for load_balancer in load_balancers['LoadBalancers']: - response = wafv2_client.get_web_acl_for_resource( - ResourceArn=load_balancer['LoadBalancerArn'] + for load_balancer in self.load_balancers: + response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]] + result = [ + attribute + for attribute in filter( + lambda x: x["Key"] + == "routing.http.drop_invalid_header_fields.enabled" + and x["Value"] == "true", + response["Attributes"], + ) + ] + if result: + compliant_resource.append(load_balancer["LoadBalancerArn"]) + else: + non_compliant_resources.append(load_balancer["LoadBalancerArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, ) - - if 'WebACL' in response: compliant_resource.append(load_balancer['LoadBalancerArn']) - else: non_compliant_resources.append(load_balancer['LoadBalancerArn']) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) + def alb_waf_enabled(self): + compliant_resource = [] + non_compliant_resources = [] -def elb_cross_zone_load_balancing_enabled(): - load_balancers = client.describe_load_balancers() - compliant_resource = [] - non_compliant_resources = [] - for load_balancer in load_balancers['LoadBalancers']: - response = client.describe_load_balancer_attributes( - LoadBalancerArn=load_balancer['LoadBalancerArn'] - ) - result = [ - attribute - for attribute in filter( - lambda x: x['Key'] == "load_balancing.cross_zone.enabled" - and x['Value'] == "true", - response['Attributes'], + for load_balancer in self.load_balancers: + response = self.wafv2_client.get_web_acl_for_resource( + ResourceArn=load_balancer["LoadBalancerArn"] ) - ] - if result: compliant_resource.append(load_balancer['LoadBalancerArn']) - else: non_compliant_resources.append(load_balancer['LoadBalancerArn']) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - -def elb_deletion_protection_enabled(): - load_balancers = client.describe_load_balancers() - compliant_resource = [] - non_compliant_resources = [] - for load_balancer in load_balancers['LoadBalancers']: - response = client.describe_load_balancer_attributes( - LoadBalancerArn=load_balancer['LoadBalancerArn'] + if "WebACL" in response: + compliant_resource.append(load_balancer["LoadBalancerArn"]) + else: + non_compliant_resources.append(load_balancer["LoadBalancerArn"]) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, ) - result = [ - attribute - for attribute in filter( - lambda x: x['Key'] == "deletion_protection.enabled" - and x['Value'] == "true", - response['Attributes'], - ) - ] - if result: compliant_resource.append(load_balancer['LoadBalancerArn']) - else: non_compliant_resources.append(load_balancer['LoadBalancerArn']) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) + def 
elb_cross_zone_load_balancing_enabled(self): + compliant_resource = [] + non_compliant_resources = [] -def elb_logging_enabled(): - load_balancers = client.describe_load_balancers() - compliant_resource = [] - non_compliant_resources = [] - for load_balancer in load_balancers['LoadBalancers']: - response = client.describe_load_balancer_attributes( - LoadBalancerArn=load_balancer['LoadBalancerArn'] + for load_balancer in self.load_balancers: + response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]] + result = [ + attribute + for attribute in filter( + lambda x: x["Key"] == "load_balancing.cross_zone.enabled" + and x["Value"] == "true", + response["Attributes"], + ) + ] + if result: + compliant_resource.append(load_balancer["LoadBalancerArn"]) + else: + non_compliant_resources.append(load_balancer["LoadBalancerArn"]) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, ) - result = [ - attribute - for attribute in filter( - lambda x: x['Key'] == "access_logs.s3.enabled" - and x['Value'] == "true", - response['Attributes'], - ) - ] - if result: compliant_resource.append(load_balancer['LoadBalancerArn']) - else: non_compliant_resources.append(load_balancer['LoadBalancerArn']) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) + + def elb_deletion_protection_enabled(self): + compliant_resource = [] + non_compliant_resources = [] + + for load_balancer in self.load_balancers: + response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]] + + result = [ + attribute + for attribute in filter( + lambda x: x["Key"] == "deletion_protection.enabled" + and x["Value"] == "true", + response["Attributes"], + ) + ] + if result: + compliant_resource.append(load_balancer["LoadBalancerArn"]) + else: + non_compliant_resources.append(load_balancer["LoadBalancerArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def elb_logging_enabled(self): + compliant_resource = [] + non_compliant_resources = [] + + for load_balancer in self.load_balancers: + response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]] + + result = [ + attribute + for attribute in filter( + lambda x: x["Key"] == "access_logs.s3.enabled" + and x["Value"] == "true", + response["Attributes"], + ) + ] + if result: + compliant_resource.append(load_balancer["LoadBalancerArn"]) + else: + non_compliant_resources.append(load_balancer["LoadBalancerArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + +rule_checker = ALBRuleChecker diff --git a/services/api_gw.py b/services/api_gw.py index 5ae5266..a5f61a7 100644 --- a/services/api_gw.py +++ b/services/api_gw.py @@ -1,189 +1,203 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -v1_client = boto3.client("apigateway") -v2_client = boto3.client("apigatewayv2") +class APIGatewayRuleChecker(RuleChecker): + def __init__(self): + self.v1_client = boto3.client("apigateway") + self.v2_client = boto3.client("apigatewayv2") + @cached_property + def http_apis(self): + return self.v2_client.get_apis()["Items"] -def 
api_gwv2_access_logs_enabled(): - apis = v2_client.get_apis() - compliant_resources = [] - non_compliant_resources = [] + @cached_property + def rest_apis(self): + return self.v1_client.get_rest_apis()["items"] - for api in apis["Items"]: - stages = v2_client.get_stages( - ApiId=api["ApiId"], - ) - - non_compliant_resources += [ - f"{api['Name']} / {stage['StageName']}" - for stage in stages["Items"] - if "AccessLogSettings" not in stage - ] - - compliant_resources += list( - set([f"{api['Name']} / {stage['StageName']}" for stage in stages["Items"]]) - - set(non_compliant_resources) - ) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def api_gwv2_authorization_type_configured(): - apis = v2_client.get_apis() - compliant_resources = [] - non_compliant_resources = [] - - for api in apis["Items"]: - response = v2_client.get_routes( - ApiId=api["ApiId"], - ) - - non_compliant_resources += [ - f"{api['Name']} / {route['RouteKey']}" - for route in response["Items"] - if route["AuthorizationType"] == "NONE" - ] - - compliant_resources += list( - set([f"{api['Name']} / {route['RouteKey']}" for route in response["Items"]]) - - set(non_compliant_resources) - ) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def api_gw_associated_with_waf(): - apis = v1_client.get_rest_apis() - compliant_resources = [] - non_compliant_resources = [] - - for api in apis["items"]: - stages = v1_client.get_stages( - restApiId=api["id"], - ) - - for stage in stages["item"]: - stage_arn = f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" - - if "webAclArn" in stage: - compliant_resources.append(stage_arn) - else: - non_compliant_resources.append(stage_arn) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def api_gw_cache_enabled_and_encrypted(): - apis = v1_client.get_rest_apis() - compliant_resources = [] - non_compliant_resources = [] - - for api in apis["items"]: - stages = v1_client.get_stages( - restApiId=api["id"], - ) - - non_compliant_resources += [ - f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" - for stage in stages["item"] - if not "*/*" in stage["methodSettings"] - or ( - not stage["methodSettings"]["*/*"]["cachingEnabled"] - or not stage["methodSettings"]["*/*"]["cacheDataEncrypted"] + @cached_property + def rest_api_stages(self): + responses = [ + self.v1_client.get_stages( + restApiId=api["id"], ) + for api in self.rest_apis ] - compliant_resources += list( - set( - [ - f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" - for stage in stages["item"] - ] + return {api["id"]: response for api, response in zip(self.rest_apis, responses)} + + def api_gwv2_access_logs_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + for api in self.http_apis: + stages = self.v2_client.get_stages( + ApiId=api["ApiId"], ) - - set(non_compliant_resources) - ) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + non_compliant_resources += [ + f"{api['Name']} / {stage['StageName']}" + 
for stage in stages["Items"] + if "AccessLogSettings" not in stage + ] - -def api_gw_execution_logging_enabled(): - apis = v1_client.get_rest_apis() - compliant_resources = [] - non_compliant_resources = [] - for api in apis["items"]: - stages = v1_client.get_stages( - restApiId=api["id"], - ) - - non_compliant_resources += [ - f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" - for stage in stages["item"] - if not "*/*" in stage["methodSettings"] - or ( - not "loggingLevel" in stage["methodSettings"]["*/*"] - or stage["methodSettings"]["*/*"]["loggingLevel"] == "OFF" + compliant_resources += list( + set( + [ + f"{api['Name']} / {stage['StageName']}" + for stage in stages["Items"] + ] + ) + - set(non_compliant_resources) ) - ] - compliant_resources += list( - set( - [ - f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" - for stage in stages["item"] - ] + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def api_gwv2_authorization_type_configured(self): + compliant_resources = [] + non_compliant_resources = [] + + for api in self.http_apis: + response = self.v2_client.get_routes( + ApiId=api["ApiId"], ) - - set(non_compliant_resources) - ) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + non_compliant_resources += [ + f"{api['Name']} / {route['RouteKey']}" + for route in response["Items"] + if route["AuthorizationType"] == "NONE" + ] - -def api_gw_xray_enabled(): - apis = v1_client.get_rest_apis() - compliant_resources = [] - non_compliant_resources = [] - for api in apis["items"]: - stages = v1_client.get_stages( - restApiId=api["id"], - ) - - non_compliant_resources += [ - f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" - for stage in stages["item"] - if not stage["tracingEnabled"] - ] - compliant_resources += list( - set( - [ - f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" - for stage in stages["item"] - ] + compliant_resources += list( + set( + [ + f"{api['Name']} / {route['RouteKey']}" + for route in response["Items"] + ] + ) + - set(non_compliant_resources) ) - - set(non_compliant_resources) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, ) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + def api_gw_associated_with_waf(self): + compliant_resources = [] + non_compliant_resources = [] + + for api in self.rest_apis: + stages = self.rest_api_stages[api["id"]] + + for stage in stages["item"]: + stage_arn = f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" + + if "webAclArn" in stage: + compliant_resources.append(stage_arn) + else: + non_compliant_resources.append(stage_arn) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def api_gw_cache_enabled_and_encrypted(self): + compliant_resources = [] + non_compliant_resources = [] + + for api in self.rest_apis: + 
stages = self.rest_api_stages[api["id"]] + + non_compliant_resources += [ + f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" + for stage in stages["item"] + if not "*/*" in stage["methodSettings"] + or ( + not stage["methodSettings"]["*/*"]["cachingEnabled"] + or not stage["methodSettings"]["*/*"]["cacheDataEncrypted"] + ) + ] + compliant_resources += list( + set( + [ + f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" + for stage in stages["item"] + ] + ) + - set(non_compliant_resources) + ) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def api_gw_execution_logging_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + for api in self.rest_apis: + stages = self.rest_api_stages[api["id"]] + + non_compliant_resources += [ + f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" + for stage in stages["item"] + if not "*/*" in stage["methodSettings"] + or ( + not "loggingLevel" in stage["methodSettings"]["*/*"] + or stage["methodSettings"]["*/*"]["loggingLevel"] == "OFF" + ) + ] + compliant_resources += list( + set( + [ + f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" + for stage in stages["item"] + ] + ) + - set(non_compliant_resources) + ) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def api_gw_xray_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + for api in self.rest_apis: + stages = self.rest_api_stages[api["id"]] + + non_compliant_resources += [ + f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" + for stage in stages["item"] + if not stage["tracingEnabled"] + ] + compliant_resources += list( + set( + [ + f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}" + for stage in stages["item"] + ] + ) + - set(non_compliant_resources) + ) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + +rule_checker = APIGatewayRuleChecker diff --git a/services/asg.py b/services/asg.py index efe8f85..38f5624 100644 --- a/services/asg.py +++ b/services/asg.py @@ -1,41 +1,67 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -client = boto3.client("autoscaling") +class ASGRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("autoscaling") + + @cached_property + def asgs(self): + return self.client.describe_auto_scaling_groups()["AutoScalingGroups"] + + def autoscaling_group_elb_healthcheck_required(self): + compliant_resources = [] + non_compliant_resources = [] + + for asg in self.asgs: + if ( + asg["LoadBalancerNames"] + or asg["TargetGroupARNs"] + and asg["HealthCheckType"] != "ELB" + ): + non_compliant_resources.append(asg["AutoScalingGroupARN"]) + else: + compliant_resources.append(asg["AutoScalingGroupARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + 
non_compliant_resources=non_compliant_resources, + ) + + def autoscaling_multiple_az(self): + compliant_resources = [] + non_compliant_resources = [] + + for asg in self.asgs: + if len(asg["AvailabilityZones"]) > 1: + compliant_resources.append(asg["AutoScalingGroupARN"]) + else: + non_compliant_resources.append(asg["AutoScalingGroupARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def autoscaling_launch_template(self): + compliant_resources = [] + non_compliant_resources = [] + + for asg in self.asgs: + if "LaunchConfigurationName" in asg: + non_compliant_resources.append(asg["AutoScalingGroupARN"]) + else: + compliant_resources.append(asg["AutoScalingGroupARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) -def autoscaling_group_elb_healthcheck_required(): - compliant_resources = [] - non_compliant_resources = [] - asgs = client.describe_auto_scaling_groups()["AutoScalingGroups"] - - for asg in asgs: - if asg["LoadBalancerNames"] or asg["TargetGroupARNs"] and asg["HealthCheckType"] != "ELB": - non_compliant_resources.append(asg["AutoScalingGroupARN"]) - else: - compliant_resources.append(asg["AutoScalingGroupARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def autoscaling_multiple_az(): - compliant_resources = [] - non_compliant_resources = [] - asgs = client.describe_auto_scaling_groups()["AutoScalingGroups"] - - for asg in asgs: - if len(asg["AvailabilityZones"]) > 1: - compliant_resources.append(asg["AutoScalingGroupARN"]) - else: - non_compliant_resources.append(asg["AutoScalingGroupARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = ASGRuleChecker diff --git a/services/cloudfront.py b/services/cloudfront.py index 05448db..7e78237 100644 --- a/services/cloudfront.py +++ b/services/cloudfront.py @@ -1,138 +1,152 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -client = boto3.client("cloudfront") +class CloudFrontRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("cloudfront") + @cached_property + def distributions(self): + return self.client.list_distributions()["DistributionList"]["Items"] -def cloudfront_accesslogs_enabled(): - compliant_resources = [] - non_compliant_resources = [] - distributions = client.list_distributions()["DistributionList"]["Items"] - - for distribution in distributions: - distribution = client.get_distribution(Id=distribution["Id"])["Distribution"] - if ( - "Logging" in distribution["DistributionConfig"] - and distribution["DistributionConfig"]["Logging"]["Enabled"] == True - ): - compliant_resources.append(distribution["ARN"]) - else: - non_compliant_resources.append(distribution["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def cloudfront_associated_with_waf(): - compliant_resources = [] - non_compliant_resources = [] - distributions = client.list_distributions()["DistributionList"]["Items"] - - for distribution in 
distributions: - if "WebACLId" in distribution and distribution["WebACLId"] != "": - compliant_resources.append(distribution["ARN"]) - else: - non_compliant_resources.append(distribution["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def cloudfront_default_root_object_configured(): - compliant_resources = [] - non_compliant_resources = [] - distributions = client.list_distributions()["DistributionList"]["Items"] - - for distribution in distributions: - distribution = client.get_distribution(Id=distribution["Id"])["Distribution"] - - if distribution["DistributionConfig"]["DefaultRootObject"] != "": - compliant_resources.append(distribution["ARN"]) - else: - non_compliant_resources.append(distribution["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def cloudfront_no_deprecated_ssl_protocols(): - compliant_resources = [] - non_compliant_resources = [] - distributions = client.list_distributions()["DistributionList"]["Items"] - - for distribution in distributions: - for origin in distribution["Origins"]["Items"]: - if ( - "CustomOriginConfig" in origin - and origin["CustomOriginConfig"]["OriginProtocolPolicy"] in ["https-only", "match-viewer"] - and "SSLv3" in origin["CustomOriginConfig"]["OriginSslProtocols"]["Items"] - ): - - non_compliant_resources.append(distribution["ARN"]) - break - else: - compliant_resources.append(distribution["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def cloudfront_s3_origin_access_control_enabled(): - compliant_resources = [] - non_compliant_resources = [] - distributions = client.list_distributions()["DistributionList"] - - for distribution in distributions["Items"]: - for origin in distribution["Origins"]["Items"]: - if "S3OriginConfig" in origin and origin["OriginAccessControlId"] == "": - non_compliant_resources.append(distribution["ARN"]) - break - else: - compliant_resources.append(distribution["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def cloudfront_viewer_policy_https(): - compliant_resources = [] - non_compliant_resources = [] - distributions = client.list_distributions()["DistributionList"]["Items"] - - for distribution in distributions: - if distribution["DefaultCacheBehavior"]["ViewerProtocolPolicy"] == "allow-all": - non_compliant_resources.append(distribution["ARN"]) - continue - - allow_alls = [ - behavior - for behavior in distribution["CacheBehaviors"]["Items"] - if behavior["ViewerProtocolPolicy"] == "allow-all" + @cached_property + def distribution_details(self): + responses = [ + self.client.get_distribution(Id=distribution["Id"])["Distribution"] + for distribution in self.distributions ] - if allow_alls: - non_compliant_resources.append(distribution["ARN"]) - continue + return { + distribution["Id"]: response + for distribution, response in zip(self.distributions, responses) + } - compliant_resources.append(distribution["ARN"]) + def cloudfront_accesslogs_enabled(self): + compliant_resources = [] + non_compliant_resources = [] - return RuleCheckResult( - passed=not non_compliant_resources, - 
compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + for distribution in self.distributions: + distribution = self.distribution_details[distribution["Id"]] + if ( + "Logging" in distribution["DistributionConfig"] + and distribution["DistributionConfig"]["Logging"]["Enabled"] == True + ): + compliant_resources.append(distribution["ARN"]) + else: + non_compliant_resources.append(distribution["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def cloudfront_associated_with_waf(self): + compliant_resources = [] + non_compliant_resources = [] + + for distribution in self.distributions: + if "WebACLId" in distribution and distribution["WebACLId"] != "": + compliant_resources.append(distribution["ARN"]) + else: + non_compliant_resources.append(distribution["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def cloudfront_default_root_object_configured(self): + compliant_resources = [] + non_compliant_resources = [] + + for distribution in self.distributions: + distribution = self.distribution_details[distribution["Id"]] + + if distribution["DistributionConfig"]["DefaultRootObject"] != "": + compliant_resources.append(distribution["ARN"]) + else: + non_compliant_resources.append(distribution["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def cloudfront_no_deprecated_ssl_protocols(self): + compliant_resources = [] + non_compliant_resources = [] + + for distribution in self.distributions: + for origin in distribution["Origins"]["Items"]: + if ( + "CustomOriginConfig" in origin + and origin["CustomOriginConfig"]["OriginProtocolPolicy"] + in ["https-only", "match-viewer"] + and "SSLv3" + in origin["CustomOriginConfig"]["OriginSslProtocols"]["Items"] + ): + + non_compliant_resources.append(distribution["ARN"]) + break + else: + compliant_resources.append(distribution["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def cloudfront_s3_origin_access_control_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + for distribution in self.distributions: + for origin in distribution["Origins"]["Items"]: + if "S3OriginConfig" in origin and origin["OriginAccessControlId"] == "": + non_compliant_resources.append(distribution["ARN"]) + break + else: + compliant_resources.append(distribution["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def cloudfront_viewer_policy_https(self): + compliant_resources = [] + non_compliant_resources = [] + + for distribution in self.distributions: + if ( + distribution["DefaultCacheBehavior"]["ViewerProtocolPolicy"] + == "allow-all" + ): + non_compliant_resources.append(distribution["ARN"]) + continue + + allow_alls = [ + behavior + for behavior in distribution["CacheBehaviors"]["Items"] + if behavior["ViewerProtocolPolicy"] == "allow-all" + ] + if allow_alls: + non_compliant_resources.append(distribution["ARN"]) + continue + + compliant_resources.append(distribution["ARN"]) + + return 
RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + +rule_checker = CloudFrontRuleChecker diff --git a/services/cloudwatch.py b/services/cloudwatch.py index 1496015..4e25ed0 100644 --- a/services/cloudwatch.py +++ b/services/cloudwatch.py @@ -1,57 +1,60 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker import boto3 -client = boto3.client("cloudwatch") -logs_client = boto3.client("logs") +class CloudWatchRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("cloudwatch") + self.logs_client = boto3.client("logs") + + def cw_loggroup_retention_period_check(self): + compliant_resources = [] + non_compliant_resources = [] + log_groups = self.logs_client.describe_log_groups()["logGroups"] + + # This rule should check if `retentionInDays` is less than n days. + # But, instead of that, this will check if the retention setting is set to "Never expire" or not + for log_group in log_groups: + if "retentionInDays" in log_group: + compliant_resources.append(log_group["logGroupArn"]) + else: + non_compliant_resources.append(log_group["logGroupArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def cloudwatch_alarm_settings_check(self): + compliant_resources = [] + non_compliant_resources = [] + alarms = self.client.describe_alarms()["MetricAlarms"] + parameters = { + "MetricName": "", # required + "Threshold": None, + "EvaluationPeriods": None, + "Period": None, + "ComparisonOperator": None, + "Statistic": None, + } + + for alarm in alarms: + for check in [i for i in parameters.keys() if parameters[i] != None]: + if alarm["MetricName"] != parameters["MetricName"]: + continue + + if alarm[check] != parameters[check]: + non_compliant_resources.append(alarm["AlarmArn"]) + break + else: + compliant_resources.append(alarm["AlarmArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) -def cw_loggroup_retention_period_check(): - compliant_resources = [] - non_compliant_resources = [] - log_groups = logs_client.describe_log_groups()["logGroups"] - - # This rule should check if `retentionInDays` is less than n days. 
- # But, instead of that, this will check if the retention setting is set to "Never expire" or not - for log_group in log_groups: - if "retentionInDays" in log_group: - compliant_resources.append(log_group["logGroupArn"]) - else: - non_compliant_resources.append(log_group["logGroupArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def cloudwatch_alarm_settings_check(): - compliant_resources = [] - non_compliant_resources = [] - alarms = client.describe_alarms()["MetricAlarms"] - parameters = { - "MetricName": "", # required - "Threshold": None, - "EvaluationPeriods": None, - "Period": None, - "ComparisonOperator": None, - "Statistic": None, - } - - for alarm in alarms: - for check in [i for i in parameters.keys() if parameters[i] != None]: - if alarm["MetricName"] != parameters["MetricName"]: - continue - - if alarm[check] != parameters[check]: - non_compliant_resources.append(alarm["AlarmArn"]) - break - else: - compliant_resources.append(alarm["AlarmArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = CloudWatchRuleChecker diff --git a/services/codeseries.py b/services/codeseries.py index 527c826..ca3d013 100644 --- a/services/codeseries.py +++ b/services/codeseries.py @@ -1,75 +1,85 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -build_client = boto3.client("codebuild") +class CodeSeriesChecker(RuleChecker): + def __init__(self): + self.build_client = boto3.client("codebuild") + self.deploy_client = boto3.client("codedeploy") -deploy_client = boto3.client("codedeploy") + @cached_property + def projects(self): + project_names = self.build_client.list_projects()["projects"] + return self.build_client.batch_get_projects(names=project_names)["projects"] + def codebuild_project_environment_privileged_check(self): + compliant_resources = [] + non_compliant_resources = [] -def codebuild_project_environment_privileged_check(): - compliant_resources = [] - non_compliant_resources = [] - projects = build_client.list_projects()["projects"] + for project in self.projects: + if not project["environment"]["privilegedMode"]: + compliant_resources.append(project["arn"]) + else: + non_compliant_resources.append(project["arn"]) - for project in projects: - project = build_client.batch_get_projects(names=[project])["projects"][0] + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) - if not project["environment"]["privilegedMode"]: - compliant_resources.append(project["arn"]) - else: - non_compliant_resources.append(project["arn"]) + def codebuild_project_logging_enabled(self): + compliant_resources = [] + non_compliant_resources = [] - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def codebuild_project_logging_enabled(): - compliant_resources = [] - non_compliant_resources = [] - projects = build_client.list_projects()["projects"] - - for project in projects: - project = build_client.batch_get_projects(names=[project])["projects"][0] - logs_config = project["logsConfig"] - - if logs_config["cloudWatchLogs"]["status"] == "ENABLED" or 
logs_config["s3Logs"]["status"] == "ENABLED": - compliant_resources.append(project["arn"]) - else: - non_compliant_resources.append(project["arn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def codedeploy_auto_rollback_monitor_enabled(): - compliant_resources = [] - non_compliant_resources = [] - applications = deploy_client.list_applications()["applications"] - - for application in applications: - deployment_groups = deploy_client.list_deployment_groups(applicationName=application)["deploymentGroups"] - for deployment_group in deployment_groups: - deployment_group = deploy_client.get_deployment_group( - applicationName=application, deploymentGroupName=deployment_group - )["deploymentGroupInfo"] + for project in self.projects: + logs_config = project["logsConfig"] if ( - deployment_group["alarmConfiguration"]["enabled"] - and deployment_group["autoRollbackConfiguration"]["enabled"] + logs_config["cloudWatchLogs"]["status"] == "ENABLED" + or logs_config["s3Logs"]["status"] == "ENABLED" ): - compliant_resources.append(deployment_group["deploymentGroupId"]) + compliant_resources.append(project["arn"]) else: - non_compliant_resources.append(deployment_group["deploymentGroupId"]) + non_compliant_resources.append(project["arn"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def codedeploy_auto_rollback_monitor_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + applications = self.deploy_client.list_applications()["applications"] + for application in applications: + deployment_group_names = self.deploy_client.list_deployment_groups( + applicationName=application + )["deploymentGroups"] + deployment_groups = self.deploy_client.batch_get_deployment_groups( + applicationName=application, deploymentGroupNames=deployment_group_names + )["deploymentGroupsInfo"] + + for deployment_group in deployment_groups: + + if ( + deployment_group["alarmConfiguration"]["enabled"] + and deployment_group["autoRollbackConfiguration"]["enabled"] + ): + compliant_resources.append(deployment_group["deploymentGroupId"]) + else: + non_compliant_resources.append( + deployment_group["deploymentGroupId"] + ) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + +rule_checker = CodeSeriesChecker diff --git a/services/dynamodb.py b/services/dynamodb.py index 4907327..cb141e6 100644 --- a/services/dynamodb.py +++ b/services/dynamodb.py @@ -1,153 +1,161 @@ -from models import RuleCheckResult -import datetime +from models import RuleCheckResult, RuleChecker +from functools import cached_property +from datetime import datetime, timedelta from dateutil.tz import tzlocal import boto3 -client = boto3.client("dynamodb") -backup_client = boto3.client("backup") -autoscaling_client = boto3.client("application-autoscaling") +class DynamoDBRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("dynamodb") + self.backup_client = boto3.client("backup") + self.autoscaling_client = boto3.client("application-autoscaling") - -def dynamodb_autoscaling_enabled(): - compliant_resources = 
[] - non_compliant_resources = [] - table_names = client.list_tables()["TableNames"] - - for table_name in table_names: - table = client.describe_table(TableName=table_name)["Table"] - - if table.get("BillingModeSummary", {}).get("BillingMode") == "PAY_PER_REQUEST": - compliant_resources.append(table["TableArn"]) - continue - - scaling_policies = autoscaling_client.describe_scaling_policies( - ServiceNamespace="dynamodb", ResourceId=f"table/{table_name}" - )["ScalingPolicies"] - scaling_policy_dimensions = [i["ScalableDimension"] for i in scaling_policies] - if ( - "dynamodb:table:ReadCapacityUnits" in scaling_policy_dimensions - and "dynamodb:table:WriteCapacityUnits" in scaling_policy_dimensions - ): - compliant_resources.append(table["TableArn"]) - else: - non_compliant_resources.append(table["TableArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def dynamodb_last_backup_recovery_point_created(): - compliant_resources = [] - non_compliant_resources = [] - table_names = client.list_tables()["TableNames"] - - for table_name in table_names: - table = client.describe_table(TableName=table_name)["Table"] - recovery_points = backup_client.list_recovery_points_by_resource(ResourceArn=table["TableArn"])[ - "RecoveryPoints" + @cached_property + def tables(self): + table_names = self.client.list_tables()["TableNames"] + return [ + self.client.describe_table(TableName=table_name)["Table"] + for table_name in table_names ] - recovery_point_creation_dates = sorted([i["CreationDate"] for i in recovery_points]) - if len(recovery_point_creation_dates) == 0: - non_compliant_resources.append(table["TableArn"]) - continue + def dynamodb_autoscaling_enabled(self): + compliant_resources = [] + non_compliant_resources = [] - if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[-1] < datetime.timedelta(days=1): - compliant_resources.append(table["TableArn"]) - else: - non_compliant_resources.append(table["TableArn"]) + for table in self.tables: + if ( + table.get("BillingModeSummary", {}).get("BillingMode") + == "PAY_PER_REQUEST" + ): + compliant_resources.append(table["TableArn"]) + continue - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + scaling_policies = self.autoscaling_client.describe_scaling_policies( + ServiceNamespace="dynamodb", ResourceId=f"table/{table['TableName']}" + )["ScalingPolicies"] + scaling_policy_dimensions = [ + policy["ScalableDimension"] for policy in scaling_policies + ] + + if ( + "dynamodb:table:ReadCapacityUnits" in scaling_policy_dimensions + and "dynamodb:table:WriteCapacityUnits" in scaling_policy_dimensions + ): + compliant_resources.append(table["TableArn"]) + else: + non_compliant_resources.append(table["TableArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def dynamodb_last_backup_recovery_point_created(self): + compliant_resources = [] + non_compliant_resources = [] + + for table in self.tables: + recovery_points = self.backup_client.list_recovery_points_by_resource( + ResourceArn=table["TableArn"] + )["RecoveryPoints"] + if not recovery_points: + non_compliant_resources.append(table["TableArn"]) + continue + + latest_recovery_point = sorted( + [recovery_point["CreationDate"] for 
recovery_point in recovery_points] + )[-1] + + if datetime.now(tz=tzlocal()) - latest_recovery_point > timedelta(days=1): + non_compliant_resources.append(table["TableArn"]) + else: + compliant_resources.append(table["TableArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def dynamodb_pitr_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + for table in self.tables: + backup = self.client.describe_continuous_backups( + TableName=table["TableName"] + )["ContinuousBackupsDescription"] + + if ( + backup["PointInTimeRecoveryDescription"]["PointInTimeRecoveryStatus"] + == "ENABLED" + ): + compliant_resources.append(table["TableArn"]) + else: + non_compliant_resources.append(table["TableArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def dynamodb_table_deletion_protection_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + for table in self.tables: + if table["DeletionProtectionEnabled"] == True: + compliant_resources.append(table["TableArn"]) + else: + non_compliant_resources.append(table["TableArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def dynamodb_table_encrypted_kms(self): + compliant_resources = [] + non_compliant_resources = [] + + for table in self.tables: + if ( + "SSEDescription" in table + and table["SSEDescription"]["Status"] == "ENABLED" + and table["SSEDescription"]["SSEType"] == "KMS" + ): + compliant_resources.append(table["TableArn"]) + else: + non_compliant_resources.append(table["TableArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def dynamodb_table_encryption_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + for table in self.tables: + if ( + "SSEDescription" in table + and table["SSEDescription"]["Status"] == "ENABLED" + ): + compliant_resources.append(table["TableArn"]) + else: + non_compliant_resources.append(table["TableArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) -def dynamodb_pitr_enabled(): - compliant_resources = [] - non_compliant_resources = [] - table_names = client.list_tables()["TableNames"] - - for table_name in table_names: - backup = client.describe_continuous_backups(TableName=table_name)["ContinuousBackupsDescription"] - table = client.describe_table(TableName=table_name)["Table"] - - if backup["PointInTimeRecoveryDescription"]["PointInTimeRecoveryStatus"] == "ENABLED": - compliant_resources.append(table["TableArn"]) - else: - non_compliant_resources.append(table["TableArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def dynamodb_table_deletion_protection_enabled(): - compliant_resources = [] - non_compliant_resources = [] - table_names = client.list_tables()["TableNames"] - - for table_name in table_names: - table = client.describe_table(TableName=table_name)["Table"] - - if table["DeletionProtectionEnabled"] == True: - 
compliant_resources.append(table["TableArn"]) - else: - non_compliant_resources.append(table["TableArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def dynamodb_table_encrypted_kms(): - compliant_resources = [] - non_compliant_resources = [] - table_names = client.list_tables()["TableNames"] - - for table_name in table_names: - table = client.describe_table(TableName=table_name)["Table"] - - if ( - "SSEDescription" in table - and table["SSEDescription"]["Status"] == "ENABLED" - and table["SSEDescription"]["SSEType"] == "KMS" - ): - compliant_resources.append(table["TableArn"]) - else: - non_compliant_resources.append(table["TableArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def dynamodb_table_encryption_enabled(): - compliant_resources = [] - non_compliant_resources = [] - table_names = client.list_tables()["TableNames"] - - for table_name in table_names: - table = client.describe_table(TableName=table_name)["Table"] - - if "SSEDescription" in table and table["SSEDescription"]["Status"] == "ENABLED": - compliant_resources.append(table["TableArn"]) - else: - non_compliant_resources.append(table["TableArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = DynamoDBRuleChecker diff --git a/services/ec2.py b/services/ec2.py index 143135d..d7cb6e2 100644 --- a/services/ec2.py +++ b/services/ec2.py @@ -1,192 +1,158 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -client = boto3.client("ec2") -autoscaling_client = boto3.client("autoscaling") -ssm_client = boto3.client("ssm") +class EC2RuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("ec2") + self.ssm_client = boto3.client("ssm") + @cached_property + def instances(self): + valid_instances = [ + instance + for reservation in self.client.describe_instances()["Reservations"] + for instance in reservation["Instances"] + if instance["State"]["Name"] != "terminated" + ] + return valid_instances -def autoscaling_launch_template(): - compliant_resources = [] - non_compliant_resources = [] - asgs = autoscaling_client.describe_auto_scaling_groups()["AutoScalingGroups"] + def ec2_ebs_encryption_by_default(self): + compliant_resources = [] + non_compliant_resources = [] - for asg in asgs: - if "LaunchConfigurationName" in asg: - non_compliant_resources.append(asg["AutoScalingGroupARN"]) - else: - compliant_resources.append(asg["AutoScalingGroupARN"]) + volumes = self.client.describe_volumes()["Volumes"] + for volume in volumes: + if volume["Encrypted"]: + compliant_resources.append(volume["VolumeId"]) + else: + non_compliant_resources.append(volume["VolumeId"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ec2_imdsv2_check(self): + compliant_resources = [] + non_compliant_resources = [] -def ec2_ebs_encryption_by_default(): - compliant_resources = [] - non_compliant_resources = [] - 
ebses = client.describe_volumes()["Volumes"] - - for ebs in ebses: - if ebs["Encrypted"] == True: - compliant_resources.append(ebs["VolumeId"]) - else: - non_compliant_resources.append(ebs["VolumeId"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def ec2_imdsv2_check(): - compliant_resources = [] - non_compliant_resources = [] - reservations = client.describe_instances()["Reservations"] - - for reservation in reservations: - for instance in reservation["Instances"]: - if instance["State"]["Name"] == "terminated": - continue + for instance in self.instances: if instance["MetadataOptions"]["HttpTokens"] == "required": compliant_resources.append(instance["InstanceId"]) else: non_compliant_resources.append(instance["InstanceId"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ec2_instance_detailed_monitoring_enabled(self): + compliant_resources = [] + non_compliant_resources = [] -def ec2_instance_detailed_monitoring_enabled(): - compliant_resources = [] - non_compliant_resources = [] - reservations = client.describe_instances()["Reservations"] - - for reservation in reservations: - for instance in reservation["Instances"]: - if instance["State"]["Name"] == "terminated": - continue + for instance in self.instances: if instance["Monitoring"]["State"] == "enabled": compliant_resources.append(instance["InstanceId"]) else: non_compliant_resources.append(instance["InstanceId"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ec2_instance_managed_by_systems_manager(self): + compliant_resources = [] + non_compliant_resources = [] -def ec2_instance_managed_by_systems_manager(): - compliant_resources = [] - non_compliant_resources = [] - reservations = client.describe_instances()["Reservations"] - informations = ssm_client.describe_instance_information()["InstanceInformationList"] - managed_instance_ids = [i["InstanceId"] for i in informations if i["PingStatus"]] + informations = self.ssm_client.describe_instance_information()[ + "InstanceInformationList" + ] + managed_instance_ids = [ + info["InstanceId"] for info in informations if info["PingStatus"] + ] - for reservation in reservations: - for instance in reservation["Instances"]: - if instance["State"]["Name"] == "terminated": - continue + for instance in self.instances: if instance["InstanceId"] in managed_instance_ids: compliant_resources.append(instance["InstanceId"]) else: non_compliant_resources.append(instance["InstanceId"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ec2_instance_profile_attached(self): + compliant_resources = [] + non_compliant_resources = [] -def 
ec2_instance_profile_attached(): - compliant_resources = [] - non_compliant_resources = [] - reservations = client.describe_instances()["Reservations"] - - for reservation in reservations: - for instance in reservation["Instances"]: - if instance["State"]["Name"] == "terminated": - continue + for instance in self.instances: if "IamInstanceProfile" in instance: compliant_resources.append(instance["InstanceId"]) else: non_compliant_resources.append(instance["InstanceId"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ec2_no_amazon_key_pair(self): + compliant_resources = [] + non_compliant_resources = [] -def ec2_no_amazon_key_pair(): - compliant_resources = [] - non_compliant_resources = [] - reservations = client.describe_instances()["Reservations"] - - for reservation in reservations: - for instance in reservation["Instances"]: - if instance["State"]["Name"] == "terminated": - continue + for instance in self.instances: if "KeyName" in instance: non_compliant_resources.append(instance["InstanceId"]) else: compliant_resources.append(instance["InstanceId"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ec2_stopped_instance(self): + compliant_resources = [] + non_compliant_resources = [] -def ec2_stopped_instance(): - compliant_resources = [] - non_compliant_resources = [] - reservations = client.describe_instances()["Reservations"] - - for reservation in reservations: - for instance in reservation["Instances"]: - if instance["State"]["Name"] == "terminated": - continue + for instance in self.instances: if instance["State"]["Name"] != "stopped": compliant_resources.append(instance["InstanceId"]) else: non_compliant_resources.append(instance["InstanceId"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ec2_token_hop_limit_check(self): + compliant_resources = [] + non_compliant_resources = [] -def ec2_token_hop_limit_check(): - compliant_resources = [] - non_compliant_resources = [] - reservations = client.describe_instances()["Reservations"] - - for reservation in reservations: - for instance in reservation["Instances"]: - if instance["State"]["Name"] == "terminated": - continue + for instance in self.instances: if instance["MetadataOptions"]["HttpPutResponseHopLimit"] < 2: compliant_resources.append(instance["InstanceId"]) else: non_compliant_resources.append(instance["InstanceId"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + +rule_checker = EC2RuleChecker diff --git 
a/services/ecr.py b/services/ecr.py index 87c1c8e..3b32b87 100644 --- a/services/ecr.py +++ b/services/ecr.py @@ -1,85 +1,86 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -import botocore -client = boto3.client("ecr") +class ECRRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("ecr") + @cached_property + def repositories(self): + return self.client.describe_repositories()["repositories"] -def ecr_private_image_scanning_enabled(): - repositories = client.describe_repositories() - compliant_resource = [] - non_compliant_resources = [] + def ecr_private_image_scanning_enabled(self): + compliant_resource = [] + non_compliant_resources = [] - for repository in repositories["repositories"]: - if repository["imageScanningConfiguration"]["scanOnPush"] == True: - compliant_resource.append(repository["repositoryArn"]) - else: - non_compliant_resources.append(repository["repositoryArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def ecr_private_lifecycle_policy_configured(): - repositories = client.describe_repositories() - compliant_resource = [] - non_compliant_resources = [] - - for repository in repositories["repositories"]: - try: - response = client.get_lifecycle_policy( - registryId=repository["registryId"], - repositoryName=repository["repositoryName"], - ) - compliant_resource.append(repository["repositoryArn"]) - except Exception as e: - if e.__class__.__name__ == "LifecyclePolicyNotFoundException": - non_compliant_resources.append(repository["repositoryArn"]) + for repository in self.repositories: + if repository["imageScanningConfiguration"]["scanOnPush"] == True: + compliant_resource.append(repository["repositoryArn"]) else: - raise e + non_compliant_resources.append(repository["repositoryArn"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def ecr_private_lifecycle_policy_configured(self): + compliant_resource = [] + non_compliant_resources = [] + + for repository in self.repositories: + try: + response = self.client.get_lifecycle_policy( + registryId=repository["registryId"], + repositoryName=repository["repositoryName"], + ) + compliant_resource.append(repository["repositoryArn"]) + except Exception as e: + if e.__class__.__name__ == "LifecyclePolicyNotFoundException": + non_compliant_resources.append(repository["repositoryArn"]) + else: + raise e + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def ecr_private_tag_immutability_enabled(self): + compliant_resource = [] + non_compliant_resources = [] + + for repository in self.repositories: + if repository["imageTagMutability"] == "IMMUTABLE": + compliant_resource.append(repository["repositoryArn"]) + else: + non_compliant_resources.append(repository["repositoryArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def ecr_kms_encryption_1(self): + compliant_resource = [] + 
non_compliant_resources = [] + + for repository in self.repositories: + if repository["encryptionConfiguration"]["encryptionType"] == "KMS": + compliant_resource.append(repository["repositoryArn"]) + else: + non_compliant_resources.append(repository["repositoryArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) -def ecr_private_tag_immutability_enabled(): - repositories = client.describe_repositories() - compliant_resource = [] - non_compliant_resources = [] - - for repository in repositories["repositories"]: - if repository["imageTagMutability"] == "IMMUTABLE": - compliant_resource.append(repository["repositoryArn"]) - else: - non_compliant_resources.append(repository["repositoryArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def ecr_kms_encryption_1(): - repositories = client.describe_repositories() - compliant_resource = [] - non_compliant_resources = [] - - for repository in repositories["repositories"]: - if repository["encryptionConfiguration"]["encryptionType"] == "KMS": - compliant_resource.append(repository["repositoryArn"]) - else: - non_compliant_resources.append(repository["repositoryArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = ECRRuleChecker diff --git a/services/ecs.py b/services/ecs.py index 0345a08..2a48893 100644 --- a/services/ecs.py +++ b/services/ecs.py @@ -1,219 +1,222 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -client = boto3.client("ecs") +class ECSRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("ecs") + @cached_property + def task_definitions(self): + task_definition_arns = self.client.list_task_definitions(status="ACTIVE")[ + "taskDefinitionArns" + ] + latest_task_definitions = {} -def ecs_awsvpc_networking_enabled(): - compliant_resources = [] - non_compliant_resources = [] - task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"] - latest_task_definitions = {} + # Filter latest task definition arns + for task_definition_arn in task_definition_arns: + family, revision = task_definition_arn.rsplit(":", 1) + latest_task_definitions[family] = max( + latest_task_definitions.get(family, 0), int(revision) + ) - for task_definition in task_definitions: - family, revision = task_definition.rsplit(":", 1) - latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision)) + # Fetch latest task definition details + task_definitions = [ + self.client.describe_task_definition(taskDefinition=f"{family}:{revision}")[ + "taskDefinition" + ] + for family, revision in latest_task_definitions.items() + ] - for family, revision in latest_task_definitions.items(): - task_definition_arn = f"{family}:{revision}" - task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"] + return task_definitions - if task_definition.get("networkMode") == "awsvpc": - compliant_resources.append(task_definition["taskDefinitionArn"]) - else: - non_compliant_resources.append(task_definition["taskDefinitionArn"]) + @cached_property + def clusters(self): + return 
self.client.describe_clusters(include=["SETTINGS"])["clusters"] - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + @cached_property + def services(self): + services = [] + for cluster in self.clusters: + service_arns = self.client.list_services( + cluster=cluster["clusterArn"], launchType="FARGATE" + )["serviceArns"] + services += self.client.describe_services( + cluster=cluster["clusterArn"], services=service_arns + )["services"] + return services + def ecs_awsvpc_networking_enabled(self): + compliant_resources = [] + non_compliant_resources = [] -def ecs_containers_nonprivileged(): - compliant_resources = [] - non_compliant_resources = [] - task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"] - latest_task_definitions = {} - - for task_definition in task_definitions: - family, revision = task_definition.rsplit(":", 1) - latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision)) - - for family, revision in latest_task_definitions.items(): - task_definition_arn = f"{family}:{revision}" - task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"] - containers = task_definition["containerDefinitions"] - - for container in containers: - if container.get("privileged"): + for task_definition in self.task_definitions: + if task_definition.get("networkMode") == "awsvpc": + compliant_resources.append(task_definition["taskDefinitionArn"]) + else: non_compliant_resources.append(task_definition["taskDefinitionArn"]) - break - else: - compliant_resources.append(task_definition["taskDefinitionArn"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ecs_containers_nonprivileged(self): + compliant_resources = [] + non_compliant_resources = [] -def ecs_containers_readonly_access(): - compliant_resources = [] - non_compliant_resources = [] - task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"] - latest_task_definitions = {} + for task_definition in self.task_definitions: + containers = task_definition["containerDefinitions"] + privileged_containers = [ + container for container in containers if container.get("privileged") + ] - for task_definition in task_definitions: - family, revision = task_definition.rsplit(":", 1) - latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision)) - - for family, revision in latest_task_definitions.items(): - task_definition_arn = f"{family}:{revision}" - task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"] - containers = task_definition["containerDefinitions"] - - for container in containers: - if not container.get("readonlyRootFilesystem"): + if privileged_containers: non_compliant_resources.append(task_definition["taskDefinitionArn"]) - break - else: - compliant_resources.append(task_definition["taskDefinitionArn"]) + else: + compliant_resources.append(task_definition["taskDefinitionArn"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return 
RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ecs_containers_readonly_access(self): + compliant_resources = [] + non_compliant_resources = [] -def ecs_container_insights_enabled(): - compliant_resources = [] - non_compliant_resources = [] + for task_definition in self.task_definitions: + containers = task_definition["containerDefinitions"] + not_readonly_containers = [ + container + for container in containers + if not container.get("readonlyRootFilesystem") + ] - clusters = client.describe_clusters(include=["SETTINGS"])["clusters"] + if not_readonly_containers: + non_compliant_resources.append(task_definition["taskDefinitionArn"]) + else: + compliant_resources.append(task_definition["taskDefinitionArn"]) - for cluster in clusters: - container_insights_setting = [setting for setting in cluster["settings"] if setting["name"] == "containerInsights"] + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) - if container_insights_setting and container_insights_setting[0]["value"] == "enabled": - compliant_resources.append(cluster["clusterArn"]) - else: - non_compliant_resources.append(cluster["clusterArn"]) + def ecs_container_insights_enabled(self): + compliant_resources = [] + non_compliant_resources = [] - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + for cluster in self.clusters: + container_insights_setting = [ + setting + for setting in cluster["settings"] + if setting["name"] == "containerInsights" + ] + if ( + container_insights_setting + and container_insights_setting[0]["value"] == "enabled" + ): + compliant_resources.append(cluster["clusterArn"]) + else: + non_compliant_resources.append(cluster["clusterArn"]) -def ecs_fargate_latest_platform_version(): - compliant_resources = [] - non_compliant_resources = [] - cluster_arns = client.list_clusters()["clusterArns"] + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) - for cluster_arn in cluster_arns: - service_arns = client.list_services(cluster=cluster_arn, launchType="FARGATE")["serviceArns"] - services = client.describe_services(cluster=cluster_arn, services=service_arns)["services"] - - for service in services: + def ecs_fargate_latest_platform_version(self): + compliant_resources = [] + non_compliant_resources = [] + + for service in self.services: if service["platformVersion"] == "LATEST": compliant_resources.append(service["serviceArn"]) else: non_compliant_resources.append(service["serviceArn"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ecs_task_definition_log_configuration(self): + compliant_resources = [] + non_compliant_resources = [] -def ecs_task_definition_log_configuration(): - compliant_resources = [] - non_compliant_resources = [] - task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"] - latest_task_definitions = {} + for task_definition in self.task_definitions: + 
containers = task_definition["containerDefinitions"] - for task_definition in task_definitions: - family, revision = task_definition.rsplit(":", 1) - latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision)) + log_disabled_containers = [ + container + for container in containers + if "logConfiguration" not in container + ] - for family, revision in latest_task_definitions.items(): - task_definition_arn = f"{family}:{revision}" - task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"] - containers = task_definition["containerDefinitions"] - - for container in containers: - if "logConfiguration" not in container: + if log_disabled_containers: non_compliant_resources.append(task_definition["taskDefinitionArn"]) - break - else: - compliant_resources.append(task_definition["taskDefinitionArn"]) + else: + compliant_resources.append(task_definition["taskDefinitionArn"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ecs_task_definition_memory_hard_limit(self): + compliant_resources = [] + non_compliant_resources = [] -def ecs_task_definition_memory_hard_limit(): - compliant_resources = [] - non_compliant_resources = [] - task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"] - latest_task_definitions = {} + for task_definition in self.task_definitions: + containers = task_definition["containerDefinitions"] - for task_definition in task_definitions: - family, revision = task_definition.rsplit(":", 1) - latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision)) + containers_without_memory_limit = [ + container for container in containers if "memory" not in container + ] - for family, revision in latest_task_definitions.items(): - task_definition_arn = f"{family}:{revision}" - task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"] - containers = task_definition["containerDefinitions"] - - for container in containers: - if "memory" not in container: + if containers_without_memory_limit: non_compliant_resources.append(task_definition["taskDefinitionArn"]) - break - else: - compliant_resources.append(task_definition["taskDefinitionArn"]) + else: + compliant_resources.append(task_definition["taskDefinitionArn"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def ecs_task_definition_nonroot_user(self): + compliant_resources = [] + non_compliant_resources = [] -def ecs_task_definition_nonroot_user(): - compliant_resources = [] - non_compliant_resources = [] - task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"] - latest_task_definitions = {} + for task_definition in self.task_definitions: + containers = task_definition["containerDefinitions"] - for task_definition in task_definitions: - family, revision = task_definition.rsplit(":", 1) - latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), 
int(revision)) + privileged_containers = [ + container + for container in containers + if container.get("user") in [None, "root"] + ] - for family, revision in latest_task_definitions.items(): - task_definition_arn = f"{family}:{revision}" - task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"] - containers = task_definition["containerDefinitions"] - - for container in containers: - if container.get("user") in [None, "root"]: + if privileged_containers: non_compliant_resources.append(task_definition["taskDefinitionArn"]) - break - else: - compliant_resources.append(task_definition["taskDefinitionArn"]) + else: + compliant_resources.append(task_definition["taskDefinitionArn"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + +rule_checker = ECSRuleChecker diff --git a/services/efs.py b/services/efs.py index 0b05116..40793e1 100644 --- a/services/efs.py +++ b/services/efs.py @@ -1,118 +1,124 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -client = boto3.client("efs") -ec2_client = boto3.client("ec2") +class EFSRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("efs") + self.ec2_client = boto3.client("ec2") + @cached_property + def access_points(self): + return self.client.describe_access_points()["AccessPoints"] -def efs_access_point_enforce_root_directory(): - access_points = client.describe_access_points()["AccessPoints"] - compliant_resource = [] - non_compliant_resources = [] + @cached_property + def file_systems(self): + return self.client.describe_file_systems()["FileSystems"] - for access_point in access_points: - if access_point["RootDirectory"]["Path"] != "/": - compliant_resource.append(access_point["AccessPointArn"]) - else: - non_compliant_resources.append(access_point["AccessPointArn"]) + def efs_access_point_enforce_root_directory(self): + compliant_resource = [] + non_compliant_resources = [] - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def efs_access_point_enforce_user_identity(): - access_points = client.describe_access_points()["AccessPoints"] - compliant_resource = [] - non_compliant_resources = [] - - for access_point in access_points: - if "PosixUser" in access_point: - compliant_resource.append(access_point["AccessPointArn"]) - else: - non_compliant_resources.append(access_point["AccessPointArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def efs_automatic_backups_enabled(): - file_systems = client.describe_file_systems()["FileSystems"] - compliant_resource = [] - non_compliant_resources = [] - - for file_system in file_systems: - response = client.describe_backup_policy( - FileSystemId=file_system["FileSystemId"] - ) - if response["BackupPolicy"]["Status"] == "ENABLED": - compliant_resource.append(file_system["FileSystemArn"]) - else: - non_compliant_resources.append(file_system["FileSystemArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - 
compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def efs_encrypted_check(): - file_systems = client.describe_file_systems()["FileSystems"] - compliant_resource = [] - non_compliant_resources = [] - - for file_system in file_systems: - if file_system["Encrypted"] == True: - compliant_resource.append(file_system["FileSystemArn"]) - else: - non_compliant_resources.append(file_system["FileSystemArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def efs_mount_target_public_accessible(): - file_systems = client.describe_file_systems()["FileSystems"] - compliant_resource = [] - non_compliant_resources = [] - - for file_system in file_systems: - mount_targets = client.describe_mount_targets( - FileSystemId=file_system["FileSystemId"] - )["MountTargets"] - for mount_target in mount_targets: - subnet_id = mount_target["SubnetId"] - routes = ec2_client.describe_route_tables( - Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}] - )["RouteTables"][0]["Routes"] - - for route in routes: - if ( - "DestinationCidrBlock" in route - and route["DestinationCidrBlock"] == "0.0.0.0/0" - and "GatewayId" in route - and route["GatewayId"].startswith("igw-") - ): - non_compliant_resources.append(file_system["FileSystemArn"]) - break + for access_point in self.access_points: + if access_point["RootDirectory"]["Path"] != "/": + compliant_resource.append(access_point["AccessPointArn"]) else: + non_compliant_resources.append(access_point["AccessPointArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def efs_access_point_enforce_user_identity(self): + compliant_resource = [] + non_compliant_resources = [] + + for access_point in self.access_points: + if "PosixUser" in access_point: + compliant_resource.append(access_point["AccessPointArn"]) + else: + non_compliant_resources.append(access_point["AccessPointArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def efs_automatic_backups_enabled(self): + compliant_resource = [] + non_compliant_resources = [] + + for file_system in self.file_systems: + response = self.client.describe_backup_policy( + FileSystemId=file_system["FileSystemId"] + ) + + if response["BackupPolicy"]["Status"] == "ENABLED": compliant_resource.append(file_system["FileSystemArn"]) + else: + non_compliant_resources.append(file_system["FileSystemArn"]) - compliant_resource = list(set(compliant_resource)) - non_compliant_resources = list(set(non_compliant_resources)) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) + def efs_encrypted_check(self): + compliant_resource = [] + non_compliant_resources = [] + + for file_system in self.file_systems: + if file_system["Encrypted"]: + compliant_resource.append(file_system["FileSystemArn"]) + else: + non_compliant_resources.append(file_system["FileSystemArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + 
non_compliant_resources=non_compliant_resources,
+        )
+
+    def efs_mount_target_public_accessible(self):
+        compliant_resource = []
+        non_compliant_resources = []
+
+        for file_system in self.file_systems:
+            # assume compliant; file systems with a public route are removed via the set difference below
+            compliant_resource.append(file_system["FileSystemArn"])
+            mount_targets = self.client.describe_mount_targets(
+                FileSystemId=file_system["FileSystemId"]
+            )["MountTargets"]
+
+            for mount_target in mount_targets:
+                subnet_id = mount_target["SubnetId"]
+                routes = self.ec2_client.describe_route_tables(
+                    Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}]
+                )["RouteTables"][0]["Routes"]
+
+                for route in routes:
+                    if (
+                        "DestinationCidrBlock" in route
+                        and route["DestinationCidrBlock"] == "0.0.0.0/0"
+                        and "GatewayId" in route
+                        and route["GatewayId"].startswith("igw-")
+                    ):
+                        non_compliant_resources.append(file_system["FileSystemArn"])
+                        break
+
+        non_compliant_resources = list(set(non_compliant_resources))
+        compliant_resource = list(
+            set(compliant_resource) - set(non_compliant_resources)
+        )
+
+        return RuleCheckResult(
+            passed=not non_compliant_resources,
+            compliant_resources=compliant_resource,
+            non_compliant_resources=non_compliant_resources,
+        )
+
+
+rule_checker = EFSRuleChecker
diff --git a/services/eks.py b/services/eks.py
index 0fd18d3..527da2f 100644
--- a/services/eks.py
+++ b/services/eks.py
@@ -1,68 +1,73 @@
-from models import RuleCheckResult
+from models import RuleCheckResult, RuleChecker
+from functools import cached_property
 import boto3
-client = boto3.client("eks")
+class EKSRuleChecker(RuleChecker):
+    def __init__(self):
+        self.client = boto3.client("eks")
+
+    @cached_property
+    def clusters(self):
+        cluster_names = self.client.list_clusters()["clusters"]
+        return [
+            self.client.describe_cluster(name=cluster_name)["cluster"]
+            for cluster_name in cluster_names
+        ]
+
+    def eks_cluster_logging_enabled(self):
+        compliant_resource = []
+        non_compliant_resources = []
+
+        for cluster in self.clusters:
+            if (
+                cluster["logging"]["clusterLogging"][0]["enabled"]
+                and len(cluster["logging"]["clusterLogging"][0]["types"]) == 5
+            ):
+                compliant_resource.append(cluster["arn"])
+            else:
+                non_compliant_resources.append(cluster["arn"])
+
+        return RuleCheckResult(
+            passed=not non_compliant_resources,
+            compliant_resources=compliant_resource,
+            non_compliant_resources=non_compliant_resources,
+        )
+
+    def eks_cluster_secrets_encrypted(self):
+        compliant_resource = []
+        non_compliant_resources = []
+
+        for cluster in self.clusters:
+            if (
+                "encryptionConfig" in cluster
+                and "secrets" in cluster["encryptionConfig"][0]["resources"]
+            ):
+                compliant_resource.append(cluster["arn"])
+            else:
+                non_compliant_resources.append(cluster["arn"])
+
+        return RuleCheckResult(
+            passed=not non_compliant_resources,
+            compliant_resources=compliant_resource,
+            non_compliant_resources=non_compliant_resources,
+        )
+
+    def eks_endpoint_no_public_access(self):
+        compliant_resource = []
+        non_compliant_resources = []
+
+        for cluster in self.clusters:
+            if cluster["resourcesVpcConfig"]["endpointPublicAccess"]:
+                non_compliant_resources.append(cluster["arn"])
+            else:
+                compliant_resource.append(cluster["arn"])
+
+        return RuleCheckResult(
+            passed=not non_compliant_resources,
+            compliant_resources=compliant_resource,
+            non_compliant_resources=non_compliant_resources,
+        )
-def eks_cluster_logging_enabled():
-    clusters = client.list_clusters()["clusters"]
-    compliant_resource = []
-    non_compliant_resources = []
-
-    for cluster in clusters:
-        response = client.describe_cluster(name=cluster)["cluster"]
-        if (
-
len(response["logging"]["clusterLogging"][0]["types"]) == 5 - and response["logging"]["clusterLogging"][0]["enabled"] == True - ): - compliant_resource.append(response["arn"]) - else: - non_compliant_resources.append(response["arn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def eks_cluster_secrets_encrypted(): - clusters = client.list_clusters()["clusters"] - compliant_resource = [] - non_compliant_resources = [] - - for cluster in clusters: - response = client.describe_cluster(name=cluster)["cluster"] - if ( - "encryptionConfig" in response - and "secrets" in response["encryptionConfig"][0]["resources"] - ): - compliant_resource.append(response["arn"]) - else: - non_compliant_resources.append(response["arn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def eks_endpoint_no_public_access(): - clusters = client.list_clusters()["clusters"] - compliant_resource = [] - non_compliant_resources = [] - - for cluster in clusters: - response = client.describe_cluster(name=cluster)["cluster"] - if response["resourcesVpcConfig"]["endpointPublicAccess"] == False: - compliant_resource.append(response["arn"]) - else: - non_compliant_resources.append(response["arn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = EKSRuleChecker diff --git a/services/elasticache.py b/services/elasticache.py index 35baec7..d88f4a7 100644 --- a/services/elasticache.py +++ b/services/elasticache.py @@ -1,113 +1,115 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -client = boto3.client("elasticache") +class ElastiCacheRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("elasticache") + + @cached_property + def clusters(self): + return self.client.describe_cache_clusters()["CacheClusters"] + + @cached_property + def replication_groups(self): + return self.client.describe_replication_groups()["ReplicationGroups"] + + def elasticache_auto_minor_version_upgrade_check(self): + compliant_resource = [] + non_compliant_resources = [] + + for cluster in self.clusters: + if cluster["AutoMinorVersionUpgrade"]: + compliant_resource.append(cluster["ARN"]) + else: + non_compliant_resources.append(cluster["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def elasticache_redis_cluster_automatic_backup_check(self): + compliant_resource = [] + non_compliant_resources = [] + + for replication_group in self.replication_groups: + if "SnapshottingClusterId" in replication_group: + compliant_resource.append(replication_group["ARN"]) + else: + non_compliant_resources.append(replication_group["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def elasticache_repl_grp_auto_failover_enabled(self): + compliant_resource = [] + non_compliant_resources = [] + + for replication_group in self.replication_groups: + if replication_group["AutomaticFailover"] == "enabled": + compliant_resource.append(replication_group["ARN"]) + 
else: + non_compliant_resources.append(replication_group["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def elasticache_repl_grp_encrypted_at_rest(self): + compliant_resource = [] + non_compliant_resources = [] + + for replication_group in self.replication_groups: + if replication_group["AtRestEncryptionEnabled"] == True: + compliant_resource.append(replication_group["ARN"]) + else: + non_compliant_resources.append(replication_group["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def elasticache_repl_grp_encrypted_in_transit(self): + compliant_resource = [] + non_compliant_resources = [] + + for replication_group in self.replication_groups: + if replication_group["TransitEncryptionEnabled"] == True: + compliant_resource.append(replication_group["ARN"]) + else: + non_compliant_resources.append(replication_group["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def elasticache_subnet_group_check(self): + compliant_resource = [] + non_compliant_resources = [] + + for cluster in self.clusters: + if cluster["CacheSubnetGroupName"] != "default": + compliant_resource.append(cluster["ARN"]) + else: + non_compliant_resources.append(cluster["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) -def elasticache_auto_minor_version_upgrade_check(): - clusters = client.describe_cache_clusters()["CacheClusters"] - compliant_resource = [] - non_compliant_resources = [] - - for cluster in clusters: - if cluster["AutoMinorVersionUpgrade"] == True: - compliant_resource.append(cluster["ARN"]) - else: - non_compliant_resources.append(cluster["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def elasticache_redis_cluster_automatic_backup_check(): - replication_groups = client.describe_replication_groups()["ReplicationGroups"] - compliant_resource = [] - non_compliant_resources = [] - - for replication_group in replication_groups: - if "SnapshottingClusterId" in replication_group: - compliant_resource.append(replication_group["ARN"]) - else: - non_compliant_resources.append(replication_group["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def elasticache_repl_grp_auto_failover_enabled(): - replication_groups = client.describe_replication_groups()["ReplicationGroups"] - compliant_resource = [] - non_compliant_resources = [] - - for replication_group in replication_groups: - if replication_group["AutomaticFailover"] == "enabled": - compliant_resource.append(replication_group["ARN"]) - else: - non_compliant_resources.append(replication_group["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def elasticache_repl_grp_encrypted_at_rest(): - replication_groups = client.describe_replication_groups()["ReplicationGroups"] - compliant_resource = [] - 
non_compliant_resources = [] - - for replication_group in replication_groups: - if replication_group["AtRestEncryptionEnabled"] == True: - compliant_resource.append(replication_group["ARN"]) - else: - non_compliant_resources.append(replication_group["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def elasticache_repl_grp_encrypted_in_transit(): - replication_groups = client.describe_replication_groups()["ReplicationGroups"] - compliant_resource = [] - non_compliant_resources = [] - - for replication_group in replication_groups: - if replication_group["TransitEncryptionEnabled"] == True: - compliant_resource.append(replication_group["ARN"]) - else: - non_compliant_resources.append(replication_group["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def elasticache_subnet_group_check(): - clusters = client.describe_cache_clusters()["CacheClusters"] - compliant_resource = [] - non_compliant_resources = [] - - for cluster in clusters: - if cluster["CacheSubnetGroupName"] != "default": - compliant_resource.append(cluster["ARN"]) - else: - non_compliant_resources.append(cluster["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = ElastiCacheRuleChecker diff --git a/services/iam.py b/services/iam.py index d49b7af..47d0ce9 100644 --- a/services/iam.py +++ b/services/iam.py @@ -1,83 +1,104 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -client = boto3.client("iam") +class IAMRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("iam") + @cached_property + def policies(self): + return self.client.list_policies(Scope="Local")["Policies"] -def iam_policy_no_statements_with_admin_access(): - compliant_resource = [] - non_compliant_resources = [] - policies = client.list_policies(Scope="Local")["Policies"] - - for policy in policies: - policy_version = client.get_policy_version(PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"])[ - "PolicyVersion" + @cached_property + def policy_default_versions(self): + responses = [ + self.client.get_policy_version( + PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"] + )["PolicyVersion"] + for policy in self.policies ] - for statement in policy_version["Document"]["Statement"]: + return { + policy["Arn"]: response + for policy, response in zip(self.policies, responses) + } + + def iam_policy_no_statements_with_admin_access(self): + compliant_resource = [] + non_compliant_resources = [] + + for policy in self.policies: + policy_version = self.policy_default_versions[policy["Arn"]] + + for statement in policy_version["Document"]["Statement"]: + if ( + statement["Action"] == "*" + and statement["Resource"] == "*" + and statement["Effect"] == "Allow" + ): + non_compliant_resources.append(policy["Arn"]) + break + else: + compliant_resource.append(policy["Arn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def iam_policy_no_statements_with_full_access(self): + compliant_resource = [] + non_compliant_resources = [] + + for policy in 
self.policies: + policy_version = self.policy_default_versions[policy["Arn"]] + + for statement in policy_version["Document"]["Statement"]: + if statement["Effect"] == "Deny": + continue + + if type(statement["Action"]) == str: + statement["Action"] = [statement["Action"]] + + full_access_actions = [ + action for action in statement["Action"] if action.endswith(":*") + ] + if full_access_actions: + non_compliant_resources.append(policy["Arn"]) + break + else: + compliant_resource.append(policy["Arn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) + + def iam_role_managed_policy_check(self): + compliant_resource = [] + non_compliant_resources = [] + policy_arns = [] # 검사할 managed policy arn 목록 + + for policy in policy_arns: + response = self.client.list_entities_for_policy(PolicyArn=policy) if ( - statement["Action"] == "*" - and statement["Resource"] == "*" - and statement["Effect"] == "Allow" + response["PolicyGroups"] == [] + and response["PolicyUsers"] == [] + and response["PolicyRoles"] == [] ): - non_compliant_resources.append(policy["Arn"]) - break - else: - compliant_resource.append(policy["Arn"]) + non_compliant_resources.append(policy) + else: + compliant_resource.append(policy) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not compliant_resource, + compliant_resources=compliant_resource, + non_compliant_resources=non_compliant_resources, + ) -def iam_policy_no_statements_with_full_access(): - compliant_resource = [] - non_compliant_resources = [] - policies = client.list_policies(Scope="Local")["Policies"] - - for policy in policies: - policy_version = client.get_policy_version(PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"])[ - "PolicyVersion" - ] - - for statement in policy_version["Document"]["Statement"]: - if statement["Effect"] == "Deny": - continue - - if type(statement["Action"]) == str: - statement["Action"] = [statement["Action"]] - - full_access_actions = [action for action in statement["Action"] if action.endswith(":*")] - if full_access_actions: - non_compliant_resources.append(policy["Arn"]) - break - else: - compliant_resource.append(policy["Arn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) - - -def iam_role_managed_policy_check(): - compliant_resource = [] - non_compliant_resources = [] - policy_arns = [] # 검사할 managed policy arn 목록 - - for policy in policy_arns: - response = client.list_entities_for_policy(PolicyArn=policy) - if response["PolicyGroups"] == [] and response["PolicyUsers"] == [] and response["PolicyRoles"] == []: - non_compliant_resources.append(policy) - else: - compliant_resource.append(policy) - - return RuleCheckResult( - passed=not compliant_resource, - compliant_resources=compliant_resource, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = IAMRuleChecker diff --git a/services/kms.py b/services/kms.py index bd16a19..00d2bb2 100644 --- a/services/kms.py +++ b/services/kms.py @@ -1,25 +1,29 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker import boto3 -client = boto3.client("kms") +class KMSRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("kms") + + def 
cmk_backing_key_rotation_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + keys = self.client.list_keys()["Keys"] + + for key in keys: + response = self.client.get_key_rotation_status(KeyId=key["KeyId"]) + + if response["KeyRotationEnabled"] == True: + compliant_resources.append(response["KeyId"]) + else: + non_compliant_resources.append(response["KeyId"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) -def cmk_backing_key_rotation_enabled(): - compliant_resources = [] - non_compliant_resources = [] - keys = client.list_keys()["Keys"] - - for key in keys: - response = client.get_key_rotation_status(KeyId=key["KeyId"]) - - if response["KeyRotationEnabled"] == True: - compliant_resources.append(response["KeyId"]) - else: - non_compliant_resources.append(response["KeyId"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = KMSRuleChecker diff --git a/services/rds.py b/services/rds.py index f6af868..fe70cd5 100644 --- a/services/rds.py +++ b/services/rds.py @@ -1,278 +1,298 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import datetime from dateutil.tz import tzlocal import boto3 -client = boto3.client("rds") -backup_client = boto3.client("backup") -ec2_client = boto3.client("ec2") +class RDSRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("rds") + self.backup_client = boto3.client("backup") + self.ec2_client = boto3.client("ec2") -def aurora_last_backup_recovery_point_created(): - compliant_resources = [] - non_compliant_resources = [] - clusters = client.describe_db_clusters()["DBClusters"] + @cached_property + def db_clusters(self): + return self.client.describe_db_clusters()["DBClusters"] - for cluster in clusters: - recovery_points = backup_client.list_recovery_points_by_resource(ResourceArn=cluster["DBClusterArn"])[ - "RecoveryPoints" - ] - recovery_point_creation_dates = sorted([i["CreationDate"] for i in recovery_points]) - if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[-1] < datetime.timedelta(days=1): - compliant_resources.append(cluster["DBClusterArn"]) - else: - non_compliant_resources.append(cluster["DBClusterArn"]) + @cached_property + def db_instances(self): + return self.client.describe_db_instances()["DBInstances"] - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + def aurora_last_backup_recovery_point_created(self): + compliant_resources = [] + non_compliant_resources = [] - -def aurora_mysql_backtracking_enabled(): - compliant_resources = [] - non_compliant_resources = [] - clusters = client.describe_db_clusters()["DBClusters"] - - for cluster in clusters: - if cluster["Engine"] == "aurora-mysql" and cluster.get("EarliestBacktrackTime", None) == None: - non_compliant_resources.append(cluster["DBClusterArn"]) - else: - compliant_resources.append(cluster["DBClusterArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def db_instance_backup_enabled(): - compliant_resources = [] - non_compliant_resources = [] - clusters = 
client.describe_db_clusters()["DBClusters"] - - for cluster in clusters: - if "BackupRetentionPeriod" in cluster: - compliant_resources.append(cluster["DBClusterArn"]) - else: - non_compliant_resources.append(cluster["DBClusterArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def rds_cluster_auto_minor_version_upgrade_enable(): - compliant_resources = [] - non_compliant_resources = [] - clusters = client.describe_db_clusters()["DBClusters"] - - for cluster in clusters: - if cluster["Engine"] == "docdb" or cluster.get("AutoMinorVersionUpgrade"): - compliant_resources.append(cluster["DBClusterArn"]) - else: - non_compliant_resources.append(cluster["DBClusterArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def rds_cluster_default_admin_check(): - compliant_resources = [] - non_compliant_resources = [] - clusters = client.describe_db_clusters()["DBClusters"] - - for cluster in clusters: - if cluster["MasterUsername"] not in ["admin", "postgres"]: - compliant_resources.append(cluster["DBClusterArn"]) - else: - non_compliant_resources.append(cluster["DBClusterArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def rds_cluster_deletion_protection_enabled(): - compliant_resources = [] - non_compliant_resources = [] - clusters = client.describe_db_clusters()["DBClusters"] - - for cluster in clusters: - if cluster["DeletionProtection"]: - compliant_resources.append(cluster["DBClusterArn"]) - else: - non_compliant_resources.append(cluster["DBClusterArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def rds_cluster_encrypted_at_rest(): - compliant_resources = [] - non_compliant_resources = [] - clusters = client.describe_db_clusters()["DBClusters"] - - for cluster in clusters: - if cluster["StorageEncrypted"]: - compliant_resources.append(cluster["DBClusterArn"]) - else: - non_compliant_resources.append(cluster["DBClusterArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def rds_cluster_iam_authentication_enabled(): - compliant_resources = [] - non_compliant_resources = [] - clusters = client.describe_db_clusters()["DBClusters"] - - for cluster in clusters: - if cluster["Engine"] == "docdb" or cluster.get("IAMDatabaseAuthenticationEnabled"): - compliant_resources.append(cluster["DBClusterArn"]) - else: - non_compliant_resources.append(cluster["DBClusterArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def rds_cluster_multi_az_enabled(): - compliant_resources = [] - non_compliant_resources = [] - clusters = client.describe_db_clusters()["DBClusters"] - - for cluster in clusters: - if len(cluster.get("AvailabilityZones", [])) > 1: - compliant_resources.append(cluster["DBClusterArn"]) - else: - non_compliant_resources.append(cluster["DBClusterArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - 
non_compliant_resources=non_compliant_resources, - ) - - -def rds_db_security_group_not_allowed(): - compliant_resources = [] - non_compliant_resources = [] - clusters = client.describe_db_clusters()["DBClusters"] - - security_groups = ec2_client.describe_security_groups()["SecurityGroups"] - default_security_group_ids = [i["GroupId"] for i in security_groups if i["GroupName"] == "default"] - - for cluster in clusters: - db_security_groups = [i["VpcSecurityGroupId"] for i in cluster["VpcSecurityGroups"] if i["Status"] == "active"] - - for default_security_group_id in default_security_group_ids: - if default_security_group_id in db_security_groups: + clusters = self.db_clusters + for cluster in clusters: + recovery_points = self.backup_client.list_recovery_points_by_resource( + ResourceArn=cluster["DBClusterArn"] + )["RecoveryPoints"] + recovery_point_creation_dates = sorted( + [i["CreationDate"] for i in recovery_points] + ) + if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[ + -1 + ] < datetime.timedelta(days=1): + compliant_resources.append(cluster["DBClusterArn"]) + else: non_compliant_resources.append(cluster["DBClusterArn"]) - break - else: - compliant_resources.append(cluster["DBClusterArn"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def aurora_mysql_backtracking_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + clusters = self.db_clusters + for cluster in clusters: + if ( + cluster["Engine"] == "aurora-mysql" + and cluster.get("EarliestBacktrackTime", None) == None + ): + non_compliant_resources.append(cluster["DBClusterArn"]) + else: + compliant_resources.append(cluster["DBClusterArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def db_instance_backup_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + clusters = self.db_clusters + for cluster in clusters: + if "BackupRetentionPeriod" in cluster: + compliant_resources.append(cluster["DBClusterArn"]) + else: + non_compliant_resources.append(cluster["DBClusterArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def rds_cluster_auto_minor_version_upgrade_enable(self): + compliant_resources = [] + non_compliant_resources = [] + + clusters = self.db_clusters + for cluster in clusters: + if cluster["Engine"] == "docdb" or cluster.get("AutoMinorVersionUpgrade"): + compliant_resources.append(cluster["DBClusterArn"]) + else: + non_compliant_resources.append(cluster["DBClusterArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def rds_cluster_default_admin_check(self): + compliant_resources = [] + non_compliant_resources = [] + + clusters = self.db_clusters + for cluster in clusters: + if cluster["MasterUsername"] not in ["admin", "postgres"]: + compliant_resources.append(cluster["DBClusterArn"]) + else: + non_compliant_resources.append(cluster["DBClusterArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, 
+ compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def rds_cluster_deletion_protection_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + clusters = self.db_clusters + for cluster in clusters: + if cluster["DeletionProtection"]: + compliant_resources.append(cluster["DBClusterArn"]) + else: + non_compliant_resources.append(cluster["DBClusterArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def rds_cluster_encrypted_at_rest(self): + compliant_resources = [] + non_compliant_resources = [] + + clusters = self.db_clusters + for cluster in clusters: + if cluster["StorageEncrypted"]: + compliant_resources.append(cluster["DBClusterArn"]) + else: + non_compliant_resources.append(cluster["DBClusterArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def rds_cluster_iam_authentication_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + clusters = self.db_clusters + for cluster in clusters: + if cluster["Engine"] == "docdb" or cluster.get( + "IAMDatabaseAuthenticationEnabled" + ): + compliant_resources.append(cluster["DBClusterArn"]) + else: + non_compliant_resources.append(cluster["DBClusterArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def rds_cluster_multi_az_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + clusters = self.db_clusters + for cluster in clusters: + if len(cluster.get("AvailabilityZones", [])) > 1: + compliant_resources.append(cluster["DBClusterArn"]) + else: + non_compliant_resources.append(cluster["DBClusterArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def rds_db_security_group_not_allowed(self): + compliant_resources = [] + non_compliant_resources = [] + + clusters = self.db_clusters + security_groups = self.ec2_client.describe_security_groups()["SecurityGroups"] + default_security_group_ids = [ + i["GroupId"] for i in security_groups if i["GroupName"] == "default" + ] + + for cluster in clusters: + db_security_groups = [ + i["VpcSecurityGroupId"] + for i in cluster["VpcSecurityGroups"] + if i["Status"] == "active" + ] + + for default_security_group_id in default_security_group_ids: + if default_security_group_id in db_security_groups: + non_compliant_resources.append(cluster["DBClusterArn"]) + break + else: + compliant_resources.append(cluster["DBClusterArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def rds_enhanced_monitoring_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + instances = self.db_instances + for instance in instances: + if instance.get("MonitoringInterval", 0): + compliant_resources.append(instance["DBInstanceArn"]) + else: + non_compliant_resources.append(instance["DBInstanceArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def rds_instance_public_access_check(self): 
+ compliant_resources = [] + non_compliant_resources = [] + + instances = self.db_instances + for instance in instances: + if instance["PubliclyAccessible"]: + non_compliant_resources.append(instance["DBInstanceArn"]) + else: + compliant_resources.append(instance["DBInstanceArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def rds_logging_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + clusters = self.db_clusters + logs_for_engine = { + "aurora-mysql": ["audit", "error", "general", "slowquery"], + "aurora-postgresql": ["postgresql"], + "docdb": ["audit", "profiler"], + } + + for cluster in clusters: + if sorted(cluster["EnabledCloudwatchLogsExports"]) == logs_for_engine.get( + cluster["Engine"] + ): + compliant_resources.append(cluster["DBClusterArn"]) + else: + non_compliant_resources.append(cluster["DBClusterArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def rds_snapshot_encrypted(self): + compliant_resources = [] + non_compliant_resources = [] + + cluster_snapshots = self.client.describe_db_cluster_snapshots()[ + "DBClusterSnapshots" + ] + + for snapshot in cluster_snapshots: + if snapshot.get("StorageEncrypted") == True: + compliant_resources.append(snapshot["DBClusterSnapshotArn"]) + else: + non_compliant_resources.append(snapshot["DBClusterSnapshotArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) -def rds_enhanced_monitoring_enabled(): - compliant_resources = [] - non_compliant_resources = [] - instances = client.describe_db_instances()["DBInstances"] - - for instance in instances: - if instance.get("MonitoringInterval", 0): - compliant_resources.append(instance["DBInstanceArn"]) - else: - non_compliant_resources.append(instance["DBInstanceArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def rds_instance_public_access_check(): - compliant_resources = [] - non_compliant_resources = [] - instances = client.describe_db_instances()["DBInstances"] - - for instance in instances: - if instance["PubliclyAccessible"]: - non_compliant_resources.append(instance["DBInstanceArn"]) - else: - compliant_resources.append(instance["DBInstanceArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def rds_logging_enabled(): - compliant_resources = [] - non_compliant_resources = [] - clusters = client.describe_db_clusters()["DBClusters"] - - logs_for_engine = { - "aurora-mysql": ["audit", "error", "general", "slowquery"], - "aurora-postgresql": ["postgresql"], - "docdb": ["audit", "profiler"] - } - - for cluster in clusters: - if sorted(cluster["EnabledCloudwatchLogsExports"]) == logs_for_engine.get(cluster["Engine"]): - compliant_resources.append(cluster["DBClusterArn"]) - else: - non_compliant_resources.append(cluster["DBClusterArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def rds_snapshot_encrypted(): - compliant_resources = [] - 
non_compliant_resources = [] - - cluster_snapshots = client.describe_db_cluster_snapshots()["DBClusterSnapshots"] - - for snapshot in cluster_snapshots: - if snapshot.get("StorageEncrypted") == True: - compliant_resources.append(snapshot["DBClusterSnapshotArn"]) - else: - non_compliant_resources.append(snapshot["DBClusterSnapshotArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = RDSRuleChecker diff --git a/services/s3.py b/services/s3.py index e167f63..8ce8f23 100644 --- a/services/s3.py +++ b/services/s3.py @@ -1,211 +1,225 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 import botocore.exceptions -client = boto3.client("s3") -sts_client = boto3.client("sts") -s3control_client = boto3.client("s3control") -backup_client = boto3.client("backup") +class S3RuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("s3") + self.sts_client = boto3.client("sts") + self.s3control_client = boto3.client("s3control") + self.backup_client = boto3.client("backup") + @cached_property + def account_id(self): + return self.sts_client.get_caller_identity().get("Account") -def s3_access_point_in_vpc_only(): - compliant_resources = [] - non_compliant_resources = [] - account_id = sts_client.get_caller_identity().get("Account") - access_points = s3control_client.list_access_points(AccountId=account_id)["AccessPointList"] + @cached_property + def buckets(self): + return self.client.list_buckets()["Buckets"] - for access_point in access_points: - if access_point["NetworkOrigin"] == "VPC": - compliant_resources.append(access_point["AccessPointArn"]) - else: - non_compliant_resources.append(access_point["AccessPointArn"]) + def s3_access_point_in_vpc_only(self): + compliant_resources = [] + non_compliant_resources = [] - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def s3_bucket_default_lock_enabled(): - compliant_resources = [] - non_compliant_resources = [] - buckets = client.list_buckets()["Buckets"] - - for bucket in buckets: - try: - response = client.get_object_lock_configuration(Bucket=bucket["Name"]) - compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == "ObjectLockConfigurationNotFoundError": - non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + access_points = self.s3control_client.list_access_points( + AccountId=self.account_id + )["AccessPointList"] + for access_point in access_points: + if access_point["NetworkOrigin"] == "VPC": + compliant_resources.append(access_point["AccessPointArn"]) else: - raise e + non_compliant_resources.append(access_point["AccessPointArn"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + def s3_bucket_default_lock_enabled(self): + compliant_resources = [] + non_compliant_resources = [] -def s3_bucket_level_public_access_prohibited(): - compliant_resources = [] - non_compliant_resources = [] - buckets = client.list_buckets()["Buckets"] + 
for bucket in self.buckets: + try: + response = self.client.get_object_lock_configuration( + Bucket=bucket["Name"] + ) + compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + except botocore.exceptions.ClientError as e: + if ( + e.response["Error"]["Code"] + == "ObjectLockConfigurationNotFoundError" + ): + non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + else: + raise e - for bucket in buckets: - response = client.get_public_access_block(Bucket=bucket["Name"]) - if False not in response["PublicAccessBlockConfiguration"].values(): - compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - else: - non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + def s3_bucket_level_public_access_prohibited(self): + compliant_resources = [] + non_compliant_resources = [] - -def s3_bucket_logging_enabled(): - compliant_resources = [] - non_compliant_resources = [] - buckets = client.list_buckets()["Buckets"] - - for bucket in buckets: - response = client.get_bucket_logging(Bucket=bucket["Name"]) - if "LoggingEnabled" in response: - compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - else: - non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def s3_bucket_ssl_requests_only(): - compliant_resources = [] - non_compliant_resources = [] - buckets = client.list_buckets()["Buckets"] - - for bucket in buckets: - policy = client.get_bucket_policy(Bucket=bucket["Name"])["Policy"] - if "aws:SecureTransport" in policy: - compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - else: - non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def s3_bucket_versioning_enabled(): - compliant_resources = [] - non_compliant_resources = [] - buckets = client.list_buckets()["Buckets"] - - for bucket in buckets: - response = client.get_bucket_versioning(Bucket=bucket["Name"]) - if "Status" in response and response["Status"] == "Enabled": - compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - else: - non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def s3_default_encryption_kms(): - compliant_resources = [] - non_compliant_resources = [] - buckets = client.list_buckets()["Buckets"] - - for bucket in buckets: - configuration = client.get_bucket_encryption(Bucket=bucket["Name"])["ServerSideEncryptionConfiguration"] - - if configuration["Rules"][0]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] == "aws:kms": - compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - else: - non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - 
non_compliant_resources=non_compliant_resources, - ) - - -def s3_event_notifications_enabled(): - compliant_resources = [] - non_compliant_resources = [] - buckets = client.list_buckets()["Buckets"] - - for bucket in buckets: - configuration = client.get_bucket_notification_configuration(Bucket=bucket["Name"]) - if ( - "LambdaFunctionConfigurations" in configuration - or "QueueConfigurations" in configuration - or "TopicConfigurations" in configuration - ): - compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - else: - non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def s3_last_backup_recovery_point_created(): - compliant_resources = [] - non_compliant_resources = [] - buckets = client.list_buckets()["Buckets"] - - for bucket in buckets: - backups = backup_client.list_recovery_points_by_resource(ResourceArn=f"arn:aws:s3:::{bucket['Name']}") - - if backups["RecoveryPoints"] != []: - compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - else: - non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def s3_lifecycle_policy_check(): - compliant_resources = [] - non_compliant_resources = [] - buckets = client.list_buckets()["Buckets"] - - for bucket in buckets: - try: - configuration = client.get_bucket_lifecycle_configuration(Bucket=bucket["Name"]) - compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == "NoSuchLifecycleConfiguration": - non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + for bucket in self.buckets: + response = self.client.get_public_access_block(Bucket=bucket["Name"]) + if False not in response["PublicAccessBlockConfiguration"].values(): + compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") else: - raise e + non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def s3_bucket_logging_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + for bucket in self.buckets: + response = self.client.get_bucket_logging(Bucket=bucket["Name"]) + if "LoggingEnabled" in response: + compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + else: + non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def s3_bucket_ssl_requests_only(self): + compliant_resources = [] + non_compliant_resources = [] + + for bucket in self.buckets: + policy = self.client.get_bucket_policy(Bucket=bucket["Name"])["Policy"] + if "aws:SecureTransport" in policy: + compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + else: + non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + + return RuleCheckResult( + passed=not non_compliant_resources, + 
compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def s3_bucket_versioning_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + for bucket in self.buckets: + response = self.client.get_bucket_versioning(Bucket=bucket["Name"]) + if "Status" in response and response["Status"] == "Enabled": + compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + else: + non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def s3_default_encryption_kms(self): + compliant_resources = [] + non_compliant_resources = [] + + for bucket in self.buckets: + configuration = self.client.get_bucket_encryption(Bucket=bucket["Name"])[ + "ServerSideEncryptionConfiguration" + ] + + if ( + configuration["Rules"][0]["ApplyServerSideEncryptionByDefault"][ + "SSEAlgorithm" + ] + == "aws:kms" + ): + compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + else: + non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def s3_event_notifications_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + for bucket in self.buckets: + configuration = self.client.get_bucket_notification_configuration( + Bucket=bucket["Name"] + ) + if ( + "LambdaFunctionConfigurations" in configuration + or "QueueConfigurations" in configuration + or "TopicConfigurations" in configuration + ): + compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + else: + non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def s3_last_backup_recovery_point_created(self): + compliant_resources = [] + non_compliant_resources = [] + + for bucket in self.buckets: + backups = self.backup_client.list_recovery_points_by_resource( + ResourceArn=f"arn:aws:s3:::{bucket['Name']}" + ) + + if backups["RecoveryPoints"] != []: + compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + else: + non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def s3_lifecycle_policy_check(self): + compliant_resources = [] + non_compliant_resources = [] + + for bucket in self.buckets: + try: + configuration = self.client.get_bucket_lifecycle_configuration( + Bucket=bucket["Name"] + ) + compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + except botocore.exceptions.ClientError as e: + if e.response["Error"]["Code"] == "NoSuchLifecycleConfiguration": + non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") + else: + raise e + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + +rule_checker = S3RuleChecker diff --git a/services/secrets_manager.py b/services/secrets_manager.py index b78018c..4f06a6e 100644 --- a/services/secrets_manager.py +++ b/services/secrets_manager.py @@ -1,80 +1,84 @@ -from models import RuleCheckResult +from models import 
RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -import datetime +from datetime import datetime, timedelta from dateutil.tz import tzlocal -client = boto3.client("secretsmanager") +class SecretsManagerRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("secretsmanager") + @cached_property + def secrets(self): + return self.client.list_secrets()["SecretList"] -def secretsmanager_rotation_enabled_check(): - compliant_resources = [] - non_compliant_resources = [] - secrets = client.list_secrets()["SecretList"] + def secretsmanager_rotation_enabled_check(self): + compliant_resources = [] + non_compliant_resources = [] - for secret in secrets: - if secret.get("RotationEnabled") == True: - compliant_resources.append(secret["ARN"]) - else: - non_compliant_resources.append(secret["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def secretsmanager_scheduled_rotation_success_check(): - compliant_resources = [] - non_compliant_resources = [] - secrets = client.list_secrets()["SecretList"] - - for secret in secrets: - if secret.get("RotationEnabled") == True: - if 'LastRotatedDate' not in secret: - non_compliant_resources.append(secret["ARN"]) - continue - - now = datetime.datetime.now(tz=tzlocal()) - rotation_period = datetime.timedelta( - days=secret["RotationRules"]["AutomaticallyAfterDays"] + 2 - ) # 최대 2일 지연 가능 (aws) - elapsed_time_after_rotation = now - secret["LastRotatedDate"] - - if elapsed_time_after_rotation > rotation_period: - non_compliant_resources.append(secret["ARN"]) - else: + for secret in self.secrets: + if secret.get("RotationEnabled", False): compliant_resources.append(secret["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def secretsmanager_secret_periodic_rotation(): - compliant_resources = [] - non_compliant_resources = [] - secrets = client.list_secrets()["SecretList"] - - for secret in secrets: - if secret.get("RotationEnabled") == True: - if 'LastRotatedDate' not in secret: - non_compliant_resources.append(secret["ARN"]) - continue - - now = datetime.datetime.now(tz=tzlocal()) - elapsed_time_after_rotation = now - secret["LastRotatedDate"] - - if elapsed_time_after_rotation > datetime.timedelta(days=90): - non_compliant_resources.append(secret["ARN"]) else: - compliant_resources.append(secret["ARN"]) + non_compliant_resources.append(secret["ARN"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def secretsmanager_scheduled_rotation_success_check(self): + compliant_resources = [] + non_compliant_resources = [] + + for secret in self.secrets: + if secret.get("RotationEnabled", False): + if "LastRotatedDate" not in secret: + non_compliant_resources.append(secret["ARN"]) + continue + + now = datetime.now(tz=tzlocal()) + rotation_period = timedelta( + days=secret["RotationRules"]["AutomaticallyAfterDays"] + 2 + ) # 최대 2일 지연 가능 (aws) + elapsed_time_after_rotation = now - secret["LastRotatedDate"] + + if elapsed_time_after_rotation > rotation_period: + non_compliant_resources.append(secret["ARN"]) 
+ else: + compliant_resources.append(secret["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def secretsmanager_secret_periodic_rotation(self): + compliant_resources = [] + non_compliant_resources = [] + + for secret in self.secrets: + if secret.get("RotationEnabled") == True: + if "LastRotatedDate" not in secret: + non_compliant_resources.append(secret["ARN"]) + continue + + now = datetime.now(tz=tzlocal()) + elapsed_time_after_rotation = now - secret["LastRotatedDate"] + + if elapsed_time_after_rotation > timedelta(days=90): + non_compliant_resources.append(secret["ARN"]) + else: + compliant_resources.append(secret["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + +rule_checker = SecretsManagerRuleChecker diff --git a/services/security_hub.py b/services/security_hub.py index 678c820..3466f67 100644 --- a/services/security_hub.py +++ b/services/security_hub.py @@ -1,28 +1,31 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker import boto3 -client = boto3.client("securityhub") +class SecurityHubRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("securityhub") + self.sts_client = boto3.client("sts") -sts_client = boto3.client("sts") + def securityhub_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + aws_account_id = self.sts_client.get_caller_identity()["Account"] + + try: + hub = self.client.describe_hub() + compliant_resources.append(aws_account_id) + except Exception as e: + if e.__class__.__name__ == "InvalidAccessException": + non_compliant_resources.append(aws_account_id) + else: + raise e + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) -def securityhub_enabled(): - compliant_resources = [] - non_compliant_resources = [] - aws_account_id = sts_client.get_caller_identity()["Account"] - - try: - hub = client.describe_hub() - compliant_resources.append(aws_account_id) - except Exception as e: - if e.__class__.__name__ == "InvalidAccessException": - non_compliant_resources.append(aws_account_id) - else: - raise e - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = SecurityHubRuleChecker diff --git a/services/sns.py b/services/sns.py index 9297b14..5f49275 100644 --- a/services/sns.py +++ b/services/sns.py @@ -1,46 +1,57 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -client = boto3.client("sns") +class SNSRuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("sns") + @cached_property + def topics(self): + topics = self.client.list_topics()["Topics"] + return [ + self.client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"] + for topic in topics + ] -def sns_encrypted_kms(): - compliant_resources = [] - non_compliant_resources = [] - topics = client.list_topics()["Topics"] + def sns_encrypted_kms(self): + compliant_resources = [] + non_compliant_resources = [] - for topic in topics: - topic = client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"] - if "KmsMasterKeyId" in 
topic: - compliant_resources.append(topic["TopicArn"]) - else: - non_compliant_resources.append(topic["TopicArn"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def sns_topic_message_delivery_notification_enabled(): - compliant_resources = [] - non_compliant_resources = [] - topics = client.list_topics()["Topics"] - - for topic in topics: - topic = client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"] - - for key in topic.keys(): - if key.endswith("FeedbackRoleArn") == True: + for topic in self.topics: + if "KmsMasterKeyId" in topic: compliant_resources.append(topic["TopicArn"]) - break - else: - non_compliant_resources.append(topic["TopicArn"]) + else: + non_compliant_resources.append(topic["TopicArn"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def sns_topic_message_delivery_notification_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + for topic in self.topics: + notification_roles = [ + attribute + for attribute in topic.keys() + if attribute.endswith("FeedbackRoleArn") + ] + + if notification_roles: + compliant_resources.append(topic["TopicArn"]) + else: + non_compliant_resources.append(topic["TopicArn"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + +rule_checker = SNSRuleChecker diff --git a/services/tags.py b/services/tags.py deleted file mode 100644 index e27b50e..0000000 --- a/services/tags.py +++ /dev/null @@ -1,11 +0,0 @@ -from models import RuleCheckResult -import boto3 - - -# client = boto3.client("") - - -def required_tags(): - return RuleCheckResult( - passed=False, compliant_resources=[], non_compliant_resources=[] - ) diff --git a/services/vpc.py b/services/vpc.py index ed9dae3..679c378 100644 --- a/services/vpc.py +++ b/services/vpc.py @@ -1,257 +1,261 @@ -from models import RuleCheckResult -from pprint import pprint +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -ec2 = boto3.client("ec2") +class VPCRuleChecker(RuleChecker): + def __init__(self): + self.ec2 = boto3.client("ec2") + @cached_property + def security_group_rules(self): + return self.ec2.describe_security_group_rules()["SecurityGroupRules"] -def ec2_transit_gateway_auto_vpc_attach_disabled(): - response = ec2.describe_transit_gateways() + def ec2_transit_gateway_auto_vpc_attach_disabled(self): + response = self.ec2.describe_transit_gateways() - non_compliant_resources = [ - resource["TransitGatewayArn"] - for resource in filter( - lambda x: x["Options"]["AutoAcceptSharedAttachments"] == "enable", - response["TransitGateways"], + non_compliant_resources = [ + resource["TransitGatewayArn"] + for resource in filter( + lambda x: x["Options"]["AutoAcceptSharedAttachments"] == "enable", + response["TransitGateways"], + ) + ] + + compliant_resources = list( + set( + [ + resource["TransitGatewayArn"] + for resource in response["TransitGateways"] + ] + ) + - set(non_compliant_resources) ) - ] - compliant_resources = list( - set([resource["TransitGatewayArn"] for resource in response["TransitGateways"]]) 
- - set(non_compliant_resources) - ) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def restricted_ssh(): - response = ec2.describe_security_group_rules() - - non_compliant_resources = [ - f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' - for resource in filter( - lambda x: x["IsEgress"] == False - and x["FromPort"] <= 22 - and x["ToPort"] >= 22 - and x.get("CidrIpv4") == "0.0.0.0/0", - response["SecurityGroupRules"], + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, ) - ] - compliant_resources = list( - set( - [ + def restricted_ssh(self): + non_compliant_resources = [ + f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' + for resource in filter( + lambda x: x["IsEgress"] == False + and x["FromPort"] <= 22 + and x["ToPort"] >= 22 + and x.get("CidrIpv4") == "0.0.0.0/0", + self.security_group_rules, + ) + ] + + compliant_resources = list( + set( + [ + f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' + for resource in self.security_group_rules + ] + ) + - set(non_compliant_resources) + ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def restricted_common_ports(self): + common_ports = [ + 22, # SSH + 80, # HTTP + 3306, # MySQL + 3389, # RDP + 5432, # PostgreSQL + 6379, # Redis + 11211, # Memcached + ] + + non_compliant_resources = [ + f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' + for resource in filter( + lambda x: x["IsEgress"] == False + and x["FromPort"] in common_ports + and x["ToPort"] in common_ports + and x.get("PrefixListId") is None, + self.security_group_rules, + ) + ] + + compliant_resources = list( + set( f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' - for resource in response["SecurityGroupRules"] - ] + for resource in self.security_group_rules + ) + - set(non_compliant_resources) ) - - set(non_compliant_resources) - ) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - -def restricted_common_ports(): - common_ports = [ - 22, # SSH - 80, # HTTP - 3306, # MySQL - 3389, # RDP - 5432, # PostgreSQL - 6379, # Redis - 11211, # Memcached - ] - response = ec2.describe_security_group_rules() - - non_compliant_resources = [ - f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' - for resource in filter( - lambda x: x["IsEgress"] == False - and x["FromPort"] in common_ports - and x["ToPort"] in common_ports - and x.get("PrefixListId") is None, - response["SecurityGroupRules"], + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, ) - ] - compliant_resources = list( - set( - f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' - for resource in response["SecurityGroupRules"] + def subnet_auto_assign_public_ip_disabled(self): + response = self.ec2.describe_subnets() + + non_compliant_resources = [ + resource["SubnetId"] + for resource in filter( + lambda x: x["MapPublicIpOnLaunch"], response["Subnets"] + ) + ] + + compliant_resources = list( + set(resource["SubnetId"] for resource in response["Subnets"]) + - set(non_compliant_resources) ) - - 
set(non_compliant_resources) - ) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def subnet_auto_assign_public_ip_disabled(): - response = ec2.describe_subnets() - - non_compliant_resources = [ - resource["SubnetId"] - for resource in filter(lambda x: x["MapPublicIpOnLaunch"], response["Subnets"]) - ] - - compliant_resources = list( - set(resource["SubnetId"] for resource in response["Subnets"]) - - set(non_compliant_resources) - ) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def vpc_default_security_group_closed(): - response = ec2.describe_security_groups( - Filters=[{"Name": "group-name", "Values": ["default"]}] - ) - - non_compliant_resources = [ - resource["GroupId"] - for resource in filter( - lambda x: x["IpPermissions"] or x["IpPermissionsEgress"], - response["SecurityGroups"], + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, ) - ] - compliant_resources = list( - set(resource["GroupId"] for resource in response["SecurityGroups"]) - - set(non_compliant_resources) - ) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def vpc_flow_logs_enabled(): - response = ec2.describe_flow_logs() - flow_log_enabled_vpcs = [ - resource["ResourceId"] for resource in response["FlowLogs"] - ] - - response = ec2.describe_vpcs() - - non_compliant_resources = [ - resource["VpcId"] - for resource in filter( - lambda x: x["VpcId"] not in flow_log_enabled_vpcs, response["Vpcs"] + def vpc_default_security_group_closed(self): + response = self.ec2.describe_security_groups( + Filters=[{"Name": "group-name", "Values": ["default"]}] ) - ] - compliant_resources = list( - set(resource["VpcId"] for resource in response["Vpcs"]) - - set(non_compliant_resources) - ) + non_compliant_resources = [ + resource["GroupId"] + for resource in filter( + lambda x: x["IpPermissions"] or x["IpPermissionsEgress"], + response["SecurityGroups"], + ) + ] - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def vpc_network_acl_unused_check(): - response = ec2.describe_network_acls() - - non_compliant_resources = [ - resource["NetworkAclId"] - for resource in filter(lambda x: not x["Associations"], response["NetworkAcls"]) - ] - - compliant_resources = list( - set(resource["NetworkAclId"] for resource in response["NetworkAcls"]) - - set(non_compliant_resources) - ) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def vpc_peering_dns_resolution_check(): - response = ec2.describe_vpc_peering_connections() - - non_compliant_resources = [ - resource["VpcPeeringConnectionId"] - for resource in filter( - lambda x: x["Status"]["Code"] not in ["deleted", "deleting"] - and ( - not x["AccepterVpcInfo"].get("PeeringOptions") - or not x["AccepterVpcInfo"]["PeeringOptions"][ - "AllowDnsResolutionFromRemoteVpc" - ] - or not x["RequesterVpcInfo"]["PeeringOptions"][ - "AllowDnsResolutionFromRemoteVpc" - ] - ), - response["VpcPeeringConnections"], + 
compliant_resources = list( + set(resource["GroupId"] for resource in response["SecurityGroups"]) + - set(non_compliant_resources) ) - ] - compliant_resources = list( - set( + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def vpc_flow_logs_enabled(self): + response = self.ec2.describe_flow_logs() + flow_log_enabled_vpcs = [ + resource["ResourceId"] for resource in response["FlowLogs"] + ] + + response = self.ec2.describe_vpcs() + + non_compliant_resources = [ + resource["VpcId"] + for resource in filter( + lambda x: x["VpcId"] not in flow_log_enabled_vpcs, response["Vpcs"] + ) + ] + + compliant_resources = list( + set(resource["VpcId"] for resource in response["Vpcs"]) + - set(non_compliant_resources) + ) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def vpc_network_acl_unused_check(self): + response = self.ec2.describe_network_acls() + + non_compliant_resources = [ + resource["NetworkAclId"] + for resource in filter( + lambda x: not x["Associations"], response["NetworkAcls"] + ) + ] + + compliant_resources = list( + set(resource["NetworkAclId"] for resource in response["NetworkAcls"]) + - set(non_compliant_resources) + ) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def vpc_peering_dns_resolution_check(self): + response = self.ec2.describe_vpc_peering_connections() + + non_compliant_resources = [ resource["VpcPeeringConnectionId"] - for resource in response["VpcPeeringConnections"] + for resource in filter( + lambda x: x["Status"]["Code"] not in ["deleted", "deleting"] + and ( + not x["AccepterVpcInfo"].get("PeeringOptions") + or not x["AccepterVpcInfo"]["PeeringOptions"][ + "AllowDnsResolutionFromRemoteVpc" + ] + or not x["RequesterVpcInfo"]["PeeringOptions"][ + "AllowDnsResolutionFromRemoteVpc" + ] + ), + response["VpcPeeringConnections"], + ) + ] + + compliant_resources = list( + set( + resource["VpcPeeringConnectionId"] + for resource in response["VpcPeeringConnections"] + ) + - set(non_compliant_resources) ) - - set(non_compliant_resources) - ) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def vpc_sg_open_only_to_authorized_ports(): - response = ec2.describe_security_group_rules() - - authorized_port = [ - # 80 - ] - - non_compliant_resources = [ - f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' - for resource in filter( - lambda x: x["IsEgress"] == False - and (x.get("CidrIpv4") == "0.0.0.0/0" or x.get("CidrIpv6") == "::/0") - and x["FromPort"] not in authorized_port - and x["ToPort"] not in authorized_port, - response["SecurityGroupRules"], + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, ) - ] - compliant_resources = list( - set( + def vpc_sg_open_only_to_authorized_ports(self): + authorized_port = [ + # 80 + ] + + non_compliant_resources = [ f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' - for resource in response["SecurityGroupRules"] - ) - - set(non_compliant_resources) - ) + for resource in filter( + lambda x: x["IsEgress"] == False + and (x.get("CidrIpv4") == 
"0.0.0.0/0" or x.get("CidrIpv6") == "::/0") + and x["FromPort"] not in authorized_port + and x["ToPort"] not in authorized_port, + self.security_group_rules, + ) + ] - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + compliant_resources = list( + set( + f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' + for resource in self.security_group_rules + ) + - set(non_compliant_resources) + ) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + +rule_checker = VPCRuleChecker diff --git a/services/wafv2.py b/services/wafv2.py index 6fc4292..bfcacbf 100644 --- a/services/wafv2.py +++ b/services/wafv2.py @@ -1,120 +1,144 @@ -from models import RuleCheckResult +from models import RuleCheckResult, RuleChecker +from functools import cached_property import boto3 -client = boto3.client("wafv2") -global_client = boto3.client("wafv2", region_name="us-east-1") +class WAFv2RuleChecker(RuleChecker): + def __init__(self): + self.client = boto3.client("wafv2") + self.global_client = boto3.client("wafv2", region_name="us-east-1") + @cached_property + def regional_web_acls(self): + return self.client.list_web_acls(Scope="REGIONAL")["WebACLs"] -def wafv2_logging_enabled(): - compliant_resources = [] - non_compliant_resources = [] - regional_web_acls = client.list_web_acls(Scope="REGIONAL")["WebACLs"] - cloudfront_web_acls = global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"] + @cached_property + def cloudfront_web_acls(self): + return self.global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"] - for web_acl in regional_web_acls: - try: - configuration = client.get_logging_configuration(ResourceArn=web_acl["ARN"]) - compliant_resources.append(web_acl["ARN"]) - except Exception as e: - if e.__class__.__name__ == "WAFNonexistentItemException": - non_compliant_resources.append(web_acl["ARN"]) + @cached_property + def regional_rule_groups(self): + rule_groups = self.client.list_rule_groups(Scope="REGIONAL")["RuleGroups"] + return [ + self.client.get_rule_group(ARN=rule_group["ARN"])["RuleGroup"] + for rule_group in rule_groups + ] + + @cached_property + def cloudfront_rule_groups(self): + rule_groups = self.global_client.list_rule_groups(Scope="CLOUDFRONT")[ + "RuleGroups" + ] + return [ + self.global_client.get_rule_group(ARN=rule_group["ARN"])["RuleGroup"] + for rule_group in rule_groups + ] + + def wafv2_logging_enabled(self): + compliant_resources = [] + non_compliant_resources = [] + + for web_acl in self.regional_web_acls: + try: + configuration = self.client.get_logging_configuration( + ResourceArn=web_acl["ARN"] + ) + compliant_resources.append(web_acl["ARN"]) + except Exception as e: + if e.__class__.__name__ == "WAFNonexistentItemException": + non_compliant_resources.append(web_acl["ARN"]) + else: + raise e + + for web_acl in self.cloudfront_web_acls: + try: + configuration = self.global_client.get_logging_configuration( + ResourceArn=web_acl["ARN"] + ) + compliant_resources.append(web_acl["ARN"]) + except Exception as e: + if e.__class__.__name__ == "WAFNonexistentItemException": + non_compliant_resources.append(web_acl["ARN"]) + else: + raise e + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def wafv2_rulegroup_logging_enabled(self): + 
compliant_resources = [] + non_compliant_resources = [] + + for rule_group in self.regional_rule_groups: + if rule_group["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True: + compliant_resources.append(rule_group["ARN"]) else: - raise e + non_compliant_resources.append(rule_group["ARN"]) - for web_acl in cloudfront_web_acls: - try: - configuration = global_client.get_logging_configuration(ResourceArn=web_acl["ARN"]) - compliant_resources.append(web_acl["ARN"]) - except Exception as e: - if e.__class__.__name__ == "WAFNonexistentItemException": - non_compliant_resources.append(web_acl["ARN"]) + for rule_group in self.cloudfront_rule_groups: + if rule_group["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True: + compliant_resources.append(rule_group["ARN"]) else: - raise e + non_compliant_resources.append(rule_group["ARN"]) - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def wafv2_rulegroup_not_empty(self): + compliant_resources = [] + non_compliant_resources = [] + + for rule_group in self.regional_rule_groups: + if len(rule_group["Rules"]) > 0: + compliant_resources.append(rule_group["ARN"]) + else: + non_compliant_resources.append(rule_group["ARN"]) + + for rule_group in self.cloudfront_rule_groups: + if len(rule_group["Rules"]) > 0: + compliant_resources.append(rule_group["ARN"]) + else: + non_compliant_resources.append(rule_group["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) + + def wafv2_webacl_not_empty(self): + compliant_resources = [] + non_compliant_resources = [] + + for web_acl in self.regional_web_acls: + response = self.client.get_web_acl( + Id=web_acl["Id"], Name=web_acl["Name"], Scope="REGIONAL" + ) + if len(response["WebACL"]["Rules"]) > 0: + compliant_resources.append(web_acl["ARN"]) + else: + non_compliant_resources.append(web_acl["ARN"]) + + for web_acl in self.cloudfront_web_acls: + response = self.global_client.get_web_acl( + Id=web_acl["Id"], Name=web_acl["Name"], Scope="CLOUDFRONT" + ) + if len(response["WebACL"]["Rules"]) > 0: + compliant_resources.append(web_acl["ARN"]) + else: + non_compliant_resources.append(web_acl["ARN"]) + + return RuleCheckResult( + passed=not non_compliant_resources, + compliant_resources=compliant_resources, + non_compliant_resources=non_compliant_resources, + ) -def wafv2_rulegroup_logging_enabled(): - compliant_resources = [] - non_compliant_resources = [] - regional_rule_groups = client.list_rule_groups(Scope="REGIONAL")["RuleGroups"] - cloudfront_rule_groups = global_client.list_rule_groups(Scope="CLOUDFRONT")["RuleGroups"] - - - for rule_group in regional_rule_groups: - configuration = client.get_rule_group(ARN=rule_group["ARN"]) - if configuration["RuleGroup"]["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True: - compliant_resources.append(rule_group["ARN"]) - else: - non_compliant_resources.append(rule_group["ARN"]) - - for rule_group in cloudfront_rule_groups: - configuration = global_client.get_rule_group(ARN=rule_group["ARN"]) - if configuration["RuleGroup"]["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True: - compliant_resources.append(rule_group["ARN"]) - else: - non_compliant_resources.append(rule_group["ARN"]) 
- - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def wafv2_rulegroup_not_empty(): - compliant_resources = [] - non_compliant_resources = [] - regional_rule_groups = client.list_rule_groups(Scope="REGIONAL")["RuleGroups"] - cloudfront_rule_groups = global_client.list_rule_groups(Scope="CLOUDFRONT")["RuleGroups"] - - for rule_group in regional_rule_groups: - configuration = client.get_rule_group(ARN=rule_group["ARN"]) - if len(configuration["RuleGroup"]["Rules"]) > 0: - compliant_resources.append(rule_group["ARN"]) - else: - non_compliant_resources.append(rule_group["ARN"]) - - for rule_group in cloudfront_rule_groups: - configuration = global_client.get_rule_group(ARN=rule_group["ARN"]) - if len(configuration["RuleGroup"]["Rules"]) > 0: - compliant_resources.append(rule_group["ARN"]) - else: - non_compliant_resources.append(rule_group["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) - - -def wafv2_webacl_not_empty(): - compliant_resources = [] - non_compliant_resources = [] - regional_web_acls = client.list_web_acls(Scope="REGIONAL")["WebACLs"] - cloudfront_web_acls = global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"] - - for web_acl in regional_web_acls: - response = client.get_web_acl(Id=web_acl["Id"], Name=web_acl["Name"], Scope="REGIONAL") - if len(response["WebACL"]["Rules"]) > 0: - compliant_resources.append(web_acl["ARN"]) - else: - non_compliant_resources.append(web_acl["ARN"]) - for web_acl in cloudfront_web_acls: - response = global_client.get_web_acl(Id=web_acl["Id"], Name=web_acl["Name"], Scope="CLOUDFRONT") - if len(response["WebACL"]["Rules"]) > 0: - compliant_resources.append(web_acl["ARN"]) - else: - non_compliant_resources.append(web_acl["ARN"]) - - return RuleCheckResult( - passed=not non_compliant_resources, - compliant_resources=compliant_resources, - non_compliant_resources=non_compliant_resources, - ) +rule_checker = WAFv2RuleChecker
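Note (not part of the patch): a minimal, self-contained sketch of the caching pattern this refactor applies across the service modules. The toy class, attribute names, and bucket values below are illustrative stand-ins, not repository code; the real checkers subclass models.RuleChecker and wrap boto3 list_* calls in functools.cached_property so each AWS listing is fetched once per checker instance, no matter how many rules consume it.

    from functools import cached_property


    class ToyRuleChecker:
        # Illustrative stand-in for a RuleChecker subclass such as S3RuleChecker.
        calls = 0

        @cached_property
        def buckets(self):
            # In the real checkers this wraps a boto3 call like
            # client.list_buckets(); cached_property means the API is hit
            # only on first access for this instance.
            ToyRuleChecker.calls += 1
            return ["bucket-a", "bucket-b"]

        def rule_one(self):
            return [b for b in self.buckets if b.endswith("a")]

        def rule_two(self):
            return [b for b in self.buckets if b.endswith("b")]


    checker = ToyRuleChecker()
    checker.rule_one()
    checker.rule_two()
    assert ToyRuleChecker.calls == 1  # the listing was fetched exactly once

This mirrors how main.py now instantiates one rule_checker per service and dispatches every enabled rule to it, rather than having each module-level rule function re-issue the same describe/list call.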