Refactor to cache AWS resources

This commit is contained in:
EC2 Default User 2024-08-14 01:05:06 +00:00
parent 4854f11021
commit fb94b40c23
28 changed files with 2676 additions and 2474 deletions

View File

@ -124,16 +124,16 @@
"autoscaling-multiple-az": {
"enabled": true,
"level": 2
},
"autoscaling-launch-template": {
"enabled": true,
"level": 2
}
}
},
"EC2": {
"enabled": true,
"rules": {
"autoscaling-launch-template": {
"enabled": true,
"level": 2
},
"ec2-ebs-encryption-by-default": {
"enabled": true,
"level": 2
@ -432,15 +432,6 @@
}
}
},
"Tags": {
"enabled": true,
"rules": {
"required-tags": {
"enabled": true,
"level": 2
}
}
},
"S3": {
"enabled": true,
"rules": {

12
main.py
View File

@ -2,6 +2,8 @@ from InquirerLib import prompt
from InquirerLib.InquirerPy.utils import InquirerPyKeybindings
from InquirerLib.InquirerPy.base import Choice
from colorama import Style, Fore
from datetime import datetime
from importlib import import_module
from utils import *
import services
@ -40,12 +42,18 @@ def perform_bp_rules_check(bp):
if service_name == "Lambda":
service_name = "_lambda"
module = getattr(services, convert_snake_case(service_name))
now = datetime.now()
rule_checker = getattr(
services, convert_snake_case(service_name)
).rule_checker()
for rule_name, rule in service["rules"].items():
if not rule["enabled"]:
continue
rule["result"] = rule_checker.check_rule(convert_snake_case(rule_name))
rule["result"] = getattr(module, convert_snake_case(rule_name))()
elapsed_time = datetime.now() - now
print(convert_snake_case(service_name), elapsed_time.total_seconds())
return bp

View File

@ -1,4 +1,5 @@
from pydantic import BaseModel
from utils import convert_snake_case
from typing import List
@ -6,3 +7,12 @@ class RuleCheckResult(BaseModel):
passed: bool
compliant_resources: List[str]
non_compliant_resources: List[str]
class RuleChecker:
    """Base class for per-service rule checkers.

    Subclasses define one method per rule; `check_rule` dispatches a rule
    name to the matching method by converting it with `convert_snake_case`.
    """

    def __init__(self):
        # No shared state here; subclasses create their own boto3 clients.
        pass

    def check_rule(self, rule_name: str) -> RuleCheckResult:
        """Resolve *rule_name* to a method on self and invoke it.

        Raises AttributeError if no method matches the converted name.
        """
        check_func = getattr(self, convert_snake_case(rule_name))
        return check_func()

View File

@ -16,7 +16,6 @@ from . import (
elasticache,
iam,
_lambda,
tags,
s3,
secrets_manager,
security_hub,

View File

@ -1,18 +1,24 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
import json
client = boto3.client("lambda")
iam_client = boto3.client("iam")
class LambdaRuleChecker(RuleChecker):
    """Rule checks for AWS Lambda functions."""

    def __init__(self):
        self.client = boto3.client("lambda")
        self.iam_client = boto3.client("iam")

    @cached_property
    def functions(self):
        # Cached so all Lambda rules share a single ListFunctions call.
        # NOTE(review): no pagination here — only the first page of
        # functions is checked; confirm this is acceptable.
        return self.client.list_functions()["Functions"]
def lambda_dlq_check():
def lambda_dlq_check(self):
compliant_resource = []
non_compliant_resources = []
functions = client.list_functions()["Functions"]
for function in functions:
for function in self.functions:
if "DeadLetterConfig" in function:
compliant_resource.append(function["FunctionArn"])
else:
@ -24,17 +30,24 @@ def lambda_dlq_check():
non_compliant_resources=non_compliant_resources,
)
def lambda_function_public_access_prohibited():
def lambda_function_public_access_prohibited(self):
compliant_resource = []
non_compliant_resources = []
functions = client.list_functions()["Functions"]
for function in functions:
for function in self.functions:
try:
policy = json.loads(client.get_policy(FunctionName=function["FunctionName"])["Policy"])
policy = json.loads(
self.client.get_policy(FunctionName=function["FunctionName"])[
"Policy"
]
)
for statement in policy["Statement"]:
if statement["Principal"] in ["*", "", '{"AWS": ""}', '{"AWS": "*"}']:
if statement["Principal"] in [
"*",
"",
'{"AWS": ""}',
'{"AWS": "*"}',
]:
non_compliant_resources.append(function["FunctionArn"])
break
else:
@ -51,17 +64,18 @@ def lambda_function_public_access_prohibited():
non_compliant_resources=non_compliant_resources,
)
def lambda_function_settings_check():
def lambda_function_settings_check(self):
compliant_resource = []
non_compliant_resources = []
functions = client.list_functions()["Functions"]
default_timeout = 3
default_memory_size = 128
for function in functions:
if function["Timeout"] == default_timeout or function["MemorySize"] == default_memory_size:
for function in self.functions:
if (
function["Timeout"] == default_timeout
or function["MemorySize"] == default_memory_size
):
non_compliant_resources.append(function["FunctionArn"])
else:
compliant_resource.append(function["FunctionArn"])
@ -72,13 +86,11 @@ def lambda_function_settings_check():
non_compliant_resources=non_compliant_resources,
)
def lambda_inside_vpc():
def lambda_inside_vpc(self):
compliant_resource = []
non_compliant_resources = []
functions = client.list_functions()["Functions"]
for function in functions:
for function in self.functions:
if "VpcConfig" in function:
compliant_resource.append(function["FunctionArn"])
else:
@ -89,3 +101,6 @@ def lambda_inside_vpc():
compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources,
)
rule_checker = LambdaRuleChecker

View File

@ -1,123 +1,150 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
client = boto3.client("elbv2")
wafv2_client = boto3.client("wafv2")
class ALBRuleChecker(RuleChecker):
    """Rule checks for ELBv2 load balancers."""

    def __init__(self):
        self.client = boto3.client("elbv2")
        self.wafv2_client = boto3.client("wafv2")
def alb_http_drop_invalid_header_enabled():
load_balancers = client.describe_load_balancers()
@cached_property
def load_balancers(self):
    # One DescribeLoadBalancers call shared by every ALB rule.
    return self.client.describe_load_balancers()["LoadBalancers"]
@cached_property
def load_balancer_attributes(self):
    """Map LoadBalancerArn -> DescribeLoadBalancerAttributes response.

    Fetched once per load balancer and cached for reuse across rules.
    """
    responses = [
        self.client.describe_load_balancer_attributes(
            LoadBalancerArn=load_balancer["LoadBalancerArn"]
        )
        for load_balancer in self.load_balancers
    ]
    return {
        load_balancer["LoadBalancerArn"]: response
        for load_balancer, response in zip(self.load_balancers, responses)
    }
def alb_http_drop_invalid_header_enabled(self):
compliant_resource = []
non_compliant_resources = []
for load_balancer in load_balancers['LoadBalancers']:
response = client.describe_load_balancer_attributes(
LoadBalancerArn=load_balancer['LoadBalancerArn']
)
for load_balancer in self.load_balancers:
response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]]
result = [
attribute
for attribute in filter(
lambda x: x['Key'] == "routing.http.drop_invalid_header_fields.enabled"
and x['Value'] == "true",
response['Attributes'],
lambda x: x["Key"]
== "routing.http.drop_invalid_header_fields.enabled"
and x["Value"] == "true",
response["Attributes"],
)
]
if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
if result:
compliant_resource.append(load_balancer["LoadBalancerArn"])
else:
non_compliant_resources.append(load_balancer["LoadBalancerArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources,
)
def alb_waf_enabled():
load_balancers = client.describe_load_balancers()
def alb_waf_enabled(self):
compliant_resource = []
non_compliant_resources = []
for load_balancer in load_balancers['LoadBalancers']:
response = wafv2_client.get_web_acl_for_resource(
ResourceArn=load_balancer['LoadBalancerArn']
for load_balancer in self.load_balancers:
response = self.wafv2_client.get_web_acl_for_resource(
ResourceArn=load_balancer["LoadBalancerArn"]
)
if 'WebACL' in response: compliant_resource.append(load_balancer['LoadBalancerArn'])
else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
if "WebACL" in response:
compliant_resource.append(load_balancer["LoadBalancerArn"])
else:
non_compliant_resources.append(load_balancer["LoadBalancerArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources,
)
def elb_cross_zone_load_balancing_enabled():
load_balancers = client.describe_load_balancers()
def elb_cross_zone_load_balancing_enabled(self):
compliant_resource = []
non_compliant_resources = []
for load_balancer in load_balancers['LoadBalancers']:
response = client.describe_load_balancer_attributes(
LoadBalancerArn=load_balancer['LoadBalancerArn']
)
for load_balancer in self.load_balancers:
response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]]
result = [
attribute
for attribute in filter(
lambda x: x['Key'] == "load_balancing.cross_zone.enabled"
and x['Value'] == "true",
response['Attributes'],
lambda x: x["Key"] == "load_balancing.cross_zone.enabled"
and x["Value"] == "true",
response["Attributes"],
)
]
if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
if result:
compliant_resource.append(load_balancer["LoadBalancerArn"])
else:
non_compliant_resources.append(load_balancer["LoadBalancerArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources,
)
def elb_deletion_protection_enabled():
load_balancers = client.describe_load_balancers()
def elb_deletion_protection_enabled(self):
compliant_resource = []
non_compliant_resources = []
for load_balancer in load_balancers['LoadBalancers']:
response = client.describe_load_balancer_attributes(
LoadBalancerArn=load_balancer['LoadBalancerArn']
)
for load_balancer in self.load_balancers:
response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]]
result = [
attribute
for attribute in filter(
lambda x: x['Key'] == "deletion_protection.enabled"
and x['Value'] == "true",
response['Attributes'],
lambda x: x["Key"] == "deletion_protection.enabled"
and x["Value"] == "true",
response["Attributes"],
)
]
if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
if result:
compliant_resource.append(load_balancer["LoadBalancerArn"])
else:
non_compliant_resources.append(load_balancer["LoadBalancerArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources,
)
def elb_logging_enabled():
load_balancers = client.describe_load_balancers()
def elb_logging_enabled(self):
compliant_resource = []
non_compliant_resources = []
for load_balancer in load_balancers['LoadBalancers']:
response = client.describe_load_balancer_attributes(
LoadBalancerArn=load_balancer['LoadBalancerArn']
)
for load_balancer in self.load_balancers:
response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]]
result = [
attribute
for attribute in filter(
lambda x: x['Key'] == "access_logs.s3.enabled"
and x['Value'] == "true",
response['Attributes'],
lambda x: x["Key"] == "access_logs.s3.enabled"
and x["Value"] == "true",
response["Attributes"],
)
]
if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
if result:
compliant_resource.append(load_balancer["LoadBalancerArn"])
else:
non_compliant_resources.append(load_balancer["LoadBalancerArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources,
)
rule_checker = ALBRuleChecker

View File

@ -1,18 +1,37 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
v1_client = boto3.client("apigateway")
v2_client = boto3.client("apigatewayv2")
class APIGatewayRuleChecker(RuleChecker):
    """Rule checks for API Gateway (v1 REST and v2 HTTP APIs)."""

    def __init__(self):
        self.v1_client = boto3.client("apigateway")
        self.v2_client = boto3.client("apigatewayv2")
@cached_property
def http_apis(self):
    # Cached list of API Gateway v2 (HTTP) APIs.
    return self.v2_client.get_apis()["Items"]
def api_gwv2_access_logs_enabled():
apis = v2_client.get_apis()
@cached_property
def rest_apis(self):
    # Cached list of API Gateway v1 (REST) APIs.
    return self.v1_client.get_rest_apis()["items"]
@cached_property
def rest_api_stages(self):
    """Map REST API id -> GetStages response, fetched once per API."""
    responses = [
        self.v1_client.get_stages(
            restApiId=api["id"],
        )
        for api in self.rest_apis
    ]
    return {api["id"]: response for api, response in zip(self.rest_apis, responses)}
def api_gwv2_access_logs_enabled(self):
compliant_resources = []
non_compliant_resources = []
for api in apis["Items"]:
stages = v2_client.get_stages(
for api in self.http_apis:
stages = self.v2_client.get_stages(
ApiId=api["ApiId"],
)
@ -23,7 +42,12 @@ def api_gwv2_access_logs_enabled():
]
compliant_resources += list(
set([f"{api['Name']} / {stage['StageName']}" for stage in stages["Items"]])
set(
[
f"{api['Name']} / {stage['StageName']}"
for stage in stages["Items"]
]
)
- set(non_compliant_resources)
)
@ -33,14 +57,12 @@ def api_gwv2_access_logs_enabled():
non_compliant_resources=non_compliant_resources,
)
def api_gwv2_authorization_type_configured():
apis = v2_client.get_apis()
def api_gwv2_authorization_type_configured(self):
compliant_resources = []
non_compliant_resources = []
for api in apis["Items"]:
response = v2_client.get_routes(
for api in self.http_apis:
response = self.v2_client.get_routes(
ApiId=api["ApiId"],
)
@ -51,7 +73,12 @@ def api_gwv2_authorization_type_configured():
]
compliant_resources += list(
set([f"{api['Name']} / {route['RouteKey']}" for route in response["Items"]])
set(
[
f"{api['Name']} / {route['RouteKey']}"
for route in response["Items"]
]
)
- set(non_compliant_resources)
)
@ -61,19 +88,15 @@ def api_gwv2_authorization_type_configured():
non_compliant_resources=non_compliant_resources,
)
def api_gw_associated_with_waf():
apis = v1_client.get_rest_apis()
def api_gw_associated_with_waf(self):
compliant_resources = []
non_compliant_resources = []
for api in apis["items"]:
stages = v1_client.get_stages(
restApiId=api["id"],
)
for api in self.rest_apis:
stages = self.rest_api_stages[api["id"]]
for stage in stages["item"]:
stage_arn = f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
stage_arn = f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
if "webAclArn" in stage:
compliant_resources.append(stage_arn)
@ -86,19 +109,15 @@ def api_gw_associated_with_waf():
non_compliant_resources=non_compliant_resources,
)
def api_gw_cache_enabled_and_encrypted():
apis = v1_client.get_rest_apis()
def api_gw_cache_enabled_and_encrypted(self):
compliant_resources = []
non_compliant_resources = []
for api in apis["items"]:
stages = v1_client.get_stages(
restApiId=api["id"],
)
for api in self.rest_apis:
stages = self.rest_api_stages[api["id"]]
non_compliant_resources += [
f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
for stage in stages["item"]
if not "*/*" in stage["methodSettings"]
or (
@ -109,7 +128,7 @@ def api_gw_cache_enabled_and_encrypted():
compliant_resources += list(
set(
[
f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
for stage in stages["item"]
]
)
@ -122,18 +141,14 @@ def api_gw_cache_enabled_and_encrypted():
non_compliant_resources=non_compliant_resources,
)
def api_gw_execution_logging_enabled():
apis = v1_client.get_rest_apis()
def api_gw_execution_logging_enabled(self):
compliant_resources = []
non_compliant_resources = []
for api in apis["items"]:
stages = v1_client.get_stages(
restApiId=api["id"],
)
for api in self.rest_apis:
stages = self.rest_api_stages[api["id"]]
non_compliant_resources += [
f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
for stage in stages["item"]
if not "*/*" in stage["methodSettings"]
or (
@ -144,7 +159,7 @@ def api_gw_execution_logging_enabled():
compliant_resources += list(
set(
[
f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
for stage in stages["item"]
]
)
@ -157,25 +172,21 @@ def api_gw_execution_logging_enabled():
non_compliant_resources=non_compliant_resources,
)
def api_gw_xray_enabled():
apis = v1_client.get_rest_apis()
def api_gw_xray_enabled(self):
compliant_resources = []
non_compliant_resources = []
for api in apis["items"]:
stages = v1_client.get_stages(
restApiId=api["id"],
)
for api in self.rest_apis:
stages = self.rest_api_stages[api["id"]]
non_compliant_resources += [
f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
for stage in stages["item"]
if not stage["tracingEnabled"]
]
compliant_resources += list(
set(
[
f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
for stage in stages["item"]
]
)
@ -187,3 +198,6 @@ def api_gw_xray_enabled():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = APIGatewayRuleChecker

View File

@ -1,17 +1,26 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
client = boto3.client("autoscaling")
class ASGRuleChecker(RuleChecker):
    """Rule checks for EC2 Auto Scaling groups."""

    def __init__(self):
        self.client = boto3.client("autoscaling")

    @cached_property
    def asgs(self):
        # One DescribeAutoScalingGroups call shared by every ASG rule.
        return self.client.describe_auto_scaling_groups()["AutoScalingGroups"]
def autoscaling_group_elb_healthcheck_required():
def autoscaling_group_elb_healthcheck_required(self):
compliant_resources = []
non_compliant_resources = []
asgs = client.describe_auto_scaling_groups()["AutoScalingGroups"]
for asg in asgs:
if asg["LoadBalancerNames"] or asg["TargetGroupARNs"] and asg["HealthCheckType"] != "ELB":
for asg in self.asgs:
if (
asg["LoadBalancerNames"]
or asg["TargetGroupARNs"]
and asg["HealthCheckType"] != "ELB"
):
non_compliant_resources.append(asg["AutoScalingGroupARN"])
else:
compliant_resources.append(asg["AutoScalingGroupARN"])
@ -22,13 +31,11 @@ def autoscaling_group_elb_healthcheck_required():
non_compliant_resources=non_compliant_resources,
)
def autoscaling_multiple_az():
def autoscaling_multiple_az(self):
compliant_resources = []
non_compliant_resources = []
asgs = client.describe_auto_scaling_groups()["AutoScalingGroups"]
for asg in asgs:
for asg in self.asgs:
if len(asg["AvailabilityZones"]) > 1:
compliant_resources.append(asg["AutoScalingGroupARN"])
else:
@ -39,3 +46,22 @@ def autoscaling_multiple_az():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def autoscaling_launch_template(self):
    """Flag ASGs still using a legacy launch configuration instead of a launch template."""
    compliant_resources = []
    non_compliant_resources = []
    for group in self.asgs:
        # Presence of LaunchConfigurationName means the group predates
        # launch templates and fails this rule.
        bucket = (
            non_compliant_resources
            if "LaunchConfigurationName" in group
            else compliant_resources
        )
        bucket.append(group["AutoScalingGroupARN"])
    return RuleCheckResult(
        passed=not non_compliant_resources,
        compliant_resources=compliant_resources,
        non_compliant_resources=non_compliant_resources,
    )
rule_checker = ASGRuleChecker

View File

@ -1,17 +1,33 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
client = boto3.client("cloudfront")
class CloudFrontRuleChecker(RuleChecker):
    """Rule checks for CloudFront distributions."""

    def __init__(self):
        self.client = boto3.client("cloudfront")

    @cached_property
    def distributions(self):
        # Distribution summaries from ListDistributions, fetched once.
        # NOTE(review): "Items" can be absent when no distributions exist
        # — confirm callers tolerate a KeyError in that case.
        return self.client.list_distributions()["DistributionList"]["Items"]
def cloudfront_accesslogs_enabled():
@cached_property
def distribution_details(self):
    """Map distribution Id -> full GetDistribution detail, fetched once each."""
    responses = [
        self.client.get_distribution(Id=distribution["Id"])["Distribution"]
        for distribution in self.distributions
    ]
    return {
        distribution["Id"]: response
        for distribution, response in zip(self.distributions, responses)
    }
def cloudfront_accesslogs_enabled(self):
compliant_resources = []
non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]["Items"]
for distribution in distributions:
distribution = client.get_distribution(Id=distribution["Id"])["Distribution"]
for distribution in self.distributions:
distribution = self.distribution_details[distribution["Id"]]
if (
"Logging" in distribution["DistributionConfig"]
and distribution["DistributionConfig"]["Logging"]["Enabled"] == True
@ -26,13 +42,11 @@ def cloudfront_accesslogs_enabled():
non_compliant_resources=non_compliant_resources,
)
def cloudfront_associated_with_waf():
def cloudfront_associated_with_waf(self):
compliant_resources = []
non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]["Items"]
for distribution in distributions:
for distribution in self.distributions:
if "WebACLId" in distribution and distribution["WebACLId"] != "":
compliant_resources.append(distribution["ARN"])
else:
@ -44,14 +58,12 @@ def cloudfront_associated_with_waf():
non_compliant_resources=non_compliant_resources,
)
def cloudfront_default_root_object_configured():
def cloudfront_default_root_object_configured(self):
compliant_resources = []
non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]["Items"]
for distribution in distributions:
distribution = client.get_distribution(Id=distribution["Id"])["Distribution"]
for distribution in self.distributions:
distribution = self.distribution_details[distribution["Id"]]
if distribution["DistributionConfig"]["DefaultRootObject"] != "":
compliant_resources.append(distribution["ARN"])
@ -64,18 +76,18 @@ def cloudfront_default_root_object_configured():
non_compliant_resources=non_compliant_resources,
)
def cloudfront_no_deprecated_ssl_protocols():
def cloudfront_no_deprecated_ssl_protocols(self):
compliant_resources = []
non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]["Items"]
for distribution in distributions:
for distribution in self.distributions:
for origin in distribution["Origins"]["Items"]:
if (
"CustomOriginConfig" in origin
and origin["CustomOriginConfig"]["OriginProtocolPolicy"] in ["https-only", "match-viewer"]
and "SSLv3" in origin["CustomOriginConfig"]["OriginSslProtocols"]["Items"]
and origin["CustomOriginConfig"]["OriginProtocolPolicy"]
in ["https-only", "match-viewer"]
and "SSLv3"
in origin["CustomOriginConfig"]["OriginSslProtocols"]["Items"]
):
non_compliant_resources.append(distribution["ARN"])
@ -89,13 +101,11 @@ def cloudfront_no_deprecated_ssl_protocols():
non_compliant_resources=non_compliant_resources,
)
def cloudfront_s3_origin_access_control_enabled():
def cloudfront_s3_origin_access_control_enabled(self):
compliant_resources = []
non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]
for distribution in distributions["Items"]:
for distribution in self.distributions:
for origin in distribution["Origins"]["Items"]:
if "S3OriginConfig" in origin and origin["OriginAccessControlId"] == "":
non_compliant_resources.append(distribution["ARN"])
@ -109,14 +119,15 @@ def cloudfront_s3_origin_access_control_enabled():
non_compliant_resources=non_compliant_resources,
)
def cloudfront_viewer_policy_https():
def cloudfront_viewer_policy_https(self):
compliant_resources = []
non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]["Items"]
for distribution in distributions:
if distribution["DefaultCacheBehavior"]["ViewerProtocolPolicy"] == "allow-all":
for distribution in self.distributions:
if (
distribution["DefaultCacheBehavior"]["ViewerProtocolPolicy"]
== "allow-all"
):
non_compliant_resources.append(distribution["ARN"])
continue
@ -136,3 +147,6 @@ def cloudfront_viewer_policy_https():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = CloudFrontRuleChecker

View File

@ -1,15 +1,16 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
import boto3
client = boto3.client("cloudwatch")
logs_client = boto3.client("logs")
class CloudWatchRuleChecker(RuleChecker):
    """Rule checks for CloudWatch alarms and CloudWatch Logs groups."""

    def __init__(self):
        self.client = boto3.client("cloudwatch")
        self.logs_client = boto3.client("logs")
def cw_loggroup_retention_period_check():
def cw_loggroup_retention_period_check(self):
compliant_resources = []
non_compliant_resources = []
log_groups = logs_client.describe_log_groups()["logGroups"]
log_groups = self.logs_client.describe_log_groups()["logGroups"]
# This rule should check if `retentionInDays` is less than n days.
# But, instead of that, this will check if the retention setting is set to "Never expire" or not
@ -25,11 +26,10 @@ def cw_loggroup_retention_period_check():
non_compliant_resources=non_compliant_resources,
)
def cloudwatch_alarm_settings_check():
def cloudwatch_alarm_settings_check(self):
compliant_resources = []
non_compliant_resources = []
alarms = client.describe_alarms()["MetricAlarms"]
alarms = self.client.describe_alarms()["MetricAlarms"]
parameters = {
"MetricName": "", # required
"Threshold": None,
@ -55,3 +55,6 @@ def cloudwatch_alarm_settings_check():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = CloudWatchRuleChecker

View File

@ -1,20 +1,23 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
build_client = boto3.client("codebuild")
class CodeSeriesChecker(RuleChecker):
    """Rule checks for CodeBuild and CodeDeploy resources."""

    def __init__(self):
        self.build_client = boto3.client("codebuild")
        self.deploy_client = boto3.client("codedeploy")
deploy_client = boto3.client("codedeploy")
@cached_property
def projects(self):
    """Full CodeBuild project details, fetched once via a single batch call."""
    project_names = self.build_client.list_projects()["projects"]
    return self.build_client.batch_get_projects(names=project_names)["projects"]
def codebuild_project_environment_privileged_check():
def codebuild_project_environment_privileged_check(self):
compliant_resources = []
non_compliant_resources = []
projects = build_client.list_projects()["projects"]
for project in projects:
project = build_client.batch_get_projects(names=[project])["projects"][0]
for project in self.projects:
if not project["environment"]["privilegedMode"]:
compliant_resources.append(project["arn"])
else:
@ -26,17 +29,17 @@ def codebuild_project_environment_privileged_check():
non_compliant_resources=non_compliant_resources,
)
def codebuild_project_logging_enabled():
def codebuild_project_logging_enabled(self):
compliant_resources = []
non_compliant_resources = []
projects = build_client.list_projects()["projects"]
for project in projects:
project = build_client.batch_get_projects(names=[project])["projects"][0]
for project in self.projects:
logs_config = project["logsConfig"]
if logs_config["cloudWatchLogs"]["status"] == "ENABLED" or logs_config["s3Logs"]["status"] == "ENABLED":
if (
logs_config["cloudWatchLogs"]["status"] == "ENABLED"
or logs_config["s3Logs"]["status"] == "ENABLED"
):
compliant_resources.append(project["arn"])
else:
non_compliant_resources.append(project["arn"])
@ -47,18 +50,20 @@ def codebuild_project_logging_enabled():
non_compliant_resources=non_compliant_resources,
)
def codedeploy_auto_rollback_monitor_enabled():
def codedeploy_auto_rollback_monitor_enabled(self):
compliant_resources = []
non_compliant_resources = []
applications = deploy_client.list_applications()["applications"]
applications = self.deploy_client.list_applications()["applications"]
for application in applications:
deployment_groups = deploy_client.list_deployment_groups(applicationName=application)["deploymentGroups"]
deployment_group_names = self.deploy_client.list_deployment_groups(
applicationName=application
)["deploymentGroups"]
deployment_groups = self.deploy_client.batch_get_deployment_groups(
applicationName=application, deploymentGroupNames=deployment_group_names
)["deploymentGroupsInfo"]
for deployment_group in deployment_groups:
deployment_group = deploy_client.get_deployment_group(
applicationName=application, deploymentGroupName=deployment_group
)["deploymentGroupInfo"]
if (
deployment_group["alarmConfiguration"]["enabled"]
@ -66,10 +71,15 @@ def codedeploy_auto_rollback_monitor_enabled():
):
compliant_resources.append(deployment_group["deploymentGroupId"])
else:
non_compliant_resources.append(deployment_group["deploymentGroupId"])
non_compliant_resources.append(
deployment_group["deploymentGroupId"]
)
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = CodeSeriesChecker

View File

@ -1,30 +1,43 @@
from models import RuleCheckResult
import datetime
from models import RuleCheckResult, RuleChecker
from functools import cached_property
from datetime import datetime, timedelta
from dateutil.tz import tzlocal
import boto3
client = boto3.client("dynamodb")
backup_client = boto3.client("backup")
autoscaling_client = boto3.client("application-autoscaling")
class DynamoDBRuleChecker(RuleChecker):
    """Rule checks for DynamoDB tables."""

    def __init__(self):
        self.client = boto3.client("dynamodb")
        self.backup_client = boto3.client("backup")
        self.autoscaling_client = boto3.client("application-autoscaling")

    @cached_property
    def tables(self):
        # Describe every table once; the list is shared by all DynamoDB rules.
        table_names = self.client.list_tables()["TableNames"]
        return [
            self.client.describe_table(TableName=table_name)["Table"]
            for table_name in table_names
        ]
def dynamodb_autoscaling_enabled():
def dynamodb_autoscaling_enabled(self):
compliant_resources = []
non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table_name in table_names:
table = client.describe_table(TableName=table_name)["Table"]
if table.get("BillingModeSummary", {}).get("BillingMode") == "PAY_PER_REQUEST":
for table in self.tables:
if (
table.get("BillingModeSummary", {}).get("BillingMode")
== "PAY_PER_REQUEST"
):
compliant_resources.append(table["TableArn"])
continue
scaling_policies = autoscaling_client.describe_scaling_policies(
ServiceNamespace="dynamodb", ResourceId=f"table/{table_name}"
scaling_policies = self.autoscaling_client.describe_scaling_policies(
ServiceNamespace="dynamodb", ResourceId=f"table/{table['TableName']}"
)["ScalingPolicies"]
scaling_policy_dimensions = [i["ScalableDimension"] for i in scaling_policies]
scaling_policy_dimensions = [
policy["ScalableDimension"] for policy in scaling_policies
]
if (
"dynamodb:table:ReadCapacityUnits" in scaling_policy_dimensions
and "dynamodb:table:WriteCapacityUnits" in scaling_policy_dimensions
@ -39,24 +52,46 @@ def dynamodb_autoscaling_enabled():
non_compliant_resources=non_compliant_resources,
)
def dynamodb_last_backup_recovery_point_created():
def dynamodb_last_backup_recovery_point_created(self):
compliant_resources = []
non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table_name in table_names:
table = client.describe_table(TableName=table_name)["Table"]
recovery_points = backup_client.list_recovery_points_by_resource(ResourceArn=table["TableArn"])[
"RecoveryPoints"
]
recovery_point_creation_dates = sorted([i["CreationDate"] for i in recovery_points])
if len(recovery_point_creation_dates) == 0:
for table in self.tables:
recovery_points = self.backup_client.list_recovery_points_by_resource(
ResourceArn=table["TableArn"]
)["RecoveryPoints"]
if not recovery_points:
non_compliant_resources.append(table["TableArn"])
continue
if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[-1] < datetime.timedelta(days=1):
latest_recovery_point = sorted(
[recovery_point["CreationDate"] for recovery_point in recovery_points]
)[-1]
if datetime.now(tz=tzlocal()) - latest_recovery_point > timedelta(days=1):
non_compliant_resources.append(table["TableArn"])
else:
compliant_resources.append(table["TableArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def dynamodb_pitr_enabled(self):
    """Check that point-in-time recovery (PITR) is enabled on every table.

    Returns a RuleCheckResult keyed by table ARN.
    """
    compliant_resources = []
    non_compliant_resources = []
    for table in self.tables:
        backup_description = self.client.describe_continuous_backups(
            TableName=table["TableName"]
        )["ContinuousBackupsDescription"]
        pitr_status = backup_description["PointInTimeRecoveryDescription"][
            "PointInTimeRecoveryStatus"
        ]
        if pitr_status == "ENABLED":
            compliant_resources.append(table["TableArn"])
        else:
            non_compliant_resources.append(table["TableArn"])
    return RuleCheckResult(
        passed=not non_compliant_resources,
        compliant_resources=compliant_resources,
        non_compliant_resources=non_compliant_resources,
    )
def dynamodb_pitr_enabled():
def dynamodb_table_deletion_protection_enabled(self):
compliant_resources = []
non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table_name in table_names:
backup = client.describe_continuous_backups(TableName=table_name)["ContinuousBackupsDescription"]
table = client.describe_table(TableName=table_name)["Table"]
if backup["PointInTimeRecoveryDescription"]["PointInTimeRecoveryStatus"] == "ENABLED":
compliant_resources.append(table["TableArn"])
else:
non_compliant_resources.append(table["TableArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def dynamodb_table_deletion_protection_enabled():
compliant_resources = []
non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table_name in table_names:
table = client.describe_table(TableName=table_name)["Table"]
for table in self.tables:
if table["DeletionProtectionEnabled"] == True:
compliant_resources.append(table["TableArn"])
else:
@ -108,15 +118,11 @@ def dynamodb_table_deletion_protection_enabled():
non_compliant_resources=non_compliant_resources,
)
def dynamodb_table_encrypted_kms():
def dynamodb_table_encrypted_kms(self):
compliant_resources = []
non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table_name in table_names:
table = client.describe_table(TableName=table_name)["Table"]
for table in self.tables:
if (
"SSEDescription" in table
and table["SSEDescription"]["Status"] == "ENABLED"
@ -132,16 +138,15 @@ def dynamodb_table_encrypted_kms():
non_compliant_resources=non_compliant_resources,
)
def dynamodb_table_encryption_enabled():
def dynamodb_table_encryption_enabled(self):
compliant_resources = []
non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table_name in table_names:
table = client.describe_table(TableName=table_name)["Table"]
if "SSEDescription" in table and table["SSEDescription"]["Status"] == "ENABLED":
for table in self.tables:
if (
"SSEDescription" in table
and table["SSEDescription"]["Status"] == "ENABLED"
):
compliant_resources.append(table["TableArn"])
else:
non_compliant_resources.append(table["TableArn"])
@ -151,3 +156,6 @@ def dynamodb_table_encryption_enabled():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = DynamoDBRuleChecker

View File

@ -1,22 +1,33 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
client = boto3.client("ec2")
autoscaling_client = boto3.client("autoscaling")
ssm_client = boto3.client("ssm")
class EC2RuleChecker(RuleChecker):
def __init__(self):
    """Create the boto3 clients shared by all EC2 rule checks."""
    self.client = boto3.client("ec2")
    self.ssm_client = boto3.client("ssm")
@cached_property
def instances(self):
    """All non-terminated EC2 instances, cached for reuse across rules.

    Uses the describe_instances paginator so results beyond the first API
    page are not silently dropped.
    """
    paginator = self.client.get_paginator("describe_instances")
    return [
        instance
        for page in paginator.paginate()
        for reservation in page["Reservations"]
        for instance in reservation["Instances"]
        if instance["State"]["Name"] != "terminated"
    ]
def autoscaling_launch_template():
def ec2_ebs_encryption_by_default(self):
compliant_resources = []
non_compliant_resources = []
asgs = autoscaling_client.describe_auto_scaling_groups()["AutoScalingGroups"]
for asg in asgs:
if "LaunchConfigurationName" in asg:
non_compliant_resources.append(asg["AutoScalingGroupARN"])
volumes = self.client.describe_volumes()["Volumes"]
for volume in volumes:
if volume["Encrypted"]:
compliant_resources.append(volume["VolumeId"])
else:
compliant_resources.append(asg["AutoScalingGroupARN"])
non_compliant_resources.append(volume["VolumeId"])
return RuleCheckResult(
passed=not non_compliant_resources,
@ -24,34 +35,11 @@ def autoscaling_launch_template():
non_compliant_resources=non_compliant_resources,
)
def ec2_imdsv2_check(self):
    """Check that IMDSv2 is enforced (HttpTokens == "required") on every instance.

    Returns a RuleCheckResult keyed by instance id.
    """
    compliant_resources = []
    non_compliant_resources = []
    for instance in self.instances:
        if instance["MetadataOptions"]["HttpTokens"] == "required":
            compliant_resources.append(instance["InstanceId"])
        else:
            non_compliant_resources.append(instance["InstanceId"])
    return RuleCheckResult(
        passed=not non_compliant_resources,
        compliant_resources=compliant_resources,
        non_compliant_resources=non_compliant_resources,
    )
def ec2_instance_detailed_monitoring_enabled():
def ec2_instance_detailed_monitoring_enabled(self):
compliant_resources = []
non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
for reservation in reservations:
for instance in reservation["Instances"]:
if instance["State"]["Name"] == "terminated":
continue
for instance in self.instances:
if instance["Monitoring"]["State"] == "enabled":
compliant_resources.append(instance["InstanceId"])
else:
@ -84,18 +67,18 @@ def ec2_instance_detailed_monitoring_enabled():
non_compliant_resources=non_compliant_resources,
)
def ec2_instance_managed_by_systems_manager(self):
    """Check that every instance is registered with AWS Systems Manager.

    Returns a RuleCheckResult keyed by instance id.
    """
    compliant_resources = []
    non_compliant_resources = []
    informations = self.ssm_client.describe_instance_information()[
        "InstanceInformationList"
    ]
    # Set membership is O(1) per instance. NOTE(review): a truthy PingStatus
    # matches any status string — presumably "Online" was intended; confirm.
    managed_instance_ids = {
        info["InstanceId"] for info in informations if info["PingStatus"]
    }
    for instance in self.instances:
        if instance["InstanceId"] in managed_instance_ids:
            compliant_resources.append(instance["InstanceId"])
        else:
            non_compliant_resources.append(instance["InstanceId"])
    return RuleCheckResult(
        passed=not non_compliant_resources,
        compliant_resources=compliant_resources,
        non_compliant_resources=non_compliant_resources,
    )
def ec2_instance_profile_attached():
def ec2_instance_profile_attached(self):
compliant_resources = []
non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
for reservation in reservations:
for instance in reservation["Instances"]:
if instance["State"]["Name"] == "terminated":
continue
for instance in self.instances:
if "IamInstanceProfile" in instance:
compliant_resources.append(instance["InstanceId"])
else:
@ -128,16 +106,11 @@ def ec2_instance_profile_attached():
non_compliant_resources=non_compliant_resources,
)
def ec2_no_amazon_key_pair():
def ec2_no_amazon_key_pair(self):
compliant_resources = []
non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
for reservation in reservations:
for instance in reservation["Instances"]:
if instance["State"]["Name"] == "terminated":
continue
for instance in self.instances:
if "KeyName" in instance:
non_compliant_resources.append(instance["InstanceId"])
else:
@ -149,16 +122,11 @@ def ec2_no_amazon_key_pair():
non_compliant_resources=non_compliant_resources,
)
def ec2_stopped_instance():
def ec2_stopped_instance(self):
compliant_resources = []
non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
for reservation in reservations:
for instance in reservation["Instances"]:
if instance["State"]["Name"] == "terminated":
continue
for instance in self.instances:
if instance["State"]["Name"] != "stopped":
compliant_resources.append(instance["InstanceId"])
else:
@ -170,16 +138,11 @@ def ec2_stopped_instance():
non_compliant_resources=non_compliant_resources,
)
def ec2_token_hop_limit_check():
def ec2_token_hop_limit_check(self):
compliant_resources = []
non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
for reservation in reservations:
for instance in reservation["Instances"]:
if instance["State"]["Name"] == "terminated":
continue
for instance in self.instances:
if instance["MetadataOptions"]["HttpPutResponseHopLimit"] < 2:
compliant_resources.append(instance["InstanceId"])
else:
@ -190,3 +153,6 @@ def ec2_token_hop_limit_check():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = EC2RuleChecker

View File

@ -1,17 +1,21 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
import botocore
client = boto3.client("ecr")
class ECRRuleChecker(RuleChecker):
def __init__(self):
    """Create the boto3 ECR client used by the rule checks."""
    self.client = boto3.client("ecr")
@cached_property
def repositories(self):
    """All private ECR repositories, cached.

    Paginates describe_repositories so registries with more repositories
    than one API page returns are not silently truncated.
    """
    paginator = self.client.get_paginator("describe_repositories")
    return [
        repository
        for page in paginator.paginate()
        for repository in page["repositories"]
    ]
def ecr_private_image_scanning_enabled():
repositories = client.describe_repositories()
def ecr_private_image_scanning_enabled(self):
compliant_resource = []
non_compliant_resources = []
for repository in repositories["repositories"]:
for repository in self.repositories:
if repository["imageScanningConfiguration"]["scanOnPush"] == True:
compliant_resource.append(repository["repositoryArn"])
else:
@ -23,15 +27,13 @@ def ecr_private_image_scanning_enabled():
non_compliant_resources=non_compliant_resources,
)
def ecr_private_lifecycle_policy_configured():
repositories = client.describe_repositories()
def ecr_private_lifecycle_policy_configured(self):
compliant_resource = []
non_compliant_resources = []
for repository in repositories["repositories"]:
for repository in self.repositories:
try:
response = client.get_lifecycle_policy(
response = self.client.get_lifecycle_policy(
registryId=repository["registryId"],
repositoryName=repository["repositoryName"],
)
@ -48,13 +50,11 @@ def ecr_private_lifecycle_policy_configured():
non_compliant_resources=non_compliant_resources,
)
def ecr_private_tag_immutability_enabled():
repositories = client.describe_repositories()
def ecr_private_tag_immutability_enabled(self):
compliant_resource = []
non_compliant_resources = []
for repository in repositories["repositories"]:
for repository in self.repositories:
if repository["imageTagMutability"] == "IMMUTABLE":
compliant_resource.append(repository["repositoryArn"])
else:
@ -66,13 +66,11 @@ def ecr_private_tag_immutability_enabled():
non_compliant_resources=non_compliant_resources,
)
def ecr_kms_encryption_1():
repositories = client.describe_repositories()
def ecr_kms_encryption_1(self):
compliant_resource = []
non_compliant_resources = []
for repository in repositories["repositories"]:
for repository in self.repositories:
if repository["encryptionConfiguration"]["encryptionType"] == "KMS":
compliant_resource.append(repository["repositoryArn"])
else:
@ -83,3 +81,6 @@ def ecr_kms_encryption_1():
compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources,
)
rule_checker = ECRRuleChecker

View File

@ -1,24 +1,57 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
client = boto3.client("ecs")
class ECSRuleChecker(RuleChecker):
def __init__(self):
    """Create the boto3 ECS client used by the rule checks."""
    self.client = boto3.client("ecs")
def ecs_awsvpc_networking_enabled():
compliant_resources = []
non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
@cached_property
def task_definitions(self):
    """Details of the newest ACTIVE revision of each task definition family.

    Paginates list_task_definitions so families beyond the first API page
    are included.
    """
    paginator = self.client.get_paginator("list_task_definitions")
    task_definition_arns = [
        arn
        for page in paginator.paginate(status="ACTIVE")
        for arn in page["taskDefinitionArns"]
    ]
    # Keep only the highest revision per family; ARNs end in "<family>:<revision>"
    latest_task_definitions = {}
    for task_definition_arn in task_definition_arns:
        family, revision = task_definition_arn.rsplit(":", 1)
        latest_task_definitions[family] = max(
            latest_task_definitions.get(family, 0), int(revision)
        )
    # Fetch the full definition of each latest revision
    return [
        self.client.describe_task_definition(taskDefinition=f"{family}:{revision}")[
            "taskDefinition"
        ]
        for family, revision in latest_task_definitions.items()
    ]
@cached_property
def clusters(self):
    """Details (with settings) of every ECS cluster, cached.

    describe_clusters called with no cluster list describes only the
    *default* cluster, so enumerate cluster ARNs first.
    """
    cluster_arns = self.client.list_clusters()["clusterArns"]
    if not cluster_arns:
        return []
    return self.client.describe_clusters(
        clusters=cluster_arns, include=["SETTINGS"]
    )["clusters"]
@cached_property
def services(self):
    """All FARGATE services across every cluster, cached.

    describe_services rejects an empty `services` list and accepts at most
    10 ARNs per call, so the ARNs are sent in chunks of 10 (a cluster with
    no Fargate services is simply skipped).
    """
    services = []
    for cluster in self.clusters:
        service_arns = self.client.list_services(
            cluster=cluster["clusterArn"], launchType="FARGATE"
        )["serviceArns"]
        for start in range(0, len(service_arns), 10):
            services += self.client.describe_services(
                cluster=cluster["clusterArn"],
                services=service_arns[start : start + 10],
            )["services"]
    return services
def ecs_awsvpc_networking_enabled(self):
compliant_resources = []
non_compliant_resources = []
for task_definition in self.task_definitions:
if task_definition.get("networkMode") == "awsvpc":
compliant_resources.append(task_definition["taskDefinitionArn"])
else:
@ -30,26 +63,18 @@ def ecs_awsvpc_networking_enabled():
non_compliant_resources=non_compliant_resources,
)
def ecs_containers_nonprivileged():
def ecs_containers_nonprivileged(self):
compliant_resources = []
non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
for task_definition in self.task_definitions:
containers = task_definition["containerDefinitions"]
privileged_containers = [
container for container in containers if container.get("privileged")
]
for container in containers:
if container.get("privileged"):
if privileged_containers:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
@ -59,26 +84,20 @@ def ecs_containers_nonprivileged():
non_compliant_resources=non_compliant_resources,
)
def ecs_containers_readonly_access():
def ecs_containers_readonly_access(self):
compliant_resources = []
non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
for task_definition in self.task_definitions:
containers = task_definition["containerDefinitions"]
not_readonly_containers = [
container
for container in containers
if not container.get("readonlyRootFilesystem")
]
for container in containers:
if not container.get("readonlyRootFilesystem"):
if not_readonly_containers:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
@ -88,17 +107,21 @@ def ecs_containers_readonly_access():
non_compliant_resources=non_compliant_resources,
)
def ecs_container_insights_enabled():
def ecs_container_insights_enabled(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_clusters(include=["SETTINGS"])["clusters"]
for cluster in self.clusters:
container_insights_setting = [
setting
for setting in cluster["settings"]
if setting["name"] == "containerInsights"
]
for cluster in clusters:
container_insights_setting = [setting for setting in cluster["settings"] if setting["name"] == "containerInsights"]
if container_insights_setting and container_insights_setting[0]["value"] == "enabled":
if (
container_insights_setting
and container_insights_setting[0]["value"] == "enabled"
):
compliant_resources.append(cluster["clusterArn"])
else:
non_compliant_resources.append(cluster["clusterArn"])
@ -109,17 +132,11 @@ def ecs_container_insights_enabled():
non_compliant_resources=non_compliant_resources,
)
def ecs_fargate_latest_platform_version():
def ecs_fargate_latest_platform_version(self):
compliant_resources = []
non_compliant_resources = []
cluster_arns = client.list_clusters()["clusterArns"]
for cluster_arn in cluster_arns:
service_arns = client.list_services(cluster=cluster_arn, launchType="FARGATE")["serviceArns"]
services = client.describe_services(cluster=cluster_arn, services=service_arns)["services"]
for service in services:
for service in self.services:
if service["platformVersion"] == "LATEST":
compliant_resources.append(service["serviceArn"])
else:
@ -131,26 +148,67 @@ def ecs_fargate_latest_platform_version():
non_compliant_resources=non_compliant_resources,
)
def ecs_task_definition_log_configuration(self):
    """Check that every container in each latest task definition configures logging.

    A task definition is non-compliant if any of its containers lacks a
    logConfiguration block. Returns a RuleCheckResult keyed by task
    definition ARN.
    """
    compliant_resources = []
    non_compliant_resources = []
    for task_definition in self.task_definitions:
        containers = task_definition["containerDefinitions"]
        unlogged_containers = [
            container
            for container in containers
            if "logConfiguration" not in container
        ]
        if unlogged_containers:
            non_compliant_resources.append(task_definition["taskDefinitionArn"])
        else:
            compliant_resources.append(task_definition["taskDefinitionArn"])
    return RuleCheckResult(
        passed=not non_compliant_resources,
        compliant_resources=compliant_resources,
        non_compliant_resources=non_compliant_resources,
    )
def ecs_task_definition_memory_hard_limit(self):
    """Check that every container in each latest task definition sets a hard memory limit."""
    compliant_resources = []
    non_compliant_resources = []
    for task_definition in self.task_definitions:
        arn = task_definition["taskDefinitionArn"]
        has_unlimited_container = any(
            "memory" not in container
            for container in task_definition["containerDefinitions"]
        )
        if has_unlimited_container:
            non_compliant_resources.append(arn)
        else:
            compliant_resources.append(arn)
    return RuleCheckResult(
        passed=not non_compliant_resources,
        compliant_resources=compliant_resources,
        non_compliant_resources=non_compliant_resources,
    )
def ecs_task_definition_nonroot_user(self):
    """Check that no container in any latest task definition runs as root.

    A container with no "user" set (which defaults to root) or explicitly
    "root" makes its task definition non-compliant. Returns a
    RuleCheckResult keyed by task definition ARN.
    """
    compliant_resources = []
    non_compliant_resources = []
    for task_definition in self.task_definitions:
        containers = task_definition["containerDefinitions"]
        root_containers = [
            container
            for container in containers
            if container.get("user") in [None, "root"]
        ]
        # Bug fix: a stray `break` here aborted the whole loop after the
        # first non-compliant task definition, so the remaining task
        # definitions were never evaluated.
        if root_containers:
            non_compliant_resources.append(task_definition["taskDefinitionArn"])
        else:
            compliant_resources.append(task_definition["taskDefinitionArn"])
    return RuleCheckResult(
        passed=not non_compliant_resources,
        compliant_resources=compliant_resources,
        non_compliant_resources=non_compliant_resources,
    )
def ecs_task_definition_memory_hard_limit():
compliant_resources = []
non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
containers = task_definition["containerDefinitions"]
for container in containers:
if "memory" not in container:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def ecs_task_definition_nonroot_user():
compliant_resources = []
non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
containers = task_definition["containerDefinitions"]
for container in containers:
if container.get("user") in [None, "root"]:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = ECSRuleChecker

View File

@ -1,17 +1,26 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
client = boto3.client("efs")
ec2_client = boto3.client("ec2")
class EFSRuleChecker(RuleChecker):
def __init__(self):
    """Create the boto3 clients used by the EFS rule checks (EC2 is needed for route-table lookups)."""
    self.client = boto3.client("efs")
    self.ec2_client = boto3.client("ec2")
@cached_property
def access_points(self):
    """Cached list of all EFS access points."""
    response = self.client.describe_access_points()
    return response["AccessPoints"]
def efs_access_point_enforce_root_directory():
access_points = client.describe_access_points()["AccessPoints"]
@cached_property
def file_systems(self):
    """Cached list of all EFS file systems."""
    response = self.client.describe_file_systems()
    return response["FileSystems"]
def efs_access_point_enforce_root_directory(self):
compliant_resource = []
non_compliant_resources = []
for access_point in access_points:
for access_point in self.access_points:
if access_point["RootDirectory"]["Path"] != "/":
compliant_resource.append(access_point["AccessPointArn"])
else:
@ -23,13 +32,11 @@ def efs_access_point_enforce_root_directory():
non_compliant_resources=non_compliant_resources,
)
def efs_access_point_enforce_user_identity():
access_points = client.describe_access_points()["AccessPoints"]
def efs_access_point_enforce_user_identity(self):
compliant_resource = []
non_compliant_resources = []
for access_point in access_points:
for access_point in self.access_points:
if "PosixUser" in access_point:
compliant_resource.append(access_point["AccessPointArn"])
else:
@ -41,16 +48,15 @@ def efs_access_point_enforce_user_identity():
non_compliant_resources=non_compliant_resources,
)
def efs_automatic_backups_enabled():
file_systems = client.describe_file_systems()["FileSystems"]
def efs_automatic_backups_enabled(self):
compliant_resource = []
non_compliant_resources = []
for file_system in file_systems:
response = client.describe_backup_policy(
for file_system in self.file_systems:
response = self.client.describe_backup_policy(
FileSystemId=file_system["FileSystemId"]
)
if response["BackupPolicy"]["Status"] == "ENABLED":
compliant_resource.append(file_system["FileSystemArn"])
else:
@ -62,14 +68,12 @@ def efs_automatic_backups_enabled():
non_compliant_resources=non_compliant_resources,
)
def efs_encrypted_check():
file_systems = client.describe_file_systems()["FileSystems"]
def efs_encrypted_check(self):
compliant_resource = []
non_compliant_resources = []
for file_system in file_systems:
if file_system["Encrypted"] == True:
for file_system in self.file_systems:
if file_system["Encrypted"]:
compliant_resource.append(file_system["FileSystemArn"])
else:
non_compliant_resources.append(file_system["FileSystemArn"])
@ -80,19 +84,18 @@ def efs_encrypted_check():
non_compliant_resources=non_compliant_resources,
)
def efs_mount_target_public_accessible():
file_systems = client.describe_file_systems()["FileSystems"]
def efs_mount_target_public_accessible(self):
compliant_resource = []
non_compliant_resources = []
for file_system in file_systems:
mount_targets = client.describe_mount_targets(
for file_system in self.file_systems:
mount_targets = self.client.describe_mount_targets(
FileSystemId=file_system["FileSystemId"]
)["MountTargets"]
for mount_target in mount_targets:
subnet_id = mount_target["SubnetId"]
routes = ec2_client.describe_route_tables(
routes = self.ec2_client.describe_route_tables(
Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}]
)["RouteTables"][0]["Routes"]
@ -105,14 +108,17 @@ def efs_mount_target_public_accessible():
):
non_compliant_resources.append(file_system["FileSystemArn"])
break
else:
compliant_resource.append(file_system["FileSystemArn"])
compliant_resource = list(set(compliant_resource))
non_compliant_resources = list(set(non_compliant_resources))
compliant_resource = list(
set(compliant_resource) - set(non_compliant_resources)
)
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources,
)
rule_checker = EFSRuleChecker

View File

@ -1,24 +1,32 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
client = boto3.client("eks")
class EKSRuleChecker(RuleChecker):
def __init__(self):
    """Create the boto3 EKS client used by the rule checks."""
    self.client = boto3.client("eks")
@cached_property
def clusters(self):
    """Cached details for every EKS cluster in the region."""
    details = []
    for cluster_name in self.client.list_clusters()["clusters"]:
        details.append(self.client.describe_cluster(name=cluster_name)["cluster"])
    return details
def eks_cluster_logging_enabled():
clusters = client.list_clusters()["clusters"]
def eks_cluster_logging_enabled(self):
compliant_resource = []
non_compliant_resources = []
for cluster in clusters:
response = client.describe_cluster(name=cluster)["cluster"]
for cluster in self.clusters:
if (
len(response["logging"]["clusterLogging"][0]["types"]) == 5
and response["logging"]["clusterLogging"][0]["enabled"] == True
cluster["logging"]["clusterLogging"][0]["enabled"]
and len(cluster["logging"]["clusterLogging"][0]["types"]) == 5
):
compliant_resource.append(response["arn"])
compliant_resource.append(cluster["arn"])
else:
non_compliant_resources.append(response["arn"])
non_compliant_resources.append(cluster["arn"])
return RuleCheckResult(
passed=not non_compliant_resources,
@ -26,21 +34,18 @@ def eks_cluster_logging_enabled():
non_compliant_resources=non_compliant_resources,
)
def eks_cluster_secrets_encrypted():
clusters = client.list_clusters()["clusters"]
def eks_cluster_secrets_encrypted(self):
compliant_resource = []
non_compliant_resources = []
for cluster in clusters:
response = client.describe_cluster(name=cluster)["cluster"]
for cluster in self.clusters:
if (
"encryptionConfig" in response
and "secrets" in response["encryptionConfig"][0]["resources"]
"encryptionConfig" in cluster
and "secrets" in cluster["encryptionConfig"][0]["resources"]
):
compliant_resource.append(response["arn"])
compliant_resource.append(cluster["arn"])
else:
non_compliant_resources.append(response["arn"])
non_compliant_resources.append(cluster["arn"])
return RuleCheckResult(
passed=not non_compliant_resources,
@ -48,21 +53,21 @@ def eks_cluster_secrets_encrypted():
non_compliant_resources=non_compliant_resources,
)
def eks_endpoint_no_public_access(self):
    """Check that no EKS cluster API endpoint is publicly accessible.

    Returns a RuleCheckResult keyed by cluster ARN.
    """
    compliant_resource = []
    non_compliant_resources = []
    for cluster in self.clusters:
        if cluster["resourcesVpcConfig"]["endpointPublicAccess"]:
            non_compliant_resources.append(cluster["arn"])
        else:
            compliant_resource.append(cluster["arn"])
    return RuleCheckResult(
        passed=not non_compliant_resources,
        compliant_resources=compliant_resource,
        non_compliant_resources=non_compliant_resources,
    )
rule_checker = EKSRuleChecker

View File

@ -1,17 +1,26 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
client = boto3.client("elasticache")
class ElastiCacheRuleChecker(RuleChecker):
def __init__(self):
    """Create the boto3 ElastiCache client used by the rule checks."""
    self.client = boto3.client("elasticache")
@cached_property
def clusters(self):
    """Cached list of all ElastiCache cache clusters."""
    response = self.client.describe_cache_clusters()
    return response["CacheClusters"]
def elasticache_auto_minor_version_upgrade_check():
clusters = client.describe_cache_clusters()["CacheClusters"]
@cached_property
def replication_groups(self):
    """Cached list of all ElastiCache replication groups."""
    response = self.client.describe_replication_groups()
    return response["ReplicationGroups"]
def elasticache_auto_minor_version_upgrade_check(self):
compliant_resource = []
non_compliant_resources = []
for cluster in clusters:
if cluster["AutoMinorVersionUpgrade"] == True:
for cluster in self.clusters:
if cluster["AutoMinorVersionUpgrade"]:
compliant_resource.append(cluster["ARN"])
else:
non_compliant_resources.append(cluster["ARN"])
@ -22,13 +31,11 @@ def elasticache_auto_minor_version_upgrade_check():
non_compliant_resources=non_compliant_resources,
)
def elasticache_redis_cluster_automatic_backup_check():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
def elasticache_redis_cluster_automatic_backup_check(self):
compliant_resource = []
non_compliant_resources = []
for replication_group in replication_groups:
for replication_group in self.replication_groups:
if "SnapshottingClusterId" in replication_group:
compliant_resource.append(replication_group["ARN"])
else:
@ -40,13 +47,11 @@ def elasticache_redis_cluster_automatic_backup_check():
non_compliant_resources=non_compliant_resources,
)
def elasticache_repl_grp_auto_failover_enabled():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
def elasticache_repl_grp_auto_failover_enabled(self):
compliant_resource = []
non_compliant_resources = []
for replication_group in replication_groups:
for replication_group in self.replication_groups:
if replication_group["AutomaticFailover"] == "enabled":
compliant_resource.append(replication_group["ARN"])
else:
@ -58,13 +63,11 @@ def elasticache_repl_grp_auto_failover_enabled():
non_compliant_resources=non_compliant_resources,
)
def elasticache_repl_grp_encrypted_at_rest():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
def elasticache_repl_grp_encrypted_at_rest(self):
compliant_resource = []
non_compliant_resources = []
for replication_group in replication_groups:
for replication_group in self.replication_groups:
if replication_group["AtRestEncryptionEnabled"] == True:
compliant_resource.append(replication_group["ARN"])
else:
@ -76,13 +79,11 @@ def elasticache_repl_grp_encrypted_at_rest():
non_compliant_resources=non_compliant_resources,
)
def elasticache_repl_grp_encrypted_in_transit():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
def elasticache_repl_grp_encrypted_in_transit(self):
compliant_resource = []
non_compliant_resources = []
for replication_group in replication_groups:
for replication_group in self.replication_groups:
if replication_group["TransitEncryptionEnabled"] == True:
compliant_resource.append(replication_group["ARN"])
else:
@ -94,13 +95,11 @@ def elasticache_repl_grp_encrypted_in_transit():
non_compliant_resources=non_compliant_resources,
)
def elasticache_subnet_group_check():
clusters = client.describe_cache_clusters()["CacheClusters"]
def elasticache_subnet_group_check(self):
compliant_resource = []
non_compliant_resources = []
for cluster in clusters:
for cluster in self.clusters:
if cluster["CacheSubnetGroupName"] != "default":
compliant_resource.append(cluster["ARN"])
else:
@ -111,3 +110,6 @@ def elasticache_subnet_group_check():
compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources,
)
rule_checker = ElastiCacheRuleChecker

View File

@ -1,19 +1,36 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
client = boto3.client("iam")
class IAMRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("iam")
@cached_property
def policies(self):
return self.client.list_policies(Scope="Local")["Policies"]
def iam_policy_no_statements_with_admin_access():
@cached_property
def policy_default_versions(self):
responses = [
self.client.get_policy_version(
PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"]
)["PolicyVersion"]
for policy in self.policies
]
return {
policy["Arn"]: response
for policy, response in zip(self.policies, responses)
}
def iam_policy_no_statements_with_admin_access(self):
compliant_resource = []
non_compliant_resources = []
policies = client.list_policies(Scope="Local")["Policies"]
for policy in policies:
policy_version = client.get_policy_version(PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"])[
"PolicyVersion"
]
for policy in self.policies:
policy_version = self.policy_default_versions[policy["Arn"]]
for statement in policy_version["Document"]["Statement"]:
if (
@ -32,16 +49,12 @@ def iam_policy_no_statements_with_admin_access():
non_compliant_resources=non_compliant_resources,
)
def iam_policy_no_statements_with_full_access():
def iam_policy_no_statements_with_full_access(self):
compliant_resource = []
non_compliant_resources = []
policies = client.list_policies(Scope="Local")["Policies"]
for policy in policies:
policy_version = client.get_policy_version(PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"])[
"PolicyVersion"
]
for policy in self.policies:
policy_version = self.policy_default_versions[policy["Arn"]]
for statement in policy_version["Document"]["Statement"]:
if statement["Effect"] == "Deny":
@ -50,7 +63,9 @@ def iam_policy_no_statements_with_full_access():
if type(statement["Action"]) == str:
statement["Action"] = [statement["Action"]]
full_access_actions = [action for action in statement["Action"] if action.endswith(":*")]
full_access_actions = [
action for action in statement["Action"] if action.endswith(":*")
]
if full_access_actions:
non_compliant_resources.append(policy["Arn"])
break
@ -63,15 +78,18 @@ def iam_policy_no_statements_with_full_access():
non_compliant_resources=non_compliant_resources,
)
def iam_role_managed_policy_check():
def iam_role_managed_policy_check(self):
compliant_resource = []
non_compliant_resources = []
policy_arns = [] # 검사할 managed policy arn 목록
for policy in policy_arns:
response = client.list_entities_for_policy(PolicyArn=policy)
if response["PolicyGroups"] == [] and response["PolicyUsers"] == [] and response["PolicyRoles"] == []:
response = self.client.list_entities_for_policy(PolicyArn=policy)
if (
response["PolicyGroups"] == []
and response["PolicyUsers"] == []
and response["PolicyRoles"] == []
):
non_compliant_resources.append(policy)
else:
compliant_resource.append(policy)
@ -81,3 +99,6 @@ def iam_role_managed_policy_check():
compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources,
)
rule_checker = IAMRuleChecker

View File

@ -1,17 +1,18 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
import boto3
client = boto3.client("kms")
class KMSRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("kms")
def cmk_backing_key_rotation_enabled():
def cmk_backing_key_rotation_enabled(self):
compliant_resources = []
non_compliant_resources = []
keys = client.list_keys()["Keys"]
keys = self.client.list_keys()["Keys"]
for key in keys:
response = client.get_key_rotation_status(KeyId=key["KeyId"])
response = self.client.get_key_rotation_status(KeyId=key["KeyId"])
if response["KeyRotationEnabled"] == True:
compliant_resources.append(response["KeyId"])
@ -23,3 +24,6 @@ def cmk_backing_key_rotation_enabled():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = KMSRuleChecker

View File

@ -1,24 +1,39 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import datetime
from dateutil.tz import tzlocal
import boto3
client = boto3.client("rds")
backup_client = boto3.client("backup")
ec2_client = boto3.client("ec2")
class RDSRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("rds")
self.backup_client = boto3.client("backup")
self.ec2_client = boto3.client("ec2")
def aurora_last_backup_recovery_point_created():
@cached_property
def db_clusters(self):
return self.client.describe_db_clusters()["DBClusters"]
@cached_property
def db_instances(self):
return self.client.describe_db_instances()["DBInstances"]
def aurora_last_backup_recovery_point_created(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters:
recovery_points = backup_client.list_recovery_points_by_resource(ResourceArn=cluster["DBClusterArn"])[
"RecoveryPoints"
]
recovery_point_creation_dates = sorted([i["CreationDate"] for i in recovery_points])
if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[-1] < datetime.timedelta(days=1):
recovery_points = self.backup_client.list_recovery_points_by_resource(
ResourceArn=cluster["DBClusterArn"]
)["RecoveryPoints"]
recovery_point_creation_dates = sorted(
[i["CreationDate"] for i in recovery_points]
)
if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[
-1
] < datetime.timedelta(days=1):
compliant_resources.append(cluster["DBClusterArn"])
else:
non_compliant_resources.append(cluster["DBClusterArn"])
@ -29,14 +44,16 @@ def aurora_last_backup_recovery_point_created():
non_compliant_resources=non_compliant_resources,
)
def aurora_mysql_backtracking_enabled():
def aurora_mysql_backtracking_enabled(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters:
if cluster["Engine"] == "aurora-mysql" and cluster.get("EarliestBacktrackTime", None) == None:
if (
cluster["Engine"] == "aurora-mysql"
and cluster.get("EarliestBacktrackTime", None) == None
):
non_compliant_resources.append(cluster["DBClusterArn"])
else:
compliant_resources.append(cluster["DBClusterArn"])
@ -47,12 +64,11 @@ def aurora_mysql_backtracking_enabled():
non_compliant_resources=non_compliant_resources,
)
def db_instance_backup_enabled():
def db_instance_backup_enabled(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters:
if "BackupRetentionPeriod" in cluster:
compliant_resources.append(cluster["DBClusterArn"])
@ -65,12 +81,11 @@ def db_instance_backup_enabled():
non_compliant_resources=non_compliant_resources,
)
def rds_cluster_auto_minor_version_upgrade_enable():
def rds_cluster_auto_minor_version_upgrade_enable(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters:
if cluster["Engine"] == "docdb" or cluster.get("AutoMinorVersionUpgrade"):
compliant_resources.append(cluster["DBClusterArn"])
@ -83,12 +98,11 @@ def rds_cluster_auto_minor_version_upgrade_enable():
non_compliant_resources=non_compliant_resources,
)
def rds_cluster_default_admin_check():
def rds_cluster_default_admin_check(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters:
if cluster["MasterUsername"] not in ["admin", "postgres"]:
compliant_resources.append(cluster["DBClusterArn"])
@ -101,12 +115,11 @@ def rds_cluster_default_admin_check():
non_compliant_resources=non_compliant_resources,
)
def rds_cluster_deletion_protection_enabled():
def rds_cluster_deletion_protection_enabled(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters:
if cluster["DeletionProtection"]:
compliant_resources.append(cluster["DBClusterArn"])
@ -119,12 +132,11 @@ def rds_cluster_deletion_protection_enabled():
non_compliant_resources=non_compliant_resources,
)
def rds_cluster_encrypted_at_rest():
def rds_cluster_encrypted_at_rest(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters:
if cluster["StorageEncrypted"]:
compliant_resources.append(cluster["DBClusterArn"])
@ -137,14 +149,15 @@ def rds_cluster_encrypted_at_rest():
non_compliant_resources=non_compliant_resources,
)
def rds_cluster_iam_authentication_enabled():
def rds_cluster_iam_authentication_enabled(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters:
if cluster["Engine"] == "docdb" or cluster.get("IAMDatabaseAuthenticationEnabled"):
if cluster["Engine"] == "docdb" or cluster.get(
"IAMDatabaseAuthenticationEnabled"
):
compliant_resources.append(cluster["DBClusterArn"])
else:
non_compliant_resources.append(cluster["DBClusterArn"])
@ -155,12 +168,11 @@ def rds_cluster_iam_authentication_enabled():
non_compliant_resources=non_compliant_resources,
)
def rds_cluster_multi_az_enabled():
def rds_cluster_multi_az_enabled(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters:
if len(cluster.get("AvailabilityZones", [])) > 1:
compliant_resources.append(cluster["DBClusterArn"])
@ -173,17 +185,22 @@ def rds_cluster_multi_az_enabled():
non_compliant_resources=non_compliant_resources,
)
def rds_db_security_group_not_allowed():
def rds_db_security_group_not_allowed(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
security_groups = ec2_client.describe_security_groups()["SecurityGroups"]
default_security_group_ids = [i["GroupId"] for i in security_groups if i["GroupName"] == "default"]
clusters = self.db_clusters
security_groups = self.ec2_client.describe_security_groups()["SecurityGroups"]
default_security_group_ids = [
i["GroupId"] for i in security_groups if i["GroupName"] == "default"
]
for cluster in clusters:
db_security_groups = [i["VpcSecurityGroupId"] for i in cluster["VpcSecurityGroups"] if i["Status"] == "active"]
db_security_groups = [
i["VpcSecurityGroupId"]
for i in cluster["VpcSecurityGroups"]
if i["Status"] == "active"
]
for default_security_group_id in default_security_group_ids:
if default_security_group_id in db_security_groups:
@ -198,12 +215,11 @@ def rds_db_security_group_not_allowed():
non_compliant_resources=non_compliant_resources,
)
def rds_enhanced_monitoring_enabled():
def rds_enhanced_monitoring_enabled(self):
compliant_resources = []
non_compliant_resources = []
instances = client.describe_db_instances()["DBInstances"]
instances = self.db_instances
for instance in instances:
if instance.get("MonitoringInterval", 0):
compliant_resources.append(instance["DBInstanceArn"])
@ -216,12 +232,11 @@ def rds_enhanced_monitoring_enabled():
non_compliant_resources=non_compliant_resources,
)
def rds_instance_public_access_check():
def rds_instance_public_access_check(self):
compliant_resources = []
non_compliant_resources = []
instances = client.describe_db_instances()["DBInstances"]
instances = self.db_instances
for instance in instances:
if instance["PubliclyAccessible"]:
non_compliant_resources.append(instance["DBInstanceArn"])
@ -234,20 +249,21 @@ def rds_instance_public_access_check():
non_compliant_resources=non_compliant_resources,
)
def rds_logging_enabled():
def rds_logging_enabled(self):
compliant_resources = []
non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
logs_for_engine = {
"aurora-mysql": ["audit", "error", "general", "slowquery"],
"aurora-postgresql": ["postgresql"],
"docdb": ["audit", "profiler"]
"docdb": ["audit", "profiler"],
}
for cluster in clusters:
if sorted(cluster["EnabledCloudwatchLogsExports"]) == logs_for_engine.get(cluster["Engine"]):
if sorted(cluster["EnabledCloudwatchLogsExports"]) == logs_for_engine.get(
cluster["Engine"]
):
compliant_resources.append(cluster["DBClusterArn"])
else:
non_compliant_resources.append(cluster["DBClusterArn"])
@ -258,12 +274,13 @@ def rds_logging_enabled():
non_compliant_resources=non_compliant_resources,
)
def rds_snapshot_encrypted():
def rds_snapshot_encrypted(self):
compliant_resources = []
non_compliant_resources = []
cluster_snapshots = client.describe_db_cluster_snapshots()["DBClusterSnapshots"]
cluster_snapshots = self.client.describe_db_cluster_snapshots()[
"DBClusterSnapshots"
]
for snapshot in cluster_snapshots:
if snapshot.get("StorageEncrypted") == True:
@ -276,3 +293,6 @@ def rds_snapshot_encrypted():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = RDSRuleChecker

View File

@ -1,20 +1,31 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
import botocore.exceptions
client = boto3.client("s3")
sts_client = boto3.client("sts")
s3control_client = boto3.client("s3control")
backup_client = boto3.client("backup")
class S3RuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("s3")
self.sts_client = boto3.client("sts")
self.s3control_client = boto3.client("s3control")
self.backup_client = boto3.client("backup")
@cached_property
def account_id(self):
return self.sts_client.get_caller_identity().get("Account")
def s3_access_point_in_vpc_only():
@cached_property
def buckets(self):
return self.client.list_buckets()["Buckets"]
def s3_access_point_in_vpc_only(self):
compliant_resources = []
non_compliant_resources = []
account_id = sts_client.get_caller_identity().get("Account")
access_points = s3control_client.list_access_points(AccountId=account_id)["AccessPointList"]
access_points = self.s3control_client.list_access_points(
AccountId=self.account_id
)["AccessPointList"]
for access_point in access_points:
if access_point["NetworkOrigin"] == "VPC":
compliant_resources.append(access_point["AccessPointArn"])
@ -27,18 +38,21 @@ def s3_access_point_in_vpc_only():
non_compliant_resources=non_compliant_resources,
)
def s3_bucket_default_lock_enabled():
def s3_bucket_default_lock_enabled(self):
compliant_resources = []
non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets:
for bucket in self.buckets:
try:
response = client.get_object_lock_configuration(Bucket=bucket["Name"])
response = self.client.get_object_lock_configuration(
Bucket=bucket["Name"]
)
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "ObjectLockConfigurationNotFoundError":
if (
e.response["Error"]["Code"]
== "ObjectLockConfigurationNotFoundError"
):
non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else:
raise e
@ -49,14 +63,12 @@ def s3_bucket_default_lock_enabled():
non_compliant_resources=non_compliant_resources,
)
def s3_bucket_level_public_access_prohibited():
def s3_bucket_level_public_access_prohibited(self):
compliant_resources = []
non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets:
response = client.get_public_access_block(Bucket=bucket["Name"])
for bucket in self.buckets:
response = self.client.get_public_access_block(Bucket=bucket["Name"])
if False not in response["PublicAccessBlockConfiguration"].values():
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else:
@ -68,14 +80,12 @@ def s3_bucket_level_public_access_prohibited():
non_compliant_resources=non_compliant_resources,
)
def s3_bucket_logging_enabled():
def s3_bucket_logging_enabled(self):
compliant_resources = []
non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets:
response = client.get_bucket_logging(Bucket=bucket["Name"])
for bucket in self.buckets:
response = self.client.get_bucket_logging(Bucket=bucket["Name"])
if "LoggingEnabled" in response:
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else:
@ -87,14 +97,12 @@ def s3_bucket_logging_enabled():
non_compliant_resources=non_compliant_resources,
)
def s3_bucket_ssl_requests_only():
def s3_bucket_ssl_requests_only(self):
compliant_resources = []
non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets:
policy = client.get_bucket_policy(Bucket=bucket["Name"])["Policy"]
for bucket in self.buckets:
policy = self.client.get_bucket_policy(Bucket=bucket["Name"])["Policy"]
if "aws:SecureTransport" in policy:
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else:
@ -106,14 +114,12 @@ def s3_bucket_ssl_requests_only():
non_compliant_resources=non_compliant_resources,
)
def s3_bucket_versioning_enabled():
def s3_bucket_versioning_enabled(self):
compliant_resources = []
non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets:
response = client.get_bucket_versioning(Bucket=bucket["Name"])
for bucket in self.buckets:
response = self.client.get_bucket_versioning(Bucket=bucket["Name"])
if "Status" in response and response["Status"] == "Enabled":
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else:
@ -125,16 +131,21 @@ def s3_bucket_versioning_enabled():
non_compliant_resources=non_compliant_resources,
)
def s3_default_encryption_kms():
def s3_default_encryption_kms(self):
compliant_resources = []
non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets:
configuration = client.get_bucket_encryption(Bucket=bucket["Name"])["ServerSideEncryptionConfiguration"]
for bucket in self.buckets:
configuration = self.client.get_bucket_encryption(Bucket=bucket["Name"])[
"ServerSideEncryptionConfiguration"
]
if configuration["Rules"][0]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] == "aws:kms":
if (
configuration["Rules"][0]["ApplyServerSideEncryptionByDefault"][
"SSEAlgorithm"
]
== "aws:kms"
):
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else:
non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
@ -145,14 +156,14 @@ def s3_default_encryption_kms():
non_compliant_resources=non_compliant_resources,
)
def s3_event_notifications_enabled():
def s3_event_notifications_enabled(self):
compliant_resources = []
non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets:
configuration = client.get_bucket_notification_configuration(Bucket=bucket["Name"])
for bucket in self.buckets:
configuration = self.client.get_bucket_notification_configuration(
Bucket=bucket["Name"]
)
if (
"LambdaFunctionConfigurations" in configuration
or "QueueConfigurations" in configuration
@ -168,14 +179,14 @@ def s3_event_notifications_enabled():
non_compliant_resources=non_compliant_resources,
)
def s3_last_backup_recovery_point_created():
def s3_last_backup_recovery_point_created(self):
compliant_resources = []
non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets:
backups = backup_client.list_recovery_points_by_resource(ResourceArn=f"arn:aws:s3:::{bucket['Name']}")
for bucket in self.buckets:
backups = self.backup_client.list_recovery_points_by_resource(
ResourceArn=f"arn:aws:s3:::{bucket['Name']}"
)
if backups["RecoveryPoints"] != []:
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
@ -188,18 +199,18 @@ def s3_last_backup_recovery_point_created():
non_compliant_resources=non_compliant_resources,
)
def s3_lifecycle_policy_check():
def s3_lifecycle_policy_check(self):
compliant_resources = []
non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets:
for bucket in self.buckets:
try:
configuration = client.get_bucket_lifecycle_configuration(Bucket=bucket["Name"])
configuration = self.client.get_bucket_lifecycle_configuration(
Bucket=bucket["Name"]
)
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "NoSuchLifecycleConfiguration":
if e.response["Error"]["Code"] == "NoSuchLifecycleConfiguration":
non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else:
raise e
@ -209,3 +220,6 @@ def s3_lifecycle_policy_check():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = S3RuleChecker

View File

@ -1,19 +1,24 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
import datetime
from datetime import datetime, timedelta
from dateutil.tz import tzlocal
client = boto3.client("secretsmanager")
class SecretsManagerRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("secretsmanager")
@cached_property
def secrets(self):
return self.client.list_secrets()["SecretList"]
def secretsmanager_rotation_enabled_check():
def secretsmanager_rotation_enabled_check(self):
compliant_resources = []
non_compliant_resources = []
secrets = client.list_secrets()["SecretList"]
for secret in secrets:
if secret.get("RotationEnabled") == True:
for secret in self.secrets:
if secret.get("RotationEnabled", False):
compliant_resources.append(secret["ARN"])
else:
non_compliant_resources.append(secret["ARN"])
@ -24,20 +29,18 @@ def secretsmanager_rotation_enabled_check():
non_compliant_resources=non_compliant_resources,
)
def secretsmanager_scheduled_rotation_success_check():
def secretsmanager_scheduled_rotation_success_check(self):
compliant_resources = []
non_compliant_resources = []
secrets = client.list_secrets()["SecretList"]
for secret in secrets:
if secret.get("RotationEnabled") == True:
if 'LastRotatedDate' not in secret:
for secret in self.secrets:
if secret.get("RotationEnabled", False):
if "LastRotatedDate" not in secret:
non_compliant_resources.append(secret["ARN"])
continue
now = datetime.datetime.now(tz=tzlocal())
rotation_period = datetime.timedelta(
now = datetime.now(tz=tzlocal())
rotation_period = timedelta(
days=secret["RotationRules"]["AutomaticallyAfterDays"] + 2
) # 최대 2일 지연 가능 (aws)
elapsed_time_after_rotation = now - secret["LastRotatedDate"]
@ -53,22 +56,20 @@ def secretsmanager_scheduled_rotation_success_check():
non_compliant_resources=non_compliant_resources,
)
def secretsmanager_secret_periodic_rotation():
def secretsmanager_secret_periodic_rotation(self):
compliant_resources = []
non_compliant_resources = []
secrets = client.list_secrets()["SecretList"]
for secret in secrets:
for secret in self.secrets:
if secret.get("RotationEnabled") == True:
if 'LastRotatedDate' not in secret:
if "LastRotatedDate" not in secret:
non_compliant_resources.append(secret["ARN"])
continue
now = datetime.datetime.now(tz=tzlocal())
now = datetime.now(tz=tzlocal())
elapsed_time_after_rotation = now - secret["LastRotatedDate"]
if elapsed_time_after_rotation > datetime.timedelta(days=90):
if elapsed_time_after_rotation > timedelta(days=90):
non_compliant_resources.append(secret["ARN"])
else:
compliant_resources.append(secret["ARN"])
@ -78,3 +79,6 @@ def secretsmanager_secret_periodic_rotation():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = SecretsManagerRuleChecker

View File

@ -1,19 +1,19 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
import boto3
client = boto3.client("securityhub")
class SecurityHubRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("securityhub")
self.sts_client = boto3.client("sts")
sts_client = boto3.client("sts")
def securityhub_enabled():
def securityhub_enabled(self):
compliant_resources = []
non_compliant_resources = []
aws_account_id = sts_client.get_caller_identity()["Account"]
aws_account_id = self.sts_client.get_caller_identity()["Account"]
try:
hub = client.describe_hub()
hub = self.client.describe_hub()
compliant_resources.append(aws_account_id)
except Exception as e:
if e.__class__.__name__ == "InvalidAccessException":
@ -26,3 +26,6 @@ def securityhub_enabled():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = SecurityHubRuleChecker

View File

@ -1,17 +1,25 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
client = boto3.client("sns")
class SNSRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("sns")
@cached_property
def topics(self):
topics = self.client.list_topics()["Topics"]
return [
self.client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"]
for topic in topics
]
def sns_encrypted_kms():
def sns_encrypted_kms(self):
compliant_resources = []
non_compliant_resources = []
topics = client.list_topics()["Topics"]
for topic in topics:
topic = client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"]
for topic in self.topics:
if "KmsMasterKeyId" in topic:
compliant_resources.append(topic["TopicArn"])
else:
@ -23,19 +31,19 @@ def sns_encrypted_kms():
non_compliant_resources=non_compliant_resources,
)
def sns_topic_message_delivery_notification_enabled():
def sns_topic_message_delivery_notification_enabled(self):
compliant_resources = []
non_compliant_resources = []
topics = client.list_topics()["Topics"]
for topic in topics:
topic = client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"]
for topic in self.topics:
notification_roles = [
attribute
for attribute in topic.keys()
if attribute.endswith("FeedbackRoleArn")
]
for key in topic.keys():
if key.endswith("FeedbackRoleArn") == True:
if notification_roles:
compliant_resources.append(topic["TopicArn"])
break
else:
non_compliant_resources.append(topic["TopicArn"])
@ -44,3 +52,6 @@ def sns_topic_message_delivery_notification_enabled():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = SNSRuleChecker

View File

@ -1,11 +0,0 @@
from models import RuleCheckResult
import boto3
# client = boto3.client("")
def required_tags():
return RuleCheckResult(
passed=False, compliant_resources=[], non_compliant_resources=[]
)

View File

@ -1,13 +1,18 @@
from models import RuleCheckResult
from pprint import pprint
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
ec2 = boto3.client("ec2")
class VPCRuleChecker(RuleChecker):
def __init__(self):
self.ec2 = boto3.client("ec2")
@cached_property
def security_group_rules(self):
return self.ec2.describe_security_group_rules()["SecurityGroupRules"]
def ec2_transit_gateway_auto_vpc_attach_disabled():
response = ec2.describe_transit_gateways()
def ec2_transit_gateway_auto_vpc_attach_disabled(self):
response = self.ec2.describe_transit_gateways()
non_compliant_resources = [
resource["TransitGatewayArn"]
@ -18,7 +23,12 @@ def ec2_transit_gateway_auto_vpc_attach_disabled():
]
compliant_resources = list(
set([resource["TransitGatewayArn"] for resource in response["TransitGateways"]])
set(
[
resource["TransitGatewayArn"]
for resource in response["TransitGateways"]
]
)
- set(non_compliant_resources)
)
@ -28,10 +38,7 @@ def ec2_transit_gateway_auto_vpc_attach_disabled():
non_compliant_resources=non_compliant_resources,
)
def restricted_ssh():
response = ec2.describe_security_group_rules()
def restricted_ssh(self):
non_compliant_resources = [
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in filter(
@ -39,7 +46,7 @@ def restricted_ssh():
and x["FromPort"] <= 22
and x["ToPort"] >= 22
and x.get("CidrIpv4") == "0.0.0.0/0",
response["SecurityGroupRules"],
self.security_group_rules,
)
]
@ -47,7 +54,7 @@ def restricted_ssh():
set(
[
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in response["SecurityGroupRules"]
for resource in self.security_group_rules
]
)
- set(non_compliant_resources)
@ -58,8 +65,7 @@ def restricted_ssh():
non_compliant_resources=non_compliant_resources,
)
def restricted_common_ports():
def restricted_common_ports(self):
common_ports = [
22, # SSH
80, # HTTP
@ -69,7 +75,6 @@ def restricted_common_ports():
6379, # Redis
11211, # Memcached
]
response = ec2.describe_security_group_rules()
non_compliant_resources = [
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
@ -78,14 +83,14 @@ def restricted_common_ports():
and x["FromPort"] in common_ports
and x["ToPort"] in common_ports
and x.get("PrefixListId") is None,
response["SecurityGroupRules"],
self.security_group_rules,
)
]
compliant_resources = list(
set(
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in response["SecurityGroupRules"]
for resource in self.security_group_rules
)
- set(non_compliant_resources)
)
@ -96,13 +101,14 @@ def restricted_common_ports():
non_compliant_resources=non_compliant_resources,
)
def subnet_auto_assign_public_ip_disabled():
response = ec2.describe_subnets()
def subnet_auto_assign_public_ip_disabled(self):
response = self.ec2.describe_subnets()
non_compliant_resources = [
resource["SubnetId"]
for resource in filter(lambda x: x["MapPublicIpOnLaunch"], response["Subnets"])
for resource in filter(
lambda x: x["MapPublicIpOnLaunch"], response["Subnets"]
)
]
compliant_resources = list(
@ -116,9 +122,8 @@ def subnet_auto_assign_public_ip_disabled():
non_compliant_resources=non_compliant_resources,
)
def vpc_default_security_group_closed():
response = ec2.describe_security_groups(
def vpc_default_security_group_closed(self):
response = self.ec2.describe_security_groups(
Filters=[{"Name": "group-name", "Values": ["default"]}]
)
@ -141,14 +146,13 @@ def vpc_default_security_group_closed():
non_compliant_resources=non_compliant_resources,
)
def vpc_flow_logs_enabled():
response = ec2.describe_flow_logs()
def vpc_flow_logs_enabled(self):
response = self.ec2.describe_flow_logs()
flow_log_enabled_vpcs = [
resource["ResourceId"] for resource in response["FlowLogs"]
]
response = ec2.describe_vpcs()
response = self.ec2.describe_vpcs()
non_compliant_resources = [
resource["VpcId"]
@ -168,13 +172,14 @@ def vpc_flow_logs_enabled():
non_compliant_resources=non_compliant_resources,
)
def vpc_network_acl_unused_check():
response = ec2.describe_network_acls()
def vpc_network_acl_unused_check(self):
response = self.ec2.describe_network_acls()
non_compliant_resources = [
resource["NetworkAclId"]
for resource in filter(lambda x: not x["Associations"], response["NetworkAcls"])
for resource in filter(
lambda x: not x["Associations"], response["NetworkAcls"]
)
]
compliant_resources = list(
@ -188,9 +193,8 @@ def vpc_network_acl_unused_check():
non_compliant_resources=non_compliant_resources,
)
def vpc_peering_dns_resolution_check():
response = ec2.describe_vpc_peering_connections()
def vpc_peering_dns_resolution_check(self):
response = self.ec2.describe_vpc_peering_connections()
non_compliant_resources = [
resource["VpcPeeringConnectionId"]
@ -223,10 +227,7 @@ def vpc_peering_dns_resolution_check():
non_compliant_resources=non_compliant_resources,
)
def vpc_sg_open_only_to_authorized_ports():
response = ec2.describe_security_group_rules()
def vpc_sg_open_only_to_authorized_ports(self):
authorized_port = [
# 80
]
@ -238,14 +239,14 @@ def vpc_sg_open_only_to_authorized_ports():
and (x.get("CidrIpv4") == "0.0.0.0/0" or x.get("CidrIpv6") == "::/0")
and x["FromPort"] not in authorized_port
and x["ToPort"] not in authorized_port,
response["SecurityGroupRules"],
self.security_group_rules,
)
]
compliant_resources = list(
set(
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in response["SecurityGroupRules"]
for resource in self.security_group_rules
)
- set(non_compliant_resources)
)
@ -255,3 +256,6 @@ def vpc_sg_open_only_to_authorized_ports():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = VPCRuleChecker

View File

@ -1,20 +1,48 @@
from models import RuleCheckResult
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
client = boto3.client("wafv2")
global_client = boto3.client("wafv2", region_name="us-east-1")
class WAFv2RuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("wafv2")
self.global_client = boto3.client("wafv2", region_name="us-east-1")
@cached_property
def regional_web_acls(self):
return self.client.list_web_acls(Scope="REGIONAL")["WebACLs"]
def wafv2_logging_enabled():
@cached_property
def cloudfront_web_acls(self):
return self.global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"]
@cached_property
def regional_rule_groups(self):
rule_groups = self.client.list_rule_groups(Scope="REGIONAL")["RuleGroups"]
return [
self.client.get_rule_group(ARN=rule_group["ARN"])["RuleGroup"]
for rule_group in rule_groups
]
@cached_property
def cloudfront_rule_groups(self):
rule_groups = self.global_client.list_rule_groups(Scope="CLOUDFRONT")[
"RuleGroups"
]
return [
self.global_client.get_rule_group(ARN=rule_group["ARN"])["RuleGroup"]
for rule_group in rule_groups
]
def wafv2_logging_enabled(self):
compliant_resources = []
non_compliant_resources = []
regional_web_acls = client.list_web_acls(Scope="REGIONAL")["WebACLs"]
cloudfront_web_acls = global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"]
for web_acl in regional_web_acls:
for web_acl in self.regional_web_acls:
try:
configuration = client.get_logging_configuration(ResourceArn=web_acl["ARN"])
configuration = self.client.get_logging_configuration(
ResourceArn=web_acl["ARN"]
)
compliant_resources.append(web_acl["ARN"])
except Exception as e:
if e.__class__.__name__ == "WAFNonexistentItemException":
@ -22,9 +50,11 @@ def wafv2_logging_enabled():
else:
raise e
for web_acl in cloudfront_web_acls:
for web_acl in self.cloudfront_web_acls:
try:
configuration = global_client.get_logging_configuration(ResourceArn=web_acl["ARN"])
configuration = self.global_client.get_logging_configuration(
ResourceArn=web_acl["ARN"]
)
compliant_resources.append(web_acl["ARN"])
except Exception as e:
if e.__class__.__name__ == "WAFNonexistentItemException":
@ -38,24 +68,18 @@ def wafv2_logging_enabled():
non_compliant_resources=non_compliant_resources,
)
def wafv2_rulegroup_logging_enabled():
def wafv2_rulegroup_logging_enabled(self):
compliant_resources = []
non_compliant_resources = []
regional_rule_groups = client.list_rule_groups(Scope="REGIONAL")["RuleGroups"]
cloudfront_rule_groups = global_client.list_rule_groups(Scope="CLOUDFRONT")["RuleGroups"]
for rule_group in regional_rule_groups:
configuration = client.get_rule_group(ARN=rule_group["ARN"])
if configuration["RuleGroup"]["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True:
for rule_group in self.regional_rule_groups:
if rule_group["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True:
compliant_resources.append(rule_group["ARN"])
else:
non_compliant_resources.append(rule_group["ARN"])
for rule_group in cloudfront_rule_groups:
configuration = global_client.get_rule_group(ARN=rule_group["ARN"])
if configuration["RuleGroup"]["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True:
for rule_group in self.cloudfront_rule_groups:
if rule_group["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True:
compliant_resources.append(rule_group["ARN"])
else:
non_compliant_resources.append(rule_group["ARN"])
@ -66,23 +90,18 @@ def wafv2_rulegroup_logging_enabled():
non_compliant_resources=non_compliant_resources,
)
def wafv2_rulegroup_not_empty():
def wafv2_rulegroup_not_empty(self):
compliant_resources = []
non_compliant_resources = []
regional_rule_groups = client.list_rule_groups(Scope="REGIONAL")["RuleGroups"]
cloudfront_rule_groups = global_client.list_rule_groups(Scope="CLOUDFRONT")["RuleGroups"]
for rule_group in regional_rule_groups:
configuration = client.get_rule_group(ARN=rule_group["ARN"])
if len(configuration["RuleGroup"]["Rules"]) > 0:
for rule_group in self.regional_rule_groups:
if len(rule_group["Rules"]) > 0:
compliant_resources.append(rule_group["ARN"])
else:
non_compliant_resources.append(rule_group["ARN"])
for rule_group in cloudfront_rule_groups:
configuration = global_client.get_rule_group(ARN=rule_group["ARN"])
if len(configuration["RuleGroup"]["Rules"]) > 0:
for rule_group in self.cloudfront_rule_groups:
if len(rule_group["Rules"]) > 0:
compliant_resources.append(rule_group["ARN"])
else:
non_compliant_resources.append(rule_group["ARN"])
@ -93,21 +112,23 @@ def wafv2_rulegroup_not_empty():
non_compliant_resources=non_compliant_resources,
)
def wafv2_webacl_not_empty():
def wafv2_webacl_not_empty(self):
compliant_resources = []
non_compliant_resources = []
regional_web_acls = client.list_web_acls(Scope="REGIONAL")["WebACLs"]
cloudfront_web_acls = global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"]
for web_acl in regional_web_acls:
response = client.get_web_acl(Id=web_acl["Id"], Name=web_acl["Name"], Scope="REGIONAL")
for web_acl in self.regional_web_acls:
response = self.client.get_web_acl(
Id=web_acl["Id"], Name=web_acl["Name"], Scope="REGIONAL"
)
if len(response["WebACL"]["Rules"]) > 0:
compliant_resources.append(web_acl["ARN"])
else:
non_compliant_resources.append(web_acl["ARN"])
for web_acl in cloudfront_web_acls:
response = global_client.get_web_acl(Id=web_acl["Id"], Name=web_acl["Name"], Scope="CLOUDFRONT")
for web_acl in self.cloudfront_web_acls:
response = self.global_client.get_web_acl(
Id=web_acl["Id"], Name=web_acl["Name"], Scope="CLOUDFRONT"
)
if len(response["WebACL"]["Rules"]) > 0:
compliant_resources.append(web_acl["ARN"])
else:
@ -118,3 +139,6 @@ def wafv2_webacl_not_empty():
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
rule_checker = WAFv2RuleChecker