Refactor to cache AWS resources
parent 4854f11021
commit fb94b40c23
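What the commit does, in miniature: each service module previously created its boto3 clients at import time and re-ran the list/describe calls inside every rule function; after this change each module defines a RuleChecker subclass whose expensive lookups sit behind functools.cached_property, so all rules run against one checker instance share a single API response. A minimal sketch of that caching behaviour (illustrative names only, not code from this repository):

from functools import cached_property

class ExampleChecker:
    def __init__(self):
        self.api_calls = 0  # stands in for a boto3 list/describe call

    @cached_property
    def resources(self):
        # Computed on first access, then stored on the instance and reused.
        self.api_calls += 1
        return ["resource-a", "resource-b"]

checker = ExampleChecker()
for _ in range(3):
    _ = checker.resources  # only the first access "calls the API"
assert checker.api_calls == 1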
bp-base.json (17 changed lines)
@@ -124,16 +124,16 @@
       "autoscaling-multiple-az": {
         "enabled": true,
         "level": 2
+      },
+      "autoscaling-launch-template": {
+        "enabled": true,
+        "level": 2
       }
     }
   },
   "EC2": {
     "enabled": true,
     "rules": {
-      "autoscaling-launch-template": {
-        "enabled": true,
-        "level": 2
-      },
       "ec2-ebs-encryption-by-default": {
         "enabled": true,
         "level": 2
@@ -432,15 +432,6 @@
       }
     }
   },
-  "Tags": {
-    "enabled": true,
-    "rules": {
-      "required-tags": {
-        "enabled": true,
-        "level": 2
-      }
-    }
-  },
   "S3": {
     "enabled": true,
     "rules": {
main.py (12 changed lines)
@@ -2,6 +2,8 @@ from InquirerLib import prompt
 from InquirerLib.InquirerPy.utils import InquirerPyKeybindings
 from InquirerLib.InquirerPy.base import Choice
 from colorama import Style, Fore
+from datetime import datetime
+from importlib import import_module

 from utils import *
 import services
@@ -40,12 +42,18 @@ def perform_bp_rules_check(bp):
         if service_name == "Lambda":
             service_name = "_lambda"

-        module = getattr(services, convert_snake_case(service_name))
+        now = datetime.now()
+        rule_checker = getattr(
+            services, convert_snake_case(service_name)
+        ).rule_checker()

         for rule_name, rule in service["rules"].items():
             if not rule["enabled"]:
                 continue
-            rule["result"] = getattr(module, convert_snake_case(rule_name))()
+            rule["result"] = rule_checker.check_rule(convert_snake_case(rule_name))
+
+        elapsed_time = datetime.now() - now
+        print(convert_snake_case(service_name), elapsed_time.total_seconds())
     return bp
models.py (10 changed lines)
@@ -1,4 +1,5 @@
 from pydantic import BaseModel
+from utils import convert_snake_case
 from typing import List


@@ -6,3 +7,12 @@ class RuleCheckResult(BaseModel):
     passed: bool
     compliant_resources: List[str]
     non_compliant_resources: List[str]
+
+
+class RuleChecker:
+    def __init__(self):
+        pass
+
+    def check_rule(self, rule_name) -> RuleCheckResult:
+        check_func = getattr(self, convert_snake_case(rule_name))
+        return check_func()
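As a usage sketch of the new base class (self-contained; convert_snake_case below is a stand-in for utils.convert_snake_case, which is not part of this diff and is assumed to map rule keys such as "lambda-dlq-check" to "lambda_dlq_check"), a service checker only needs one method per rule name and check_rule resolves it by reflection:

from pydantic import BaseModel
from typing import List


def convert_snake_case(name: str) -> str:
    # Stand-in for the real helper in utils.py.
    return name.replace("-", "_").lower()


class RuleCheckResult(BaseModel):
    passed: bool
    compliant_resources: List[str]
    non_compliant_resources: List[str]


class RuleChecker:
    def check_rule(self, rule_name) -> RuleCheckResult:
        return getattr(self, convert_snake_case(rule_name))()


class DemoChecker(RuleChecker):
    def demo_rule(self):
        return RuleCheckResult(
            passed=True, compliant_resources=["arn:demo"], non_compliant_resources=[]
        )


print(DemoChecker().check_rule("demo-rule").passed)  # True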
@@ -16,7 +16,6 @@ from . import (
     elasticache,
     iam,
     _lambda,
-    tags,
     s3,
     secrets_manager,
     security_hub,
@@ -1,91 +1,106 @@ (Lambda rules module)
The module-level lambda/iam clients and standalone rule functions are rewritten as a checker class; the new version of the module:

from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
import json


class LambdaRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("lambda")
        self.iam_client = boto3.client("iam")

    @cached_property
    def functions(self):
        return self.client.list_functions()["Functions"]

    def lambda_dlq_check(self):
        compliant_resource = []
        non_compliant_resources = []

        for function in self.functions:
            if "DeadLetterConfig" in function:
                compliant_resource.append(function["FunctionArn"])
            else:
                non_compliant_resources.append(function["FunctionArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def lambda_function_public_access_prohibited(self):
        compliant_resource = []
        non_compliant_resources = []

        for function in self.functions:
            try:
                policy = json.loads(
                    self.client.get_policy(FunctionName=function["FunctionName"])[
                        "Policy"
                    ]
                )
                for statement in policy["Statement"]:
                    if statement["Principal"] in [
                        "*",
                        "",
                        '{"AWS": ""}',
                        '{"AWS": "*"}',
                    ]:
                        non_compliant_resources.append(function["FunctionArn"])
                        break
                else:
                    compliant_resource.append(function["FunctionArn"])
            except Exception as e:
                if e.__class__.__name__ == "ResourceNotFoundException":
                    non_compliant_resources.append(function["FunctionArn"])
                else:
                    raise e

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def lambda_function_settings_check(self):
        compliant_resource = []
        non_compliant_resources = []

        default_timeout = 3
        default_memory_size = 128

        for function in self.functions:
            if (
                function["Timeout"] == default_timeout
                or function["MemorySize"] == default_memory_size
            ):
                non_compliant_resources.append(function["FunctionArn"])
            else:
                compliant_resource.append(function["FunctionArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def lambda_inside_vpc(self):
        compliant_resource = []
        non_compliant_resources = []

        for function in self.functions:
            if "VpcConfig" in function:
                compliant_resource.append(function["FunctionArn"])
            else:
                non_compliant_resources.append(function["FunctionArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = LambdaRuleChecker
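One possible way to exercise the refactored module directly (assumes AWS credentials and a default region are configured; the calls hit the live Lambda API, and the second rule reuses the cached list_functions response):

checker = LambdaRuleChecker()
dlq = checker.lambda_dlq_check()
vpc = checker.lambda_inside_vpc()
print(dlq.passed, len(dlq.non_compliant_resources), vpc.passed)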
services/alb.py (239 changed lines)
@@ -1,123 +1,150 @@
The module-level elbv2/wafv2 clients and standalone rule functions are rewritten as a checker class:

from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class ALBRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("elbv2")
        self.wafv2_client = boto3.client("wafv2")

    @cached_property
    def load_balancers(self):
        return self.client.describe_load_balancers()["LoadBalancers"]

    @cached_property
    def load_balancer_attributes(self):
        responses = [
            self.client.describe_load_balancer_attributes(
                LoadBalancerArn=load_balancer["LoadBalancerArn"]
            )
            for load_balancer in self.load_balancers
        ]
        return {
            load_balancer["LoadBalancerArn"]: response
            for load_balancer, response in zip(self.load_balancers, responses)
        }

alb_http_drop_invalid_header_enabled, elb_cross_zone_load_balancing_enabled, elb_deletion_protection_enabled and elb_logging_enabled become methods that filter the cached self.load_balancer_attributes entry for each load balancer (attribute keys routing.http.drop_invalid_header_fields.enabled, load_balancing.cross_zone.enabled, deletion_protection.enabled and access_logs.s3.enabled respectively) instead of calling describe_load_balancer_attributes inside every rule; alb_waf_enabled becomes a method that still calls self.wafv2_client.get_web_acl_for_resource per load balancer and checks for a "WebACL" key in the response. The module now ends with:

rule_checker = ALBRuleChecker
@@ -1,189 +1,203 @@ (API Gateway rules module)
The module-level apigateway/apigatewayv2 clients and standalone rule functions are rewritten as a checker class:

from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class APIGatewayRuleChecker(RuleChecker):
    def __init__(self):
        self.v1_client = boto3.client("apigateway")
        self.v2_client = boto3.client("apigatewayv2")

    @cached_property
    def http_apis(self):
        return self.v2_client.get_apis()["Items"]

    @cached_property
    def rest_apis(self):
        return self.v1_client.get_rest_apis()["items"]

    @cached_property
    def rest_api_stages(self):
        responses = [
            self.v1_client.get_stages(
                restApiId=api["id"],
            )
            for api in self.rest_apis
        ]
        return {api["id"]: response for api, response in zip(self.rest_apis, responses)}

api_gwv2_access_logs_enabled and api_gwv2_authorization_type_configured become methods that iterate self.http_apis (still calling get_stages and get_routes per API); api_gw_associated_with_waf, api_gw_cache_enabled_and_encrypted, api_gw_execution_logging_enabled and api_gw_xray_enabled become methods that iterate self.rest_apis and read self.rest_api_stages[api["id"]] instead of calling get_rest_apis and get_stages inside every rule, keeping their existing stage checks (webAclArn, cachingEnabled / cacheDataEncrypted, loggingLevel, tracingEnabled). The module now ends with:

rule_checker = APIGatewayRuleChecker
@@ -1,41 +1,67 @@ (Auto Scaling rules module)
The module-level autoscaling client and standalone rule functions are rewritten as a checker class:

from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class ASGRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("autoscaling")

    @cached_property
    def asgs(self):
        return self.client.describe_auto_scaling_groups()["AutoScalingGroups"]

autoscaling_group_elb_healthcheck_required (groups attached to a load balancer or target group must use the ELB health-check type) and autoscaling_multiple_az (more than one Availability Zone) become methods that iterate self.asgs instead of calling describe_auto_scaling_groups per rule, and a new autoscaling_launch_template method is added that flags any group still defined by a LaunchConfigurationName. The module now ends with:

rule_checker = ASGRuleChecker
@@ -1,138 +1,152 @@ (CloudFront rules module)
The module-level cloudfront client and standalone rule functions are rewritten as a checker class:

from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class CloudFrontRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("cloudfront")

    @cached_property
    def distributions(self):
        return self.client.list_distributions()["DistributionList"]["Items"]

    @cached_property
    def distribution_details(self):
        responses = [
            self.client.get_distribution(Id=distribution["Id"])["Distribution"]
            for distribution in self.distributions
        ]
        return {
            distribution["Id"]: response
            for distribution, response in zip(self.distributions, responses)
        }

cloudfront_accesslogs_enabled, cloudfront_associated_with_waf, cloudfront_default_root_object_configured, cloudfront_no_deprecated_ssl_protocols, cloudfront_s3_origin_access_control_enabled and cloudfront_viewer_policy_https become methods that read self.distributions and, where the full configuration is needed, self.distribution_details[distribution["Id"]], instead of calling list_distributions and get_distribution inside each rule. The module now ends with:

rule_checker = CloudFrontRuleChecker
@@ -1,57 +1,60 @@ (CloudWatch rules module)
The module-level cloudwatch/logs clients and standalone rule functions are rewritten as a checker class:

from models import RuleCheckResult, RuleChecker
import boto3


class CloudWatchRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("cloudwatch")
        self.logs_client = boto3.client("logs")

    def cw_loggroup_retention_period_check(self):
        compliant_resources = []
        non_compliant_resources = []
        log_groups = self.logs_client.describe_log_groups()["logGroups"]

        # This rule should check if `retentionInDays` is less than n days.
        # But, instead of that, this will check if the retention setting is set to "Never expire" or not
        for log_group in log_groups:
            if "retentionInDays" in log_group:
                compliant_resources.append(log_group["logGroupArn"])
            else:
                non_compliant_resources.append(log_group["logGroupArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

cloudwatch_alarm_settings_check likewise becomes a method: it iterates self.client.describe_alarms()["MetricAlarms"] and compares each alarm against a parameters dict (MetricName required; Threshold, EvaluationPeriods, Period, ComparisonOperator, Statistic optional), flagging alarms whose configured values differ. The module now ends with:

rule_checker = CloudWatchRuleChecker
@@ -1,75 +1,85 @@ (CodeBuild / CodeDeploy rules module)
The module-level codebuild/codedeploy clients and standalone rule functions are rewritten as a checker class:

from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class CodeSeriesChecker(RuleChecker):
    def __init__(self):
        self.build_client = boto3.client("codebuild")
        self.deploy_client = boto3.client("codedeploy")

    @cached_property
    def projects(self):
        project_names = self.build_client.list_projects()["projects"]
        return self.build_client.batch_get_projects(names=project_names)["projects"]

codebuild_project_environment_privileged_check and codebuild_project_logging_enabled become methods that iterate self.projects, so project details are fetched once with a single batch_get_projects call rather than one call per project; codedeploy_auto_rollback_monitor_enabled becomes a method that lists each application's deployment groups and fetches them with one batch_get_deployment_groups call (replacing a get_deployment_group call per group) before requiring both alarmConfiguration and autoRollbackConfiguration to be enabled. The module now ends with:

rule_checker = CodeSeriesChecker
services/dynamodb.py
@ -1,153 +1,161 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
from datetime import datetime, timedelta
from dateutil.tz import tzlocal
import boto3


class DynamoDBRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("dynamodb")
        self.backup_client = boto3.client("backup")
        self.autoscaling_client = boto3.client("application-autoscaling")

    @cached_property
    def tables(self):
        table_names = self.client.list_tables()["TableNames"]
        return [
            self.client.describe_table(TableName=table_name)["Table"]
            for table_name in table_names
        ]

    def dynamodb_autoscaling_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for table in self.tables:
            if (
                table.get("BillingModeSummary", {}).get("BillingMode")
                == "PAY_PER_REQUEST"
            ):
                compliant_resources.append(table["TableArn"])
                continue

            scaling_policies = self.autoscaling_client.describe_scaling_policies(
                ServiceNamespace="dynamodb", ResourceId=f"table/{table['TableName']}"
            )["ScalingPolicies"]
            scaling_policy_dimensions = [
                policy["ScalableDimension"] for policy in scaling_policies
            ]

            if (
                "dynamodb:table:ReadCapacityUnits" in scaling_policy_dimensions
                and "dynamodb:table:WriteCapacityUnits" in scaling_policy_dimensions
            ):
                compliant_resources.append(table["TableArn"])
            else:
                non_compliant_resources.append(table["TableArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def dynamodb_last_backup_recovery_point_created(self):
        compliant_resources = []
        non_compliant_resources = []

        for table in self.tables:
            recovery_points = self.backup_client.list_recovery_points_by_resource(
                ResourceArn=table["TableArn"]
            )["RecoveryPoints"]
            if not recovery_points:
                non_compliant_resources.append(table["TableArn"])
                continue

            latest_recovery_point = sorted(
                [recovery_point["CreationDate"] for recovery_point in recovery_points]
            )[-1]

            if datetime.now(tz=tzlocal()) - latest_recovery_point > timedelta(days=1):
                non_compliant_resources.append(table["TableArn"])
            else:
                compliant_resources.append(table["TableArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def dynamodb_pitr_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for table in self.tables:
            backup = self.client.describe_continuous_backups(
                TableName=table["TableName"]
            )["ContinuousBackupsDescription"]

            if (
                backup["PointInTimeRecoveryDescription"]["PointInTimeRecoveryStatus"]
                == "ENABLED"
            ):
                compliant_resources.append(table["TableArn"])
            else:
                non_compliant_resources.append(table["TableArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def dynamodb_table_deletion_protection_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for table in self.tables:
            if table["DeletionProtectionEnabled"] == True:
                compliant_resources.append(table["TableArn"])
            else:
                non_compliant_resources.append(table["TableArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def dynamodb_table_encrypted_kms(self):
        compliant_resources = []
        non_compliant_resources = []

        for table in self.tables:
            if (
                "SSEDescription" in table
                and table["SSEDescription"]["Status"] == "ENABLED"
                and table["SSEDescription"]["SSEType"] == "KMS"
            ):
                compliant_resources.append(table["TableArn"])
            else:
                non_compliant_resources.append(table["TableArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def dynamodb_table_encryption_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for table in self.tables:
            if (
                "SSEDescription" in table
                and table["SSEDescription"]["Status"] == "ENABLED"
            ):
                compliant_resources.append(table["TableArn"])
            else:
                non_compliant_resources.append(table["TableArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = DynamoDBRuleChecker
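The caching in these checkers comes from functools.cached_property: the first access runs the decorated method and stores the result on the instance, so every later rule run by the same checker reuses the already-fetched resources instead of calling AWS again. A minimal sketch of that behaviour, using a hypothetical FakeChecker with a counter standing in for the boto3 calls:

from functools import cached_property


class FakeChecker:
    def __init__(self):
        self.api_calls = 0  # stands in for expensive list/describe calls

    @cached_property
    def tables(self):
        self.api_calls += 1  # would be list_tables + describe_table here
        return ["table-a", "table-b"]


checker = FakeChecker()
checker.tables  # first access: runs the method and caches the list on the instance
checker.tables  # second access: served from the instance cache
assert checker.api_calls == 1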
236 services/ec2.py
@ -1,192 +1,158 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class EC2RuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("ec2")
        self.ssm_client = boto3.client("ssm")

    @cached_property
    def instances(self):
        valid_instances = [
            instance
            for reservation in self.client.describe_instances()["Reservations"]
            for instance in reservation["Instances"]
            if instance["State"]["Name"] != "terminated"
        ]
        return valid_instances

    def ec2_ebs_encryption_by_default(self):
        compliant_resources = []
        non_compliant_resources = []

        volumes = self.client.describe_volumes()["Volumes"]
        for volume in volumes:
            if volume["Encrypted"]:
                compliant_resources.append(volume["VolumeId"])
            else:
                non_compliant_resources.append(volume["VolumeId"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ec2_imdsv2_check(self):
        compliant_resources = []
        non_compliant_resources = []

        for instance in self.instances:
            if instance["MetadataOptions"]["HttpTokens"] == "required":
                compliant_resources.append(instance["InstanceId"])
            else:
                non_compliant_resources.append(instance["InstanceId"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ec2_instance_detailed_monitoring_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for instance in self.instances:
            if instance["Monitoring"]["State"] == "enabled":
                compliant_resources.append(instance["InstanceId"])
            else:
                non_compliant_resources.append(instance["InstanceId"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ec2_instance_managed_by_systems_manager(self):
        compliant_resources = []
        non_compliant_resources = []

        informations = self.ssm_client.describe_instance_information()[
            "InstanceInformationList"
        ]
        managed_instance_ids = [
            info["InstanceId"] for info in informations if info["PingStatus"]
        ]

        for instance in self.instances:
            if instance["InstanceId"] in managed_instance_ids:
                compliant_resources.append(instance["InstanceId"])
            else:
                non_compliant_resources.append(instance["InstanceId"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ec2_instance_profile_attached(self):
        compliant_resources = []
        non_compliant_resources = []

        for instance in self.instances:
            if "IamInstanceProfile" in instance:
                compliant_resources.append(instance["InstanceId"])
            else:
                non_compliant_resources.append(instance["InstanceId"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ec2_no_amazon_key_pair(self):
        compliant_resources = []
        non_compliant_resources = []

        for instance in self.instances:
            if "KeyName" in instance:
                non_compliant_resources.append(instance["InstanceId"])
            else:
                compliant_resources.append(instance["InstanceId"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ec2_stopped_instance(self):
        compliant_resources = []
        non_compliant_resources = []

        for instance in self.instances:
            if instance["State"]["Name"] != "stopped":
                compliant_resources.append(instance["InstanceId"])
            else:
                non_compliant_resources.append(instance["InstanceId"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ec2_token_hop_limit_check(self):
        compliant_resources = []
        non_compliant_resources = []

        for instance in self.instances:
            if instance["MetadataOptions"]["HttpPutResponseHopLimit"] < 2:
                compliant_resources.append(instance["InstanceId"])
            else:
                non_compliant_resources.append(instance["InstanceId"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = EC2RuleChecker
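One thing this refactor does not change is that describe_instances is called without pagination, so an account with many reservations would only see the first page of results. A hedged variant of the instances property using boto3's paginator (an assumption about deployment scale, not part of this commit) could look like:

from functools import cached_property
import boto3


class PaginatedEC2Instances:
    def __init__(self):
        self.client = boto3.client("ec2")

    @cached_property
    def instances(self):
        # get_paginator handles NextToken continuation for DescribeInstances
        paginator = self.client.get_paginator("describe_instances")
        return [
            instance
            for page in paginator.paginate()
            for reservation in page["Reservations"]
            for instance in reservation["Instances"]
            if instance["State"]["Name"] != "terminated"
        ]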
151 services/ecr.py
@ -1,85 +1,86 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class ECRRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("ecr")

    @cached_property
    def repositories(self):
        return self.client.describe_repositories()["repositories"]

    def ecr_private_image_scanning_enabled(self):
        compliant_resource = []
        non_compliant_resources = []

        for repository in self.repositories:
            if repository["imageScanningConfiguration"]["scanOnPush"] == True:
                compliant_resource.append(repository["repositoryArn"])
            else:
                non_compliant_resources.append(repository["repositoryArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def ecr_private_lifecycle_policy_configured(self):
        compliant_resource = []
        non_compliant_resources = []

        for repository in self.repositories:
            try:
                response = self.client.get_lifecycle_policy(
                    registryId=repository["registryId"],
                    repositoryName=repository["repositoryName"],
                )
                compliant_resource.append(repository["repositoryArn"])
            except Exception as e:
                if e.__class__.__name__ == "LifecyclePolicyNotFoundException":
                    non_compliant_resources.append(repository["repositoryArn"])
                else:
                    raise e

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def ecr_private_tag_immutability_enabled(self):
        compliant_resource = []
        non_compliant_resources = []

        for repository in self.repositories:
            if repository["imageTagMutability"] == "IMMUTABLE":
                compliant_resource.append(repository["repositoryArn"])
            else:
                non_compliant_resources.append(repository["repositoryArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def ecr_kms_encryption_1(self):
        compliant_resource = []
        non_compliant_resources = []

        for repository in self.repositories:
            if repository["encryptionConfiguration"]["encryptionType"] == "KMS":
                compliant_resource.append(repository["repositoryArn"])
            else:
                non_compliant_resources.append(repository["repositoryArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = ECRRuleChecker
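The lifecycle-policy check matches the exception by class name, which works but hides the intent. boto3 also exposes modeled exceptions on the client, so a hedged alternative with the same behaviour (assuming the generated LifecyclePolicyNotFoundException attribute on the ECR client) would be:

import boto3

client = boto3.client("ecr")


def has_lifecycle_policy(repository) -> bool:
    # True when a lifecycle policy exists, False when ECR reports none.
    try:
        client.get_lifecycle_policy(
            registryId=repository["registryId"],
            repositoryName=repository["repositoryName"],
        )
        return True
    except client.exceptions.LifecyclePolicyNotFoundException:
        return False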
339 services/ecs.py
@ -1,219 +1,222 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class ECSRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("ecs")

    @cached_property
    def task_definitions(self):
        task_definition_arns = self.client.list_task_definitions(status="ACTIVE")[
            "taskDefinitionArns"
        ]
        latest_task_definitions = {}

        # Filter latest task definition arns
        for task_definition_arn in task_definition_arns:
            family, revision = task_definition_arn.rsplit(":", 1)
            latest_task_definitions[family] = max(
                latest_task_definitions.get(family, 0), int(revision)
            )

        # Fetch latest task definition details
        task_definitions = [
            self.client.describe_task_definition(taskDefinition=f"{family}:{revision}")[
                "taskDefinition"
            ]
            for family, revision in latest_task_definitions.items()
        ]
        return task_definitions

    @cached_property
    def clusters(self):
        return self.client.describe_clusters(include=["SETTINGS"])["clusters"]

    @cached_property
    def services(self):
        services = []
        for cluster in self.clusters:
            service_arns = self.client.list_services(
                cluster=cluster["clusterArn"], launchType="FARGATE"
            )["serviceArns"]
            services += self.client.describe_services(
                cluster=cluster["clusterArn"], services=service_arns
            )["services"]
        return services

    def ecs_awsvpc_networking_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for task_definition in self.task_definitions:
            if task_definition.get("networkMode") == "awsvpc":
                compliant_resources.append(task_definition["taskDefinitionArn"])
            else:
                non_compliant_resources.append(task_definition["taskDefinitionArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ecs_containers_nonprivileged(self):
        compliant_resources = []
        non_compliant_resources = []

        for task_definition in self.task_definitions:
            containers = task_definition["containerDefinitions"]
            privileged_containers = [
                container for container in containers if container.get("privileged")
            ]

            if privileged_containers:
                non_compliant_resources.append(task_definition["taskDefinitionArn"])
            else:
                compliant_resources.append(task_definition["taskDefinitionArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ecs_containers_readonly_access(self):
        compliant_resources = []
        non_compliant_resources = []

        for task_definition in self.task_definitions:
            containers = task_definition["containerDefinitions"]
            not_readonly_containers = [
                container
                for container in containers
                if not container.get("readonlyRootFilesystem")
            ]

            if not_readonly_containers:
                non_compliant_resources.append(task_definition["taskDefinitionArn"])
            else:
                compliant_resources.append(task_definition["taskDefinitionArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ecs_container_insights_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for cluster in self.clusters:
            container_insights_setting = [
                setting
                for setting in cluster["settings"]
                if setting["name"] == "containerInsights"
            ]

            if (
                container_insights_setting
                and container_insights_setting[0]["value"] == "enabled"
            ):
                compliant_resources.append(cluster["clusterArn"])
            else:
                non_compliant_resources.append(cluster["clusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ecs_fargate_latest_platform_version(self):
        compliant_resources = []
        non_compliant_resources = []

        for service in self.services:
            if service["platformVersion"] == "LATEST":
                compliant_resources.append(service["serviceArn"])
            else:
                non_compliant_resources.append(service["serviceArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ecs_task_definition_log_configuration(self):
        compliant_resources = []
        non_compliant_resources = []

        for task_definition in self.task_definitions:
            containers = task_definition["containerDefinitions"]
            log_disabled_containers = [
                container
                for container in containers
                if "logConfiguration" not in container
            ]

            if log_disabled_containers:
                non_compliant_resources.append(task_definition["taskDefinitionArn"])
            else:
                compliant_resources.append(task_definition["taskDefinitionArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ecs_task_definition_memory_hard_limit(self):
        compliant_resources = []
        non_compliant_resources = []

        for task_definition in self.task_definitions:
            containers = task_definition["containerDefinitions"]
            containers_without_memory_limit = [
                container for container in containers if "memory" not in container
            ]

            if containers_without_memory_limit:
                non_compliant_resources.append(task_definition["taskDefinitionArn"])
            else:
                compliant_resources.append(task_definition["taskDefinitionArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def ecs_task_definition_nonroot_user(self):
        compliant_resources = []
        non_compliant_resources = []

        for task_definition in self.task_definitions:
            containers = task_definition["containerDefinitions"]
            privileged_containers = [
                container
                for container in containers
                if container.get("user") in [None, "root"]
            ]

            if privileged_containers:
                non_compliant_resources.append(task_definition["taskDefinitionArn"])
            else:
                compliant_resources.append(task_definition["taskDefinitionArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = ECSRuleChecker
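The cached services property passes every ARN returned by list_services straight into describe_services. If a cluster runs more than a handful of Fargate services this may run into the API's per-call cap on the services list (ten, as far as I recall), so a hedged batching helper, not part of this commit, would chunk the ARNs first:

def describe_services_in_batches(client, cluster_arn, service_arns, batch_size=10):
    # DescribeServices accepts a bounded list of services per call, so describe
    # the ARNs in fixed-size chunks and concatenate the results.
    described = []
    for start in range(0, len(service_arns), batch_size):
        batch = service_arns[start : start + batch_size]
        described += client.describe_services(
            cluster=cluster_arn, services=batch
        )["services"]
    return described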
220 services/efs.py
@ -1,118 +1,124 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class EFSRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("efs")
        self.ec2_client = boto3.client("ec2")

    @cached_property
    def access_points(self):
        return self.client.describe_access_points()["AccessPoints"]

    @cached_property
    def file_systems(self):
        return self.client.describe_file_systems()["FileSystems"]

    def efs_access_point_enforce_root_directory(self):
        compliant_resource = []
        non_compliant_resources = []

        for access_point in self.access_points:
            if access_point["RootDirectory"]["Path"] != "/":
                compliant_resource.append(access_point["AccessPointArn"])
            else:
                non_compliant_resources.append(access_point["AccessPointArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def efs_access_point_enforce_user_identity(self):
        compliant_resource = []
        non_compliant_resources = []

        for access_point in self.access_points:
            if "PosixUser" in access_point:
                compliant_resource.append(access_point["AccessPointArn"])
            else:
                non_compliant_resources.append(access_point["AccessPointArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def efs_automatic_backups_enabled(self):
        compliant_resource = []
        non_compliant_resources = []

        for file_system in self.file_systems:
            response = self.client.describe_backup_policy(
                FileSystemId=file_system["FileSystemId"]
            )

            if response["BackupPolicy"]["Status"] == "ENABLED":
                compliant_resource.append(file_system["FileSystemArn"])
            else:
                non_compliant_resources.append(file_system["FileSystemArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def efs_encrypted_check(self):
        compliant_resource = []
        non_compliant_resources = []

        for file_system in self.file_systems:
            if file_system["Encrypted"]:
                compliant_resource.append(file_system["FileSystemArn"])
            else:
                non_compliant_resources.append(file_system["FileSystemArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def efs_mount_target_public_accessible(self):
        compliant_resource = []
        non_compliant_resources = []

        for file_system in self.file_systems:
            mount_targets = self.client.describe_mount_targets(
                FileSystemId=file_system["FileSystemId"]
            )["MountTargets"]

            for mount_target in mount_targets:
                subnet_id = mount_target["SubnetId"]
                routes = self.ec2_client.describe_route_tables(
                    Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}]
                )["RouteTables"][0]["Routes"]

                for route in routes:
                    if (
                        "DestinationCidrBlock" in route
                        and route["DestinationCidrBlock"] == "0.0.0.0/0"
                        and "GatewayId" in route
                        and route["GatewayId"].startswith("igw-")
                    ):
                        non_compliant_resources.append(file_system["FileSystemArn"])
                        break
                else:
                    compliant_resource.append(file_system["FileSystemArn"])

        non_compliant_resources = list(set(non_compliant_resources))
        compliant_resource = list(
            set(compliant_resource) - set(non_compliant_resources)
        )

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = EFSRuleChecker
131 services/eks.py
@@ -1,68 +1,73 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class EKSRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("eks")

    @cached_property
    def clusters(self):
        cluster_names = self.client.list_clusters()["clusters"]
        return [
            self.client.describe_cluster(name=cluster_name)["cluster"]
            for cluster_name in cluster_names
        ]

    def eks_cluster_logging_enabled(self):
        compliant_resource = []
        non_compliant_resources = []

        for cluster in self.clusters:
            if (
                cluster["logging"]["clusterLogging"][0]["enabled"]
                and len(cluster["logging"]["clusterLogging"][0]["types"]) == 5
            ):
                compliant_resource.append(cluster["arn"])
            else:
                non_compliant_resources.append(cluster["arn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def eks_cluster_secrets_encrypted(self):
        compliant_resource = []
        non_compliant_resources = []

        for cluster in self.clusters:
            if (
                "encryptionConfig" in cluster
                and "secrets" in cluster["encryptionConfig"][0]["resources"]
            ):
                compliant_resource.append(cluster["arn"])
            else:
                non_compliant_resources.append(cluster["arn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def eks_endpoint_no_public_access(self):
        compliant_resource = []
        non_compliant_resources = []

        for cluster in self.clusters:
            if cluster["resourcesVpcConfig"]["endpointPublicAccess"]:
                non_compliant_resources.append(cluster["arn"])
            else:
                compliant_resource.append(cluster["arn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = EKSRuleChecker
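The point of the refactor above is that the expensive list/describe calls now sit behind functools.cached_property, so every rule on the same checker instance reuses a single API round trip. A minimal sketch of that behavior (illustrative only; Example and the call counter are hypothetical stand-ins, not part of this commit):

# Illustrative only: cached_property runs the body once per instance and
# serves later reads from the cache.
from functools import cached_property


class Example:
    calls = 0  # hypothetical counter standing in for the number of AWS API calls

    @cached_property
    def clusters(self):
        Example.calls += 1  # body executes only on the first access
        return ["cluster-a", "cluster-b"]


e = Example()
e.clusters  # first access: body runs
e.clusters  # second access: cached value, no extra "API call"
assert Example.calls == 1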
services/elasticache.py
@@ -1,113 +1,115 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class ElastiCacheRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("elasticache")

    @cached_property
    def clusters(self):
        return self.client.describe_cache_clusters()["CacheClusters"]

    @cached_property
    def replication_groups(self):
        return self.client.describe_replication_groups()["ReplicationGroups"]

    def elasticache_auto_minor_version_upgrade_check(self):
        compliant_resource = []
        non_compliant_resources = []

        for cluster in self.clusters:
            if cluster["AutoMinorVersionUpgrade"]:
                compliant_resource.append(cluster["ARN"])
            else:
                non_compliant_resources.append(cluster["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def elasticache_redis_cluster_automatic_backup_check(self):
        compliant_resource = []
        non_compliant_resources = []

        for replication_group in self.replication_groups:
            if "SnapshottingClusterId" in replication_group:
                compliant_resource.append(replication_group["ARN"])
            else:
                non_compliant_resources.append(replication_group["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def elasticache_repl_grp_auto_failover_enabled(self):
        compliant_resource = []
        non_compliant_resources = []

        for replication_group in self.replication_groups:
            if replication_group["AutomaticFailover"] == "enabled":
                compliant_resource.append(replication_group["ARN"])
            else:
                non_compliant_resources.append(replication_group["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def elasticache_repl_grp_encrypted_at_rest(self):
        compliant_resource = []
        non_compliant_resources = []

        for replication_group in self.replication_groups:
            if replication_group["AtRestEncryptionEnabled"] == True:
                compliant_resource.append(replication_group["ARN"])
            else:
                non_compliant_resources.append(replication_group["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def elasticache_repl_grp_encrypted_in_transit(self):
        compliant_resource = []
        non_compliant_resources = []

        for replication_group in self.replication_groups:
            if replication_group["TransitEncryptionEnabled"] == True:
                compliant_resource.append(replication_group["ARN"])
            else:
                non_compliant_resources.append(replication_group["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def elasticache_subnet_group_check(self):
        compliant_resource = []
        non_compliant_resources = []

        for cluster in self.clusters:
            if cluster["CacheSubnetGroupName"] != "default":
                compliant_resource.append(cluster["ARN"])
            else:
                non_compliant_resources.append(cluster["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = ElastiCacheRuleChecker
163 services/iam.py
@@ -1,83 +1,104 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class IAMRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("iam")

    @cached_property
    def policies(self):
        return self.client.list_policies(Scope="Local")["Policies"]

    @cached_property
    def policy_default_versions(self):
        responses = [
            self.client.get_policy_version(
                PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"]
            )["PolicyVersion"]
            for policy in self.policies
        ]

        return {
            policy["Arn"]: response
            for policy, response in zip(self.policies, responses)
        }

    def iam_policy_no_statements_with_admin_access(self):
        compliant_resource = []
        non_compliant_resources = []

        for policy in self.policies:
            policy_version = self.policy_default_versions[policy["Arn"]]

            for statement in policy_version["Document"]["Statement"]:
                if (
                    statement["Action"] == "*"
                    and statement["Resource"] == "*"
                    and statement["Effect"] == "Allow"
                ):
                    non_compliant_resources.append(policy["Arn"])
                    break
            else:
                compliant_resource.append(policy["Arn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def iam_policy_no_statements_with_full_access(self):
        compliant_resource = []
        non_compliant_resources = []

        for policy in self.policies:
            policy_version = self.policy_default_versions[policy["Arn"]]

            for statement in policy_version["Document"]["Statement"]:
                if statement["Effect"] == "Deny":
                    continue

                if type(statement["Action"]) == str:
                    statement["Action"] = [statement["Action"]]

                full_access_actions = [
                    action for action in statement["Action"] if action.endswith(":*")
                ]
                if full_access_actions:
                    non_compliant_resources.append(policy["Arn"])
                    break
            else:
                compliant_resource.append(policy["Arn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )

    def iam_role_managed_policy_check(self):
        compliant_resource = []
        non_compliant_resources = []
        policy_arns = []  # list of managed policy ARNs to check

        for policy in policy_arns:
            response = self.client.list_entities_for_policy(PolicyArn=policy)

            if (
                response["PolicyGroups"] == []
                and response["PolicyUsers"] == []
                and response["PolicyRoles"] == []
            ):
                non_compliant_resources.append(policy)
            else:
                compliant_resource.append(policy)

        return RuleCheckResult(
            passed=not compliant_resource,
            compliant_resources=compliant_resource,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = IAMRuleChecker
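The policy_default_versions property above pairs each local policy with the response for its default version and keys the result by ARN, so both IAM statement rules share one batch of get_policy_version lookups. A small sketch of the same zip-into-dict construction, using made-up policy data and no AWS calls:

# Sketch with hypothetical data (no boto3 calls): build the ARN-keyed mapping
# the same way the cached property does, by zipping policies with their
# fetched default versions.
policies = [
    {"Arn": "arn:aws:iam::123456789012:policy/a", "DefaultVersionId": "v2"},
    {"Arn": "arn:aws:iam::123456789012:policy/b", "DefaultVersionId": "v1"},
]
responses = [  # stand-ins for get_policy_version()["PolicyVersion"] results
    {"VersionId": p["DefaultVersionId"], "Document": {"Statement": []}}
    for p in policies
]
policy_default_versions = {
    policy["Arn"]: response for policy, response in zip(policies, responses)
}
assert policy_default_versions["arn:aws:iam::123456789012:policy/a"]["VersionId"] == "v2"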
services/kms.py
@@ -1,25 +1,29 @@
from models import RuleCheckResult, RuleChecker
import boto3


class KMSRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("kms")

    def cmk_backing_key_rotation_enabled(self):
        compliant_resources = []
        non_compliant_resources = []
        keys = self.client.list_keys()["Keys"]

        for key in keys:
            response = self.client.get_key_rotation_status(KeyId=key["KeyId"])

            if response["KeyRotationEnabled"] == True:
                compliant_resources.append(response["KeyId"])
            else:
                non_compliant_resources.append(response["KeyId"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = KMSRuleChecker
550 services/rds.py
@@ -1,278 +1,298 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import datetime
from dateutil.tz import tzlocal
import boto3


class RDSRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("rds")
        self.backup_client = boto3.client("backup")
        self.ec2_client = boto3.client("ec2")

    @cached_property
    def db_clusters(self):
        return self.client.describe_db_clusters()["DBClusters"]

    @cached_property
    def db_instances(self):
        return self.client.describe_db_instances()["DBInstances"]

    def aurora_last_backup_recovery_point_created(self):
        compliant_resources = []
        non_compliant_resources = []

        clusters = self.db_clusters
        for cluster in clusters:
            recovery_points = self.backup_client.list_recovery_points_by_resource(
                ResourceArn=cluster["DBClusterArn"]
            )["RecoveryPoints"]
            recovery_point_creation_dates = sorted(
                [i["CreationDate"] for i in recovery_points]
            )
            if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[
                -1
            ] < datetime.timedelta(days=1):
                compliant_resources.append(cluster["DBClusterArn"])
            else:
                non_compliant_resources.append(cluster["DBClusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def aurora_mysql_backtracking_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        clusters = self.db_clusters
        for cluster in clusters:
            if (
                cluster["Engine"] == "aurora-mysql"
                and cluster.get("EarliestBacktrackTime", None) == None
            ):
                non_compliant_resources.append(cluster["DBClusterArn"])
            else:
                compliant_resources.append(cluster["DBClusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def db_instance_backup_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        clusters = self.db_clusters
        for cluster in clusters:
            if "BackupRetentionPeriod" in cluster:
                compliant_resources.append(cluster["DBClusterArn"])
            else:
                non_compliant_resources.append(cluster["DBClusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def rds_cluster_auto_minor_version_upgrade_enable(self):
        compliant_resources = []
        non_compliant_resources = []

        clusters = self.db_clusters
        for cluster in clusters:
            if cluster["Engine"] == "docdb" or cluster.get("AutoMinorVersionUpgrade"):
                compliant_resources.append(cluster["DBClusterArn"])
            else:
                non_compliant_resources.append(cluster["DBClusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def rds_cluster_default_admin_check(self):
        compliant_resources = []
        non_compliant_resources = []

        clusters = self.db_clusters
        for cluster in clusters:
            if cluster["MasterUsername"] not in ["admin", "postgres"]:
                compliant_resources.append(cluster["DBClusterArn"])
            else:
                non_compliant_resources.append(cluster["DBClusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def rds_cluster_deletion_protection_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        clusters = self.db_clusters
        for cluster in clusters:
            if cluster["DeletionProtection"]:
                compliant_resources.append(cluster["DBClusterArn"])
            else:
                non_compliant_resources.append(cluster["DBClusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def rds_cluster_encrypted_at_rest(self):
        compliant_resources = []
        non_compliant_resources = []

        clusters = self.db_clusters
        for cluster in clusters:
            if cluster["StorageEncrypted"]:
                compliant_resources.append(cluster["DBClusterArn"])
            else:
                non_compliant_resources.append(cluster["DBClusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def rds_cluster_iam_authentication_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        clusters = self.db_clusters
        for cluster in clusters:
            if cluster["Engine"] == "docdb" or cluster.get(
                "IAMDatabaseAuthenticationEnabled"
            ):
                compliant_resources.append(cluster["DBClusterArn"])
            else:
                non_compliant_resources.append(cluster["DBClusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def rds_cluster_multi_az_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        clusters = self.db_clusters
        for cluster in clusters:
            if len(cluster.get("AvailabilityZones", [])) > 1:
                compliant_resources.append(cluster["DBClusterArn"])
            else:
                non_compliant_resources.append(cluster["DBClusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def rds_db_security_group_not_allowed(self):
        compliant_resources = []
        non_compliant_resources = []

        clusters = self.db_clusters
        security_groups = self.ec2_client.describe_security_groups()["SecurityGroups"]
        default_security_group_ids = [
            i["GroupId"] for i in security_groups if i["GroupName"] == "default"
        ]

        for cluster in clusters:
            db_security_groups = [
                i["VpcSecurityGroupId"]
                for i in cluster["VpcSecurityGroups"]
                if i["Status"] == "active"
            ]

            for default_security_group_id in default_security_group_ids:
                if default_security_group_id in db_security_groups:
                    non_compliant_resources.append(cluster["DBClusterArn"])
                    break
            else:
                compliant_resources.append(cluster["DBClusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def rds_enhanced_monitoring_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        instances = self.db_instances
        for instance in instances:
            if instance.get("MonitoringInterval", 0):
                compliant_resources.append(instance["DBInstanceArn"])
            else:
                non_compliant_resources.append(instance["DBInstanceArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def rds_instance_public_access_check(self):
        compliant_resources = []
        non_compliant_resources = []

        instances = self.db_instances
        for instance in instances:
            if instance["PubliclyAccessible"]:
                non_compliant_resources.append(instance["DBInstanceArn"])
            else:
                compliant_resources.append(instance["DBInstanceArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def rds_logging_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        clusters = self.db_clusters
        logs_for_engine = {
            "aurora-mysql": ["audit", "error", "general", "slowquery"],
            "aurora-postgresql": ["postgresql"],
            "docdb": ["audit", "profiler"],
        }

        for cluster in clusters:
            if sorted(cluster["EnabledCloudwatchLogsExports"]) == logs_for_engine.get(
                cluster["Engine"]
            ):
                compliant_resources.append(cluster["DBClusterArn"])
            else:
                non_compliant_resources.append(cluster["DBClusterArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def rds_snapshot_encrypted(self):
        compliant_resources = []
        non_compliant_resources = []

        cluster_snapshots = self.client.describe_db_cluster_snapshots()[
            "DBClusterSnapshots"
        ]

        for snapshot in cluster_snapshots:
            if snapshot.get("StorageEncrypted") == True:
                compliant_resources.append(snapshot["DBClusterSnapshotArn"])
            else:
                non_compliant_resources.append(snapshot["DBClusterSnapshotArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = RDSRuleChecker
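Several of the checks above (rds_db_security_group_not_allowed here, and the IAM statement rules earlier) rely on Python's for/else: the else branch runs only when the loop finishes without hitting break. A toy sketch with hypothetical security-group IDs, not real AWS resources:

# Toy values: the else clause of a for loop runs only if the loop completed
# without break, which is how a cluster ends up in the compliant list.
default_security_group_ids = ["sg-default"]
db_security_groups = ["sg-app", "sg-db"]
compliant, non_compliant = [], []

for sg_id in default_security_group_ids:
    if sg_id in db_security_groups:
        non_compliant.append("cluster-arn")
        break
else:
    compliant.append("cluster-arn")

assert compliant == ["cluster-arn"] and non_compliant == []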
402 services/s3.py
@@ -1,211 +1,225 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
import botocore.exceptions


class S3RuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("s3")
        self.sts_client = boto3.client("sts")
        self.s3control_client = boto3.client("s3control")
        self.backup_client = boto3.client("backup")

    @cached_property
    def account_id(self):
        return self.sts_client.get_caller_identity().get("Account")

    @cached_property
    def buckets(self):
        return self.client.list_buckets()["Buckets"]

    def s3_access_point_in_vpc_only(self):
        compliant_resources = []
        non_compliant_resources = []

        access_points = self.s3control_client.list_access_points(
            AccountId=self.account_id
        )["AccessPointList"]
        for access_point in access_points:
            if access_point["NetworkOrigin"] == "VPC":
                compliant_resources.append(access_point["AccessPointArn"])
            else:
                non_compliant_resources.append(access_point["AccessPointArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def s3_bucket_default_lock_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for bucket in self.buckets:
            try:
                response = self.client.get_object_lock_configuration(
                    Bucket=bucket["Name"]
                )
                compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
            except botocore.exceptions.ClientError as e:
                if (
                    e.response["Error"]["Code"]
                    == "ObjectLockConfigurationNotFoundError"
                ):
                    non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
                else:
                    raise e

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def s3_bucket_level_public_access_prohibited(self):
        compliant_resources = []
        non_compliant_resources = []

        for bucket in self.buckets:
            response = self.client.get_public_access_block(Bucket=bucket["Name"])
            if False not in response["PublicAccessBlockConfiguration"].values():
                compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
            else:
                non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def s3_bucket_logging_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for bucket in self.buckets:
            response = self.client.get_bucket_logging(Bucket=bucket["Name"])
            if "LoggingEnabled" in response:
                compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
            else:
                non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def s3_bucket_ssl_requests_only(self):
        compliant_resources = []
        non_compliant_resources = []

        for bucket in self.buckets:
            policy = self.client.get_bucket_policy(Bucket=bucket["Name"])["Policy"]
            if "aws:SecureTransport" in policy:
                compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
            else:
                non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def s3_bucket_versioning_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for bucket in self.buckets:
            response = self.client.get_bucket_versioning(Bucket=bucket["Name"])
            if "Status" in response and response["Status"] == "Enabled":
                compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
            else:
                non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def s3_default_encryption_kms(self):
        compliant_resources = []
        non_compliant_resources = []

        for bucket in self.buckets:
            configuration = self.client.get_bucket_encryption(Bucket=bucket["Name"])[
                "ServerSideEncryptionConfiguration"
            ]

            if (
                configuration["Rules"][0]["ApplyServerSideEncryptionByDefault"][
                    "SSEAlgorithm"
                ]
                == "aws:kms"
            ):
                compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
            else:
                non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def s3_event_notifications_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for bucket in self.buckets:
            configuration = self.client.get_bucket_notification_configuration(
                Bucket=bucket["Name"]
            )
            if (
                "LambdaFunctionConfigurations" in configuration
                or "QueueConfigurations" in configuration
                or "TopicConfigurations" in configuration
            ):
                compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
            else:
                non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def s3_last_backup_recovery_point_created(self):
        compliant_resources = []
        non_compliant_resources = []

        for bucket in self.buckets:
            backups = self.backup_client.list_recovery_points_by_resource(
                ResourceArn=f"arn:aws:s3:::{bucket['Name']}"
)
|
||||||
|
|
||||||
|
if backups["RecoveryPoints"] != []:
|
||||||
|
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
|
||||||
|
else:
|
||||||
|
non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
|
||||||
|
|
||||||
|
return RuleCheckResult(
|
||||||
|
passed=not non_compliant_resources,
|
||||||
|
compliant_resources=compliant_resources,
|
||||||
|
non_compliant_resources=non_compliant_resources,
|
||||||
|
)
|
||||||
|
|
||||||
|
def s3_lifecycle_policy_check(self):
|
||||||
|
compliant_resources = []
|
||||||
|
non_compliant_resources = []
|
||||||
|
|
||||||
|
for bucket in self.buckets:
|
||||||
|
try:
|
||||||
|
configuration = self.client.get_bucket_lifecycle_configuration(
|
||||||
|
Bucket=bucket["Name"]
|
||||||
|
)
|
||||||
|
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
if e.response["Error"]["Code"] == "NoSuchLifecycleConfiguration":
|
||||||
|
non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
|
||||||
|
else:
|
||||||
|
raise e
|
||||||
|
|
||||||
|
return RuleCheckResult(
|
||||||
|
passed=not non_compliant_resources,
|
||||||
|
compliant_resources=compliant_resources,
|
||||||
|
non_compliant_resources=non_compliant_resources,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
rule_checker = S3RuleChecker
|
||||||
|
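The refactor leans on functools.cached_property: the first rule that touches a property such as buckets triggers the underlying AWS call, and every later rule on the same checker instance reuses the stored result instead of re-listing the resources. Below is a minimal sketch of that caching behaviour; the CachedBuckets class is only an illustrative stand-in, not code from this commit.

from functools import cached_property

import boto3


class CachedBuckets:  # illustrative stand-in, not part of this commit
    def __init__(self):
        self.client = boto3.client("s3")

    @cached_property
    def buckets(self):
        # Runs once per instance; the result is stored on the instance and
        # reused by every later access, so all rules share one ListBuckets call.
        return self.client.list_buckets()["Buckets"]


checker = CachedBuckets()
first = checker.buckets   # issues the ListBuckets request
second = checker.buckets  # served from the instance cache, no extra API call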
services/secretsmanager.py
@@ -1,80 +1,84 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3
from datetime import datetime, timedelta
from dateutil.tz import tzlocal


class SecretsManagerRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("secretsmanager")

    @cached_property
    def secrets(self):
        return self.client.list_secrets()["SecretList"]

    def secretsmanager_rotation_enabled_check(self):
        compliant_resources = []
        non_compliant_resources = []

        for secret in self.secrets:
            if secret.get("RotationEnabled", False):
                compliant_resources.append(secret["ARN"])
            else:
                non_compliant_resources.append(secret["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def secretsmanager_scheduled_rotation_success_check(self):
        compliant_resources = []
        non_compliant_resources = []

        for secret in self.secrets:
            if secret.get("RotationEnabled", False):
                if "LastRotatedDate" not in secret:
                    non_compliant_resources.append(secret["ARN"])
                    continue

                now = datetime.now(tz=tzlocal())
                rotation_period = timedelta(
                    days=secret["RotationRules"]["AutomaticallyAfterDays"] + 2
                )  # AWS allows the rotation to run up to 2 days late
                elapsed_time_after_rotation = now - secret["LastRotatedDate"]

                if elapsed_time_after_rotation > rotation_period:
                    non_compliant_resources.append(secret["ARN"])
                else:
                    compliant_resources.append(secret["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def secretsmanager_secret_periodic_rotation(self):
        compliant_resources = []
        non_compliant_resources = []

        for secret in self.secrets:
            if secret.get("RotationEnabled") == True:
                if "LastRotatedDate" not in secret:
                    non_compliant_resources.append(secret["ARN"])
                    continue

                now = datetime.now(tz=tzlocal())
                elapsed_time_after_rotation = now - secret["LastRotatedDate"]

                if elapsed_time_after_rotation > timedelta(days=90):
                    non_compliant_resources.append(secret["ARN"])
                else:
                    compliant_resources.append(secret["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = SecretsManagerRuleChecker
services/securityhub.py
@@ -1,28 +1,31 @@
from models import RuleCheckResult, RuleChecker
import boto3


class SecurityHubRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("securityhub")
        self.sts_client = boto3.client("sts")

    def securityhub_enabled(self):
        compliant_resources = []
        non_compliant_resources = []
        aws_account_id = self.sts_client.get_caller_identity()["Account"]

        try:
            hub = self.client.describe_hub()
            compliant_resources.append(aws_account_id)
        except Exception as e:
            if e.__class__.__name__ == "InvalidAccessException":
                non_compliant_resources.append(aws_account_id)
            else:
                raise e

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = SecurityHubRuleChecker
services/sns.py
@@ -1,46 +1,57 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class SNSRuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("sns")

    @cached_property
    def topics(self):
        topics = self.client.list_topics()["Topics"]
        return [
            self.client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"]
            for topic in topics
        ]

    def sns_encrypted_kms(self):
        compliant_resources = []
        non_compliant_resources = []

        for topic in self.topics:
            if "KmsMasterKeyId" in topic:
                compliant_resources.append(topic["TopicArn"])
            else:
                non_compliant_resources.append(topic["TopicArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def sns_topic_message_delivery_notification_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for topic in self.topics:
            notification_roles = [
                attribute
                for attribute in topic.keys()
                if attribute.endswith("FeedbackRoleArn")
            ]

            if notification_roles:
                compliant_resources.append(topic["TopicArn"])
            else:
                non_compliant_resources.append(topic["TopicArn"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = SNSRuleChecker
services/tags.py (deleted)
@@ -1,11 +0,0 @@
from models import RuleCheckResult
import boto3


# client = boto3.client("")


def required_tags():
    return RuleCheckResult(
        passed=False, compliant_resources=[], non_compliant_resources=[]
    )
456
services/vpc.py
@@ -1,257 +1,261 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class VPCRuleChecker(RuleChecker):
    def __init__(self):
        self.ec2 = boto3.client("ec2")

    @cached_property
    def security_group_rules(self):
        return self.ec2.describe_security_group_rules()["SecurityGroupRules"]

    def ec2_transit_gateway_auto_vpc_attach_disabled(self):
        response = self.ec2.describe_transit_gateways()

        non_compliant_resources = [
            resource["TransitGatewayArn"]
            for resource in filter(
                lambda x: x["Options"]["AutoAcceptSharedAttachments"] == "enable",
                response["TransitGateways"],
            )
        ]

        compliant_resources = list(
            set(
                [
                    resource["TransitGatewayArn"]
                    for resource in response["TransitGateways"]
                ]
            )
            - set(non_compliant_resources)
        )

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def restricted_ssh(self):
        non_compliant_resources = [
            f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
            for resource in filter(
                lambda x: x["IsEgress"] == False
                and x["FromPort"] <= 22
                and x["ToPort"] >= 22
                and x.get("CidrIpv4") == "0.0.0.0/0",
                self.security_group_rules,
            )
        ]

        compliant_resources = list(
            set(
                [
                    f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
                    for resource in self.security_group_rules
                ]
            )
            - set(non_compliant_resources)
        )

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def restricted_common_ports(self):
        common_ports = [
            22,  # SSH
            80,  # HTTP
            3306,  # MySQL
            3389,  # RDP
            5432,  # PostgreSQL
            6379,  # Redis
            11211,  # Memcached
        ]

        non_compliant_resources = [
            f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
            for resource in filter(
                lambda x: x["IsEgress"] == False
                and x["FromPort"] in common_ports
                and x["ToPort"] in common_ports
                and x.get("PrefixListId") is None,
                self.security_group_rules,
            )
        ]

        compliant_resources = list(
            set(
                f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
                for resource in self.security_group_rules
            )
            - set(non_compliant_resources)
        )

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def subnet_auto_assign_public_ip_disabled(self):
        response = self.ec2.describe_subnets()

        non_compliant_resources = [
            resource["SubnetId"]
            for resource in filter(
                lambda x: x["MapPublicIpOnLaunch"], response["Subnets"]
            )
        ]

        compliant_resources = list(
            set(resource["SubnetId"] for resource in response["Subnets"])
            - set(non_compliant_resources)
        )

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def vpc_default_security_group_closed(self):
        response = self.ec2.describe_security_groups(
            Filters=[{"Name": "group-name", "Values": ["default"]}]
        )

        non_compliant_resources = [
            resource["GroupId"]
            for resource in filter(
                lambda x: x["IpPermissions"] or x["IpPermissionsEgress"],
                response["SecurityGroups"],
            )
        ]

        compliant_resources = list(
            set(resource["GroupId"] for resource in response["SecurityGroups"])
            - set(non_compliant_resources)
        )

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def vpc_flow_logs_enabled(self):
        response = self.ec2.describe_flow_logs()
        flow_log_enabled_vpcs = [
            resource["ResourceId"] for resource in response["FlowLogs"]
        ]

        response = self.ec2.describe_vpcs()

        non_compliant_resources = [
            resource["VpcId"]
            for resource in filter(
                lambda x: x["VpcId"] not in flow_log_enabled_vpcs, response["Vpcs"]
            )
        ]

        compliant_resources = list(
            set(resource["VpcId"] for resource in response["Vpcs"])
            - set(non_compliant_resources)
        )

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def vpc_network_acl_unused_check(self):
        response = self.ec2.describe_network_acls()

        non_compliant_resources = [
            resource["NetworkAclId"]
            for resource in filter(
                lambda x: not x["Associations"], response["NetworkAcls"]
            )
        ]

        compliant_resources = list(
            set(resource["NetworkAclId"] for resource in response["NetworkAcls"])
            - set(non_compliant_resources)
        )

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def vpc_peering_dns_resolution_check(self):
        response = self.ec2.describe_vpc_peering_connections()

        non_compliant_resources = [
            resource["VpcPeeringConnectionId"]
            for resource in filter(
                lambda x: x["Status"]["Code"] not in ["deleted", "deleting"]
                and (
                    not x["AccepterVpcInfo"].get("PeeringOptions")
                    or not x["AccepterVpcInfo"]["PeeringOptions"][
                        "AllowDnsResolutionFromRemoteVpc"
                    ]
                    or not x["RequesterVpcInfo"]["PeeringOptions"][
                        "AllowDnsResolutionFromRemoteVpc"
                    ]
                ),
                response["VpcPeeringConnections"],
            )
        ]

        compliant_resources = list(
            set(
                resource["VpcPeeringConnectionId"]
                for resource in response["VpcPeeringConnections"]
            )
            - set(non_compliant_resources)
        )

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def vpc_sg_open_only_to_authorized_ports(self):
        authorized_port = [
            # 80
        ]

        non_compliant_resources = [
            f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
            for resource in filter(
                lambda x: x["IsEgress"] == False
                and (x.get("CidrIpv4") == "0.0.0.0/0" or x.get("CidrIpv6") == "::/0")
                and x["FromPort"] not in authorized_port
                and x["ToPort"] not in authorized_port,
                self.security_group_rules,
            )
        ]

        compliant_resources = list(
            set(
                f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
                for resource in self.security_group_rules
            )
            - set(non_compliant_resources)
        )

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = VPCRuleChecker
services/wafv2.py
@@ -1,120 +1,144 @@
from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3


class WAFv2RuleChecker(RuleChecker):
    def __init__(self):
        self.client = boto3.client("wafv2")
        self.global_client = boto3.client("wafv2", region_name="us-east-1")

    @cached_property
    def regional_web_acls(self):
        return self.client.list_web_acls(Scope="REGIONAL")["WebACLs"]

    @cached_property
    def cloudfront_web_acls(self):
        return self.global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"]

    @cached_property
    def regional_rule_groups(self):
        rule_groups = self.client.list_rule_groups(Scope="REGIONAL")["RuleGroups"]
        return [
            self.client.get_rule_group(ARN=rule_group["ARN"])["RuleGroup"]
            for rule_group in rule_groups
        ]

    @cached_property
    def cloudfront_rule_groups(self):
        rule_groups = self.global_client.list_rule_groups(Scope="CLOUDFRONT")[
            "RuleGroups"
        ]
        return [
            self.global_client.get_rule_group(ARN=rule_group["ARN"])["RuleGroup"]
            for rule_group in rule_groups
        ]

    def wafv2_logging_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for web_acl in self.regional_web_acls:
            try:
                configuration = self.client.get_logging_configuration(
                    ResourceArn=web_acl["ARN"]
                )
                compliant_resources.append(web_acl["ARN"])
            except Exception as e:
                if e.__class__.__name__ == "WAFNonexistentItemException":
                    non_compliant_resources.append(web_acl["ARN"])
                else:
                    raise e

        for web_acl in self.cloudfront_web_acls:
            try:
                configuration = self.global_client.get_logging_configuration(
                    ResourceArn=web_acl["ARN"]
                )
                compliant_resources.append(web_acl["ARN"])
            except Exception as e:
                if e.__class__.__name__ == "WAFNonexistentItemException":
                    non_compliant_resources.append(web_acl["ARN"])
                else:
                    raise e

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def wafv2_rulegroup_logging_enabled(self):
        compliant_resources = []
        non_compliant_resources = []

        for rule_group in self.regional_rule_groups:
            if rule_group["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True:
                compliant_resources.append(rule_group["ARN"])
            else:
                non_compliant_resources.append(rule_group["ARN"])

        for rule_group in self.cloudfront_rule_groups:
            if rule_group["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True:
                compliant_resources.append(rule_group["ARN"])
            else:
                non_compliant_resources.append(rule_group["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def wafv2_rulegroup_not_empty(self):
        compliant_resources = []
        non_compliant_resources = []

        for rule_group in self.regional_rule_groups:
            if len(rule_group["Rules"]) > 0:
                compliant_resources.append(rule_group["ARN"])
            else:
                non_compliant_resources.append(rule_group["ARN"])

        for rule_group in self.cloudfront_rule_groups:
            if len(rule_group["Rules"]) > 0:
                compliant_resources.append(rule_group["ARN"])
            else:
                non_compliant_resources.append(rule_group["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def wafv2_webacl_not_empty(self):
        compliant_resources = []
        non_compliant_resources = []

        for web_acl in self.regional_web_acls:
            response = self.client.get_web_acl(
                Id=web_acl["Id"], Name=web_acl["Name"], Scope="REGIONAL"
            )
            if len(response["WebACL"]["Rules"]) > 0:
                compliant_resources.append(web_acl["ARN"])
            else:
                non_compliant_resources.append(web_acl["ARN"])

        for web_acl in self.cloudfront_web_acls:
            response = self.global_client.get_web_acl(
                Id=web_acl["Id"], Name=web_acl["Name"], Scope="CLOUDFRONT"
            )
            if len(response["WebACL"]["Rules"]) > 0:
                compliant_resources.append(web_acl["ARN"])
            else:
                non_compliant_resources.append(web_acl["ARN"])

        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )


rule_checker = WAFv2RuleChecker