Compare commits

..

No commits in common. "32345c2703778950ca9cbe7a2a03c7acb0ed7424" and "eae572c5ba643b71b8970d7d32207a9c9e7c4c98" have entirely different histories.

31 changed files with 2622 additions and 3654 deletions

View File

@ -1,74 +1,4 @@
{ {
"VPC": {
"enabled": true,
"rules": {
"ec2-transit-gateway-auto-vpc-attach-disabled": {
"enabled": true,
"level": 1
},
"restricted-ssh": {
"enabled": true,
"level": 2
},
"restricted-common-ports": {
"enabled": true,
"level": 2
},
"subnet-auto-assign-public-ip-disabled": {
"enabled": true,
"level": 1
},
"vpc-default-security-group-closed": {
"enabled": true,
"level": 2
},
"vpc-flow-logs-enabled": {
"enabled": true,
"level": 2
},
"vpc-network-acl-unused-check": {
"enabled": true,
"level": 2
},
"vpc-peering-dns-resolution-check": {
"enabled": true,
"level": 2
},
"vpc-sg-open-only-to-authorized-ports": {
"enabled": true,
"level": 2
}
}
},
"CloudFront": {
"enabled": true,
"rules": {
"cloudfront-accesslogs-enabled": {
"enabled": true,
"level": 2
},
"cloudfront-associated-with-waf": {
"enabled": true,
"level": 2
},
"cloudfront-default-root-object-configured": {
"enabled": true,
"level": 2
},
"cloudfront-no-deprecated-ssl-protocols": {
"enabled": true,
"level": 2
},
"cloudfront-s3-origin-access-control-enabled": {
"enabled": true,
"level": 2
},
"cloudfront-viewer-policy-https": {
"enabled": true,
"level": 2
}
}
},
"ALB": { "ALB": {
"enabled": true, "enabled": true,
"rules": { "rules": {
@ -123,180 +53,6 @@
} }
} }
}, },
"EC2": {
"enabled": true,
"rules": {
"ec2-ebs-encryption-by-default": {
"enabled": true,
"level": 2
},
"ec2-imdsv2-check": {
"enabled": true,
"level": 2
},
"ec2-instance-detailed-monitoring-enabled": {
"enabled": true,
"level": 2
},
"ec2-instance-managed-by-systems-manager": {
"enabled": true,
"level": 2
},
"ec2-instance-profile-attached": {
"enabled": true,
"level": 2
},
"ec2-no-amazon-key-pair": {
"enabled": true,
"level": 1
},
"ec2-stopped-instance": {
"enabled": true,
"level": 2
},
"ec2-token-hop-limit-check": {
"enabled": true,
"level": 2
}
}
},
"ASG": {
"enabled": true,
"rules": {
"autoscaling-group-elb-healthcheck-required": {
"enabled": true,
"level": 2
},
"autoscaling-multiple-az": {
"enabled": true,
"level": 2
},
"autoscaling-launch-template": {
"enabled": true,
"level": 2
}
}
},
"ECS": {
"enabled": true,
"rules": {
"ecs-awsvpc-networking-enabled": {
"enabled": true,
"level": 2
},
"ecs-containers-nonprivileged": {
"enabled": true,
"level": 2
},
"ecs-containers-readonly-access": {
"enabled": true,
"level": 2
},
"ecs-container-insights-enabled": {
"enabled": true,
"level": 2
},
"ecs-fargate-latest-platform-version": {
"enabled": true,
"level": 2
},
"ecs-task-definition-log-configuration": {
"enabled": true,
"level": 2
},
"ecs-task-definition-memory-hard-limit": {
"enabled": true,
"level": 1
},
"ecs-task-definition-nonroot-user": {
"enabled": true,
"level": 1
}
}
},
"EKS": {
"enabled": true,
"rules": {
"eks-cluster-logging-enabled": {
"enabled": true,
"level": 2
},
"eks-cluster-secrets-encrypted": {
"enabled": true,
"level": 2
},
"eks-endpoint-no-public-access": {
"enabled": true,
"level": 1
}
}
},
"ECR": {
"enabled": true,
"rules": {
"ecr-private-image-scanning-enabled": {
"enabled": true,
"level": 2
},
"ecr-private-lifecycle-policy-configured": {
"enabled": true,
"level": 2
},
"ecr-private-tag-immutability-enabled": {
"enabled": true,
"level": 2
},
"ecr-kms-encryption-1": {
"enabled": true,
"level": 2
}
}
},
"S3": {
"enabled": true,
"rules": {
"s3-access-point-in-vpc-only": {
"enabled": true,
"level": 1
},
"s3-bucket-default-lock-enabled": {
"enabled": true,
"level": 1
},
"s3-bucket-level-public-access-prohibited": {
"enabled": true,
"level": 2
},
"s3-bucket-logging-enabled": {
"enabled": true,
"level": 1
},
"s3-bucket-ssl-requests-only": {
"enabled": true,
"level": 2
},
"s3-bucket-versioning-enabled": {
"enabled": true,
"level": 2
},
"s3-default-encryption-kms": {
"enabled": true,
"level": 2
},
"s3-event-notifications-enabled": {
"enabled": true,
"level": 1
},
"s3-last-backup-recovery-point-created": {
"enabled": true,
"level": 1
},
"s3-lifecycle-policy-check": {
"enabled": true,
"level": 2
}
}
},
"RDS": { "RDS": {
"enabled": true, "enabled": true,
"rules": { "rules": {
@ -358,30 +114,123 @@
} }
} }
}, },
"ElastiCache": { "ASG": {
"enabled": true, "enabled": true,
"rules": { "rules": {
"elasticache-auto-minor-version-upgrade-check": { "autoscaling-group-elb-healthcheck-required": {
"enabled": true, "enabled": true,
"level": 2 "level": 2
}, },
"elasticache-redis-cluster-automatic-backup-check": { "autoscaling-multiple-az": {
"enabled": true,
"level": 2
}
}
},
"EC2": {
"enabled": true,
"rules": {
"autoscaling-launch-template": {
"enabled": true, "enabled": true,
"level": 2 "level": 2
}, },
"elasticache-repl-grp-auto-failover-enabled": { "ec2-ebs-encryption-by-default": {
"enabled": true, "enabled": true,
"level": 2 "level": 2
}, },
"elasticache-repl-grp-encrypted-at-rest": { "ec2-imdsv2-check": {
"enabled": true, "enabled": true,
"level": 2 "level": 2
}, },
"elasticache-repl-grp-encrypted-in-transit": { "ec2-instance-detailed-monitoring-enabled": {
"enabled": true, "enabled": true,
"level": 2 "level": 2
}, },
"elasticache-subnet-group-check": { "ec2-instance-managed-by-systems-manager": {
"enabled": true,
"level": 2
},
"ec2-instance-profile-attached": {
"enabled": true,
"level": 2
},
"ec2-no-amazon-key-pair": {
"enabled": true,
"level": 1
},
"ec2-stopped-instance": {
"enabled": true,
"level": 2
},
"ec2-token-hop-limit-check": {
"enabled": true,
"level": 2
}
}
},
"CloudFront": {
"enabled": true,
"rules": {
"cloudfront-accesslogs-enabled": {
"enabled": true,
"level": 2
},
"cloudfront-associated-with-waf": {
"enabled": true,
"level": 2
},
"cloudfront-default-root-object-configured": {
"enabled": true,
"level": 2
},
"cloudfront-no-deprecated-ssl-protocols": {
"enabled": true,
"level": 2
},
"cloudfront-s3-origin-access-control-enabled": {
"enabled": true,
"level": 2
},
"cloudfront-viewer-policy-https": {
"enabled": true,
"level": 2
}
}
},
"KMS": {
"enabled": true,
"rules": {
"cmk-backing-key-rotation-enabled": {
"enabled": true,
"level": 2
}
}
},
"CodeSeries": {
"enabled": true,
"rules": {
"codebuild-project-environment-privileged-check": {
"enabled": true,
"level": 1
},
"codebuild-project-logging-enabled": {
"enabled": true,
"level": 2
},
"codedeploy-auto-rollback-monitor-enabled": {
"enabled": true,
"level": 2
}
}
},
"CloudWatch": {
"enabled": true,
"rules": {
"cw-loggroup-retention-period-check": {
"enabled": true,
"level": 2
},
"cloudwatch-alarm-settings-check": {
"enabled": true, "enabled": true,
"level": 2 "level": 2
} }
@ -416,6 +265,64 @@
} }
} }
}, },
"ECR": {
"enabled": true,
"rules": {
"ecr-private-image-scanning-enabled": {
"enabled": true,
"level": 2
},
"ecr-private-lifecycle-policy-configured": {
"enabled": true,
"level": 2
},
"ecr-private-tag-immutability-enabled": {
"enabled": true,
"level": 2
},
"ecr-kms-encryption-1": {
"enabled": true,
"level": 2
}
}
},
"ECS": {
"enabled": true,
"rules": {
"ecs-awsvpc-networking-enabled": {
"enabled": true,
"level": 2
},
"ecs-containers-nonprivileged": {
"enabled": true,
"level": 2
},
"ecs-containers-readonly-access": {
"enabled": true,
"level": 2
},
"ecs-container-insights-enabled": {
"enabled": true,
"level": 2
},
"ecs-fargate-latest-platform-version": {
"enabled": true,
"level": 2
},
"ecs-task-definition-log-configuration": {
"enabled": true,
"level": 2
},
"ecs-task-definition-memory-hard-limit": {
"enabled": true,
"level": 1
},
"ecs-task-definition-nonroot-user": {
"enabled": true,
"level": 1
}
}
},
"EFS": { "EFS": {
"enabled": true, "enabled": true,
"rules": { "rules": {
@ -441,6 +348,69 @@
} }
} }
}, },
"EKS": {
"enabled": true,
"rules": {
"eks-cluster-logging-enabled": {
"enabled": true,
"level": 2
},
"eks-cluster-secrets-encrypted": {
"enabled": true,
"level": 2
},
"eks-endpoint-no-public-access": {
"enabled": true,
"level": 1
}
}
},
"ElastiCache": {
"enabled": true,
"rules": {
"elasticache-auto-minor-version-upgrade-check": {
"enabled": true,
"level": 2
},
"elasticache-redis-cluster-automatic-backup-check": {
"enabled": true,
"level": 2
},
"elasticache-repl-grp-auto-failover-enabled": {
"enabled": true,
"level": 2
},
"elasticache-repl-grp-encrypted-at-rest": {
"enabled": true,
"level": 2
},
"elasticache-repl-grp-encrypted-in-transit": {
"enabled": true,
"level": 2
},
"elasticache-subnet-group-check": {
"enabled": true,
"level": 2
}
}
},
"IAM": {
"enabled": true,
"rules": {
"iam-policy-no-statements-with-admin-access": {
"enabled": true,
"level": 1
},
"iam-policy-no-statements-with-full-access": {
"enabled": true,
"level": 1
},
"iam-role-managed-policy-check": {
"enabled": true,
"level": 1
}
}
},
"Lambda": { "Lambda": {
"enabled": true, "enabled": true,
"rules": { "rules": {
@ -462,23 +432,55 @@
} }
} }
}, },
"CloudWatch": { "Tags": {
"enabled": true, "enabled": true,
"rules": { "rules": {
"cw-loggroup-retention-period-check": { "required-tags": {
"enabled": true,
"level": 2
},
"cloudwatch-alarm-settings-check": {
"enabled": true, "enabled": true,
"level": 2 "level": 2
} }
} }
}, },
"KMS": { "S3": {
"enabled": true, "enabled": true,
"rules": { "rules": {
"cmk-backing-key-rotation-enabled": { "s3-access-point-in-vpc-only": {
"enabled": true,
"level": 1
},
"s3-bucket-default-lock-enabled": {
"enabled": true,
"level": 1
},
"s3-bucket-level-public-access-prohibited": {
"enabled": true,
"level": 2
},
"s3-bucket-logging-enabled": {
"enabled": true,
"level": 1
},
"s3-bucket-ssl-requests-only": {
"enabled": true,
"level": 2
},
"s3-bucket-versioning-enabled": {
"enabled": true,
"level": 2
},
"s3-default-encryption-kms": {
"enabled": true,
"level": 2
},
"s3-event-notifications-enabled": {
"enabled": true,
"level": 1
},
"s3-last-backup-recovery-point-created": {
"enabled": true,
"level": 1
},
"s3-lifecycle-policy-check": {
"enabled": true, "enabled": true,
"level": 2 "level": 2
} }
@ -501,6 +503,69 @@
} }
} }
}, },
"Security Hub": {
"enabled": true,
"rules": {
"securityhub-enabled": {
"enabled": true,
"level": 1
}
}
},
"SNS": {
"enabled": true,
"rules": {
"sns-encrypted-kms": {
"enabled": true,
"level": 2
},
"sns-topic-message-delivery-notification-enabled": {
"enabled": true,
"level": 2
}
}
},
"VPC": {
"enabled": true,
"rules": {
"ec2-transit-gateway-auto-vpc-attach-disabled": {
"enabled": true,
"level": 1
},
"restricted-ssh": {
"enabled": true,
"level": 2
},
"restricted-common-ports": {
"enabled": true,
"level": 2
},
"subnet-auto-assign-public-ip-disabled": {
"enabled": true,
"level": 1
},
"vpc-default-security-group-closed": {
"enabled": true,
"level": 2
},
"vpc-flow-logs-enabled": {
"enabled": true,
"level": 2
},
"vpc-network-acl-unused-check": {
"enabled": true,
"level": 2
},
"vpc-peering-dns-resolution-check": {
"enabled": true,
"level": 2
},
"vpc-sg-open-only-to-authorized-ports": {
"enabled": true,
"level": 2
}
}
},
"WAFv2": { "WAFv2": {
"enabled": true, "enabled": true,
"rules": { "rules": {
@ -521,61 +586,5 @@
"level": 2 "level": 2
} }
} }
},
"IAM": {
"enabled": true,
"rules": {
"iam-policy-no-statements-with-admin-access": {
"enabled": true,
"level": 1
},
"iam-policy-no-statements-with-full-access": {
"enabled": true,
"level": 1
},
"iam-role-managed-policy-check": {
"enabled": true,
"level": 1
}
}
},
"CodeSeries": {
"enabled": true,
"rules": {
"codebuild-project-environment-privileged-check": {
"enabled": true,
"level": 1
},
"codebuild-project-logging-enabled": {
"enabled": true,
"level": 2
},
"codedeploy-auto-rollback-monitor-enabled": {
"enabled": true,
"level": 2
}
}
},
"SNS": {
"enabled": true,
"rules": {
"sns-encrypted-kms": {
"enabled": true,
"level": 2
},
"sns-topic-message-delivery-notification-enabled": {
"enabled": true,
"level": 2
}
}
},
"Security Hub": {
"enabled": true,
"rules": {
"securityhub-enabled": {
"enabled": true,
"level": 1
}
}
} }
} }

View File

@ -1,581 +0,0 @@
{
"VPC": {
"enabled": true,
"rules": {
"ec2-transit-gateway-auto-vpc-attach-disabled": {
"enabled": false,
"level": 1
},
"restricted-ssh": {
"enabled": true,
"level": 2
},
"restricted-common-ports": {
"enabled": true,
"level": 2
},
"subnet-auto-assign-public-ip-disabled": {
"enabled": false,
"level": 1
},
"vpc-default-security-group-closed": {
"enabled": true,
"level": 2
},
"vpc-flow-logs-enabled": {
"enabled": true,
"level": 2
},
"vpc-network-acl-unused-check": {
"enabled": false,
"level": 2
},
"vpc-peering-dns-resolution-check": {
"enabled": false,
"level": 2
},
"vpc-sg-open-only-to-authorized-ports": {
"enabled": true,
"level": 2
}
}
},
"CloudFront": {
"enabled": true,
"rules": {
"cloudfront-accesslogs-enabled": {
"enabled": true,
"level": 2
},
"cloudfront-associated-with-waf": {
"enabled": true,
"level": 2
},
"cloudfront-default-root-object-configured": {
"enabled": true,
"level": 2
},
"cloudfront-no-deprecated-ssl-protocols": {
"enabled": true,
"level": 2
},
"cloudfront-s3-origin-access-control-enabled": {
"enabled": true,
"level": 2
},
"cloudfront-viewer-policy-https": {
"enabled": true,
"level": 2
}
}
},
"ALB": {
"enabled": true,
"rules": {
"alb-http-drop-invalid-header-enabled": {
"enabled": false,
"level": 2
},
"alb-waf-enabled": {
"enabled": true,
"level": 2
},
"elb-cross-zone-load-balancing-enabled": {
"enabled": false,
"level": 2
},
"elb-deletion-protection-enabled": {
"enabled": true,
"level": 1
},
"elb-logging-enabled": {
"enabled": true,
"level": 2
}
}
},
"API GW": {
"enabled": true,
"rules": {
"api-gwv2-access-logs-enabled": {
"enabled": true,
"level": 2
},
"api-gwv2-authorization-type-configured": {
"enabled": true,
"level": 1
},
"api-gw-associated-with-waf": {
"enabled": true,
"level": 2
},
"api-gw-cache-enabled-and-encrypted": {
"enabled": true,
"level": 2
},
"api-gw-execution-logging-enabled": {
"enabled": true,
"level": 2
},
"api-gw-xray-enabled": {
"enabled": true,
"level": 1
}
}
},
"EC2": {
"enabled": true,
"rules": {
"ec2-ebs-encryption-by-default": {
"enabled": true,
"level": 2
},
"ec2-imdsv2-check": {
"enabled": true,
"level": 2
},
"ec2-instance-detailed-monitoring-enabled": {
"enabled": true,
"level": 2
},
"ec2-instance-managed-by-systems-manager": {
"enabled": false,
"level": 2
},
"ec2-instance-profile-attached": {
"enabled": false,
"level": 2
},
"ec2-no-amazon-key-pair": {
"enabled": true,
"level": 1
},
"ec2-stopped-instance": {
"enabled": true,
"level": 2
},
"ec2-token-hop-limit-check": {
"enabled": true,
"level": 2
}
}
},
"ASG": {
"enabled": true,
"rules": {
"autoscaling-group-elb-healthcheck-required": {
"enabled": true,
"level": 2
},
"autoscaling-multiple-az": {
"enabled": true,
"level": 2
},
"autoscaling-launch-template": {
"enabled": true,
"level": 2
}
}
},
"ECS": {
"enabled": true,
"rules": {
"ecs-awsvpc-networking-enabled": {
"enabled": true,
"level": 2
},
"ecs-containers-nonprivileged": {
"enabled": true,
"level": 2
},
"ecs-containers-readonly-access": {
"enabled": false,
"level": 2
},
"ecs-container-insights-enabled": {
"enabled": true,
"level": 2
},
"ecs-fargate-latest-platform-version": {
"enabled": false,
"level": 2
},
"ecs-task-definition-log-configuration": {
"enabled": true,
"level": 2
},
"ecs-task-definition-memory-hard-limit": {
"enabled": false,
"level": 1
},
"ecs-task-definition-nonroot-user": {
"enabled": false,
"level": 1
}
}
},
"EKS": {
"enabled": true,
"rules": {
"eks-cluster-logging-enabled": {
"enabled": true,
"level": 2
},
"eks-cluster-secrets-encrypted": {
"enabled": true,
"level": 2
},
"eks-endpoint-no-public-access": {
"enabled": true,
"level": 1
}
}
},
"ECR": {
"enabled": true,
"rules": {
"ecr-private-image-scanning-enabled": {
"enabled": true,
"level": 2
},
"ecr-private-lifecycle-policy-configured": {
"enabled": true,
"level": 2
},
"ecr-private-tag-immutability-enabled": {
"enabled": true,
"level": 2
},
"ecr-kms-encryption-1": {
"enabled": true,
"level": 2
}
}
},
"S3": {
"enabled": true,
"rules": {
"s3-access-point-in-vpc-only": {
"enabled": false,
"level": 1
},
"s3-bucket-default-lock-enabled": {
"enabled": false,
"level": 1
},
"s3-bucket-level-public-access-prohibited": {
"enabled": true,
"level": 2
},
"s3-bucket-logging-enabled": {
"enabled": true,
"level": 1
},
"s3-bucket-ssl-requests-only": {
"enabled": true,
"level": 2
},
"s3-bucket-versioning-enabled": {
"enabled": true,
"level": 2
},
"s3-default-encryption-kms": {
"enabled": true,
"level": 2
},
"s3-event-notifications-enabled": {
"enabled": false,
"level": 1
},
"s3-last-backup-recovery-point-created": {
"enabled": false,
"level": 1
},
"s3-lifecycle-policy-check": {
"enabled": true,
"level": 2
}
}
},
"RDS": {
"enabled": true,
"rules": {
"aurora-last-backup-recovery-point-created": {
"enabled": true,
"level": 2
},
"aurora-mysql-backtracking-enabled": {
"enabled": true,
"level": 2
},
"db-instance-backup-enabled": {
"enabled": true,
"level": 2
},
"rds-cluster-auto-minor-version-upgrade-enable": {
"enabled": true,
"level": 2
},
"rds-cluster-default-admin-check": {
"enabled": true,
"level": 2
},
"rds-cluster-deletion-protection-enabled": {
"enabled": true,
"level": 1
},
"rds-cluster-encrypted-at-rest": {
"enabled": true,
"level": 2
},
"rds-cluster-iam-authentication-enabled": {
"enabled": true,
"level": 2
},
"rds-cluster-multi-az-enabled": {
"enabled": true,
"level": 2
},
"rds-db-security-group-not-allowed": {
"enabled": true,
"level": 2
},
"rds-enhanced-monitoring-enabled": {
"enabled": true,
"level": 2
},
"rds-instance-public-access-check": {
"enabled": true,
"level": 2
},
"rds-logging-enabled": {
"enabled": true,
"level": 2
},
"rds-snapshot-encrypted": {
"enabled": false,
"level": 2
}
}
},
"ElastiCache": {
"enabled": true,
"rules": {
"elasticache-auto-minor-version-upgrade-check": {
"enabled": true,
"level": 2
},
"elasticache-redis-cluster-automatic-backup-check": {
"enabled": true,
"level": 2
},
"elasticache-repl-grp-auto-failover-enabled": {
"enabled": true,
"level": 2
},
"elasticache-repl-grp-encrypted-at-rest": {
"enabled": true,
"level": 2
},
"elasticache-repl-grp-encrypted-in-transit": {
"enabled": true,
"level": 2
},
"elasticache-subnet-group-check": {
"enabled": false,
"level": 2
}
}
},
"DynamoDB": {
"enabled": true,
"rules": {
"dynamodb-autoscaling-enabled": {
"enabled": true,
"level": 2
},
"dynamodb-last-backup-recovery-point-created": {
"enabled": true,
"level": 2
},
"dynamodb-pitr-enabled": {
"enabled": true,
"level": 2
},
"dynamodb-table-deletion-protection-enabled": {
"enabled": true,
"level": 1
},
"dynamodb-table-encrypted-kms": {
"enabled": true,
"level": 2
},
"dynamodb-table-encryption-enabled": {
"enabled": true,
"level": 2
}
}
},
"EFS": {
"enabled": true,
"rules": {
"efs-access-point-enforce-root-directory": {
"enabled": true,
"level": 2
},
"efs-access-point-enforce-user-identity": {
"enabled": true,
"level": 2
},
"efs-automatic-backups-enabled": {
"enabled": true,
"level": 2
},
"efs-encrypted-check": {
"enabled": true,
"level": 2
},
"efs-mount-target-public-accessible": {
"enabled": false,
"level": 2
}
}
},
"Lambda": {
"enabled": true,
"rules": {
"lambda-dlq-check": {
"enabled": false,
"level": 1
},
"lambda-function-public-access-prohibited": {
"enabled": false,
"level": 2
},
"lambda-function-settings-check": {
"enabled": true,
"level": 2
},
"lambda-inside-vpc": {
"enabled": false,
"level": 1
}
}
},
"CloudWatch": {
"enabled": true,
"rules": {
"cw-loggroup-retention-period-check": {
"enabled": true,
"level": 2
},
"cloudwatch-alarm-settings-check": {
"enabled": false,
"level": 2
}
}
},
"KMS": {
"enabled": true,
"rules": {
"cmk-backing-key-rotation-enabled": {
"enabled": true,
"level": 2
}
}
},
"Secrets Manager": {
"enabled": true,
"rules": {
"secretsmanager-rotation-enabled-check": {
"enabled": true,
"level": 2
},
"secretsmanager-scheduled-rotation-success-check": {
"enabled": true,
"level": 1
},
"secretsmanager-secret-periodic-rotation": {
"enabled": true,
"level": 2
}
}
},
"WAFv2": {
"enabled": true,
"rules": {
"wafv2-logging-enabled": {
"enabled": true,
"level": 2
},
"wafv2-rulegroup-logging-enabled": {
"enabled": true,
"level": 2
},
"wafv2-rulegroup-not-empty": {
"enabled": true,
"level": 2
},
"wafv2-webacl-not-empty": {
"enabled": true,
"level": 2
}
}
},
"IAM": {
"enabled": false,
"rules": {
"iam-policy-no-statements-with-admin-access": {
"enabled": true,
"level": 1
},
"iam-policy-no-statements-with-full-access": {
"enabled": true,
"level": 1
},
"iam-role-managed-policy-check": {
"enabled": true,
"level": 1
}
}
},
"CodeSeries": {
"enabled": true,
"rules": {
"codebuild-project-environment-privileged-check": {
"enabled": true,
"level": 1
},
"codebuild-project-logging-enabled": {
"enabled": true,
"level": 2
},
"codedeploy-auto-rollback-monitor-enabled": {
"enabled": true,
"level": 2
}
}
},
"SNS": {
"enabled": true,
"rules": {
"sns-encrypted-kms": {
"enabled": true,
"level": 2
},
"sns-topic-message-delivery-notification-enabled": {
"enabled": true,
"level": 2
}
}
},
"Security Hub": {
"enabled": true,
"rules": {
"securityhub-enabled": {
"enabled": true,
"level": 1
}
}
}
}

View File

@ -1,3 +0,0 @@
resource,scope
sg-04e88ce667a9bac70 / sgr-0b5cea485d7e46045,restricted-common-ports
test
1 resource,scope
2 sg-04e88ce667a9bac70 / sgr-0b5cea485d7e46045,restricted-common-ports
3 test

77
main.py
View File

@ -1,7 +1,3 @@
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
import argparse
from InquirerLib import prompt from InquirerLib import prompt
from InquirerLib.InquirerPy.utils import InquirerPyKeybindings from InquirerLib.InquirerPy.utils import InquirerPyKeybindings
from InquirerLib.InquirerPy.base import Choice from InquirerLib.InquirerPy.base import Choice
@ -18,26 +14,6 @@ prompt_key_bindings: InquirerPyKeybindings = {
} }
def get_command_line_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--level",
help="Only perform checks if level <= rule_level. Default: 1",
type=int,
choices=[1, 2],
default=1,
)
parser.add_argument(
"--ruleset", help="Use predefined bp rule sets. Please provide filename."
)
parser.add_argument(
"--show-all",
help="Show all resources including compliant one.",
action="store_true",
)
return parser.parse_args()
def ask_services_to_enable(bp): def ask_services_to_enable(bp):
cli_questions = [ cli_questions = [
{ {
@ -57,43 +33,30 @@ def ask_services_to_enable(bp):
return bp return bp
def perform_bp_rules_check(bp, level=2): def perform_bp_rules_check(bp):
with ThreadPoolExecutor() as executor: for service_name, service in bp.items():
futures = [
executor.submit(_rule_check, service_name, service, level)
for service_name, service in bp.items()
]
[future.result() for future in futures]
return bp
def _rule_check(service_name, service, level):
now = datetime.now()
if not service["enabled"]: if not service["enabled"]:
return continue
if service_name == "Lambda": if service_name == "Lambda":
service_name = "_lambda" service_name = "_lambda"
rule_checker = getattr(services, convert_snake_case(service_name)).rule_checker() module = getattr(services, convert_snake_case(service_name))
for rule_name, rule in service["rules"].items(): for rule_name, rule in service["rules"].items():
if not rule["enabled"] or rule["level"] < level: if not rule["enabled"]:
continue continue
rule["result"] = rule_checker.check_rule(convert_snake_case(rule_name))
elapsed_time = datetime.now() - now rule["result"] = getattr(module, convert_snake_case(rule_name))()
print(convert_snake_case(service_name), elapsed_time.total_seconds()) return bp
def show_bp_result(bp, level=2, show_all=False, excluded_resources={}): def show_bp_result(bp):
for service_name, service in bp.items(): for service_name, service in bp.items():
if not service["enabled"]: if not service["enabled"]:
continue continue
print(f"{'=' * 25} {service_name + ' ':=<30}") print(f"{'=' * 25} {service_name + ' ':=<30}")
for rule_name, rule in service["rules"].items(): for rule_name, rule in service["rules"].items():
if not rule["enabled"] or rule["level"] < level: if not rule["enabled"]:
continue continue
if rule["result"].passed: if rule["result"].passed:
@ -110,31 +73,15 @@ def show_bp_result(bp, level=2, show_all=False, excluded_resources={}):
mark = "" mark = ""
print(f"{style}{rule_name:50}{Style.RESET_ALL} - {color}{mark}{Fore.RESET}") print(f"{style}{rule_name:50}{Style.RESET_ALL} - {color}{mark}{Fore.RESET}")
if show_all:
for resource in rule["result"].compliant_resources:
print(f" - {Style.DIM}{resource}{Style.RESET_ALL}")
for resource in rule["result"].non_compliant_resources: for resource in rule["result"].non_compliant_resources:
if excluded_resources.get(resource) in [rule_name, "all"]:
print(f" - {Style.DIM}{resource}{Style.RESET_ALL}")
else:
print(f" - {color}{resource}{Fore.RESET}") print(f" - {color}{resource}{Fore.RESET}")
print() print()
if __name__ == "__main__": if __name__ == "__main__":
args = get_command_line_args() bp = load_bp_from_file()
excluded_resources = parse_excluded_resources()
bp = load_bp_from_file(default_ruleset=args.ruleset)
bp = ask_services_to_enable(bp) bp = ask_services_to_enable(bp)
save_bp_to_file(bp) save_bp_to_file(bp)
bp = perform_bp_rules_check(bp, level=args.level) bp = perform_bp_rules_check(bp)
show_bp_result( show_bp_result(bp)
bp,
level=args.level,
show_all=args.show_all,
excluded_resources=excluded_resources,
)

View File

@ -1,5 +1,4 @@
from pydantic import BaseModel from pydantic import BaseModel
from utils import convert_snake_case
from typing import List from typing import List
@ -7,23 +6,3 @@ class RuleCheckResult(BaseModel):
passed: bool passed: bool
compliant_resources: List[str] compliant_resources: List[str]
non_compliant_resources: List[str] non_compliant_resources: List[str]
class RuleChecker:
def __init__(self):
pass
def check_rule(self, rule_name) -> RuleCheckResult:
check_func = getattr(self, convert_snake_case(rule_name))
try:
result = check_func()
except Exception as e:
result = RuleCheckResult(
passed=False,
compliant_resources=[],
non_compliant_resources=[
"Rule check failed due to folling errors: ",
str(e),
],
)
return result

View File

@ -16,6 +16,7 @@ from . import (
elasticache, elasticache,
iam, iam,
_lambda, _lambda,
tags,
s3, s3,
secrets_manager, secrets_manager,
security_hub, security_hub,

View File

@ -1,24 +1,18 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
import json import json
class LambdaRuleChecker(RuleChecker): client = boto3.client("lambda")
def __init__(self): iam_client = boto3.client("iam")
self.client = boto3.client("lambda")
self.iam_client = boto3.client("iam")
@cached_property
def functions(self):
return self.client.list_functions()["Functions"]
def lambda_dlq_check(self): def lambda_dlq_check():
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
functions = client.list_functions()["Functions"]
for function in self.functions: for function in functions:
if "DeadLetterConfig" in function: if "DeadLetterConfig" in function:
compliant_resource.append(function["FunctionArn"]) compliant_resource.append(function["FunctionArn"])
else: else:
@ -30,24 +24,17 @@ class LambdaRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def lambda_function_public_access_prohibited(self):
def lambda_function_public_access_prohibited():
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
functions = client.list_functions()["Functions"]
for function in self.functions: for function in functions:
try: try:
policy = json.loads( policy = json.loads(client.get_policy(FunctionName=function["FunctionName"])["Policy"])
self.client.get_policy(FunctionName=function["FunctionName"])[
"Policy"
]
)
for statement in policy["Statement"]: for statement in policy["Statement"]:
if statement["Principal"] in [ if statement["Principal"] in ["*", "", '{"AWS": ""}', '{"AWS": "*"}']:
"*",
"",
'{"AWS": ""}',
'{"AWS": "*"}',
]:
non_compliant_resources.append(function["FunctionArn"]) non_compliant_resources.append(function["FunctionArn"])
break break
else: else:
@ -64,18 +51,17 @@ class LambdaRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def lambda_function_settings_check(self):
def lambda_function_settings_check():
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
functions = client.list_functions()["Functions"]
default_timeout = 3 default_timeout = 3
default_memory_size = 128 default_memory_size = 128
for function in self.functions: for function in functions:
if ( if function["Timeout"] == default_timeout or function["MemorySize"] == default_memory_size:
function["Timeout"] == default_timeout
or function["MemorySize"] == default_memory_size
):
non_compliant_resources.append(function["FunctionArn"]) non_compliant_resources.append(function["FunctionArn"])
else: else:
compliant_resource.append(function["FunctionArn"]) compliant_resource.append(function["FunctionArn"])
@ -86,11 +72,13 @@ class LambdaRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def lambda_inside_vpc(self):
def lambda_inside_vpc():
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
functions = client.list_functions()["Functions"]
for function in self.functions: for function in functions:
if "VpcConfig" in function: if "VpcConfig" in function:
compliant_resource.append(function["FunctionArn"]) compliant_resource.append(function["FunctionArn"])
else: else:
@ -101,6 +89,3 @@ class LambdaRuleChecker(RuleChecker):
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = LambdaRuleChecker

View File

@ -1,150 +1,123 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class ALBRuleChecker(RuleChecker): client = boto3.client("elbv2")
def __init__(self): wafv2_client = boto3.client("wafv2")
self.client = boto3.client("elbv2")
self.wafv2_client = boto3.client("wafv2")
@cached_property def alb_http_drop_invalid_header_enabled():
def load_balancers(self): load_balancers = client.describe_load_balancers()
return self.client.describe_load_balancers()["LoadBalancers"]
@cached_property
def load_balancer_attributes(self):
responses = [
self.client.describe_load_balancer_attributes(
LoadBalancerArn=load_balancer["LoadBalancerArn"]
)
for load_balancer in self.load_balancers
]
return {
load_balancer["LoadBalancerArn"]: response
for load_balancer, response in zip(self.load_balancers, responses)
}
def alb_http_drop_invalid_header_enabled(self):
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for load_balancer in load_balancers['LoadBalancers']:
for load_balancer in self.load_balancers: response = client.describe_load_balancer_attributes(
response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]] LoadBalancerArn=load_balancer['LoadBalancerArn']
)
result = [ result = [
attribute attribute
for attribute in filter( for attribute in filter(
lambda x: x["Key"] lambda x: x['Key'] == "routing.http.drop_invalid_header_fields.enabled"
== "routing.http.drop_invalid_header_fields.enabled" and x['Value'] == "true",
and x["Value"] == "true", response['Attributes'],
response["Attributes"],
) )
] ]
if result: if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
compliant_resource.append(load_balancer["LoadBalancerArn"]) else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
else:
non_compliant_resources.append(load_balancer["LoadBalancerArn"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def alb_waf_enabled(self):
def alb_waf_enabled():
load_balancers = client.describe_load_balancers()
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for load_balancer in load_balancers['LoadBalancers']:
for load_balancer in self.load_balancers: response = wafv2_client.get_web_acl_for_resource(
response = self.wafv2_client.get_web_acl_for_resource( ResourceArn=load_balancer['LoadBalancerArn']
ResourceArn=load_balancer["LoadBalancerArn"]
) )
if "WebACL" in response: if 'WebACL' in response: compliant_resource.append(load_balancer['LoadBalancerArn'])
compliant_resource.append(load_balancer["LoadBalancerArn"]) else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
else:
non_compliant_resources.append(load_balancer["LoadBalancerArn"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elb_cross_zone_load_balancing_enabled(self):
def elb_cross_zone_load_balancing_enabled():
load_balancers = client.describe_load_balancers()
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for load_balancer in load_balancers['LoadBalancers']:
for load_balancer in self.load_balancers: response = client.describe_load_balancer_attributes(
response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]] LoadBalancerArn=load_balancer['LoadBalancerArn']
)
result = [ result = [
attribute attribute
for attribute in filter( for attribute in filter(
lambda x: x["Key"] == "load_balancing.cross_zone.enabled" lambda x: x['Key'] == "load_balancing.cross_zone.enabled"
and x["Value"] == "true", and x['Value'] == "true",
response["Attributes"], response['Attributes'],
) )
] ]
if result: if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
compliant_resource.append(load_balancer["LoadBalancerArn"]) else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
else:
non_compliant_resources.append(load_balancer["LoadBalancerArn"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elb_deletion_protection_enabled(self):
def elb_deletion_protection_enabled():
load_balancers = client.describe_load_balancers()
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for load_balancer in load_balancers['LoadBalancers']:
for load_balancer in self.load_balancers: response = client.describe_load_balancer_attributes(
response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]] LoadBalancerArn=load_balancer['LoadBalancerArn']
)
result = [ result = [
attribute attribute
for attribute in filter( for attribute in filter(
lambda x: x["Key"] == "deletion_protection.enabled" lambda x: x['Key'] == "deletion_protection.enabled"
and x["Value"] == "true", and x['Value'] == "true",
response["Attributes"], response['Attributes'],
) )
] ]
if result: if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
compliant_resource.append(load_balancer["LoadBalancerArn"]) else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
else:
non_compliant_resources.append(load_balancer["LoadBalancerArn"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elb_logging_enabled(self):
def elb_logging_enabled():
load_balancers = client.describe_load_balancers()
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for load_balancer in load_balancers['LoadBalancers']:
for load_balancer in self.load_balancers: response = client.describe_load_balancer_attributes(
response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]] LoadBalancerArn=load_balancer['LoadBalancerArn']
)
result = [ result = [
attribute attribute
for attribute in filter( for attribute in filter(
lambda x: x["Key"] == "access_logs.s3.enabled" lambda x: x['Key'] == "access_logs.s3.enabled"
and x["Value"] == "true", and x['Value'] == "true",
response["Attributes"], response['Attributes'],
) )
] ]
if result: if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
compliant_resource.append(load_balancer["LoadBalancerArn"]) else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
else:
non_compliant_resources.append(load_balancer["LoadBalancerArn"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = ALBRuleChecker

View File

@ -1,203 +1,41 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class APIGatewayRuleChecker(RuleChecker):
    """Config-rule checks for API Gateway v1 (REST) and v2 (HTTP) APIs."""

    def __init__(self):
        self.v1_client = boto3.client("apigateway")
        self.v2_client = boto3.client("apigatewayv2")

    @cached_property
    def http_apis(self):
        """All API Gateway v2 (HTTP) APIs, fetched once per run."""
        return self.v2_client.get_apis()["Items"]

    @cached_property
    def rest_apis(self):
        """All API Gateway v1 (REST) APIs, fetched once per run."""
        return self.v1_client.get_rest_apis()["items"]

    @cached_property
    def rest_api_stages(self):
        """Map of REST API id -> get_stages() response, fetched once per run."""
        responses = [
            self.v1_client.get_stages(restApiId=api["id"]) for api in self.rest_apis
        ]
        return {
            api["id"]: response for api, response in zip(self.rest_apis, responses)
        }

    def _rest_stage_arn(self, api, stage):
        """Build the ARN used to identify a REST API stage in results."""
        region = self.v1_client.meta.region_name
        return f"arn:aws:apigateway:{region}::/restapis/{api['id']}/stages/{stage['stageName']}"

    def api_gwv2_access_logs_enabled(self):
        """Flag HTTP API stages that have no AccessLogSettings configured."""
        compliant_resources = []
        non_compliant_resources = []
        for api in self.http_apis:
            stages = self.v2_client.get_stages(ApiId=api["ApiId"])
            names = [
                f"{api['Name']} / {stage['StageName']}" for stage in stages["Items"]
            ]
            non_compliant_resources += [
                name
                for name, stage in zip(names, stages["Items"])
                if "AccessLogSettings" not in stage
            ]
            # Everything not flagged above for this API counts as compliant.
            compliant_resources += list(set(names) - set(non_compliant_resources))
        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def api_gwv2_authorization_type_configured(self):
        """Flag HTTP API routes whose AuthorizationType is NONE."""
        compliant_resources = []
        non_compliant_resources = []
        for api in self.http_apis:
            response = self.v2_client.get_routes(ApiId=api["ApiId"])
            names = [
                f"{api['Name']} / {route['RouteKey']}" for route in response["Items"]
            ]
            non_compliant_resources += [
                name
                for name, route in zip(names, response["Items"])
                if route["AuthorizationType"] == "NONE"
            ]
            compliant_resources += list(set(names) - set(non_compliant_resources))
        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def api_gw_associated_with_waf(self):
        """Flag REST API stages with no WAF web ACL (`webAclArn`) attached."""
        compliant_resources = []
        non_compliant_resources = []
        for api in self.rest_apis:
            stages = self.rest_api_stages[api["id"]]
            for stage in stages["item"]:
                stage_arn = self._rest_stage_arn(api, stage)
                if "webAclArn" in stage:
                    compliant_resources.append(stage_arn)
                else:
                    non_compliant_resources.append(stage_arn)
        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def api_gw_cache_enabled_and_encrypted(self):
        """Flag REST API stages without caching enabled AND cache encryption.

        The "*/*" key holds the stage-wide default method settings.
        """
        compliant_resources = []
        non_compliant_resources = []
        for api in self.rest_apis:
            stages = self.rest_api_stages[api["id"]]
            non_compliant_resources += [
                self._rest_stage_arn(api, stage)
                for stage in stages["item"]
                if "*/*" not in stage["methodSettings"]
                or not stage["methodSettings"]["*/*"]["cachingEnabled"]
                or not stage["methodSettings"]["*/*"]["cacheDataEncrypted"]
            ]
            compliant_resources += list(
                set(self._rest_stage_arn(api, stage) for stage in stages["item"])
                - set(non_compliant_resources)
            )
        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def api_gw_execution_logging_enabled(self):
        """Flag REST API stages whose execution logging level is unset or OFF."""
        compliant_resources = []
        non_compliant_resources = []
        for api in self.rest_apis:
            stages = self.rest_api_stages[api["id"]]
            non_compliant_resources += [
                self._rest_stage_arn(api, stage)
                for stage in stages["item"]
                if "*/*" not in stage["methodSettings"]
                or "loggingLevel" not in stage["methodSettings"]["*/*"]
                or stage["methodSettings"]["*/*"]["loggingLevel"] == "OFF"
            ]
            compliant_resources += list(
                set(self._rest_stage_arn(api, stage) for stage in stages["item"])
                - set(non_compliant_resources)
            )
        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def api_gw_xray_enabled(self):
        """Flag REST API stages with X-Ray tracing disabled."""
        compliant_resources = []
        non_compliant_resources = []
        for api in self.rest_apis:
            stages = self.rest_api_stages[api["id"]]
            non_compliant_resources += [
                self._rest_stage_arn(api, stage)
                for stage in stages["item"]
                if not stage["tracingEnabled"]
            ]
            compliant_resources += list(
                set(self._rest_stage_arn(api, stage) for stage in stages["item"])
                - set(non_compliant_resources)
            )
        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )
rule_checker = APIGatewayRuleChecker

View File

@ -1,26 +1,17 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class ASGRuleChecker(RuleChecker):
    """Config-rule checks for EC2 Auto Scaling groups."""

    def __init__(self):
        self.client = boto3.client("autoscaling")

    @cached_property
    def asgs(self):
        """All Auto Scaling groups, fetched once per run."""
        return self.client.describe_auto_scaling_groups()["AutoScalingGroups"]

    def autoscaling_group_elb_healthcheck_required(self):
        """Flag ASGs attached to an LB/target group but not using ELB health checks.

        BUG FIX: the original condition was `A or B and C`, which (by Python
        precedence) flagged every ASG with a classic load balancer attached even
        when HealthCheckType was already "ELB". Parenthesized as `(A or B) and C`.
        """
        compliant_resources = []
        non_compliant_resources = []
        for asg in self.asgs:
            attached_to_lb = asg["LoadBalancerNames"] or asg["TargetGroupARNs"]
            if attached_to_lb and asg["HealthCheckType"] != "ELB":
                non_compliant_resources.append(asg["AutoScalingGroupARN"])
            else:
                compliant_resources.append(asg["AutoScalingGroupARN"])
        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def autoscaling_multiple_az(self):
        """Flag ASGs that span only a single availability zone."""
        compliant_resources = []
        non_compliant_resources = []
        for asg in self.asgs:
            if len(asg["AvailabilityZones"]) > 1:
                compliant_resources.append(asg["AutoScalingGroupARN"])
            else:
                non_compliant_resources.append(asg["AutoScalingGroupARN"])
        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )

    def autoscaling_launch_template(self):
        """Flag ASGs still using a legacy launch configuration instead of a launch template."""
        compliant_resources = []
        non_compliant_resources = []
        for asg in self.asgs:
            if "LaunchConfigurationName" in asg:
                non_compliant_resources.append(asg["AutoScalingGroupARN"])
            else:
                compliant_resources.append(asg["AutoScalingGroupARN"])
        return RuleCheckResult(
            passed=not non_compliant_resources,
            compliant_resources=compliant_resources,
            non_compliant_resources=non_compliant_resources,
        )
rule_checker = ASGRuleChecker

View File

@ -1,33 +1,17 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class CloudFrontRuleChecker(RuleChecker):
    """Config-rule checks for CloudFront distributions."""

    def __init__(self):
        self.client = boto3.client("cloudfront")

    @cached_property
    def distributions(self):
        """All distribution summaries; empty list when the account has none."""
        return self.client.list_distributions()["DistributionList"].get("Items", [])

    @cached_property
    def distribution_details(self):
        """Map of distribution Id -> full get_distribution() detail, fetched once."""
        responses = [
            self.client.get_distribution(Id=distribution["Id"])["Distribution"]
            for distribution in self.distributions
        ]
        return {
            distribution["Id"]: response
            for distribution, response in zip(self.distributions, responses)
        }
def cloudfront_accesslogs_enabled(self):
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]["Items"]
for distribution in self.distributions: for distribution in distributions:
distribution = self.distribution_details[distribution["Id"]] distribution = client.get_distribution(Id=distribution["Id"])["Distribution"]
if ( if (
"Logging" in distribution["DistributionConfig"] "Logging" in distribution["DistributionConfig"]
and distribution["DistributionConfig"]["Logging"]["Enabled"] == True and distribution["DistributionConfig"]["Logging"]["Enabled"] == True
@ -42,11 +26,13 @@ class CloudFrontRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def cloudfront_associated_with_waf(self):
def cloudfront_associated_with_waf():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]["Items"]
for distribution in self.distributions: for distribution in distributions:
if "WebACLId" in distribution and distribution["WebACLId"] != "": if "WebACLId" in distribution and distribution["WebACLId"] != "":
compliant_resources.append(distribution["ARN"]) compliant_resources.append(distribution["ARN"])
else: else:
@ -58,12 +44,14 @@ class CloudFrontRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def cloudfront_default_root_object_configured(self):
def cloudfront_default_root_object_configured():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]["Items"]
for distribution in self.distributions: for distribution in distributions:
distribution = self.distribution_details[distribution["Id"]] distribution = client.get_distribution(Id=distribution["Id"])["Distribution"]
if distribution["DistributionConfig"]["DefaultRootObject"] != "": if distribution["DistributionConfig"]["DefaultRootObject"] != "":
compliant_resources.append(distribution["ARN"]) compliant_resources.append(distribution["ARN"])
@ -76,18 +64,18 @@ class CloudFrontRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def cloudfront_no_deprecated_ssl_protocols(self):
def cloudfront_no_deprecated_ssl_protocols():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]["Items"]
for distribution in self.distributions: for distribution in distributions:
for origin in distribution["Origins"]["Items"]: for origin in distribution["Origins"]["Items"]:
if ( if (
"CustomOriginConfig" in origin "CustomOriginConfig" in origin
and origin["CustomOriginConfig"]["OriginProtocolPolicy"] and origin["CustomOriginConfig"]["OriginProtocolPolicy"] in ["https-only", "match-viewer"]
in ["https-only", "match-viewer"] and "SSLv3" in origin["CustomOriginConfig"]["OriginSslProtocols"]["Items"]
and "SSLv3"
in origin["CustomOriginConfig"]["OriginSslProtocols"]["Items"]
): ):
non_compliant_resources.append(distribution["ARN"]) non_compliant_resources.append(distribution["ARN"])
@ -101,11 +89,13 @@ class CloudFrontRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def cloudfront_s3_origin_access_control_enabled(self):
def cloudfront_s3_origin_access_control_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]
for distribution in self.distributions: for distribution in distributions["Items"]:
for origin in distribution["Origins"]["Items"]: for origin in distribution["Origins"]["Items"]:
if "S3OriginConfig" in origin and origin["OriginAccessControlId"] == "": if "S3OriginConfig" in origin and origin["OriginAccessControlId"] == "":
non_compliant_resources.append(distribution["ARN"]) non_compliant_resources.append(distribution["ARN"])
@ -119,15 +109,14 @@ class CloudFrontRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def cloudfront_viewer_policy_https(self):
def cloudfront_viewer_policy_https():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
distributions = client.list_distributions()["DistributionList"]["Items"]
for distribution in self.distributions: for distribution in distributions:
if ( if distribution["DefaultCacheBehavior"]["ViewerProtocolPolicy"] == "allow-all":
distribution["DefaultCacheBehavior"]["ViewerProtocolPolicy"]
== "allow-all"
):
non_compliant_resources.append(distribution["ARN"]) non_compliant_resources.append(distribution["ARN"])
continue continue
@ -147,6 +136,3 @@ class CloudFrontRuleChecker(RuleChecker):
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = CloudFrontRuleChecker

View File

@ -1,16 +1,15 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
import boto3 import boto3
class CloudWatchRuleChecker(RuleChecker):
    """Config-rule checks for CloudWatch alarms and log groups."""

    def __init__(self):
        self.client = boto3.client("cloudwatch")
        self.logs_client = boto3.client("logs")
def cw_loggroup_retention_period_check(self):
def cw_loggroup_retention_period_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
log_groups = self.logs_client.describe_log_groups()["logGroups"] log_groups = logs_client.describe_log_groups()["logGroups"]
# This rule should check if `retentionInDays` is less than n days. # This rule should check if `retentionInDays` is less than n days.
# But, instead of that, this will check if the retention setting is set to "Never expire" or not # But, instead of that, this will check if the retention setting is set to "Never expire" or not
@ -26,10 +25,11 @@ class CloudWatchRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def cloudwatch_alarm_settings_check(self):
def cloudwatch_alarm_settings_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
alarms = self.client.describe_alarms()["MetricAlarms"] alarms = client.describe_alarms()["MetricAlarms"]
parameters = { parameters = {
"MetricName": "", # required "MetricName": "", # required
"Threshold": None, "Threshold": None,
@ -55,6 +55,3 @@ class CloudWatchRuleChecker(RuleChecker):
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = CloudWatchRuleChecker

View File

@ -1,26 +1,21 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class CodeSeriesChecker(RuleChecker):
    """Config-rule checks for the Code* services (CodeBuild, CodeDeploy)."""

    def __init__(self):
        self.build_client = boto3.client("codebuild")
        self.deploy_client = boto3.client("codedeploy")

    @cached_property
    def projects(self):
        """Full detail for every CodeBuild project, fetched once per run.

        batch_get_projects requires a non-empty name list, so short-circuit
        when the account has no projects.
        """
        project_names = self.build_client.list_projects()["projects"]
        if not project_names:
            return []
        return self.build_client.batch_get_projects(names=project_names)["projects"]
def codebuild_project_environment_privileged_check(self):
def codebuild_project_environment_privileged_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
projects = build_client.list_projects()["projects"]
for project in self.projects: for project in projects:
if not project["environment"]["privilegedMode"]: project = build_client.batch_get_projects(names=[project])["projects"][0]
if project["environment"]["privilegedMode"] != True:
compliant_resources.append(project["arn"]) compliant_resources.append(project["arn"])
else: else:
non_compliant_resources.append(project["arn"]) non_compliant_resources.append(project["arn"])
@ -31,17 +26,17 @@ class CodeSeriesChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def codebuild_project_logging_enabled(self):
def codebuild_project_logging_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
projects = build_client.list_projects()["projects"]
for project in self.projects: for project in projects:
project = build_client.batch_get_projects(names=[project])["projects"][0]
logs_config = project["logsConfig"] logs_config = project["logsConfig"]
if ( if logs_config["cloudWatchLogs"]["status"] == "ENABLED" or logs_config["s3Logs"]["status"] == "ENABLED":
logs_config["cloudWatchLogs"]["status"] == "ENABLED"
or logs_config["s3Logs"]["status"] == "ENABLED"
):
compliant_resources.append(project["arn"]) compliant_resources.append(project["arn"])
else: else:
non_compliant_resources.append(project["arn"]) non_compliant_resources.append(project["arn"])
@ -52,40 +47,29 @@ class CodeSeriesChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def codedeploy_auto_rollback_monitor_enabled(self):
def codedeploy_auto_rollback_monitor_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
applications = deploy_client.list_applications()["applications"]
applications = self.deploy_client.list_applications()["applications"]
for application in applications: for application in applications:
deployment_group_names = self.deploy_client.list_deployment_groups( deployment_groups = deploy_client.list_deployment_groups(applicationName=application)["deploymentGroups"]
applicationName=application
)["deploymentGroups"]
if not deployment_group_names:
continue
deployment_groups = self.deploy_client.batch_get_deployment_groups(
applicationName=application, deploymentGroupNames=deployment_group_names
)["deploymentGroupsInfo"]
for deployment_group in deployment_groups: for deployment_group in deployment_groups:
deployment_group = deploy_client.get_deployment_group(
applicationName=application, deploymentGroupName=deployment_group
)["deploymentGroupInfo"]
if ( if (
deployment_group["alarmConfiguration"]["enabled"] deployment_group["alarmConfiguration"]["enabled"] == True
and deployment_group["autoRollbackConfiguration"]["enabled"] and deployment_group["autoRollbackConfiguration"]["enabled"] == True
): ):
compliant_resources.append(deployment_group["deploymentGroupId"]) compliant_resources.append(deployment_group["deploymentGroupId"])
else: else:
non_compliant_resources.append( non_compliant_resources.append(deployment_group["deploymentGroupId"])
deployment_group["deploymentGroupId"]
)
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = CodeSeriesChecker

View File

@ -1,43 +1,30 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property import datetime
from datetime import datetime, timedelta
from dateutil.tz import tzlocal from dateutil.tz import tzlocal
import boto3 import boto3
class DynamoDBRuleChecker(RuleChecker):
    """Config-rule checks for DynamoDB tables."""

    def __init__(self):
        self.client = boto3.client("dynamodb")
        self.backup_client = boto3.client("backup")
        self.autoscaling_client = boto3.client("application-autoscaling")

    @cached_property
    def tables(self):
        """Full describe_table() detail for every table, fetched once per run."""
        table_names = self.client.list_tables()["TableNames"]
        return [
            self.client.describe_table(TableName=table_name)["Table"]
            for table_name in table_names
        ]
def dynamodb_autoscaling_enabled(self): def dynamodb_autoscaling_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table in self.tables: for table_name in table_names:
if ( table = client.describe_table(TableName=table_name)["Table"]
table.get("BillingModeSummary", {}).get("BillingMode")
== "PAY_PER_REQUEST" if table.get("BillingModeSummary", {}).get("BillingMode") == "PAY_PER_REQUEST":
):
compliant_resources.append(table["TableArn"]) compliant_resources.append(table["TableArn"])
continue continue
scaling_policies = self.autoscaling_client.describe_scaling_policies( scaling_policies = autoscaling_client.describe_scaling_policies(
ServiceNamespace="dynamodb", ResourceId=f"table/{table['TableName']}" ServiceNamespace="dynamodb", ResourceId=f"table/{table_name}"
)["ScalingPolicies"] )["ScalingPolicies"]
scaling_policy_dimensions = [ scaling_policy_dimensions = [i["ScalableDimension"] for i in scaling_policies]
policy["ScalableDimension"] for policy in scaling_policies
]
if ( if (
"dynamodb:table:ReadCapacityUnits" in scaling_policy_dimensions "dynamodb:table:ReadCapacityUnits" in scaling_policy_dimensions
and "dynamodb:table:WriteCapacityUnits" in scaling_policy_dimensions and "dynamodb:table:WriteCapacityUnits" in scaling_policy_dimensions
@ -52,46 +39,24 @@ class DynamoDBRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def dynamodb_last_backup_recovery_point_created(self):
def dynamodb_last_backup_recovery_point_created():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table in self.tables: for table_name in table_names:
recovery_points = self.backup_client.list_recovery_points_by_resource( table = client.describe_table(TableName=table_name)["Table"]
ResourceArn=table["TableArn"] recovery_points = backup_client.list_recovery_points_by_resource(ResourceArn=table["TableArn"])[
)["RecoveryPoints"] "RecoveryPoints"
if not recovery_points: ]
recovery_point_creation_dates = sorted([i["CreationDate"] for i in recovery_points])
if len(recovery_point_creation_dates) == 0:
non_compliant_resources.append(table["TableArn"]) non_compliant_resources.append(table["TableArn"])
continue continue
latest_recovery_point = sorted( if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[-1] < datetime.timedelta(days=1):
[recovery_point["CreationDate"] for recovery_point in recovery_points]
)[-1]
if datetime.now(tz=tzlocal()) - latest_recovery_point > timedelta(days=1):
non_compliant_resources.append(table["TableArn"])
else:
compliant_resources.append(table["TableArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def dynamodb_pitr_enabled(self):
compliant_resources = []
non_compliant_resources = []
for table in self.tables:
backup = self.client.describe_continuous_backups(
TableName=table["TableName"]
)["ContinuousBackupsDescription"]
if (
backup["PointInTimeRecoveryDescription"]["PointInTimeRecoveryStatus"]
== "ENABLED"
):
compliant_resources.append(table["TableArn"]) compliant_resources.append(table["TableArn"])
else: else:
non_compliant_resources.append(table["TableArn"]) non_compliant_resources.append(table["TableArn"])
@ -102,11 +67,36 @@ class DynamoDBRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def dynamodb_table_deletion_protection_enabled(self):
def dynamodb_pitr_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table_name in table_names:
backup = client.describe_continuous_backups(TableName=table_name)["ContinuousBackupsDescription"]
table = client.describe_table(TableName=table_name)["Table"]
if backup["PointInTimeRecoveryDescription"]["PointInTimeRecoveryStatus"] == "ENABLED":
compliant_resources.append(table["TableArn"])
else:
non_compliant_resources.append(table["TableArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def dynamodb_table_deletion_protection_enabled():
compliant_resources = []
non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table_name in table_names:
table = client.describe_table(TableName=table_name)["Table"]
for table in self.tables:
if table["DeletionProtectionEnabled"] == True: if table["DeletionProtectionEnabled"] == True:
compliant_resources.append(table["TableArn"]) compliant_resources.append(table["TableArn"])
else: else:
@ -118,11 +108,15 @@ class DynamoDBRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def dynamodb_table_encrypted_kms(self):
def dynamodb_table_encrypted_kms():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table_name in table_names:
table = client.describe_table(TableName=table_name)["Table"]
for table in self.tables:
if ( if (
"SSEDescription" in table "SSEDescription" in table
and table["SSEDescription"]["Status"] == "ENABLED" and table["SSEDescription"]["Status"] == "ENABLED"
@ -138,15 +132,16 @@ class DynamoDBRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def dynamodb_table_encryption_enabled(self):
def dynamodb_table_encryption_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
table_names = client.list_tables()["TableNames"]
for table in self.tables: for table_name in table_names:
if ( table = client.describe_table(TableName=table_name)["Table"]
"SSEDescription" in table
and table["SSEDescription"]["Status"] == "ENABLED" if "SSEDescription" in table and table["SSEDescription"]["Status"] == "ENABLED":
):
compliant_resources.append(table["TableArn"]) compliant_resources.append(table["TableArn"])
else: else:
non_compliant_resources.append(table["TableArn"]) non_compliant_resources.append(table["TableArn"])
@ -156,6 +151,3 @@ class DynamoDBRuleChecker(RuleChecker):
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = DynamoDBRuleChecker

View File

@ -1,33 +1,22 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class EC2RuleChecker(RuleChecker): client = boto3.client("ec2")
def __init__(self): autoscaling_client = boto3.client("autoscaling")
self.client = boto3.client("ec2") ssm_client = boto3.client("ssm")
self.ssm_client = boto3.client("ssm")
@cached_property
def instances(self):
valid_instances = [
instance
for reservation in self.client.describe_instances()["Reservations"]
for instance in reservation["Instances"]
if instance["State"]["Name"] != "terminated"
]
return valid_instances
def ec2_ebs_encryption_by_default(self): def autoscaling_launch_template():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
asgs = autoscaling_client.describe_auto_scaling_groups()["AutoScalingGroups"]
volumes = self.client.describe_volumes()["Volumes"] for asg in asgs:
for volume in volumes: if "LaunchConfigurationName" in asg:
if volume["Encrypted"]: non_compliant_resources.append(asg["AutoScalingGroupARN"])
compliant_resources.append(volume["VolumeId"])
else: else:
non_compliant_resources.append(volume["VolumeId"]) compliant_resources.append(asg["AutoScalingGroupARN"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
@ -35,11 +24,34 @@ class EC2RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ec2_imdsv2_check(self):
def ec2_ebs_encryption_by_default():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
ebses = client.describe_volumes()["Volumes"]
for instance in self.instances: for ebs in ebses:
if ebs["Encrypted"] == True:
compliant_resources.append(ebs["VolumeId"])
else:
non_compliant_resources.append(ebs["VolumeId"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def ec2_imdsv2_check():
compliant_resources = []
non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
for reservation in reservations:
for instance in reservation["Instances"]:
if instance["State"]["Name"] == "terminated":
continue
if instance["MetadataOptions"]["HttpTokens"] == "required": if instance["MetadataOptions"]["HttpTokens"] == "required":
compliant_resources.append(instance["InstanceId"]) compliant_resources.append(instance["InstanceId"])
else: else:
@ -51,11 +63,16 @@ class EC2RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ec2_instance_detailed_monitoring_enabled(self):
def ec2_instance_detailed_monitoring_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
for instance in self.instances: for reservation in reservations:
for instance in reservation["Instances"]:
if instance["State"]["Name"] == "terminated":
continue
if instance["Monitoring"]["State"] == "enabled": if instance["Monitoring"]["State"] == "enabled":
compliant_resources.append(instance["InstanceId"]) compliant_resources.append(instance["InstanceId"])
else: else:
@ -67,18 +84,18 @@ class EC2RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ec2_instance_managed_by_systems_manager(self):
def ec2_instance_managed_by_systems_manager():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
informations = ssm_client.describe_instance_information()["InstanceInformationList"]
managed_instance_ids = [i["InstanceId"] for i in informations if i["PingStatus"]]
informations = self.ssm_client.describe_instance_information()[ for reservation in reservations:
"InstanceInformationList" for instance in reservation["Instances"]:
] if instance["State"]["Name"] == "terminated":
managed_instance_ids = [ continue
info["InstanceId"] for info in informations if info["PingStatus"]
]
for instance in self.instances:
if instance["InstanceId"] in managed_instance_ids: if instance["InstanceId"] in managed_instance_ids:
compliant_resources.append(instance["InstanceId"]) compliant_resources.append(instance["InstanceId"])
else: else:
@ -90,11 +107,16 @@ class EC2RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ec2_instance_profile_attached(self):
def ec2_instance_profile_attached():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
for instance in self.instances: for reservation in reservations:
for instance in reservation["Instances"]:
if instance["State"]["Name"] == "terminated":
continue
if "IamInstanceProfile" in instance: if "IamInstanceProfile" in instance:
compliant_resources.append(instance["InstanceId"]) compliant_resources.append(instance["InstanceId"])
else: else:
@ -106,11 +128,16 @@ class EC2RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ec2_no_amazon_key_pair(self):
def ec2_no_amazon_key_pair():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
for instance in self.instances: for reservation in reservations:
for instance in reservation["Instances"]:
if instance["State"]["Name"] == "terminated":
continue
if "KeyName" in instance: if "KeyName" in instance:
non_compliant_resources.append(instance["InstanceId"]) non_compliant_resources.append(instance["InstanceId"])
else: else:
@ -122,11 +149,16 @@ class EC2RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ec2_stopped_instance(self):
def ec2_stopped_instance():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
for instance in self.instances: for reservation in reservations:
for instance in reservation["Instances"]:
if instance["State"]["Name"] == "terminated":
continue
if instance["State"]["Name"] != "stopped": if instance["State"]["Name"] != "stopped":
compliant_resources.append(instance["InstanceId"]) compliant_resources.append(instance["InstanceId"])
else: else:
@ -138,11 +170,16 @@ class EC2RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ec2_token_hop_limit_check(self):
def ec2_token_hop_limit_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
reservations = client.describe_instances()["Reservations"]
for instance in self.instances: for reservation in reservations:
for instance in reservation["Instances"]:
if instance["State"]["Name"] == "terminated":
continue
if instance["MetadataOptions"]["HttpPutResponseHopLimit"] < 2: if instance["MetadataOptions"]["HttpPutResponseHopLimit"] < 2:
compliant_resources.append(instance["InstanceId"]) compliant_resources.append(instance["InstanceId"])
else: else:
@ -153,6 +190,3 @@ class EC2RuleChecker(RuleChecker):
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = EC2RuleChecker

View File

@ -1,21 +1,17 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
import botocore
class ECRRuleChecker(RuleChecker): client = boto3.client("ecr")
def __init__(self):
self.client = boto3.client("ecr")
@cached_property
def repositories(self):
return self.client.describe_repositories()["repositories"]
def ecr_private_image_scanning_enabled(self): def ecr_private_image_scanning_enabled():
repositories = client.describe_repositories()
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for repository in self.repositories: for repository in repositories["repositories"]:
if repository["imageScanningConfiguration"]["scanOnPush"] == True: if repository["imageScanningConfiguration"]["scanOnPush"] == True:
compliant_resource.append(repository["repositoryArn"]) compliant_resource.append(repository["repositoryArn"])
else: else:
@ -27,13 +23,15 @@ class ECRRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ecr_private_lifecycle_policy_configured(self):
def ecr_private_lifecycle_policy_configured():
repositories = client.describe_repositories()
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for repository in self.repositories: for repository in repositories["repositories"]:
try: try:
response = self.client.get_lifecycle_policy( response = client.get_lifecycle_policy(
registryId=repository["registryId"], registryId=repository["registryId"],
repositoryName=repository["repositoryName"], repositoryName=repository["repositoryName"],
) )
@ -50,11 +48,13 @@ class ECRRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ecr_private_tag_immutability_enabled(self):
def ecr_private_tag_immutability_enabled():
repositories = client.describe_repositories()
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for repository in self.repositories: for repository in repositories["repositories"]:
if repository["imageTagMutability"] == "IMMUTABLE": if repository["imageTagMutability"] == "IMMUTABLE":
compliant_resource.append(repository["repositoryArn"]) compliant_resource.append(repository["repositoryArn"])
else: else:
@ -66,11 +66,13 @@ class ECRRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ecr_kms_encryption_1(self):
def ecr_kms_encryption_1():
repositories = client.describe_repositories()
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for repository in self.repositories: for repository in repositories["repositories"]:
if repository["encryptionConfiguration"]["encryptionType"] == "KMS": if repository["encryptionConfiguration"]["encryptionType"] == "KMS":
compliant_resource.append(repository["repositoryArn"]) compliant_resource.append(repository["repositoryArn"])
else: else:
@ -81,6 +83,3 @@ class ECRRuleChecker(RuleChecker):
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = ECRRuleChecker

View File

@ -1,57 +1,24 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class ECSRuleChecker(RuleChecker): client = boto3.client("ecs")
def __init__(self):
self.client = boto3.client("ecs")
@cached_property
def task_definitions(self):
task_definition_arns = self.client.list_task_definitions(status="ACTIVE")[
"taskDefinitionArns"
]
latest_task_definitions = {}
# Filter latest task definition arns def ecs_awsvpc_networking_enabled():
for task_definition_arn in task_definition_arns:
family, revision = task_definition_arn.rsplit(":", 1)
latest_task_definitions[family] = max(
latest_task_definitions.get(family, 0), int(revision)
)
# Fetch latest task definition details
task_definitions = [
self.client.describe_task_definition(taskDefinition=f"{family}:{revision}")[
"taskDefinition"
]
for family, revision in latest_task_definitions.items()
]
return task_definitions
@cached_property
def clusters(self):
return self.client.describe_clusters(include=["SETTINGS"])["clusters"]
@cached_property
def services(self):
services = []
for cluster in self.clusters:
service_arns = self.client.list_services(
cluster=cluster["clusterArn"], launchType="FARGATE"
)["serviceArns"]
services += self.client.describe_services(
cluster=cluster["clusterArn"], services=service_arns
)["services"]
return services
def ecs_awsvpc_networking_enabled(self):
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
for task_definition in self.task_definitions:
if task_definition.get("networkMode") == "awsvpc": if task_definition.get("networkMode") == "awsvpc":
compliant_resources.append(task_definition["taskDefinitionArn"]) compliant_resources.append(task_definition["taskDefinitionArn"])
else: else:
@ -63,18 +30,26 @@ class ECSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ecs_containers_nonprivileged(self):
def ecs_containers_nonprivileged():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in self.task_definitions: for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
containers = task_definition["containerDefinitions"] containers = task_definition["containerDefinitions"]
privileged_containers = [
container for container in containers if container.get("privileged")
]
if privileged_containers: for container in containers:
if container.get("privileged"):
non_compliant_resources.append(task_definition["taskDefinitionArn"]) non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else: else:
compliant_resources.append(task_definition["taskDefinitionArn"]) compliant_resources.append(task_definition["taskDefinitionArn"])
@ -84,20 +59,26 @@ class ECSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ecs_containers_readonly_access(self):
def ecs_containers_readonly_access():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in self.task_definitions: for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
containers = task_definition["containerDefinitions"] containers = task_definition["containerDefinitions"]
not_readonly_containers = [
container
for container in containers
if not container.get("readonlyRootFilesystem")
]
if not_readonly_containers: for container in containers:
if not container.get("readonlyRootFilesystem"):
non_compliant_resources.append(task_definition["taskDefinitionArn"]) non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else: else:
compliant_resources.append(task_definition["taskDefinitionArn"]) compliant_resources.append(task_definition["taskDefinitionArn"])
@ -107,21 +88,17 @@ class ECSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ecs_container_insights_enabled(self):
def ecs_container_insights_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
for cluster in self.clusters: clusters = client.describe_clusters(include=["SETTINGS"])["clusters"]
container_insights_setting = [
setting
for setting in cluster["settings"]
if setting["name"] == "containerInsights"
]
if ( for cluster in clusters:
container_insights_setting container_insights_setting = [setting for setting in cluster["settings"] if setting["name"] == "containerInsights"]
and container_insights_setting[0]["value"] == "enabled"
): if container_insights_setting and container_insights_setting[0]["value"] == "enabled":
compliant_resources.append(cluster["clusterArn"]) compliant_resources.append(cluster["clusterArn"])
else: else:
non_compliant_resources.append(cluster["clusterArn"]) non_compliant_resources.append(cluster["clusterArn"])
@ -132,11 +109,17 @@ class ECSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ecs_fargate_latest_platform_version(self):
def ecs_fargate_latest_platform_version():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
cluster_arns = client.list_clusters()["clusterArns"]
for service in self.services: for cluster_arn in cluster_arns:
service_arns = client.list_services(cluster=cluster_arn, launchType="FARGATE")["serviceArns"]
services = client.describe_services(cluster=cluster_arn, services=service_arns)["services"]
for service in services:
if service["platformVersion"] == "LATEST": if service["platformVersion"] == "LATEST":
compliant_resources.append(service["serviceArn"]) compliant_resources.append(service["serviceArn"])
else: else:
@ -148,67 +131,26 @@ class ECSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ecs_task_definition_log_configuration(self):
def ecs_task_definition_log_configuration():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in self.task_definitions: for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
containers = task_definition["containerDefinitions"] containers = task_definition["containerDefinitions"]
log_disabled_containers = [ for container in containers:
container if "logConfiguration" not in container:
for container in containers
if "logConfiguration" not in container
]
if log_disabled_containers:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def ecs_task_definition_memory_hard_limit(self):
compliant_resources = []
non_compliant_resources = []
for task_definition in self.task_definitions:
containers = task_definition["containerDefinitions"]
containers_without_memory_limit = [
container for container in containers if "memory" not in container
]
if containers_without_memory_limit:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def ecs_task_definition_nonroot_user(self):
compliant_resources = []
non_compliant_resources = []
for task_definition in self.task_definitions:
containers = task_definition["containerDefinitions"]
privileged_containers = [
container
for container in containers
if container.get("user") in [None, "root"]
]
if privileged_containers:
non_compliant_resources.append(task_definition["taskDefinitionArn"]) non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else: else:
compliant_resources.append(task_definition["taskDefinitionArn"]) compliant_resources.append(task_definition["taskDefinitionArn"])
@ -219,4 +161,59 @@ class ECSRuleChecker(RuleChecker):
) )
rule_checker = ECSRuleChecker def ecs_task_definition_memory_hard_limit():
compliant_resources = []
non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
containers = task_definition["containerDefinitions"]
for container in containers:
if "memory" not in container:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def ecs_task_definition_nonroot_user():
compliant_resources = []
non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
containers = task_definition["containerDefinitions"]
for container in containers:
if container.get("user") in [None, "root"]:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)

View File

@ -1,26 +1,17 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class EFSRuleChecker(RuleChecker): client = boto3.client("efs")
def __init__(self): ec2_client = boto3.client("ec2")
self.client = boto3.client("efs")
self.ec2_client = boto3.client("ec2")
@cached_property
def access_points(self):
return self.client.describe_access_points()["AccessPoints"]
@cached_property def efs_access_point_enforce_root_directory():
def file_systems(self): access_points = client.describe_access_points()["AccessPoints"]
return self.client.describe_file_systems()["FileSystems"]
def efs_access_point_enforce_root_directory(self):
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for access_point in self.access_points: for access_point in access_points:
if access_point["RootDirectory"]["Path"] != "/": if access_point["RootDirectory"]["Path"] != "/":
compliant_resource.append(access_point["AccessPointArn"]) compliant_resource.append(access_point["AccessPointArn"])
else: else:
@ -32,11 +23,13 @@ class EFSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def efs_access_point_enforce_user_identity(self):
def efs_access_point_enforce_user_identity():
access_points = client.describe_access_points()["AccessPoints"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for access_point in self.access_points: for access_point in access_points:
if "PosixUser" in access_point: if "PosixUser" in access_point:
compliant_resource.append(access_point["AccessPointArn"]) compliant_resource.append(access_point["AccessPointArn"])
else: else:
@ -48,15 +41,16 @@ class EFSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def efs_automatic_backups_enabled(self):
def efs_automatic_backups_enabled():
file_systems = client.describe_file_systems()["FileSystems"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for file_system in self.file_systems: for file_system in file_systems:
response = self.client.describe_backup_policy( response = client.describe_backup_policy(
FileSystemId=file_system["FileSystemId"] FileSystemId=file_system["FileSystemId"]
) )
if response["BackupPolicy"]["Status"] == "ENABLED": if response["BackupPolicy"]["Status"] == "ENABLED":
compliant_resource.append(file_system["FileSystemArn"]) compliant_resource.append(file_system["FileSystemArn"])
else: else:
@ -68,12 +62,14 @@ class EFSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def efs_encrypted_check(self):
def efs_encrypted_check():
file_systems = client.describe_file_systems()["FileSystems"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for file_system in self.file_systems: for file_system in file_systems:
if file_system["Encrypted"]: if file_system["Encrypted"] == True:
compliant_resource.append(file_system["FileSystemArn"]) compliant_resource.append(file_system["FileSystemArn"])
else: else:
non_compliant_resources.append(file_system["FileSystemArn"]) non_compliant_resources.append(file_system["FileSystemArn"])
@ -84,18 +80,19 @@ class EFSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def efs_mount_target_public_accessible(self):
def efs_mount_target_public_accessible():
file_systems = client.describe_file_systems()["FileSystems"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for file_system in self.file_systems: for file_system in file_systems:
mount_targets = self.client.describe_mount_targets( mount_targets = client.describe_mount_targets(
FileSystemId=file_system["FileSystemId"] FileSystemId=file_system["FileSystemId"]
)["MountTargets"] )["MountTargets"]
for mount_target in mount_targets: for mount_target in mount_targets:
subnet_id = mount_target["SubnetId"] subnet_id = mount_target["SubnetId"]
routes = self.ec2_client.describe_route_tables( routes = ec2_client.describe_route_tables(
Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}] Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}]
)["RouteTables"][0]["Routes"] )["RouteTables"][0]["Routes"]
@ -108,17 +105,14 @@ class EFSRuleChecker(RuleChecker):
): ):
non_compliant_resources.append(file_system["FileSystemArn"]) non_compliant_resources.append(file_system["FileSystemArn"])
break break
else:
compliant_resource.append(file_system["FileSystemArn"])
compliant_resource = list(set(compliant_resource))
non_compliant_resources = list(set(non_compliant_resources)) non_compliant_resources = list(set(non_compliant_resources))
compliant_resource = list(
set(compliant_resource) - set(non_compliant_resources)
)
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = EFSRuleChecker

View File

@ -1,32 +1,24 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class EKSRuleChecker(RuleChecker): client = boto3.client("eks")
def __init__(self):
self.client = boto3.client("eks")
@cached_property
def clusters(self):
cluster_names = self.client.list_clusters()["clusters"]
return [
self.client.describe_cluster(name=cluster_name)["cluster"]
for cluster_name in cluster_names
]
def eks_cluster_logging_enabled(self): def eks_cluster_logging_enabled():
clusters = client.list_clusters()["clusters"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for cluster in self.clusters: for cluster in clusters:
response = client.describe_cluster(name=cluster)["cluster"]
if ( if (
cluster["logging"]["clusterLogging"][0]["enabled"] len(response["logging"]["clusterLogging"][0]["types"]) == 5
and len(cluster["logging"]["clusterLogging"][0]["types"]) == 5 and response["logging"]["clusterLogging"][0]["enabled"] == True
): ):
compliant_resource.append(cluster["arn"]) compliant_resource.append(response["arn"])
else: else:
non_compliant_resources.append(cluster["arn"]) non_compliant_resources.append(response["arn"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
@ -34,18 +26,21 @@ class EKSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def eks_cluster_secrets_encrypted(self):
def eks_cluster_secrets_encrypted():
clusters = client.list_clusters()["clusters"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for cluster in self.clusters: for cluster in clusters:
response = client.describe_cluster(name=cluster)["cluster"]
if ( if (
"encryptionConfig" in cluster "encryptionConfig" in response
and "secrets" in cluster["encryptionConfig"][0]["resources"] and "secrets" in response["encryptionConfig"][0]["resources"]
): ):
compliant_resource.append(cluster["arn"]) compliant_resource.append(response["arn"])
else: else:
non_compliant_resources.append(cluster["arn"]) non_compliant_resources.append(response["arn"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
@ -53,21 +48,21 @@ class EKSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def eks_endpoint_no_public_access(self):
def eks_endpoint_no_public_access():
clusters = client.list_clusters()["clusters"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for cluster in self.clusters: for cluster in clusters:
if cluster["resourcesVpcConfig"]["endpointPublicAccess"]: response = client.describe_cluster(name=cluster)["cluster"]
non_compliant_resources.append(cluster["arn"]) if response["resourcesVpcConfig"]["endpointPublicAccess"] == False:
compliant_resource.append(response["arn"])
else: else:
compliant_resource.append(cluster["arn"]) non_compliant_resources.append(response["arn"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = EKSRuleChecker

View File

@ -1,26 +1,17 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class ElastiCacheRuleChecker(RuleChecker): client = boto3.client("elasticache")
def __init__(self):
self.client = boto3.client("elasticache")
@cached_property
def clusters(self):
return self.client.describe_cache_clusters()["CacheClusters"]
@cached_property def elasticache_auto_minor_version_upgrade_check():
def replication_groups(self): clusters = client.describe_cache_clusters()["CacheClusters"]
return self.client.describe_replication_groups()["ReplicationGroups"]
def elasticache_auto_minor_version_upgrade_check(self):
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for cluster in self.clusters: for cluster in clusters:
if cluster["AutoMinorVersionUpgrade"]: if cluster["AutoMinorVersionUpgrade"] == True:
compliant_resource.append(cluster["ARN"]) compliant_resource.append(cluster["ARN"])
else: else:
non_compliant_resources.append(cluster["ARN"]) non_compliant_resources.append(cluster["ARN"])
@ -31,11 +22,13 @@ class ElastiCacheRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elasticache_redis_cluster_automatic_backup_check(self):
def elasticache_redis_cluster_automatic_backup_check():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for replication_group in self.replication_groups: for replication_group in replication_groups:
if "SnapshottingClusterId" in replication_group: if "SnapshottingClusterId" in replication_group:
compliant_resource.append(replication_group["ARN"]) compliant_resource.append(replication_group["ARN"])
else: else:
@ -47,11 +40,13 @@ class ElastiCacheRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elasticache_repl_grp_auto_failover_enabled(self):
def elasticache_repl_grp_auto_failover_enabled():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for replication_group in self.replication_groups: for replication_group in replication_groups:
if replication_group["AutomaticFailover"] == "enabled": if replication_group["AutomaticFailover"] == "enabled":
compliant_resource.append(replication_group["ARN"]) compliant_resource.append(replication_group["ARN"])
else: else:
@ -63,11 +58,13 @@ class ElastiCacheRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elasticache_repl_grp_encrypted_at_rest(self):
def elasticache_repl_grp_encrypted_at_rest():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for replication_group in self.replication_groups: for replication_group in replication_groups:
if replication_group["AtRestEncryptionEnabled"] == True: if replication_group["AtRestEncryptionEnabled"] == True:
compliant_resource.append(replication_group["ARN"]) compliant_resource.append(replication_group["ARN"])
else: else:
@ -79,11 +76,13 @@ class ElastiCacheRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elasticache_repl_grp_encrypted_in_transit(self):
def elasticache_repl_grp_encrypted_in_transit():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for replication_group in self.replication_groups: for replication_group in replication_groups:
if replication_group["TransitEncryptionEnabled"] == True: if replication_group["TransitEncryptionEnabled"] == True:
compliant_resource.append(replication_group["ARN"]) compliant_resource.append(replication_group["ARN"])
else: else:
@ -95,11 +94,13 @@ class ElastiCacheRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elasticache_subnet_group_check(self):
def elasticache_subnet_group_check():
clusters = client.describe_cache_clusters()["CacheClusters"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for cluster in self.clusters: for cluster in clusters:
if cluster["CacheSubnetGroupName"] != "default": if cluster["CacheSubnetGroupName"] != "default":
compliant_resource.append(cluster["ARN"]) compliant_resource.append(cluster["ARN"])
else: else:
@ -110,6 +111,3 @@ class ElastiCacheRuleChecker(RuleChecker):
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = ElastiCacheRuleChecker

View File

@ -1,36 +1,19 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class IAMRuleChecker(RuleChecker): client = boto3.client("iam")
def __init__(self):
self.client = boto3.client("iam")
@cached_property
def policies(self):
return self.client.list_policies(Scope="Local")["Policies"]
@cached_property def iam_policy_no_statements_with_admin_access():
def policy_default_versions(self):
responses = [
self.client.get_policy_version(
PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"]
)["PolicyVersion"]
for policy in self.policies
]
return {
policy["Arn"]: response
for policy, response in zip(self.policies, responses)
}
def iam_policy_no_statements_with_admin_access(self):
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
policies = client.list_policies(Scope="Local")["Policies"]
for policy in self.policies: for policy in policies:
policy_version = self.policy_default_versions[policy["Arn"]] policy_version = client.get_policy_version(PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"])[
"PolicyVersion"
]
for statement in policy_version["Document"]["Statement"]: for statement in policy_version["Document"]["Statement"]:
if ( if (
@ -49,12 +32,16 @@ class IAMRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def iam_policy_no_statements_with_full_access(self):
def iam_policy_no_statements_with_full_access():
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
policies = client.list_policies(Scope="Local")["Policies"]
for policy in self.policies: for policy in policies:
policy_version = self.policy_default_versions[policy["Arn"]] policy_version = client.get_policy_version(PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"])[
"PolicyVersion"
]
for statement in policy_version["Document"]["Statement"]: for statement in policy_version["Document"]["Statement"]:
if statement["Effect"] == "Deny": if statement["Effect"] == "Deny":
@ -63,9 +50,7 @@ class IAMRuleChecker(RuleChecker):
if type(statement["Action"]) == str: if type(statement["Action"]) == str:
statement["Action"] = [statement["Action"]] statement["Action"] = [statement["Action"]]
full_access_actions = [ full_access_actions = [action for action in statement["Action"] if action.endswith(":*")]
action for action in statement["Action"] if action.endswith(":*")
]
if full_access_actions: if full_access_actions:
non_compliant_resources.append(policy["Arn"]) non_compliant_resources.append(policy["Arn"])
break break
@ -78,18 +63,15 @@ class IAMRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def iam_role_managed_policy_check(self):
def iam_role_managed_policy_check():
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
policy_arns = [] # 검사할 managed policy arn 목록 policy_arns = [] # 검사할 managed policy arn 목록
for policy in policy_arns: for policy in policy_arns:
response = self.client.list_entities_for_policy(PolicyArn=policy) response = client.list_entities_for_policy(PolicyArn=policy)
if ( if response["PolicyGroups"] == [] and response["PolicyUsers"] == [] and response["PolicyRoles"] == []:
response["PolicyGroups"] == []
and response["PolicyUsers"] == []
and response["PolicyRoles"] == []
):
non_compliant_resources.append(policy) non_compliant_resources.append(policy)
else: else:
compliant_resource.append(policy) compliant_resource.append(policy)
@ -99,6 +81,3 @@ class IAMRuleChecker(RuleChecker):
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = IAMRuleChecker

View File

@ -1,18 +1,17 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
import boto3 import boto3
class KMSRuleChecker(RuleChecker): client = boto3.client("kms")
def __init__(self):
self.client = boto3.client("kms")
def cmk_backing_key_rotation_enabled(self):
def cmk_backing_key_rotation_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
keys = self.client.list_keys()["Keys"] keys = client.list_keys()["Keys"]
for key in keys: for key in keys:
response = self.client.get_key_rotation_status(KeyId=key["KeyId"]) response = client.get_key_rotation_status(KeyId=key["KeyId"])
if response["KeyRotationEnabled"] == True: if response["KeyRotationEnabled"] == True:
compliant_resources.append(response["KeyId"]) compliant_resources.append(response["KeyId"])
@ -24,6 +23,3 @@ class KMSRuleChecker(RuleChecker):
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = KMSRuleChecker

View File

@ -1,39 +1,24 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import datetime import datetime
from dateutil.tz import tzlocal from dateutil.tz import tzlocal
import boto3 import boto3
client = boto3.client("rds")
backup_client = boto3.client("backup")
ec2_client = boto3.client("ec2")
class RDSRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("rds")
self.backup_client = boto3.client("backup")
self.ec2_client = boto3.client("ec2")
@cached_property def aurora_last_backup_recovery_point_created():
def db_clusters(self):
return self.client.describe_db_clusters()["DBClusters"]
@cached_property
def db_instances(self):
return self.client.describe_db_instances()["DBInstances"]
def aurora_last_backup_recovery_point_created(self):
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
recovery_points = self.backup_client.list_recovery_points_by_resource( recovery_points = backup_client.list_recovery_points_by_resource(ResourceArn=cluster["DBClusterArn"])[
ResourceArn=cluster["DBClusterArn"] "RecoveryPoints"
)["RecoveryPoints"] ]
recovery_point_creation_dates = sorted( recovery_point_creation_dates = sorted([i["CreationDate"] for i in recovery_points])
[i["CreationDate"] for i in recovery_points] if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[-1] < datetime.timedelta(days=1):
)
if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[
-1
] < datetime.timedelta(days=1):
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
else: else:
non_compliant_resources.append(cluster["DBClusterArn"]) non_compliant_resources.append(cluster["DBClusterArn"])
@ -44,16 +29,14 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def aurora_mysql_backtracking_enabled(self):
def aurora_mysql_backtracking_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if ( if cluster["Engine"] == "aurora-mysql" and cluster.get("EarliestBacktrackTime", None) == None:
cluster["Engine"] == "aurora-mysql"
and cluster.get("EarliestBacktrackTime", None) == None
):
non_compliant_resources.append(cluster["DBClusterArn"]) non_compliant_resources.append(cluster["DBClusterArn"])
else: else:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -64,11 +47,12 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def db_instance_backup_enabled(self):
def db_instance_backup_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if "BackupRetentionPeriod" in cluster: if "BackupRetentionPeriod" in cluster:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -81,11 +65,12 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_auto_minor_version_upgrade_enable(self):
def rds_cluster_auto_minor_version_upgrade_enable():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if cluster["Engine"] == "docdb" or cluster.get("AutoMinorVersionUpgrade"): if cluster["Engine"] == "docdb" or cluster.get("AutoMinorVersionUpgrade"):
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -98,11 +83,12 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_default_admin_check(self):
def rds_cluster_default_admin_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if cluster["MasterUsername"] not in ["admin", "postgres"]: if cluster["MasterUsername"] not in ["admin", "postgres"]:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -115,11 +101,12 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_deletion_protection_enabled(self):
def rds_cluster_deletion_protection_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if cluster["DeletionProtection"]: if cluster["DeletionProtection"]:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -132,11 +119,12 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_encrypted_at_rest(self):
def rds_cluster_encrypted_at_rest():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if cluster["StorageEncrypted"]: if cluster["StorageEncrypted"]:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -149,15 +137,14 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_iam_authentication_enabled(self):
def rds_cluster_iam_authentication_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if cluster["Engine"] == "docdb" or cluster.get( if cluster["Engine"] == "docdb" or cluster.get("IAMDatabaseAuthenticationEnabled"):
"IAMDatabaseAuthenticationEnabled"
):
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
else: else:
non_compliant_resources.append(cluster["DBClusterArn"]) non_compliant_resources.append(cluster["DBClusterArn"])
@ -168,11 +155,12 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_multi_az_enabled(self):
def rds_cluster_multi_az_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if len(cluster.get("AvailabilityZones", [])) > 1: if len(cluster.get("AvailabilityZones", [])) > 1:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -185,22 +173,17 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_db_security_group_not_allowed(self):
def rds_db_security_group_not_allowed():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters security_groups = ec2_client.describe_security_groups()["SecurityGroups"]
security_groups = self.ec2_client.describe_security_groups()["SecurityGroups"] default_security_group_ids = [i["GroupId"] for i in security_groups if i["GroupName"] == "default"]
default_security_group_ids = [
i["GroupId"] for i in security_groups if i["GroupName"] == "default"
]
for cluster in clusters: for cluster in clusters:
db_security_groups = [ db_security_groups = [i["VpcSecurityGroupId"] for i in cluster["VpcSecurityGroups"] if i["Status"] == "active"]
i["VpcSecurityGroupId"]
for i in cluster["VpcSecurityGroups"]
if i["Status"] == "active"
]
for default_security_group_id in default_security_group_ids: for default_security_group_id in default_security_group_ids:
if default_security_group_id in db_security_groups: if default_security_group_id in db_security_groups:
@ -215,11 +198,12 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_enhanced_monitoring_enabled(self):
def rds_enhanced_monitoring_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
instances = client.describe_db_instances()["DBInstances"]
instances = self.db_instances
for instance in instances: for instance in instances:
if instance.get("MonitoringInterval", 0): if instance.get("MonitoringInterval", 0):
compliant_resources.append(instance["DBInstanceArn"]) compliant_resources.append(instance["DBInstanceArn"])
@ -232,11 +216,12 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_instance_public_access_check(self):
def rds_instance_public_access_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
instances = client.describe_db_instances()["DBInstances"]
instances = self.db_instances
for instance in instances: for instance in instances:
if instance["PubliclyAccessible"]: if instance["PubliclyAccessible"]:
non_compliant_resources.append(instance["DBInstanceArn"]) non_compliant_resources.append(instance["DBInstanceArn"])
@ -249,21 +234,20 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_logging_enabled(self):
def rds_logging_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
logs_for_engine = { logs_for_engine = {
"aurora-mysql": ["audit", "error", "general", "slowquery"], "aurora-mysql": ["audit", "error", "general", "slowquery"],
"aurora-postgresql": ["postgresql"], "aurora-postgresql": ["postgresql"],
"docdb": ["audit", "profiler"], "docdb": ["audit", "profiler"]
} }
for cluster in clusters: for cluster in clusters:
if sorted(cluster["EnabledCloudwatchLogsExports"]) == logs_for_engine.get( if sorted(cluster["EnabledCloudwatchLogsExports"]) == logs_for_engine.get(cluster["Engine"]):
cluster["Engine"]
):
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
else: else:
non_compliant_resources.append(cluster["DBClusterArn"]) non_compliant_resources.append(cluster["DBClusterArn"])
@ -274,13 +258,12 @@ class RDSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_snapshot_encrypted(self):
def rds_snapshot_encrypted():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
cluster_snapshots = self.client.describe_db_cluster_snapshots()[ cluster_snapshots = client.describe_db_cluster_snapshots()["DBClusterSnapshots"]
"DBClusterSnapshots"
]
for snapshot in cluster_snapshots: for snapshot in cluster_snapshots:
if snapshot.get("StorageEncrypted") == True: if snapshot.get("StorageEncrypted") == True:
@ -293,6 +276,3 @@ class RDSRuleChecker(RuleChecker):
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = RDSRuleChecker

View File

@ -1,31 +1,20 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
import botocore.exceptions import botocore.exceptions
class S3RuleChecker(RuleChecker): client = boto3.client("s3")
def __init__(self): sts_client = boto3.client("sts")
self.client = boto3.client("s3") s3control_client = boto3.client("s3control")
self.sts_client = boto3.client("sts") backup_client = boto3.client("backup")
self.s3control_client = boto3.client("s3control")
self.backup_client = boto3.client("backup")
@cached_property
def account_id(self):
return self.sts_client.get_caller_identity().get("Account")
@cached_property def s3_access_point_in_vpc_only():
def buckets(self):
return self.client.list_buckets()["Buckets"]
def s3_access_point_in_vpc_only(self):
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
account_id = sts_client.get_caller_identity().get("Account")
access_points = s3control_client.list_access_points(AccountId=account_id)["AccessPointList"]
access_points = self.s3control_client.list_access_points(
AccountId=self.account_id
)["AccessPointList"]
for access_point in access_points: for access_point in access_points:
if access_point["NetworkOrigin"] == "VPC": if access_point["NetworkOrigin"] == "VPC":
compliant_resources.append(access_point["AccessPointArn"]) compliant_resources.append(access_point["AccessPointArn"])
@ -38,21 +27,18 @@ class S3RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_bucket_default_lock_enabled(self):
def s3_bucket_default_lock_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in self.buckets: for bucket in buckets:
try: try:
response = self.client.get_object_lock_configuration( response = client.get_object_lock_configuration(Bucket=bucket["Name"])
Bucket=bucket["Name"]
)
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
except botocore.exceptions.ClientError as e: except botocore.exceptions.ClientError as e:
if ( if e.response['Error']['Code'] == "ObjectLockConfigurationNotFoundError":
e.response["Error"]["Code"]
== "ObjectLockConfigurationNotFoundError"
):
non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
raise e raise e
@ -63,12 +49,14 @@ class S3RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_bucket_level_public_access_prohibited(self):
def s3_bucket_level_public_access_prohibited():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in self.buckets: for bucket in buckets:
response = self.client.get_public_access_block(Bucket=bucket["Name"]) response = client.get_public_access_block(Bucket=bucket["Name"])
if False not in response["PublicAccessBlockConfiguration"].values(): if False not in response["PublicAccessBlockConfiguration"].values():
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
@ -80,12 +68,14 @@ class S3RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_bucket_logging_enabled(self):
def s3_bucket_logging_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in self.buckets: for bucket in buckets:
response = self.client.get_bucket_logging(Bucket=bucket["Name"]) response = client.get_bucket_logging(Bucket=bucket["Name"])
if "LoggingEnabled" in response: if "LoggingEnabled" in response:
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
@ -97,12 +87,14 @@ class S3RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_bucket_ssl_requests_only(self):
def s3_bucket_ssl_requests_only():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in self.buckets: for bucket in buckets:
policy = self.client.get_bucket_policy(Bucket=bucket["Name"])["Policy"] policy = client.get_bucket_policy(Bucket=bucket["Name"])["Policy"]
if "aws:SecureTransport" in policy: if "aws:SecureTransport" in policy:
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
@ -114,12 +106,14 @@ class S3RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_bucket_versioning_enabled(self):
def s3_bucket_versioning_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in self.buckets: for bucket in buckets:
response = self.client.get_bucket_versioning(Bucket=bucket["Name"]) response = client.get_bucket_versioning(Bucket=bucket["Name"])
if "Status" in response and response["Status"] == "Enabled": if "Status" in response and response["Status"] == "Enabled":
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
@ -131,21 +125,16 @@ class S3RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_default_encryption_kms(self):
def s3_default_encryption_kms():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in self.buckets: for bucket in buckets:
configuration = self.client.get_bucket_encryption(Bucket=bucket["Name"])[ configuration = client.get_bucket_encryption(Bucket=bucket["Name"])["ServerSideEncryptionConfiguration"]
"ServerSideEncryptionConfiguration"
]
if ( if configuration["Rules"][0]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] == "aws:kms":
configuration["Rules"][0]["ApplyServerSideEncryptionByDefault"][
"SSEAlgorithm"
]
== "aws:kms"
):
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
@ -156,14 +145,14 @@ class S3RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_event_notifications_enabled(self):
def s3_event_notifications_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in self.buckets: for bucket in buckets:
configuration = self.client.get_bucket_notification_configuration( configuration = client.get_bucket_notification_configuration(Bucket=bucket["Name"])
Bucket=bucket["Name"]
)
if ( if (
"LambdaFunctionConfigurations" in configuration "LambdaFunctionConfigurations" in configuration
or "QueueConfigurations" in configuration or "QueueConfigurations" in configuration
@ -179,14 +168,14 @@ class S3RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_last_backup_recovery_point_created(self):
def s3_last_backup_recovery_point_created():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in self.buckets: for bucket in buckets:
backups = self.backup_client.list_recovery_points_by_resource( backups = backup_client.list_recovery_points_by_resource(ResourceArn=f"arn:aws:s3:::{bucket['Name']}")
ResourceArn=f"arn:aws:s3:::{bucket['Name']}"
)
if backups["RecoveryPoints"] != []: if backups["RecoveryPoints"] != []:
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
@ -199,18 +188,18 @@ class S3RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_lifecycle_policy_check(self):
def s3_lifecycle_policy_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in self.buckets: for bucket in buckets:
try: try:
configuration = self.client.get_bucket_lifecycle_configuration( configuration = client.get_bucket_lifecycle_configuration(Bucket=bucket["Name"])
Bucket=bucket["Name"]
)
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
except botocore.exceptions.ClientError as e: except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "NoSuchLifecycleConfiguration": if e.response['Error']['Code'] == "NoSuchLifecycleConfiguration":
non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
raise e raise e
@ -220,6 +209,3 @@ class S3RuleChecker(RuleChecker):
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = S3RuleChecker

View File

@ -1,24 +1,19 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
from datetime import datetime, timedelta import datetime
from dateutil.tz import tzlocal from dateutil.tz import tzlocal
class SecretsManagerRuleChecker(RuleChecker): client = boto3.client("secretsmanager")
def __init__(self):
self.client = boto3.client("secretsmanager")
@cached_property
def secrets(self):
return self.client.list_secrets()["SecretList"]
def secretsmanager_rotation_enabled_check(self): def secretsmanager_rotation_enabled_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
secrets = client.list_secrets()["SecretList"]
for secret in self.secrets: for secret in secrets:
if secret.get("RotationEnabled", False): if secret.get("RotationEnabled") == True:
compliant_resources.append(secret["ARN"]) compliant_resources.append(secret["ARN"])
else: else:
non_compliant_resources.append(secret["ARN"]) non_compliant_resources.append(secret["ARN"])
@ -29,18 +24,20 @@ class SecretsManagerRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def secretsmanager_scheduled_rotation_success_check(self):
def secretsmanager_scheduled_rotation_success_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
secrets = client.list_secrets()["SecretList"]
for secret in self.secrets: for secret in secrets:
if secret.get("RotationEnabled", False): if secret.get("RotationEnabled") == True:
if "LastRotatedDate" not in secret: if 'LastRotatedDate' not in secret:
non_compliant_resources.append(secret["ARN"]) non_compliant_resources.append(secret["ARN"])
continue continue
now = datetime.now(tz=tzlocal()) now = datetime.datetime.now(tz=tzlocal())
rotation_period = timedelta( rotation_period = datetime.timedelta(
days=secret["RotationRules"]["AutomaticallyAfterDays"] + 2 days=secret["RotationRules"]["AutomaticallyAfterDays"] + 2
) # 최대 2일 지연 가능 (aws) ) # 최대 2일 지연 가능 (aws)
elapsed_time_after_rotation = now - secret["LastRotatedDate"] elapsed_time_after_rotation = now - secret["LastRotatedDate"]
@ -56,20 +53,22 @@ class SecretsManagerRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def secretsmanager_secret_periodic_rotation(self):
def secretsmanager_secret_periodic_rotation():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
secrets = client.list_secrets()["SecretList"]
for secret in self.secrets: for secret in secrets:
if secret.get("RotationEnabled") == True: if secret.get("RotationEnabled") == True:
if "LastRotatedDate" not in secret: if 'LastRotatedDate' not in secret:
non_compliant_resources.append(secret["ARN"]) non_compliant_resources.append(secret["ARN"])
continue continue
now = datetime.now(tz=tzlocal()) now = datetime.datetime.now(tz=tzlocal())
elapsed_time_after_rotation = now - secret["LastRotatedDate"] elapsed_time_after_rotation = now - secret["LastRotatedDate"]
if elapsed_time_after_rotation > timedelta(days=90): if elapsed_time_after_rotation > datetime.timedelta(days=90):
non_compliant_resources.append(secret["ARN"]) non_compliant_resources.append(secret["ARN"])
else: else:
compliant_resources.append(secret["ARN"]) compliant_resources.append(secret["ARN"])
@ -79,6 +78,3 @@ class SecretsManagerRuleChecker(RuleChecker):
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = SecretsManagerRuleChecker

View File

@ -1,31 +1,11 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
import boto3 import boto3
class SecurityHubRuleChecker(RuleChecker): # client = boto3.client("")
def __init__(self):
self.client = boto3.client("securityhub")
self.sts_client = boto3.client("sts")
def securityhub_enabled(self):
compliant_resources = []
non_compliant_resources = []
aws_account_id = self.sts_client.get_caller_identity()["Account"]
try:
hub = self.client.describe_hub()
compliant_resources.append(aws_account_id)
except Exception as e:
if e.__class__.__name__ == "InvalidAccessException":
non_compliant_resources.append(aws_account_id)
else:
raise e
def securityhub_enabled():
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=False, compliant_resources=[], non_compliant_resources=[]
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
) )
rule_checker = SecurityHubRuleChecker

View File

@ -1,25 +1,17 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class SNSRuleChecker(RuleChecker): client = boto3.client("sns")
def __init__(self):
self.client = boto3.client("sns")
@cached_property
def topics(self):
topics = self.client.list_topics()["Topics"]
return [
self.client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"]
for topic in topics
]
def sns_encrypted_kms(self): def sns_encrypted_kms():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
topics = client.list_topics()["Topics"]
for topic in self.topics: for topic in topics:
topic = client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"]
if "KmsMasterKeyId" in topic: if "KmsMasterKeyId" in topic:
compliant_resources.append(topic["TopicArn"]) compliant_resources.append(topic["TopicArn"])
else: else:
@ -31,19 +23,19 @@ class SNSRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def sns_topic_message_delivery_notification_enabled(self):
def sns_topic_message_delivery_notification_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
topics = client.list_topics()["Topics"]
for topic in self.topics: for topic in topics:
notification_roles = [ topic = client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"]
attribute
for attribute in topic.keys()
if attribute.endswith("FeedbackRoleArn")
]
if notification_roles: for key in topic.keys():
if key.endswith("FeedbackRoleArn") == True:
compliant_resources.append(topic["TopicArn"]) compliant_resources.append(topic["TopicArn"])
break
else: else:
non_compliant_resources.append(topic["TopicArn"]) non_compliant_resources.append(topic["TopicArn"])
@ -52,6 +44,3 @@ class SNSRuleChecker(RuleChecker):
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = SNSRuleChecker

11
services/tags.py Normal file
View File

@ -0,0 +1,11 @@
from models import RuleCheckResult
import boto3
# client = boto3.client("")
def required_tags():
return RuleCheckResult(
passed=False, compliant_resources=[], non_compliant_resources=[]
)

View File

@ -1,18 +1,13 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property from pprint import pprint
import boto3 import boto3
class VPCRuleChecker(RuleChecker): ec2 = boto3.client("ec2")
def __init__(self):
self.ec2 = boto3.client("ec2")
@cached_property
def security_group_rules(self):
return self.ec2.describe_security_group_rules()["SecurityGroupRules"]
def ec2_transit_gateway_auto_vpc_attach_disabled(self): def ec2_transit_gateway_auto_vpc_attach_disabled():
response = self.ec2.describe_transit_gateways() response = ec2.describe_transit_gateways()
non_compliant_resources = [ non_compliant_resources = [
resource["TransitGatewayArn"] resource["TransitGatewayArn"]
@ -23,12 +18,7 @@ class VPCRuleChecker(RuleChecker):
] ]
compliant_resources = list( compliant_resources = list(
set( set([resource["TransitGatewayArn"] for resource in response["TransitGateways"]])
[
resource["TransitGatewayArn"]
for resource in response["TransitGateways"]
]
)
- set(non_compliant_resources) - set(non_compliant_resources)
) )
@ -38,7 +28,10 @@ class VPCRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def restricted_ssh(self):
def restricted_ssh():
response = ec2.describe_security_group_rules()
non_compliant_resources = [ non_compliant_resources = [
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in filter( for resource in filter(
@ -46,7 +39,7 @@ class VPCRuleChecker(RuleChecker):
and x["FromPort"] <= 22 and x["FromPort"] <= 22
and x["ToPort"] >= 22 and x["ToPort"] >= 22
and x.get("CidrIpv4") == "0.0.0.0/0", and x.get("CidrIpv4") == "0.0.0.0/0",
self.security_group_rules, response["SecurityGroupRules"],
) )
] ]
@ -54,7 +47,7 @@ class VPCRuleChecker(RuleChecker):
set( set(
[ [
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in self.security_group_rules for resource in response["SecurityGroupRules"]
] ]
) )
- set(non_compliant_resources) - set(non_compliant_resources)
@ -65,9 +58,9 @@ class VPCRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def restricted_common_ports(self):
def restricted_common_ports():
common_ports = [ common_ports = [
-1, # All
22, # SSH 22, # SSH
80, # HTTP 80, # HTTP
3306, # MySQL 3306, # MySQL
@ -76,6 +69,7 @@ class VPCRuleChecker(RuleChecker):
6379, # Redis 6379, # Redis
11211, # Memcached 11211, # Memcached
] ]
response = ec2.describe_security_group_rules()
non_compliant_resources = [ non_compliant_resources = [
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
@ -84,14 +78,14 @@ class VPCRuleChecker(RuleChecker):
and x["FromPort"] in common_ports and x["FromPort"] in common_ports
and x["ToPort"] in common_ports and x["ToPort"] in common_ports
and x.get("PrefixListId") is None, and x.get("PrefixListId") is None,
self.security_group_rules, response["SecurityGroupRules"],
) )
] ]
compliant_resources = list( compliant_resources = list(
set( set(
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in self.security_group_rules for resource in response["SecurityGroupRules"]
) )
- set(non_compliant_resources) - set(non_compliant_resources)
) )
@ -102,14 +96,13 @@ class VPCRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def subnet_auto_assign_public_ip_disabled(self):
response = self.ec2.describe_subnets() def subnet_auto_assign_public_ip_disabled():
response = ec2.describe_subnets()
non_compliant_resources = [ non_compliant_resources = [
resource["SubnetId"] resource["SubnetId"]
for resource in filter( for resource in filter(lambda x: x["MapPublicIpOnLaunch"], response["Subnets"])
lambda x: x["MapPublicIpOnLaunch"], response["Subnets"]
)
] ]
compliant_resources = list( compliant_resources = list(
@ -123,8 +116,9 @@ class VPCRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def vpc_default_security_group_closed(self):
response = self.ec2.describe_security_groups( def vpc_default_security_group_closed():
response = ec2.describe_security_groups(
Filters=[{"Name": "group-name", "Values": ["default"]}] Filters=[{"Name": "group-name", "Values": ["default"]}]
) )
@ -147,13 +141,14 @@ class VPCRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def vpc_flow_logs_enabled(self):
response = self.ec2.describe_flow_logs() def vpc_flow_logs_enabled():
response = ec2.describe_flow_logs()
flow_log_enabled_vpcs = [ flow_log_enabled_vpcs = [
resource["ResourceId"] for resource in response["FlowLogs"] resource["ResourceId"] for resource in response["FlowLogs"]
] ]
response = self.ec2.describe_vpcs() response = ec2.describe_vpcs()
non_compliant_resources = [ non_compliant_resources = [
resource["VpcId"] resource["VpcId"]
@ -173,14 +168,13 @@ class VPCRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def vpc_network_acl_unused_check(self):
response = self.ec2.describe_network_acls() def vpc_network_acl_unused_check():
response = ec2.describe_network_acls()
non_compliant_resources = [ non_compliant_resources = [
resource["NetworkAclId"] resource["NetworkAclId"]
for resource in filter( for resource in filter(lambda x: not x["Associations"], response["NetworkAcls"])
lambda x: not x["Associations"], response["NetworkAcls"]
)
] ]
compliant_resources = list( compliant_resources = list(
@ -194,8 +188,9 @@ class VPCRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def vpc_peering_dns_resolution_check(self):
response = self.ec2.describe_vpc_peering_connections() def vpc_peering_dns_resolution_check():
response = ec2.describe_vpc_peering_connections()
non_compliant_resources = [ non_compliant_resources = [
resource["VpcPeeringConnectionId"] resource["VpcPeeringConnectionId"]
@ -228,7 +223,10 @@ class VPCRuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def vpc_sg_open_only_to_authorized_ports(self):
def vpc_sg_open_only_to_authorized_ports():
response = ec2.describe_security_group_rules()
authorized_port = [ authorized_port = [
# 80 # 80
] ]
@ -240,14 +238,14 @@ class VPCRuleChecker(RuleChecker):
and (x.get("CidrIpv4") == "0.0.0.0/0" or x.get("CidrIpv6") == "::/0") and (x.get("CidrIpv4") == "0.0.0.0/0" or x.get("CidrIpv6") == "::/0")
and x["FromPort"] not in authorized_port and x["FromPort"] not in authorized_port
and x["ToPort"] not in authorized_port, and x["ToPort"] not in authorized_port,
self.security_group_rules, response["SecurityGroupRules"],
) )
] ]
compliant_resources = list( compliant_resources = list(
set( set(
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in self.security_group_rules for resource in response["SecurityGroupRules"]
) )
- set(non_compliant_resources) - set(non_compliant_resources)
) )
@ -257,6 +255,3 @@ class VPCRuleChecker(RuleChecker):
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = VPCRuleChecker

View File

@ -1,48 +1,20 @@
from models import RuleCheckResult, RuleChecker from models import RuleCheckResult
from functools import cached_property
import boto3 import boto3
class WAFv2RuleChecker(RuleChecker): client = boto3.client("wafv2")
def __init__(self): global_client = boto3.client("wafv2", region_name="us-east-1")
self.client = boto3.client("wafv2")
self.global_client = boto3.client("wafv2", region_name="us-east-1")
@cached_property
def regional_web_acls(self):
return self.client.list_web_acls(Scope="REGIONAL")["WebACLs"]
@cached_property def wafv2_logging_enabled():
def cloudfront_web_acls(self):
return self.global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"]
@cached_property
def regional_rule_groups(self):
rule_groups = self.client.list_rule_groups(Scope="REGIONAL")["RuleGroups"]
return [
self.client.get_rule_group(ARN=rule_group["ARN"])["RuleGroup"]
for rule_group in rule_groups
]
@cached_property
def cloudfront_rule_groups(self):
rule_groups = self.global_client.list_rule_groups(Scope="CLOUDFRONT")[
"RuleGroups"
]
return [
self.global_client.get_rule_group(ARN=rule_group["ARN"])["RuleGroup"]
for rule_group in rule_groups
]
def wafv2_logging_enabled(self):
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
regional_web_acls = client.list_web_acls(Scope="REGIONAL")["WebACLs"]
cloudfront_web_acls = global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"]
for web_acl in self.regional_web_acls: for web_acl in regional_web_acls:
try: try:
configuration = self.client.get_logging_configuration( configuration = client.get_logging_configuration(ResourceArn=web_acl["ARN"])
ResourceArn=web_acl["ARN"]
)
compliant_resources.append(web_acl["ARN"]) compliant_resources.append(web_acl["ARN"])
except Exception as e: except Exception as e:
if e.__class__.__name__ == "WAFNonexistentItemException": if e.__class__.__name__ == "WAFNonexistentItemException":
@ -50,11 +22,9 @@ class WAFv2RuleChecker(RuleChecker):
else: else:
raise e raise e
for web_acl in self.cloudfront_web_acls: for web_acl in cloudfront_web_acls:
try: try:
configuration = self.global_client.get_logging_configuration( configuration = global_client.get_logging_configuration(ResourceArn=web_acl["ARN"])
ResourceArn=web_acl["ARN"]
)
compliant_resources.append(web_acl["ARN"]) compliant_resources.append(web_acl["ARN"])
except Exception as e: except Exception as e:
if e.__class__.__name__ == "WAFNonexistentItemException": if e.__class__.__name__ == "WAFNonexistentItemException":
@ -68,18 +38,24 @@ class WAFv2RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def wafv2_rulegroup_logging_enabled(self):
def wafv2_rulegroup_logging_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
regional_rule_groups = client.list_rule_groups(Scope="REGIONAL")["RuleGroups"]
cloudfront_rule_groups = global_client.list_rule_groups(Scope="CLOUDFRONT")["RuleGroups"]
for rule_group in self.regional_rule_groups:
if rule_group["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True: for rule_group in regional_rule_groups:
configuration = client.get_rule_group(ARN=rule_group["ARN"])
if configuration["RuleGroup"]["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True:
compliant_resources.append(rule_group["ARN"]) compliant_resources.append(rule_group["ARN"])
else: else:
non_compliant_resources.append(rule_group["ARN"]) non_compliant_resources.append(rule_group["ARN"])
for rule_group in self.cloudfront_rule_groups: for rule_group in cloudfront_rule_groups:
if rule_group["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True: configuration = global_client.get_rule_group(ARN=rule_group["ARN"])
if configuration["RuleGroup"]["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True:
compliant_resources.append(rule_group["ARN"]) compliant_resources.append(rule_group["ARN"])
else: else:
non_compliant_resources.append(rule_group["ARN"]) non_compliant_resources.append(rule_group["ARN"])
@ -90,18 +66,23 @@ class WAFv2RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def wafv2_rulegroup_not_empty(self):
def wafv2_rulegroup_not_empty():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
regional_rule_groups = client.list_rule_groups(Scope="REGIONAL")["RuleGroups"]
cloudfront_rule_groups = global_client.list_rule_groups(Scope="CLOUDFRONT")["RuleGroups"]
for rule_group in self.regional_rule_groups: for rule_group in regional_rule_groups:
if len(rule_group["Rules"]) > 0: configuration = client.get_rule_group(ARN=rule_group["ARN"])
if len(configuration["RuleGroup"]["Rules"]) > 0:
compliant_resources.append(rule_group["ARN"]) compliant_resources.append(rule_group["ARN"])
else: else:
non_compliant_resources.append(rule_group["ARN"]) non_compliant_resources.append(rule_group["ARN"])
for rule_group in self.cloudfront_rule_groups: for rule_group in cloudfront_rule_groups:
if len(rule_group["Rules"]) > 0: configuration = global_client.get_rule_group(ARN=rule_group["ARN"])
if len(configuration["RuleGroup"]["Rules"]) > 0:
compliant_resources.append(rule_group["ARN"]) compliant_resources.append(rule_group["ARN"])
else: else:
non_compliant_resources.append(rule_group["ARN"]) non_compliant_resources.append(rule_group["ARN"])
@ -112,23 +93,21 @@ class WAFv2RuleChecker(RuleChecker):
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def wafv2_webacl_not_empty(self):
def wafv2_webacl_not_empty():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
regional_web_acls = client.list_web_acls(Scope="REGIONAL")["WebACLs"]
cloudfront_web_acls = global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"]
for web_acl in self.regional_web_acls: for web_acl in regional_web_acls:
response = self.client.get_web_acl( response = client.get_web_acl(Id=web_acl["Id"], Name=web_acl["Name"], Scope="REGIONAL")
Id=web_acl["Id"], Name=web_acl["Name"], Scope="REGIONAL"
)
if len(response["WebACL"]["Rules"]) > 0: if len(response["WebACL"]["Rules"]) > 0:
compliant_resources.append(web_acl["ARN"]) compliant_resources.append(web_acl["ARN"])
else: else:
non_compliant_resources.append(web_acl["ARN"]) non_compliant_resources.append(web_acl["ARN"])
for web_acl in cloudfront_web_acls:
for web_acl in self.cloudfront_web_acls: response = global_client.get_web_acl(Id=web_acl["Id"], Name=web_acl["Name"], Scope="CLOUDFRONT")
response = self.global_client.get_web_acl(
Id=web_acl["Id"], Name=web_acl["Name"], Scope="CLOUDFRONT"
)
if len(response["WebACL"]["Rules"]) > 0: if len(response["WebACL"]["Rules"]) > 0:
compliant_resources.append(web_acl["ARN"]) compliant_resources.append(web_acl["ARN"])
else: else:
@ -139,6 +118,3 @@ class WAFv2RuleChecker(RuleChecker):
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = WAFv2RuleChecker

View File

@ -2,10 +2,7 @@ import json
import shutil import shutil
def load_bp_from_file(filepath="bp.json", default_ruleset=None): def load_bp_from_file(filepath="bp.json"):
if default_ruleset:
shutil.copy(default_ruleset, filepath)
try: try:
with open(filepath, "r") as f: with open(filepath, "r") as f:
content = "".join(f.readlines()) content = "".join(f.readlines())
@ -39,21 +36,6 @@ def convert_bp_to_snake_case(bp):
return bp return bp
def parse_excluded_resources():
with open("exclude.csv", "r") as f:
content = f.readlines()
excluded_resources = {}
for line in content:
if "," in line:
resource, scope = line.strip().split(",")
else:
resource = line.strip()
scope = "all"
excluded_resources[resource] = scope
return excluded_resources
if __name__ == "__main__": if __name__ == "__main__":
bp = load_bp_from_file() bp = load_bp_from_file()
rules = [ rules = [