From 3f7e9f6b8cda41d1c8698dc624b468e62afc7ae4 Mon Sep 17 00:00:00 2001 From: Benjamin Morris <93620006+bmorrissirromb@users.noreply.github.com> Date: Fri, 28 Apr 2023 16:20:25 -0700 Subject: [PATCH 01/12] remove typo in help output for #408 --- rdk/rdk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rdk/rdk.py b/rdk/rdk.py index a8274675..cebffb57 100644 --- a/rdk/rdk.py +++ b/rdk/rdk.py @@ -442,7 +442,7 @@ def get_command_parser(): parser.add_argument( "command", metavar="", - help=f"Command to run. Refer to the usage instructions for each command for more details. Commands are: {rdk_commands})", + help=f"Command to run. Refer to the usage instructions for each command for more details. Commands are: {rdk_commands}", choices=rdk_commands, ) parser.add_argument( From de7414ea11004c04ef4563ed0c7d024664e763c9 Mon Sep 17 00:00:00 2001 From: Benjamin Morris Date: Mon, 1 May 2023 13:52:35 -0700 Subject: [PATCH 02/12] bulk bugfix commit --- NEW_RUNTIME_PROCESS.md | 31 + README.rst | 6 +- docs/getting_started.rst | 2 +- pyproject.toml | 3 + rdk-workshop/instructions.md | 2 +- rdk/rdk.py | 1669 ++++++++++++----- rdk/template/configRule.json | 9 - rdk/template/configRuleOrganization.json | 9 - .../example_ci/AWS_R53_HostedZone.json | 39 + .../AWS_S3_AccountPublicAccessBlock.json | 23 + .../AWS_SSM_ManagedInstanceInventory.json | 765 +++----- .../dotnetcore1.0/CustomConfigHandler.cs | 189 -- .../runtime/dotnetcore1.0/RuleCode.cs | 27 - .../aws-lambda-tools-defaults.json | 19 - .../runtime/dotnetcore1.0/csharp7.0.csproj | 28 - .../dotnetcore2.0/CustomConfigHandler.cs | 189 -- .../runtime/dotnetcore2.0/RuleCode.cs | 27 - .../aws-lambda-tools-defaults.json | 19 - .../runtime/dotnetcore2.0/csharp7.0.csproj | 28 - rdk/template/runtime/nodejs4.3/rule_code.js | 183 -- rdk/template/runtime/nodejs6.10/rule_code.js | 215 --- .../runtime/python3.10-lib/rule_code.py | 25 + .../runtime/python3.10-lib/rule_test.py | 157 ++ rdk/template/runtime/python3.10/rule_code.py | 437 +++++ rdk/template/runtime/python3.10/rule_test.py | 177 ++ .../runtime/python3.7-lib/rule_test.py | 124 +- .../runtime/python3.8-lib/rule_test.py | 124 +- .../runtime/python3.9-lib/rule_test.py | 124 +- rdk/template/terraform/0.11/config_rule.tf | 8 - rdk/template/terraform/0.12/config_rule.tf | 7 - testing/linux-python3-buildspec.yaml | 18 +- testing/windows-python2-buildspec.yaml | 13 - testing/windows-python3-buildspec.yaml | 22 +- tox.ini | 2 + 34 files changed, 2588 insertions(+), 2132 deletions(-) create mode 100644 NEW_RUNTIME_PROCESS.md create mode 100644 rdk/template/example_ci/AWS_R53_HostedZone.json create mode 100644 rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json delete mode 100644 rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs delete mode 100755 rdk/template/runtime/dotnetcore1.0/RuleCode.cs delete mode 100755 rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json delete mode 100644 rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj delete mode 100644 rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs delete mode 100644 rdk/template/runtime/dotnetcore2.0/RuleCode.cs delete mode 100644 rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json delete mode 100644 rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj delete mode 100644 rdk/template/runtime/nodejs4.3/rule_code.js delete mode 100644 rdk/template/runtime/nodejs6.10/rule_code.js create mode 100644 rdk/template/runtime/python3.10-lib/rule_code.py create mode 100644 
rdk/template/runtime/python3.10-lib/rule_test.py create mode 100644 rdk/template/runtime/python3.10/rule_code.py create mode 100644 rdk/template/runtime/python3.10/rule_test.py delete mode 100644 testing/windows-python2-buildspec.yaml create mode 100644 tox.ini diff --git a/NEW_RUNTIME_PROCESS.md b/NEW_RUNTIME_PROCESS.md new file mode 100644 index 00000000..3961a50c --- /dev/null +++ b/NEW_RUNTIME_PROCESS.md @@ -0,0 +1,31 @@ +# New Runtime Support Process +These instructions document the parts of the repository that need to be updated when support for a new Lambda runtime is added. + +## Update pyproject.toml + +- Add to `classifiers` list: +``` +"Programming Language :: Python :: ," +``` + +- Add to `include` list: +``` +"rdk/template/runtime/python/*", +"rdk/template/runtime/python-lib/*", +``` + +## Update README.rst + +- Update documentation and examples + +## Update getting_started.rst + +- Update examples + +## Update rdk.py + +- Update references to include new version + +## Update Linux and Windows Buildspec files (`testing` folder) + +- Add new test cases for the new version \ No newline at end of file diff --git a/README.rst b/README.rst index daa4aa41..d5cfe7ec 100644 --- a/README.rst +++ b/README.rst @@ -18,7 +18,7 @@ For complete documentation, including command reference, check out the `ReadTheD Getting Started =============== -Uses python 3.7/3.8/3.9 and is installed via pip. Requires you to have an AWS account and sufficient permissions to manage the Config service, and to create S3 Buckets, Roles, and Lambda Functions. An AWS IAM Policy Document that describes the minimum necessary permissions can be found at policy/rdk-minimum-permissions.json. +Uses python 3.7/3.8/3.9/3.10 and is installed via pip. Requires you to have an AWS account and sufficient permissions to manage the Config service, and to create S3 Buckets, Roles, and Lambda Functions. An AWS IAM Policy Document that describes the minimum necessary permissions can be found at policy/rdk-minimum-permissions.json. Under the hood, rdk uses boto3 to make API calls to AWS, so you can set your credentials any way that boto3 recognizes (options 3 through 8 here: http://boto3.readthedocs.io/en/latest/guide/configuration.html) or pass them in with the command-line parameters --profile, --region, --access-key-id, or --secret-access-key @@ -148,7 +148,7 @@ If you need to change the parameters of a Config rule in your working directory :: - $ rdk modify MyRule --runtime python3.9 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' + $ rdk modify MyRule --runtime python3.10 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' Running modify! Modified Rule 'MyRule'. Use the `deploy` command to push your changes to AWS. @@ -220,7 +220,7 @@ You can use the ``-n`` and ``-f`` command line flags just like the UNIX ``tail`` Running the tests ================= -The `testing` directory contains scripts and buildspec files that I use to run basic functionality tests across a variety of CLI environments (currently Ubuntu linux running python 3.7/3.8/3.9, and Windows Server running python3.9). If there is interest I can release a CloudFormation template that could be used to build the test environment, let me know if this is something you want! 
+The `testing` directory contains scripts and buildspec files that I use to run basic functionality tests across a variety of CLI environments (currently Ubuntu linux running python 3.7/3.8/3.9/3.10, and Windows Server running python3.10). If there is interest I can release a CloudFormation template that could be used to build the test environment, let me know if this is something you want! Advanced Features diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 4ae573cd..a66052ed 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -124,7 +124,7 @@ If you need to change the parameters of a Config rule in your working directory :: - $ rdk modify MyRule --runtime python3.9 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' + $ rdk modify MyRule --runtime python3.10 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' Running modify! Modified Rule 'MyRule'. Use the `deploy` command to push your changes to AWS. diff --git a/pyproject.toml b/pyproject.toml index b4428400..c3751717 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,7 @@ classifiers = [ "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", ] include = [ "README.rst", @@ -46,6 +47,8 @@ include = [ "rdk/template/runtime/python3.8-lib/*", "rdk/template/runtime/python3.9/*", "rdk/template/runtime/python3.9-lib/*", + "rdk/template/runtime/python3.10/*", + "rdk/template/runtime/python3.10-lib/*", "rdk/template/runtime/dotnetcore1.0/*", "rdk/template/runtime/dotnetcore1.0/bin/*", "rdk/template/runtime/dotnetcore1.0/obj/*", diff --git a/rdk-workshop/instructions.md b/rdk-workshop/instructions.md index 0bbafc2a..210f13c5 100644 --- a/rdk-workshop/instructions.md +++ b/rdk-workshop/instructions.md @@ -113,7 +113,7 @@ Note: It might take up to 2 hours to get the information about the CIS benchmark ## (Optional) Going further 7. Discover all the available [Managed Config Rules](https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html). -8. Navigate to [AWS System Manager Automation Documents](https://eu-west-1.console.aws.amazon.com/systems-manager/documents?region=eu-west-1) to discover all existing remediation actions. +8. Navigate to [AWS System Manager Automation Documents](https://us-east-1.console.aws.amazon.com/systems-manager/documents?region=us-east-1) to discover all existing remediation actions. # Lab 2: Writing Your First Config Rule diff --git a/rdk/rdk.py b/rdk/rdk.py index cebffb57..12052e51 100644 --- a/rdk/rdk.py +++ b/rdk/rdk.py @@ -1,10 +1,16 @@ # Copyright 2017-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +# Licensed under the Apache License, Version 2.0 (the "License"). +# +# You may not use this file except in compliance with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # -# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +# or in the "license" file accompanying this file. 
+# +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# +# See the License for the specific language governing permissions and limitations under the License. import argparse import base64 import fileinput @@ -69,12 +75,17 @@ "sa-east-1": "5", } -RDKLIB_LAYER_SAR_ID = "arn:aws:serverlessrepo:ap-southeast-1:711761543063:applications/rdklib" +RDKLIB_LAYER_SAR_ID = ( + "arn:aws:serverlessrepo:ap-southeast-1:711761543063:applications/rdklib" +) RDKLIB_ARN_STRING = "arn:aws:lambda:{region}:711761543063:layer:rdklib-layer:{version}" -PARALLEL_COMMAND_THROTTLE_PERIOD = 2 # 2 seconds, used in running commands in parallel over multiple regions +PARALLEL_COMMAND_THROTTLE_PERIOD = ( + 2 # 2 seconds, used in running commands in parallel over multiple regions +) -# this need to be update whenever config service supports more resource types : https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html +# This need to be update whenever config service supports more resource types +# See: https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html accepted_resource_types = [ "AWS::AccessAnalyzer::Analyzer", "AWS::ACM::Certificate", @@ -372,7 +383,9 @@ { "Sid": "REMOTE", "Effect": "Allow", - "Principal": {"AWS": {"Fn::Sub": "arn:${AWS::Partition}:iam::${LambdaAccountId}:root"}}, + "Principal": { + "AWS": {"Fn::Sub": "arn:${AWS::Partition}:iam::${LambdaAccountId}:root"} + }, "Action": "sts:AssumeRole", }, ], @@ -383,7 +396,9 @@ { "Effect": "Allow", "Action": "s3:PutObject*", - "Resource": {"Fn::Sub": "arn:${AWS::Partition}:s3:::${ConfigBucket}/AWSLogs/${AWS::AccountId}/*"}, + "Resource": { + "Fn::Sub": "arn:${AWS::Partition}:s3:::${ConfigBucket}/AWSLogs/${AWS::AccountId}/*" + }, "Condition": {"StringLike": {"s3:x-amz-acl": "bucket-owner-full-control"}}, }, { @@ -404,10 +419,18 @@ def get_command_parser(): # formatter_class=argparse.RawDescriptionHelpFormatter, description="The RDK is a command-line utility for authoring, deploying, and testing custom AWS Config rules." ) - parser.add_argument("-p", "--profile", help="[optional] indicate which Profile to use.") - parser.add_argument("-k", "--access-key-id", help="[optional] Access Key ID to use.") - parser.add_argument("-s", "--secret-access-key", help="[optional] Secret Access Key to use.") - parser.add_argument("-r", "--region", help="Select the region to run the command in.") + parser.add_argument( + "-p", "--profile", help="[optional] indicate which Profile to use." + ) + parser.add_argument( + "-k", "--access-key-id", help="[optional] Access Key ID to use." + ) + parser.add_argument( + "-s", "--secret-access-key", help="[optional] Secret Access Key to use." + ) + parser.add_argument( + "-r", "--region", help="Select the region to run the command in." 
+ ) parser.add_argument( "-f", "--region-file", @@ -452,7 +475,11 @@ def get_command_parser(): help="Run `rdk --help` to see command-specific arguments.", ) parser.add_argument( - "-v", "--version", help="Display the version of this tool", action="version", version="%(prog)s " + MY_VERSION + "-v", + "--version", + help="Display the version of this tool", + action="version", + version="%(prog)s " + MY_VERSION, ) return parser @@ -534,7 +561,9 @@ def get_rule_parser(is_required, command): + command + " the Rule and metadata.", ) - parser.add_argument("rulename", metavar="", help="Rule name to create/modify") + parser.add_argument( + "rulename", metavar="", help="Rule name to create/modify" + ) runtime_group = parser.add_mutually_exclusive_group() runtime_group.add_argument( "-R", @@ -542,7 +571,6 @@ def get_rule_parser(is_required, command): required=False, help="Runtime for lambda function", choices=[ - "nodejs6.10", "java8", "python3.7", "python3.7-lib", @@ -550,16 +578,23 @@ def get_rule_parser(is_required, command): "python3.8-lib", "python3.9", "python3.9-lib", - "dotnetcore1.0", - "dotnetcore2.0", + "python3.10", + "python3.10-lib", ], metavar="", ) runtime_group.add_argument( - "--source-identifier", required=False, help="[optional] Used only for creating Managed Rules." + "--source-identifier", + required=False, + help="[optional] Used only for creating Managed Rules.", ) - parser.add_argument("-l", "--custom-lambda-name", required=False, help="[optional] Provide custom lambda name") - parser.set_defaults(runtime="python3.9-lib") + parser.add_argument( + "-l", + "--custom-lambda-name", + required=False, + help="[optional] Provide custom lambda name", + ) + parser.set_defaults(runtime="python3.10-lib") parser.add_argument( "-r", "--resource-types", @@ -571,17 +606,41 @@ def get_rule_parser(is_required, command): "--maximum-frequency", required=False, help="[optional] Maximum execution frequency for scheduled Rules", - choices=["One_Hour", "Three_Hours", "Six_Hours", "Twelve_Hours", "TwentyFour_Hours"], + choices=[ + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours", + ], + ) + parser.add_argument( + "-i", + "--input-parameters", + help="[optional] JSON for required Config parameters.", + ) + parser.add_argument( + "--optional-parameters", help="[optional] JSON for optional Config parameters." + ) + parser.add_argument( + "--tags", + help="[optional] JSON for tags to be applied to all CFN created resources.", + ) + parser.add_argument( + "-s", + "--rulesets", + required=False, + help="[optional] comma-delimited list of RuleSet names to add this Rule to.", ) - parser.add_argument("-i", "--input-parameters", help="[optional] JSON for required Config parameters.") - parser.add_argument("--optional-parameters", help="[optional] JSON for optional Config parameters.") - parser.add_argument("--tags", help="[optional] JSON for tags to be applied to all CFN created resources.") parser.add_argument( - "-s", "--rulesets", required=False, help="[optional] comma-delimited list of RuleSet names to add this Rule to." + "--remediation-action", + required=False, + help="[optional] SSM document for remediation.", ) - parser.add_argument("--remediation-action", required=False, help="[optional] SSM document for remediation.") parser.add_argument( - "--remediation-action-version", required=False, help="[optional] SSM document version for remediation action." 
+ "--remediation-action-version", + required=False, + help="[optional] SSM document version for remediation action.", ) parser.add_argument( "--auto-remediate", @@ -595,7 +654,9 @@ def get_rule_parser(is_required, command): help="[optional] Number of times to retry automated remediation.", ) parser.add_argument( - "--auto-remediation-retry-time", required=False, help="[optional] Duration of automated remediation retries." + "--auto-remediation-retry-time", + required=False, + help="[optional] Duration of automated remediation retries.", ) parser.add_argument( "--remediation-concurrent-execution-percent", @@ -646,13 +707,27 @@ def get_deployment_parser(ForceArgument=False, Command="deploy"): parser = argparse.ArgumentParser( prog="rdk " + Command, - description="Used to " + Command + " the Config Rule " + direction + " the target account.", + description="Used to " + + Command + + " the Config Rule " + + direction + + " the target account.", + ) + parser.add_argument( + "rulename", + metavar="", + nargs="*", + help="Rule name(s) to deploy. Rule(s) will be pushed to AWS.", + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="All rules in the working directory will be deployed.", ) parser.add_argument( - "rulename", metavar="", nargs="*", help="Rule name(s) to deploy. Rule(s) will be pushed to AWS." + "-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names" ) - parser.add_argument("--all", "-a", action="store_true", help="All rules in the working directory will be deployed.") - parser.add_argument("-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names") parser.add_argument( "-f", "--functions-only", @@ -736,20 +811,36 @@ def get_deployment_parser(ForceArgument=False, Command="deploy"): return parser -def get_deployment_organization_parser(ForceArgument=False, Command="deploy-organization"): +def get_deployment_organization_parser( + ForceArgument=False, Command="deploy-organization" +): direction = "to" if Command == "undeploy": direction = "from" parser = argparse.ArgumentParser( prog="rdk " + Command, - description="Used to " + Command + " the Config Rule " + direction + " the target Organization.", + description="Used to " + + Command + + " the Config Rule " + + direction + + " the target Organization.", ) parser.add_argument( - "rulename", metavar="", nargs="*", help="Rule name(s) to deploy. Rule(s) will be pushed to AWS." + "rulename", + metavar="", + nargs="*", + help="Rule name(s) to deploy. Rule(s) will be pushed to AWS.", + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="All rules in the working directory will be deployed.", + ) + parser.add_argument( + "-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names" ) - parser.add_argument("--all", "-a", action="store_true", help="All rules in the working directory will be deployed.") - parser.add_argument("-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names") parser.add_argument( "-f", "--functions-only", @@ -834,13 +925,25 @@ def get_deployment_organization_parser(ForceArgument=False, Command="deploy-orga def get_export_parser(ForceArgument=False, Command="export"): - parser = argparse.ArgumentParser( - prog="rdk " + Command, description="Used to " + Command + " the Config Rule to terraform file." 
+ prog="rdk " + Command, + description="Used to " + Command + " the Config Rule to terraform file.", + ) + parser.add_argument( + "rulename", + metavar="", + nargs="*", + help="Rule name(s) to export to a file.", + ) + parser.add_argument( + "-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names" + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="All rules in the working directory will be deployed.", ) - parser.add_argument("rulename", metavar="", nargs="*", help="Rule name(s) to export to a file.") - parser.add_argument("-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names") - parser.add_argument("--all", "-a", action="store_true", help="All rules in the working directory will be deployed.") parser.add_argument( "--lambda-layers", required=False, @@ -878,8 +981,16 @@ def get_export_parser(ForceArgument=False, Command="export"): required=False, help="[optional] Lambda Layer ARN that contains the desired rdklib. Note that Lambda Layers are region-specific.", ) - parser.add_argument("-v", "--version", required=True, help="Terraform version", choices=["0.11", "0.12"]) - parser.add_argument("-f", "--format", required=True, help="Export Format", choices=["terraform"]) + parser.add_argument( + "-v", + "--version", + required=True, + help="Terraform version", + choices=["0.11", "0.12"], + ) + parser.add_argument( + "-f", "--format", required=True, help="Export Format", choices=["terraform"] + ) parser.add_argument( "-g", "--generated-lambda-layer", @@ -897,15 +1008,36 @@ def get_export_parser(ForceArgument=False, Command="export"): def get_test_parser(command): - parser = argparse.ArgumentParser(prog="rdk " + command, description="Used to run tests on your Config Rule code.") - parser.add_argument("rulename", metavar="[,,...]", nargs="*", help="Rule name(s) to test") + parser = argparse.ArgumentParser( + prog="rdk " + command, description="Used to run tests on your Config Rule code." + ) parser.add_argument( - "--all", "-a", action="store_true", help="Test will be run against all rules in the working directory." + "rulename", + metavar="[,,...]", + nargs="*", + help="Rule name(s) to test", + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="Test will be run against all rules in the working directory.", + ) + parser.add_argument( + "--test-ci-json", "-j", help="[optional] JSON for test CI for testing." + ) + parser.add_argument( + "--test-ci-types", "-t", help="[optional] CI type to use for testing." 
+ ) + parser.add_argument( + "--verbose", "-v", action="store_true", help="[optional] Enable full log output" + ) + parser.add_argument( + "-s", + "--rulesets", + required=False, + help="[optional] comma-delimited list of RuleSet names", ) - parser.add_argument("--test-ci-json", "-j", help="[optional] JSON for test CI for testing.") - parser.add_argument("--test-ci-types", "-t", help="[optional] CI type to use for testing.") - parser.add_argument("--verbose", "-v", action="store_true", help="[optional] Enable full log output") - parser.add_argument("-s", "--rulesets", required=False, help="[optional] comma-delimited list of RuleSet names") return parser @@ -933,11 +1065,21 @@ def get_logs_parser(): usage="rdk logs [-n/--number NUMBER] [-f/--follow]", description="Displays CloudWatch logs for the Lambda Function for the specified Rule.", ) - parser.add_argument("rulename", metavar="", help="Rule whose logs will be displayed") parser.add_argument( - "-f", "--follow", action="store_true", help="[optional] Continuously poll Lambda logs and write to stdout." + "rulename", metavar="", help="Rule whose logs will be displayed" + ) + parser.add_argument( + "-f", + "--follow", + action="store_true", + help="[optional] Continuously poll Lambda logs and write to stdout.", + ) + parser.add_argument( + "-n", + "--number", + default=3, + help="[optional] Number of previous logged events to display.", ) - parser.add_argument("-n", "--number", default=3, help="[optional] Number of previous logged events to display.") return parser @@ -949,7 +1091,9 @@ def get_rulesets_parser(): ) parser.add_argument("subcommand", help="One of list, add, or remove") parser.add_argument("ruleset", nargs="?", help="Name of RuleSet") - parser.add_argument("rulename", nargs="?", help="Name of Rule to be added or removed") + parser.add_argument( + "rulename", nargs="?", help="Name of Rule to be added or removed" + ) return parser @@ -1004,10 +1148,15 @@ def get_create_rule_template_parser(): def get_create_region_set_parser(): parser = argparse.ArgumentParser( - prog="rdk create-region-set", description="Outputs a YAML region set file for multi-region deployment." 
+ prog="rdk create-region-set", + description="Outputs a YAML region set file for multi-region deployment.", ) parser.add_argument( - "-o", "--output-file", required=False, default="regions", help="Filename of the generated region set file" + "-o", + "--output-file", + required=False, + default="regions", + help="Filename of the generated region set file", ) return parser @@ -1020,7 +1169,9 @@ def parse_region_file(args): region_text = yaml.safe_load(open(args.region_file, "r")) return region_text[region_set] except Exception: - raise SyntaxError(f"Error reading regions: {region_set} in file: {args.region_file}") + raise SyntaxError( + f"Error reading regions: {region_set} in file: {args.region_file}" + ) def run_multi_region(args): @@ -1069,7 +1220,9 @@ def init(self): config_bucket_exists = False if self.args.config_bucket_exists_in_another_account: - print(f"[{my_session.region_name}]: Skipping Config Bucket check due to command line args") + print( + f"[{my_session.region_name}]: Skipping Config Bucket check due to command line args" + ) config_bucket_exists = True config_bucket_name = config_bucket_prefix + "-" + account_id @@ -1082,9 +1235,14 @@ def init(self): control_tower = True if self.args.generate_lambda_layer: - lambda_layer_version = self.__get_existing_lambda_layer(my_session, layer_name=self.args.custom_layer_name) + lambda_layer_version = self.__get_existing_lambda_layer( + my_session, layer_name=self.args.custom_layer_name + ) if lambda_layer_version: - print(f"[{my_session.region_name}]: Found Version: " + lambda_layer_version) + print( + f"[{my_session.region_name}]: Found Version: " + + lambda_layer_version + ) if self.args.generate_lambda_layer: print( f"[{my_session.region_name}]: --generate-lambda-layer Flag received, forcing update of the Lambda Layer in {my_session.region_name}" @@ -1094,8 +1252,12 @@ def init(self): f"[{my_session.region_name}]: Lambda Layer not found in {my_session.region_name}. Creating one now" ) # Try to generate lambda layer with ServerlessAppRepo, manually generate if impossible - self.__create_new_lambda_layer(my_session, layer_name=self.args.custom_layer_name) - lambda_layer_version = self.__get_existing_lambda_layer(my_session, layer_name=self.args.custom_layer_name) + self.__create_new_lambda_layer( + my_session, layer_name=self.args.custom_layer_name + ) + lambda_layer_version = self.__get_existing_lambda_layer( + my_session, layer_name=self.args.custom_layer_name + ) # Check to see if the ConfigRecorder has been created. 
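        # --- Illustrative sketch, not part of this patch: the recorder check that follows is a
        # --- single Config API call. An empty "ConfigurationRecorders" list means AWS Config has
        # --- never been enabled in this region. describe_configuration_recorders() is an existing
        # --- boto3 API; the example_* names are hypothetical.
        example_recorders = my_config.describe_configuration_recorders().get("ConfigurationRecorders", [])
        if example_recorders:
            print("Found recorder:", example_recorders[0]["name"], "with role:", example_recorders[0]["roleARN"])
        else:
            print("No configuration recorder in this region yet; init will create one.")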
recorders = my_config.describe_configuration_recorders() @@ -1103,13 +1265,18 @@ def init(self): config_recorder_exists = True config_recorder_name = recorders["ConfigurationRecorders"][0]["name"] config_role_arn = recorders["ConfigurationRecorders"][0]["roleARN"] - print(f"[{my_session.region_name}]: Found Config Recorder: " + config_recorder_name) + print( + f"[{my_session.region_name}]: Found Config Recorder: " + + config_recorder_name + ) print(f"[{my_session.region_name}]: Found Config Role: " + config_role_arn) delivery_channels = my_config.describe_delivery_channels() if len(delivery_channels["DeliveryChannels"]) > 0: delivery_channel_exists = True - config_bucket_name = delivery_channels["DeliveryChannels"][0]["s3BucketName"] + config_bucket_name = delivery_channels["DeliveryChannels"][0][ + "s3BucketName" + ] my_s3 = my_session.client("s3") @@ -1119,18 +1286,26 @@ def init(self): bucket_exists = False for bucket in response["Buckets"]: if bucket["Name"] == config_bucket_name: - print(f"[{my_session.region_name}]: Found Bucket: " + config_bucket_name) + print( + f"[{my_session.region_name}]: Found Bucket: " + + config_bucket_name + ) config_bucket_exists = True bucket_exists = True if not bucket_exists: - print(f"[{my_session.region_name}]: Creating Config bucket " + config_bucket_name) + print( + f"[{my_session.region_name}]: Creating Config bucket " + + config_bucket_name + ) if my_session.region_name == "us-east-1": my_s3.create_bucket(Bucket=config_bucket_name) else: my_s3.create_bucket( Bucket=config_bucket_name, - CreateBucketConfiguration={"LocationConstraint": my_session.region_name}, + CreateBucketConfiguration={ + "LocationConstraint": my_session.region_name + }, ) if not config_role_arn: @@ -1149,29 +1324,49 @@ def init(self): elif partition == "aws-cn": partition_url = ".com.cn" assume_role_policy_template = open( - os.path.join(path.dirname(__file__), "template", assume_role_policy_file), "r" + os.path.join( + path.dirname(__file__), "template", assume_role_policy_file + ), + "r", ).read() - assume_role_policy = json.loads(assume_role_policy_template.replace("${PARTITIONURL}", partition_url)) + assume_role_policy = json.loads( + assume_role_policy_template.replace( + "${PARTITIONURL}", partition_url + ) + ) assume_role_policy["Statement"].append( - {"Effect": "Allow", "Principal": {"AWS": str(account_id)}, "Action": "sts:AssumeRole"} + { + "Effect": "Allow", + "Principal": {"AWS": str(account_id)}, + "Action": "sts:AssumeRole", + } ) my_iam.create_role( - RoleName=config_role_name, AssumeRolePolicyDocument=json.dumps(assume_role_policy), Path="/rdk/" + RoleName=config_role_name, + AssumeRolePolicyDocument=json.dumps(assume_role_policy), + Path="/rdk/", ) # attach role policy my_iam.attach_role_policy( - RoleName=config_role_name, PolicyArn="arn:" + partition + ":iam::aws:policy/service-role/AWS_ConfigRole" + RoleName=config_role_name, + PolicyArn="arn:" + + partition + + ":iam::aws:policy/service-role/AWS_ConfigRole", ) my_iam.attach_role_policy( - RoleName=config_role_name, PolicyArn="arn:" + partition + ":iam::aws:policy/ReadOnlyAccess" + RoleName=config_role_name, + PolicyArn="arn:" + partition + ":iam::aws:policy/ReadOnlyAccess", ) policy_template = open( - os.path.join(path.dirname(__file__), "template", delivery_permission_policy_file), "r" + os.path.join( + path.dirname(__file__), "template", delivery_permission_policy_file + ), + "r", ).read() - delivery_permissions_policy = policy_template.replace("${ACCOUNTID}", account_id).replace( - "${PARTITION}", 
partition - ) + delivery_permissions_policy = policy_template.replace( + "${ACCOUNTID}", account_id + ).replace("${PARTITION}", partition) my_iam.put_role_policy( RoleName=config_role_name, PolicyName="ConfigDeliveryPermissions", @@ -1184,30 +1379,42 @@ def init(self): # create or update config recorder if not config_role_arn: - config_role_arn = "arn:" + partition + ":iam::" + account_id + ":role/rdk/config-role" + config_role_arn = ( + "arn:" + partition + ":iam::" + account_id + ":role/rdk/config-role" + ) if not control_tower: my_config.put_configuration_recorder( ConfigurationRecorder={ "name": config_recorder_name, "roleARN": config_role_arn, - "recordingGroup": {"allSupported": True, "includeGlobalResourceTypes": True}, + "recordingGroup": { + "allSupported": True, + "includeGlobalResourceTypes": True, + }, } ) if not delivery_channel_exists: # create delivery channel - print(f"[{my_session.region_name}]: Creating delivery channel to bucket " + config_bucket_name) + print( + f"[{my_session.region_name}]: Creating delivery channel to bucket " + + config_bucket_name + ) my_config.put_delivery_channel( DeliveryChannel={ "name": "default", "s3BucketName": config_bucket_name, - "configSnapshotDeliveryProperties": {"deliveryFrequency": "Six_Hours"}, + "configSnapshotDeliveryProperties": { + "deliveryFrequency": "Six_Hours" + }, } ) # start config recorder - my_config.start_configuration_recorder(ConfigurationRecorderName=config_recorder_name) + my_config.start_configuration_recorder( + ConfigurationRecorderName=config_recorder_name + ) print(f"[{my_session.region_name}]: Config Service is ON") else: print( @@ -1217,26 +1424,39 @@ def init(self): print(f"[{my_session.region_name}]: Config setup complete.") # create code bucket - code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) response = my_s3.list_buckets() bucket_exists = False for bucket in response["Buckets"]: if bucket["Name"] == code_bucket_name: bucket_exists = True - print(f"[{my_session.region_name}]: Found code bucket: " + code_bucket_name) + print( + f"[{my_session.region_name}]: Found code bucket: " + + code_bucket_name + ) if not bucket_exists: if self.args.skip_code_bucket_creation: - print(f"[{my_session.region_name}]: Skipping Code Bucket creation due to command line args") + print( + f"[{my_session.region_name}]: Skipping Code Bucket creation due to command line args" + ) else: - print(f"[{my_session.region_name}]: Creating Code bucket " + code_bucket_name) + print( + f"[{my_session.region_name}]: Creating Code bucket " + + code_bucket_name + ) # Consideration for us-east-1 S3 API if my_session.region_name == "us-east-1": my_s3.create_bucket(Bucket=code_bucket_name) else: my_s3.create_bucket( - Bucket=code_bucket_name, CreateBucketConfiguration={"LocationConstraint": my_session.region_name} + Bucket=code_bucket_name, + CreateBucketConfiguration={ + "LocationConstraint": my_session.region_name + }, ) return 0 @@ -1283,10 +1503,14 @@ def clean(self): try: # First delete the Config Recorder itself. Do we need to stop it first? Let's stop it just to be safe. 
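                # --- Illustrative aside, not part of this patch: to stop the recorder only when it
                # --- is actually recording, describe_configuration_recorder_status() (an existing
                # --- boto3 Config API) exposes a boolean "recording" flag. The code below this sketch
                # --- simply stops it unconditionally, which is also safe; example_* names are
                # --- hypothetical.
                example_status = my_config.describe_configuration_recorder_status(
                    ConfigurationRecorderNames=[recorders["ConfigurationRecorders"][0]["name"]]
                )["ConfigurationRecordersStatus"][0]
                if example_status["recording"]:
                    my_config.stop_configuration_recorder(ConfigurationRecorderName=example_status["name"])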
my_config.stop_configuration_recorder( - ConfigurationRecorderName=recorders["ConfigurationRecorders"][0]["name"] + ConfigurationRecorderName=recorders["ConfigurationRecorders"][0][ + "name" + ] ) my_config.delete_configuration_recorder( - ConfigurationRecorderName=recorders["ConfigurationRecorders"][0]["name"] + ConfigurationRecorderName=recorders["ConfigurationRecorders"][0][ + "name" + ] ) except Exception as e: print("Error encountered removing Configuration Recorder: " + str(e)) @@ -1296,13 +1520,21 @@ def clean(self): try: response = iam_client.get_role(RoleName=config_role_name) try: - role_policy_results = iam_client.list_role_policies(RoleName=config_role_name) + role_policy_results = iam_client.list_role_policies( + RoleName=config_role_name + ) for policy_name in role_policy_results["PolicyNames"]: - iam_client.delete_role_policy(RoleName=config_role_name, PolicyName=policy_name) + iam_client.delete_role_policy( + RoleName=config_role_name, PolicyName=policy_name + ) - role_policy_results = iam_client.list_attached_role_policies(RoleName=config_role_name) + role_policy_results = iam_client.list_attached_role_policies( + RoleName=config_role_name + ) for policy in role_policy_results["AttachedPolicies"]: - iam_client.detach_role_policy(RoleName=config_role_name, PolicyArn=policy["PolicyArn"]) + iam_client.detach_role_policy( + RoleName=config_role_name, PolicyArn=policy["PolicyArn"] + ) # Once all policies are detached we should be able to delete the Role. iam_client.delete_role(RoleName=config_role_name) @@ -1315,11 +1547,17 @@ def clean(self): delivery_channels = my_config.describe_delivery_channels() if len(delivery_channels["DeliveryChannels"]) > 0: for delivery_channel in delivery_channels["DeliveryChannels"]: - config_bucket_names.append(delivery_channels["DeliveryChannels"][0]["s3BucketName"]) + config_bucket_names.append( + delivery_channels["DeliveryChannels"][0]["s3BucketName"] + ) try: - my_config.delete_delivery_channel(DeliveryChannelName=delivery_channel["name"]) + my_config.delete_delivery_channel( + DeliveryChannelName=delivery_channel["name"] + ) except Exception as e: - print("Error encountered trying to delete Delivery Channel: " + str(e)) + print( + "Error encountered trying to delete Delivery Channel: " + str(e) + ) if config_bucket_names: # empty and then delete the config bucket. @@ -1353,7 +1591,9 @@ def clean(self): print("Error encountered deleting Functions stack: " + str(e)) # Delete the code bucket, if one exists. 
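        # --- Illustrative aside, not part of this patch: head_bucket() (an existing S3 API) is a
        # --- cheap way to test the "if one exists" condition up front; a ClientError means there is
        # --- nothing to clean up. The code below simply attempts the cleanup inside a try/except.
        # --- The example_* name is hypothetical.
        example_code_bucket = code_bucket_prefix + account_id + "-" + my_session.region_name
        try:
            my_session.client("s3").head_bucket(Bucket=example_code_bucket)
            print("Code bucket exists and will be removed:", example_code_bucket)
        except ClientError:
            print("No code bucket to delete.")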
- code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) try: code_bucket = my_session.resource("s3").Bucket(code_bucket_name) code_bucket.objects.all().delete() @@ -1380,16 +1620,14 @@ def create(self): extension_mapping = { "java8": ".java", - "python3.6-managed": ".py", "python3.7": ".py", "python3.7-lib": ".py", "python3.8": ".py", "python3.8-lib": ".py", "python3.9": ".py", "python3.9-lib": ".py", - "nodejs6.10": ".js", - "dotnetcore1.0": "cs", - "dotnetcore2.0": "cs", + "python3.10": ".py", + "python3.10-lib": ".py", } if self.args.runtime not in extension_mapping: print("rdk does not support that runtime yet.") @@ -1411,8 +1649,6 @@ def create(self): # copy rule template into rule directory if self.args.runtime == "java8": self.__create_java_rule() - elif self.args.runtime in ["dotnetcore1.0", "dotnetcore2.0"]: - self.__create_dotnet_rule() else: src = os.path.join( path.dirname(__file__), @@ -1430,18 +1666,32 @@ def create(self): shutil.copyfile(src, dst) f = fileinput.input(files=dst, inplace=True) for line in f: - if self.args.runtime in ["python3.7-lib", "python3.8-lib", "python3.9-lib"]: + if self.args.runtime in [ + "python3.7-lib", + "python3.8-lib", + "python3.9-lib", + "python3.10-lib", + ]: if self.args.resource_types: applicable_resource_list = "" - for resource_type in self.args.resource_types.split(","): - applicable_resource_list += "'" + resource_type + "', " + for resource_type in self.args.resource_types.split( + "," + ): + applicable_resource_list += ( + "'" + resource_type + "', " + ) print( line.replace("<%RuleName%>", self.args.rulename) .replace( "<%ApplicableResources1%>", - "\nAPPLICABLE_RESOURCES = [" + applicable_resource_list[:-2] + "]\n", + "\nAPPLICABLE_RESOURCES = [" + + applicable_resource_list[:-2] + + "]\n", ) - .replace("<%ApplicableResources2%>", ", APPLICABLE_RESOURCES"), + .replace( + "<%ApplicableResources2%>", + ", APPLICABLE_RESOURCES", + ), end="", ) else: @@ -1452,7 +1702,9 @@ def create(self): end="", ) else: - print(line.replace("<%RuleName%>", self.args.rulename), end="") + print( + line.replace("<%RuleName%>", self.args.rulename), end="" + ) f.close() src = os.path.join( @@ -1467,12 +1719,16 @@ def create(self): os.getcwd(), rules_dir, self.args.rulename, - self.args.rulename + "_test" + extension_mapping[self.args.runtime], + self.args.rulename + + "_test" + + extension_mapping[self.args.runtime], ) shutil.copyfile(src, dst) f = fileinput.input(files=dst, inplace=True) for line in f: - print(line.replace("<%RuleName%>", self.args.rulename), end="") + print( + line.replace("<%RuleName%>", self.args.rulename), end="" + ) f.close() src = os.path.join( @@ -1548,10 +1804,18 @@ def modify(self): self.args.remediation_concurrent_execution_percent = ssm_controls.get( "ConcurrentExecutionRatePercentage", "" ) - self.args.remediation_error_rate_percent = ssm_controls.get("ErrorPercentage", "") - self.args.remediation_parameters = json.dumps(params["Parameters"]) if params.get("Parameters") else None - self.args.auto_remediation_retry_attempts = params.get("MaximumAutomaticAttempts", "") - self.args.auto_remediation_retry_time = params.get("RetryAttemptSeconds", "") + self.args.remediation_error_rate_percent = ssm_controls.get( + "ErrorPercentage", "" + ) + self.args.remediation_parameters = ( + json.dumps(params["Parameters"]) if params.get("Parameters") else None + ) + self.args.auto_remediation_retry_attempts = params.get( + 
"MaximumAutomaticAttempts", "" + ) + self.args.auto_remediation_retry_time = params.get( + "RetryAttemptSeconds", "" + ) self.args.remediation_action = params.get("TargetId", "") self.args.remediation_action_version = params.get("TargetVersion", "") @@ -1562,7 +1826,11 @@ def modify(self): # Write the parameters to a file in the rule directory. self.__populate_params() - print("Modified Rule '" + self.args.rulename + "'. Use the `deploy` command to push your changes to AWS.") + print( + "Modified Rule '" + + self.args.rulename + + "'. Use the `deploy` command to push your changes to AWS." + ) def undeploy(self): self.__parse_deploy_args(ForceArgument=True) @@ -1570,7 +1838,9 @@ def undeploy(self): if not self.args.force: confirmation = False while not confirmation: - my_input = input("Delete specified Rules and Lambda Functions from your AWS Account? (y/N): ") + my_input = input( + "Delete specified Rules and Lambda Functions from your AWS Account? (y/N): " + ) if my_input.lower() == "y": confirmation = True if my_input.lower() == "n" or my_input == "": @@ -1608,7 +1878,9 @@ def undeploy(self): for rule_name in rule_names: try: - cfn_client.delete_stack(StackName=self.__get_stack_name_from_rule_name(rule_name)) + cfn_client.delete_stack( + StackName=self.__get_stack_name_from_rule_name(rule_name) + ) deleted_stacks.append(self.__get_stack_name_from_rule_name(rule_name)) except ClientError as ce: print( @@ -1621,12 +1893,16 @@ def undeploy(self): + str(e) ) - print(f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete.") + print( + f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete." + ) for stack_name in deleted_stacks: self.__wait_for_cfn_stack(cfn_client, stack_name) - print(f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved.") + print( + f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved." + ) print(f"[{my_session.region_name}]: To re-deploy, use the 'deploy' command.") def undeploy_organization(self): @@ -1635,7 +1911,9 @@ def undeploy_organization(self): if not self.args.force: confirmation = False while not confirmation: - my_input = input("Delete specified Rules and Lambda Functions from your Organization? (y/N): ") + my_input = input( + "Delete specified Rules and Lambda Functions from your Organization? (y/N): " + ) if my_input.lower() == "y": confirmation = True if my_input.lower() == "n" or my_input == "": @@ -1673,7 +1951,9 @@ def undeploy_organization(self): for rule_name in rule_names: try: - cfn_client.delete_stack(StackName=self.__get_stack_name_from_rule_name(rule_name)) + cfn_client.delete_stack( + StackName=self.__get_stack_name_from_rule_name(rule_name) + ) deleted_stacks.append(self.__get_stack_name_from_rule_name(rule_name)) except ClientError as ce: print( @@ -1686,13 +1966,19 @@ def undeploy_organization(self): + str(e) ) - print(f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete.") + print( + f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete." + ) for stack_name in deleted_stacks: self.__wait_for_cfn_stack(cfn_client, stack_name) - print(f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved.") - print(f"[{my_session.region_name}]: To re-deploy, use the 'deploy-organization' command.") + print( + f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved." 
+ ) + print( + f"[{my_session.region_name}]: To re-deploy, use the 'deploy-organization' command." + ) def deploy(self): self.__parse_deploy_args() @@ -1713,7 +1999,9 @@ def deploy(self): if self.args.custom_code_bucket: code_bucket_name = self.args.custom_code_bucket else: - code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) # If we're only deploying the Lambda functions (and role + permissions), branch here. Someday the "main" execution path should use the same generated CFN templates for single-account deployment. if self.args.functions_only: @@ -1741,7 +2029,9 @@ def deploy(self): for rule_name in rule_names: rule_params, cfn_tags = self.__get_rule_parameters(rule_name) if "SourceIdentifier" in rule_params: - print(f"[{my_session.region_name}]: Skipping code packaging for Managed Rule.") + print( + f"[{my_session.region_name}]: Skipping code packaging for Managed Rule." + ) else: s3_dst = self.__upload_function_code( rule_name, rule_params, account_id, my_session, code_bucket_name @@ -1754,7 +2044,12 @@ def deploy(self): config = my_s3_client._client_config config.signature_version = botocore.UNSIGNED template_url = boto3.client("s3", config=config).generate_presigned_url( - "get_object", ExpiresIn=0, Params={"Bucket": code_bucket_name, "Key": self.args.stack_name + ".json"} + "get_object", + ExpiresIn=0, + Params={ + "Bucket": code_bucket_name, + "Key": self.args.stack_name + ".json", + }, ) # Check if stack exists. If it does, update it. If it doesn't, create it. @@ -1763,9 +2058,10 @@ def deploy(self): my_stack = my_cfn.describe_stacks(StackName=self.args.stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for Lambda functions.") + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for Lambda functions." + ) try: - cfn_args = { "StackName": self.args.stack_name, "TemplateURL": template_url, @@ -1785,7 +2081,9 @@ def deploy(self): if e.response["Error"]["Code"] == "ValidationError": if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. - print(f"[{my_session.region_name}]: No changes to Config Rule configurations.") + print( + f"[{my_session.region_name}]: No changes to Config Rule configurations." + ) else: # Something unexpected has gone wrong. Emit an error and bail. print(f"[{my_session.region_name}]: {e}") @@ -1797,10 +2095,16 @@ def deploy(self): for rule_name in rule_names: rule_params, cfn_tags = self.__get_rule_parameters(rule_name) my_lambda_arn = self.__get_lambda_arn_for_rule( - rule_name, partition, my_session.region_name, account_id, rule_params + rule_name, + partition, + my_session.region_name, + account_id, + rule_params, ) if "SourceIdentifier" in rule_params: - print(f"[{my_session.region_name}]: Skipping Lambda upload for Managed Rule.") + print( + f"[{my_session.region_name}]: Skipping Lambda upload for Managed Rule." + ) continue print(f"[{my_session.region_name}]: Publishing Lambda code...") @@ -1812,9 +2116,11 @@ def deploy(self): Publish=True, ) print(f"[{my_session.region_name}]: Lambda code updated.") - except ClientError as e: + except ClientError: # If we're in the exception, the stack does not exist and we should create it. 
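                # --- Illustrative aside, not part of this patch: describe_stacks() raises a
                # --- ClientError whose message contains "does not exist" when the stack is missing,
                # --- which is what this except branch relies on. A standalone existence check could
                # --- look like the hypothetical helper below.
                def example_stack_exists(cfn_client, stack_name):
                    try:
                        cfn_client.describe_stacks(StackName=stack_name)
                        return True
                    except ClientError as error:
                        if "does not exist" in str(error):
                            return False
                        raise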
- print(f"[{my_session.region_name}]: Creating CloudFormation Stack for Lambda Functions.") + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for Lambda Functions." + ) cfn_args = { "StackName": self.args.stack_name, @@ -1850,7 +2156,9 @@ def deploy(self): combined_input_parameters = {} if "InputParameters" in rule_params: - combined_input_parameters.update(json.loads(rule_params["InputParameters"])) + combined_input_parameters.update( + json.loads(rule_params["InputParameters"]) + ) if "OptionalParameters" in rule_params: # Remove empty parameters @@ -1892,65 +2200,112 @@ def deploy(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceIdentifier", "ParameterValue": rule_params["SourceIdentifier"]}, + { + "ParameterKey": "SourceIdentifier", + "ParameterValue": rule_params["SourceIdentifier"], + }, ] my_cfn = my_session.client("cloudformation") if "Remediation" in rule_params: - print(f"[{my_session.region_name}]: Build The CFN Template with Remediation Settings") - cfn_body = os.path.join(path.dirname(__file__), "template", "configManagedRuleWithRemediation.json") + print( + f"[{my_session.region_name}]: Build The CFN Template with Remediation Settings" + ) + cfn_body = os.path.join( + path.dirname(__file__), + "template", + "configManagedRuleWithRemediation.json", + ) template_body = open(cfn_body, "r").read() json_body = json.loads(template_body) - remediation = self.__create_remediation_cloudformation_block(rule_params["Remediation"]) + remediation = self.__create_remediation_cloudformation_block( + rule_params["Remediation"] + ) json_body["Resources"]["Remediation"] = remediation if "SSMAutomation" in rule_params: # Reference the SSM Automation Role Created, if IAM is created - print(f"[{my_session.region_name}]: Building SSM Automation Section") + print( + f"[{my_session.region_name}]: Building SSM Automation Section" + ) ssm_automation = self.__create_automation_cloudformation_block( - rule_params["SSMAutomation"], self.__get_alphanumeric_rule_name(rule_name) + rule_params["SSMAutomation"], + self.__get_alphanumeric_rule_name(rule_name), ) json_body["Resources"][ - self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) ] = ssm_automation if "IAM" in rule_params["SSMAutomation"]: - print(f"[{my_session.region_name}]: Lets Build IAM Role and Policy") + print( + f"[{my_session.region_name}]: Lets Build IAM Role and Policy" + ) # TODO Check For IAM Settings - json_body["Resources"]["Remediation"]["Properties"]["Parameters"]["AutomationAssumeRole"][ - "StaticValue" - ]["Values"] = [ - {"Fn::GetAtt": [self.__get_alphanumeric_rule_name(rule_name + "Role"), "Arn"]} + json_body["Resources"]["Remediation"]["Properties"][ + "Parameters" + ]["AutomationAssumeRole"]["StaticValue"]["Values"] = [ + { + "Fn::GetAtt": [ + self.__get_alphanumeric_rule_name( + rule_name + "Role" + ), + "Arn", + ] + } ] - ssm_iam_role, ssm_iam_policy = self.__create_automation_iam_cloudformation_block( - rule_params["SSMAutomation"], self.__get_alphanumeric_rule_name(rule_name) + ( + ssm_iam_role, + ssm_iam_policy, + ) = self.__create_automation_iam_cloudformation_block( + rule_params["SSMAutomation"], + self.__get_alphanumeric_rule_name(rule_name), ) - json_body["Resources"][self.__get_alphanumeric_rule_name(rule_name + "Role")] = ssm_iam_role + json_body["Resources"][ + self.__get_alphanumeric_rule_name(rule_name + "Role") + ] = 
ssm_iam_role json_body["Resources"][ self.__get_alphanumeric_rule_name(rule_name + "Policy") ] = ssm_iam_policy - print(f"[{my_session.region_name}]: Build Supporting SSM Resources") + print( + f"[{my_session.region_name}]: Build Supporting SSM Resources" + ) resource_depends_on = [ "rdkConfigRule", - self.__get_alphanumeric_rule_name(rule_name + "RemediationAction"), + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ), ] # Builds SSM Document Before Config RUle - json_body["Resources"]["Remediation"]["DependsOn"] = resource_depends_on - json_body["Resources"]["Remediation"]["Properties"]["TargetId"] = { - "Ref": self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") + json_body["Resources"]["Remediation"][ + "DependsOn" + ] = resource_depends_on + json_body["Resources"]["Remediation"]["Properties"][ + "TargetId" + ] = { + "Ref": self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) } try: my_stack_name = self.__get_stack_name_from_rule_name(rule_name) my_stack = my_cfn.describe_stacks(StackName=my_stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for " + + rule_name + ) try: cfn_args = { "StackName": my_stack_name, "TemplateBody": json.dumps(json_body, indent=2), "Parameters": my_params, - "Capabilities": ["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], + "Capabilities": [ + "CAPABILITY_IAM", + "CAPABILITY_NAMED_IAM", + ], } # If no tags key is specified, or if the tags dict is empty @@ -1962,23 +2317,31 @@ def deploy(self): if e.response["Error"]["Code"] == "ValidationError": if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. - print(f"[{my_session.region_name}]: No changes to Config Rule.") + print( + f"[{my_session.region_name}]: No changes to Config Rule." + ) else: # Something unexpected has gone wrong. Emit an error and bail. print(f"[{my_session.region_name}]: {e}") return 1 else: raise - except ClientError as e: + except ClientError: # If we're in the exception, the stack does not exist and we should create it. - print(f"[{my_session.region_name}]: Creating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for " + + rule_name + ) if "Remediation" in rule_params: cfn_args = { "StackName": my_stack_name, "TemplateBody": json.dumps(json_body, indent=2), "Parameters": my_params, - "Capabilities": ["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], + "Capabilities": [ + "CAPABILITY_IAM", + "CAPABILITY_NAMED_IAM", + ], } else: @@ -1999,13 +2362,18 @@ def deploy(self): else: # deploy config rule - cfn_body = os.path.join(path.dirname(__file__), "template", "configManagedRule.json") + cfn_body = os.path.join( + path.dirname(__file__), "template", "configManagedRule.json" + ) try: my_stack_name = self.__get_stack_name_from_rule_name(rule_name) my_stack = my_cfn.describe_stacks(StackName=my_stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for " + + rule_name + ) try: cfn_args = { "StackName": my_stack_name, @@ -2022,7 +2390,9 @@ def deploy(self): if e.response["Error"]["Code"] == "ValidationError": if "No updates are to be performed." 
in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. - print(f"[{my_session.region_name}]: No changes to Config Rule.") + print( + f"[{my_session.region_name}]: No changes to Config Rule." + ) else: # Something unexpected has gone wrong. Emit an error and bail. print(f"[{my_session.region_name}]: {e}") @@ -2031,7 +2401,10 @@ def deploy(self): raise except ClientError as e: # If we're in the exception, the stack does not exist and we should create it. - print(f"[{my_session.region_name}]: Creating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for " + + rule_name + ) cfn_args = { "StackName": my_stack_name, "TemplateBody": open(cfn_body, "r").read(), @@ -2055,20 +2428,31 @@ def deploy(self): print(f"[{my_session.region_name}]: Found Custom Rule.") s3_src = "" - s3_dst = self.__upload_function_code(rule_name, rule_params, account_id, my_session, code_bucket_name) + s3_dst = self.__upload_function_code( + rule_name, rule_params, account_id, my_session, code_bucket_name + ) # create CFN Parameters for Custom Rules lambdaRoleArn = "" if self.args.lambda_role_arn: - print(f"[{my_session.region_name}]: Existing IAM Role provided: " + self.args.lambda_role_arn) + print( + f"[{my_session.region_name}]: Existing IAM Role provided: " + + self.args.lambda_role_arn + ) lambdaRoleArn = self.args.lambda_role_arn elif self.args.lambda_role_name: - print(f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + self.args.lambda_role_name) + print( + f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + + self.args.lambda_role_name + ) arn = f"arn:{partition}:iam::{account_id}:role/{self.args.lambda_role_name}" lambdaRoleArn = arn if self.args.boundary_policy_arn: - print(f"[{my_session.region_name}]: Boundary Policy provided: " + self.args.boundary_policy_arn) + print( + f"[{my_session.region_name}]: Boundary Policy provided: " + + self.args.boundary_policy_arn + ) boundaryPolicyArn = self.args.boundary_policy_arn else: boundaryPolicyArn = "" @@ -2123,8 +2507,14 @@ def deploy(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceHandler", "ParameterValue": self.__get_handler(rule_name, rule_params)}, - {"ParameterKey": "Timeout", "ParameterValue": str(self.args.lambda_timeout)}, + { + "ParameterKey": "SourceHandler", + "ParameterValue": self.__get_handler(rule_name, rule_params), + }, + { + "ParameterKey": "Timeout", + "ParameterValue": str(self.args.lambda_timeout), + }, ] layers = self.__get_lambda_layers(my_session, self.args, rule_params) @@ -2133,55 +2523,89 @@ def deploy(self): layers.extend(additional_layers) if layers: - my_params.append({"ParameterKey": "Layers", "ParameterValue": ",".join(layers)}) + my_params.append( + {"ParameterKey": "Layers", "ParameterValue": ",".join(layers)} + ) if self.args.lambda_security_groups and self.args.lambda_subnets: my_params.append( - {"ParameterKey": "SecurityGroupIds", "ParameterValue": self.args.lambda_security_groups} + { + "ParameterKey": "SecurityGroupIds", + "ParameterValue": self.args.lambda_security_groups, + } + ) + my_params.append( + { + "ParameterKey": "SubnetIds", + "ParameterValue": self.args.lambda_subnets, + } ) - my_params.append({"ParameterKey": "SubnetIds", "ParameterValue": self.args.lambda_subnets}) # create json of CFN template - cfn_body = os.path.join(path.dirname(__file__), "template", "configRule.json") + cfn_body = os.path.join( + 
path.dirname(__file__), "template", "configRule.json" + ) template_body = open(cfn_body, "r").read() json_body = json.loads(template_body) remediation = "" if "Remediation" in rule_params: - remediation = self.__create_remediation_cloudformation_block(rule_params["Remediation"]) + remediation = self.__create_remediation_cloudformation_block( + rule_params["Remediation"] + ) json_body["Resources"]["Remediation"] = remediation if "SSMAutomation" in rule_params: ##AWS needs to build the SSM before the Config Rule resource_depends_on = [ "rdkConfigRule", - self.__get_alphanumeric_rule_name(rule_name + "RemediationAction"), + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ), ] remediation["DependsOn"] = resource_depends_on # Add JSON Reference to SSM Document { "Ref" : "MyEC2Instance" } remediation["Properties"]["TargetId"] = { - "Ref": self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") + "Ref": self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) } if "SSMAutomation" in rule_params: print(f"[{my_session.region_name}]: Building SSM Automation Section") - ssm_automation = self.__create_automation_cloudformation_block(rule_params["SSMAutomation"], rule_name) + ssm_automation = self.__create_automation_cloudformation_block( + rule_params["SSMAutomation"], rule_name + ) json_body["Resources"][ self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") ] = ssm_automation if "IAM" in rule_params["SSMAutomation"]: print("Lets Build IAM Role and Policy") # TODO Check For IAM Settings - json_body["Resources"]["Remediation"]["Properties"]["Parameters"]["AutomationAssumeRole"][ - "StaticValue" - ]["Values"] = [{"Fn::GetAtt": [self.__get_alphanumeric_rule_name(rule_name + "Role"), "Arn"]}] + json_body["Resources"]["Remediation"]["Properties"]["Parameters"][ + "AutomationAssumeRole" + ]["StaticValue"]["Values"] = [ + { + "Fn::GetAtt": [ + self.__get_alphanumeric_rule_name(rule_name + "Role"), + "Arn", + ] + } + ] - ssm_iam_role, ssm_iam_policy = self.__create_automation_iam_cloudformation_block( + ( + ssm_iam_role, + ssm_iam_policy, + ) = self.__create_automation_iam_cloudformation_block( rule_params["SSMAutomation"], rule_name ) - json_body["Resources"][self.__get_alphanumeric_rule_name(rule_name + "Role")] = ssm_iam_role - json_body["Resources"][self.__get_alphanumeric_rule_name(rule_name + "Policy")] = ssm_iam_policy + json_body["Resources"][ + self.__get_alphanumeric_rule_name(rule_name + "Role") + ] = ssm_iam_role + json_body["Resources"][ + self.__get_alphanumeric_rule_name(rule_name + "Policy") + ] = ssm_iam_policy # debugging # print(json.dumps(json_body, indent=2)) @@ -2192,7 +2616,10 @@ def deploy(self): my_stack_name = self.__get_stack_name_from_rule_name(rule_name) my_stack = my_cfn.describe_stacks(StackName=my_stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for " + + rule_name + ) try: cfn_args = { "StackName": my_stack_name, @@ -2208,14 +2635,21 @@ def deploy(self): response = my_cfn.update_stack(**cfn_args) except ClientError as e: if e.response["Error"]["Code"] == "ValidationError": - if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. 
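As context for the error-handling hunk around this point: the deploy flow treats CloudFormation's "No updates are to be performed." ValidationError as a benign no-op rather than a failure. A minimal standalone sketch of that pattern, assuming a boto3 session and an already-rendered template body (the names here are placeholders, not the tool's internals):

```
import boto3
from botocore.exceptions import ClientError

def update_stack_if_changed(session, stack_name, template_body, parameters):
    """Attempt a stack update; a 'no changes' ValidationError is treated as success."""
    cfn = session.client("cloudformation")
    try:
        cfn.update_stack(
            StackName=stack_name,
            TemplateBody=template_body,
            Parameters=parameters,
            Capabilities=["CAPABILITY_IAM"],
        )
    except ClientError as e:
        if e.response["Error"]["Code"] == "ValidationError":
            if "No updates are to be performed." in str(e):
                # CloudFormation saw no difference in the rule definition; nothing to do.
                print(f"[{session.region_name}]: No changes to Config Rule.")
            else:
                # Unexpected validation failure: surface it and bail.
                print(f"[{session.region_name}]: {e}")
                return 1
        else:
            raise
    return 0
```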
- print(f"[{my_session.region_name}]: No changes to Config Rule.") + print( + f"[{my_session.region_name}]: No changes to Config Rule." + ) else: # Something unexpected has gone wrong. Emit an error and bail. - print(f"[{my_session.region_name}]: Validation Error on CFN\n") - print(f"[{my_session.region_name}]: " + json.dumps(cfn_args) + "\n") + print( + f"[{my_session.region_name}]: Validation Error on CFN\n" + ) + print( + f"[{my_session.region_name}]: " + + json.dumps(cfn_args) + + "\n" + ) print(f"[{my_session.region_name}]: {e}\n") return 1 else: @@ -2226,12 +2660,18 @@ def deploy(self): print(f"[{my_session.region_name}]: Publishing Lambda code...") my_lambda_client = my_session.client("lambda") my_lambda_client.update_function_code( - FunctionName=my_lambda_arn, S3Bucket=code_bucket_name, S3Key=s3_dst, Publish=True + FunctionName=my_lambda_arn, + S3Bucket=code_bucket_name, + S3Key=s3_dst, + Publish=True, ) print(f"[{my_session.region_name}]: Lambda code updated.") except ClientError as e: # If we're in the exception, the stack does not exist and we should create it. - print(f"[{my_session.region_name}]: Creating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for " + + rule_name + ) cfn_args = { "StackName": my_stack_name, "TemplateBody": json.dumps(json_body, indent=2), @@ -2275,7 +2715,9 @@ def deploy_organization(self): if self.args.custom_code_bucket: code_bucket_name = self.args.custom_code_bucket else: - code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) # If we're only deploying the Lambda functions (and role + permissions), branch here. Someday the "main" execution path should use the same generated CFN templates for single-account deployment. 
if self.args.functions_only: @@ -2302,7 +2744,9 @@ def deploy_organization(self): combined_input_parameters = {} if "InputParameters" in rule_params: - combined_input_parameters.update(json.loads(rule_params["InputParameters"])) + combined_input_parameters.update( + json.loads(rule_params["InputParameters"]) + ) if "OptionalParameters" in rule_params: # Remove empty parameters @@ -2344,12 +2788,19 @@ def deploy_organization(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceIdentifier", "ParameterValue": rule_params["SourceIdentifier"]}, + { + "ParameterKey": "SourceIdentifier", + "ParameterValue": rule_params["SourceIdentifier"], + }, ] my_cfn = my_session.client("cloudformation") # deploy config rule - cfn_body = os.path.join(path.dirname(__file__), "template", "configManagedRuleOrganization.json") + cfn_body = os.path.join( + path.dirname(__file__), + "template", + "configManagedRuleOrganization.json", + ) try: my_stack_name = self.__get_stack_name_from_rule_name(rule_name) @@ -2407,7 +2858,9 @@ def deploy_organization(self): print("Found Custom Rule.") s3_src = "" - s3_dst = self.__upload_function_code(rule_name, rule_params, account_id, my_session, code_bucket_name) + s3_dst = self.__upload_function_code( + rule_name, rule_params, account_id, my_session, code_bucket_name + ) # create CFN Parameters for Custom Rules lambdaRoleArn = "" @@ -2415,7 +2868,10 @@ def deploy_organization(self): print("Existing IAM Role provided: " + self.args.lambda_role_arn) lambdaRoleArn = self.args.lambda_role_arn elif self.args.lambda_role_name: - print(f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + self.args.lambda_role_name) + print( + f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + + self.args.lambda_role_name + ) arn = f"arn:{partition}:iam::{account_id}:role/{self.args.lambda_role_name}" lambdaRoleArn = arn @@ -2475,8 +2931,14 @@ def deploy_organization(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceHandler", "ParameterValue": self.__get_handler(rule_name, rule_params)}, - {"ParameterKey": "Timeout", "ParameterValue": str(self.args.lambda_timeout)}, + { + "ParameterKey": "SourceHandler", + "ParameterValue": self.__get_handler(rule_name, rule_params), + }, + { + "ParameterKey": "Timeout", + "ParameterValue": str(self.args.lambda_timeout), + }, ] layers = self.__get_lambda_layers(my_session, self.args, rule_params) @@ -2485,16 +2947,28 @@ def deploy_organization(self): layers.extend(additional_layers) if layers: - my_params.append({"ParameterKey": "Layers", "ParameterValue": ",".join(layers)}) + my_params.append( + {"ParameterKey": "Layers", "ParameterValue": ",".join(layers)} + ) if self.args.lambda_security_groups and self.args.lambda_subnets: my_params.append( - {"ParameterKey": "SecurityGroupIds", "ParameterValue": self.args.lambda_security_groups} + { + "ParameterKey": "SecurityGroupIds", + "ParameterValue": self.args.lambda_security_groups, + } + ) + my_params.append( + { + "ParameterKey": "SubnetIds", + "ParameterValue": self.args.lambda_subnets, + } ) - my_params.append({"ParameterKey": "SubnetIds", "ParameterValue": self.args.lambda_subnets}) # create json of CFN template - cfn_body = os.path.join(path.dirname(__file__), "template", "configRuleOrganization.json") + cfn_body = os.path.join( + path.dirname(__file__), "template", "configRuleOrganization.json" + ) template_body = 
open(cfn_body, "r").read() json_body = json.loads(template_body) @@ -2523,7 +2997,6 @@ def deploy_organization(self): response = my_cfn.update_stack(**cfn_args) except ClientError as e: if e.response["Error"]["Code"] == "ValidationError": - if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. print("No changes to Config Rule.") @@ -2541,7 +3014,10 @@ def deploy_organization(self): print("Publishing Lambda code...") my_lambda_client = my_session.client("lambda") my_lambda_client.update_function_code( - FunctionName=my_lambda_arn, S3Bucket=code_bucket_name, S3Key=s3_dst, Publish=True + FunctionName=my_lambda_arn, + S3Bucket=code_bucket_name, + S3Key=s3_dst, + Publish=True, ) print("Lambda code updated.") except ClientError as e: @@ -2573,7 +3049,6 @@ def deploy_organization(self): return 0 def export(self): - self.__parse_export_args() # get the rule names @@ -2600,7 +3075,9 @@ def export(self): combined_input_parameters = {} if "InputParameters" in rule_params: - combined_input_parameters.update(json.loads(rule_params["InputParameters"])) + combined_input_parameters.update( + json.loads(rule_params["InputParameters"]) + ) if "OptionalParameters" in rule_params: # Remove empty parameters @@ -2654,22 +3131,36 @@ def export(self): "lambda_timeout": str(self.args.lambda_timeout), } - params_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + ".tfvars.json") + params_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, rule_name.lower() + ".tfvars.json" + ) parameters_file = open(params_file_path, "w") json.dump(my_params, parameters_file, indent=4) parameters_file.close() # create json of CFN template print(self.args.format + " version: " + self.args.version) tf_file_body = os.path.join( - path.dirname(__file__), "template", self.args.format, self.args.version, "config_rule.tf" + path.dirname(__file__), + "template", + self.args.format, + self.args.version, + "config_rule.tf", + ) + tf_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_rule.tf" ) - tf_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_rule.tf") shutil.copy(tf_file_body, tf_file_path) variables_file_body = os.path.join( - path.dirname(__file__), "template", self.args.format, self.args.version, "variables.tf" + path.dirname(__file__), + "template", + self.args.format, + self.args.version, + "variables.tf", + ) + variables_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_variables.tf" ) - variables_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_variables.tf") shutil.copy(variables_file_body, variables_file_path) print("Export completed.This will generate three .tf files.") @@ -2691,8 +3182,14 @@ def test_local(self): "python3.8-lib", "python3.9", "python3.9-lib", + "python3.10", + "python3.10-lib", ): - print("Skipping " + rule_name + " - Runtime not supported for local testing.") + print( + "Skipping " + + rule_name + + " - Runtime not supported for local testing." 
+ ) continue print("Testing " + rule_name) @@ -2700,9 +3197,13 @@ def test_local(self): print("Looking for tests in " + test_dir) if args.verbose == True: - results = unittest.TextTestRunner(buffer=False, verbosity=2).run(self.__create_test_suite(test_dir)) + results = unittest.TextTestRunner(buffer=False, verbosity=2).run( + self.__create_test_suite(test_dir) + ) else: - results = unittest.TextTestRunner(buffer=True, verbosity=2).run(self.__create_test_suite(test_dir)) + results = unittest.TextTestRunner(buffer=True, verbosity=2).run( + self.__create_test_suite(test_dir) + ) print(results) @@ -2735,11 +3236,19 @@ def test_remote(self): # Generate test event from templates test_event = json.load( - open(os.path.join(path.dirname(__file__), "template", event_template_filename), "r"), strict=False + open( + os.path.join( + path.dirname(__file__), "template", event_template_filename + ), + "r", + ), + strict=False, ) my_invoking_event = json.loads(test_event["invokingEvent"]) my_invoking_event["configurationItem"] = my_ci - my_invoking_event["notificationCreationTime"] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.000Z") + my_invoking_event[ + "notificationCreationTime" + ] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.000Z") test_event["invokingEvent"] = json.dumps(my_invoking_event) test_event["ruleParameters"] = json.dumps(my_parameters) @@ -2792,11 +3301,15 @@ def logs(self): logGroupName=log_group_name, orderBy="LastEventTime", descending=True, - limit=int(self.args.number), # This is the worst-case scenario if there is only one event per stream + limit=int( + self.args.number + ), # This is the worst-case scenario if there is only one event per stream ) # Sadly we can't just use filter_log_events, since we don't know the timestamps yet and filter_log_events doesn't appear to support ordering. - my_events = self.__get_log_events(cw_logs, log_streams, int(self.args.number)) + my_events = self.__get_log_events( + cw_logs, log_streams, int(self.args.number) + ) latest_timestamp = 0 @@ -2841,8 +3354,12 @@ def logs(self): def rulesets(self): self.args = get_rulesets_parser().parse_args(self.args.command_args, self.args) - if self.args.subcommand in ["add", "remove"] and (not self.args.ruleset or not self.args.rulename): - print("You must specify a ruleset name and a rule for the `add` and `remove` commands.") + if self.args.subcommand in ["add", "remove"] and ( + not self.args.ruleset or not self.args.rulename + ): + print( + "You must specify a ruleset name and a rule for the `add` and `remove` commands." 
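The `logs` command above asks for the most recently active streams first, since `filter_log_events` does not offer the ordering it needs. A minimal sketch of the stream lookup, assuming the Lambda log group name and requested event count are known:

```
import boto3

def get_recent_log_streams(session, log_group_name, number):
    """Return up to `number` streams, most recently active first (worst case: one event per stream)."""
    cw_logs = session.client("logs")
    response = cw_logs.describe_log_streams(
        logGroupName=log_group_name,
        orderBy="LastEventTime",
        descending=True,
        limit=int(number),
    )
    return response["logStreams"]
```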
+ ) return 1 if self.args.subcommand == "list": @@ -2855,7 +3372,9 @@ def rulesets(self): print("Unknown subcommand.") def create_terraform_template(self): - self.args = get_create_rule_template_parser().parse_args(self.args.command_args, self.args) + self.args = get_create_rule_template_parser().parse_args( + self.args.command_args, self.args + ) if self.args.rulesets: self.args.rulesets = self.args.rulesets.split(",") @@ -2877,7 +3396,9 @@ def create_terraform_template(self): print("CloudFormation template written to " + self.args.output_file) def create_rule_template(self): - self.args = get_create_rule_template_parser().parse_args(self.args.command_args, self.args) + self.args = get_create_rule_template_parser().parse_args( + self.args.command_args, self.args + ) if self.args.rulesets: self.args.rulesets = self.args.rulesets.split(",") @@ -2899,7 +3420,9 @@ def create_rule_template(self): parameters = {} parameters["LambdaAccountId"] = {} - parameters["LambdaAccountId"]["Description"] = "Account ID that contains Lambda functions for Config Rules." + parameters["LambdaAccountId"][ + "Description" + ] = "Account ID that contains Lambda functions for Config Rules." parameters["LambdaAccountId"]["Type"] = "String" parameters["LambdaAccountId"]["MinLength"] = "12" parameters["LambdaAccountId"]["MaxLength"] = "12" @@ -2916,17 +3439,29 @@ def create_rule_template(self): "RoleName": config_role_name, "Path": "/rdk/", "ManagedPolicyArns": [ - {"Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWS_ConfigRole"}, + { + "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWS_ConfigRole" + }, {"Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/ReadOnlyAccess"}, ], "AssumeRolePolicyDocument": CONFIG_ROLE_ASSUME_ROLE_POLICY_DOCUMENT, - "Policies": [{"PolicyName": "DeliveryPermission", "PolicyDocument": CONFIG_ROLE_POLICY_DOCUMENT}], + "Policies": [ + { + "PolicyName": "DeliveryPermission", + "PolicyDocument": CONFIG_ROLE_POLICY_DOCUMENT, + } + ], } # Create Bucket for Config Data resources["ConfigBucket"] = { "Type": "AWS::S3::Bucket", - "Properties": {"BucketName": {"Fn::Sub": config_bucket_prefix + "-${AWS::AccountId}-${AWS::Region}"}}, + "Properties": { + "BucketName": { + "Fn::Sub": config_bucket_prefix + + "-${AWS::AccountId}-${AWS::Region}" + } + }, } # Create ConfigurationRecorder and DeliveryChannel @@ -2935,18 +3470,25 @@ def create_rule_template(self): "Properties": { "Name": "default", "RoleARN": {"Fn::GetAtt": ["ConfigRole", "Arn"]}, - "RecordingGroup": {"AllSupported": True, "IncludeGlobalResourceTypes": True}, + "RecordingGroup": { + "AllSupported": True, + "IncludeGlobalResourceTypes": True, + }, }, } if self.args.config_role_arn: - resources["ConfigurationRecorder"]["Properties"]["RoleARN"] = self.args.config_role_arn + resources["ConfigurationRecorder"]["Properties"][ + "RoleARN" + ] = self.args.config_role_arn resources["DeliveryChannel"] = { "Type": "AWS::Config::DeliveryChannel", "Properties": { "Name": "default", "S3BucketName": {"Ref": "ConfigBucket"}, - "ConfigSnapshotDeliveryProperties": {"DeliveryFrequency": "One_Hour"}, + "ConfigSnapshotDeliveryProperties": { + "DeliveryFrequency": "One_Hour" + }, }, } @@ -2958,7 +3500,10 @@ def create_rule_template(self): for input_param in input_params: cfn_param = {} cfn_param["Description"] = ( - "Pass-through to required Input Parameter " + input_param + " for Config Rule " + rule_name + "Pass-through to required Input Parameter " + + input_param + + " for Config Rule " + + rule_name ) if 
len(str(input_params[input_param]).strip()) == 0: default = "" @@ -2978,17 +3523,24 @@ def create_rule_template(self): for optional_param in optional_params: cfn_param = {} cfn_param["Description"] = ( - "Pass-through to optional Input Parameter " + optional_param + " for Config Rule " + rule_name + "Pass-through to optional Input Parameter " + + optional_param + + " for Config Rule " + + rule_name ) cfn_param["Default"] = optional_params[optional_param] cfn_param["Type"] = "String" - param_name = self.__get_alphanumeric_rule_name(rule_name) + optional_param + param_name = ( + self.__get_alphanumeric_rule_name(rule_name) + optional_param + ) parameters[param_name] = cfn_param optional_parameter_group["Parameters"].append(param_name) - conditions[param_name] = {"Fn::Not": [{"Fn::Equals": ["", {"Ref": param_name}]}]} + conditions[param_name] = { + "Fn::Not": [{"Fn::Equals": ["", {"Ref": param_name}]}] + } config_rule = {} config_rule["Type"] = "AWS::Config::ConfigRule" @@ -3013,7 +3565,10 @@ def create_rule_template(self): # Also add the appropriate event source. source["SourceDetails"].append( - {"EventSource": "aws.config", "MessageType": "ConfigurationItemChangeNotification"} + { + "EventSource": "aws.config", + "MessageType": "ConfigurationItemChangeNotification", + } ) if "SourcePeriodic" in params: source["SourceDetails"].append( @@ -3045,56 +3600,102 @@ def create_rule_template(self): if "InputParameters" in params: for required_param in json.loads(params["InputParameters"]): - cfn_param_name = self.__get_alphanumeric_rule_name(rule_name) + required_param - properties["InputParameters"][required_param] = {"Ref": cfn_param_name} + cfn_param_name = ( + self.__get_alphanumeric_rule_name(rule_name) + required_param + ) + properties["InputParameters"][required_param] = { + "Ref": cfn_param_name + } if "OptionalParameters" in params: for optional_param in json.loads(params["OptionalParameters"]): - cfn_param_name = self.__get_alphanumeric_rule_name(rule_name) + optional_param + cfn_param_name = ( + self.__get_alphanumeric_rule_name(rule_name) + optional_param + ) properties["InputParameters"][optional_param] = { - "Fn::If": [cfn_param_name, {"Ref": cfn_param_name}, {"Ref": "AWS::NoValue"}] + "Fn::If": [ + cfn_param_name, + {"Ref": cfn_param_name}, + {"Ref": "AWS::NoValue"}, + ] } config_rule["Properties"] = properties - config_rule_resource_name = self.__get_alphanumeric_rule_name(rule_name) + "ConfigRule" + config_rule_resource_name = ( + self.__get_alphanumeric_rule_name(rule_name) + "ConfigRule" + ) resources[config_rule_resource_name] = config_rule # If Remediation create the remediation section with potential links to the SSM Details if "Remediation" in params: - remediation = self.__create_remediation_cloudformation_block(params["Remediation"]) + remediation = self.__create_remediation_cloudformation_block( + params["Remediation"] + ) remediation["DependsOn"] = [config_rule_resource_name] if not self.args.rules_only: remediation["DependsOn"].append("ConfigRole") if "SSMAutomation" in params: - ssm_automation = self.__create_automation_cloudformation_block(params["SSMAutomation"], rule_name) + ssm_automation = self.__create_automation_cloudformation_block( + params["SSMAutomation"], rule_name + ) # AWS needs to build the SSM before the Config Rule - remediation["DependsOn"].append(self.__get_alphanumeric_rule_name(rule_name + "RemediationAction")) + remediation["DependsOn"].append( + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) + ) # Add JSON Reference to 
SSM Document { "Ref" : "MyEC2Instance" } remediation["Properties"]["TargetId"] = { - "Ref": self.__get_alphanumeric_rule_name(rule_name) + "RemediationAction" + "Ref": self.__get_alphanumeric_rule_name(rule_name) + + "RemediationAction" } if "IAM" in params["SSMAutomation"]: print("Lets Build IAM Role and Policy For the SSM Document") - ssm_iam_role, ssm_iam_policy = self.__create_automation_iam_cloudformation_block( + ( + ssm_iam_role, + ssm_iam_policy, + ) = self.__create_automation_iam_cloudformation_block( params["SSMAutomation"], rule_name ) - resources[self.__get_alphanumeric_rule_name(rule_name + "Role")] = ssm_iam_role - resources[self.__get_alphanumeric_rule_name(rule_name + "Policy")] = ssm_iam_policy - remediation["Properties"]["Parameters"]["AutomationAssumeRole"]["StaticValue"]["Values"] = [ - {"Fn::GetAtt": [self.__get_alphanumeric_rule_name(rule_name + "Role"), "Arn"]} + resources[ + self.__get_alphanumeric_rule_name(rule_name + "Role") + ] = ssm_iam_role + resources[ + self.__get_alphanumeric_rule_name(rule_name + "Policy") + ] = ssm_iam_policy + remediation["Properties"]["Parameters"]["AutomationAssumeRole"][ + "StaticValue" + ]["Values"] = [ + { + "Fn::GetAtt": [ + self.__get_alphanumeric_rule_name( + rule_name + "Role" + ), + "Arn", + ] + } ] # Override the placeholder to associate the SSM Document Role with newly crafted role - resources[self.__get_alphanumeric_rule_name(rule_name + "RemediationAction")] = ssm_automation - resources[self.__get_alphanumeric_rule_name(rule_name) + "Remediation"] = remediation + resources[ + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) + ] = ssm_automation + resources[ + self.__get_alphanumeric_rule_name(rule_name) + "Remediation" + ] = remediation if tags: tags_str = "" for tag in tags: - tags_str += "Key={},Value={} ".format(tag["Key"], tag["Value"]) - script_for_tag += "aws configservice tag-resource --resources-arn $(aws configservice describe-config-rules --config-rule-names {} --query 'ConfigRules[0].ConfigRuleArn' | tr -d '\"') --tags {} \n".format( - rule_name, tags_str + key = tag["Key"] + val = tag["Value"] + tags_str += f"Key={key},Value={val} " + script_for_tag += ( + "aws configservice tag-resource --resources-arn $(aws configservice describe-config-rules " + + f"--config-rule-names {rule_name} --query 'ConfigRules[0].ConfigRuleArn' | tr -d '\"') --tags {tags_str} \n" ) template["Resources"] = resources @@ -3103,7 +3704,10 @@ def create_rule_template(self): template["Metadata"] = { "AWS::CloudFormation::Interface": { "ParameterGroups": [ - {"Label": {"default": "Lambda Account ID"}, "Parameters": ["LambdaAccountId"]}, + { + "Label": {"default": "Lambda Account ID"}, + "Parameters": ["LambdaAccountId"], + }, required_parameter_group, optional_parameter_group, ], @@ -3120,7 +3724,9 @@ def create_rule_template(self): print("CloudFormation template written to " + self.args.output_file) if script_for_tag: - print("Found tags on config rules. Cloudformation do not support tagging config rule at the moment") + print( + "Found tags on config rules. Cloudformation do not support tagging config rule at the moment" + ) print("Generating script for config rules tags") script_for_tag = "#! 
/bin/bash \n" + script_for_tag if self.args.tag_config_rules_script: @@ -3129,10 +3735,14 @@ def create_rule_template(self): else: print("=========SCRIPT=========") print(script_for_tag) - print("you can use flag [--tag-config-rules-script ] to output the script") + print( + "you can use flag [--tag-config-rules-script ] to output the script" + ) def create_region_set(self): - self.args = get_create_region_set_parser().parse_args(self.args.command_args, self.args) + self.args = get_create_region_set_parser().parse_args( + self.args.command_args, self.args + ) output_file = self.args.output_file output_dict = { "default": ["us-east-1", "us-west-1", "eu-north-1", "ap-southeast-1"], @@ -3184,7 +3794,8 @@ def __list_rulesets(self): rules = [] for obj_name in os.listdir("."): - # print(obj_name) + if obj_name.startswith("."): + continue # Skip hidden items params_file_path = os.path.join(".", obj_name, parameter_file_name) if os.path.isfile(params_file_path): parameters_file = open(params_file_path, "r") @@ -3211,7 +3822,7 @@ def __get_template_dir(self): def __create_test_suite(self, test_dir): tests = [] - for (top, dirs, filenames) in os.walk(test_dir): + for top, dirs, filenames in os.walk(test_dir): for filename in fnmatch.filter(filenames, "*_test.py"): print(filename) sys.path.append(top) @@ -3233,39 +3844,39 @@ def __clean_rule_name(self, rule_name): return output def __create_java_rule(self): - src = os.path.join(path.dirname(__file__), "template", "runtime", "java8", "src") + src = os.path.join( + path.dirname(__file__), "template", "runtime", "java8", "src" + ) dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, "src") shutil.copytree(src, dst) - src = os.path.join(path.dirname(__file__), "template", "runtime", "java8", "jars") + src = os.path.join( + path.dirname(__file__), "template", "runtime", "java8", "jars" + ) dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, "jars") shutil.copytree(src, dst) - src = os.path.join(path.dirname(__file__), "template", "runtime", "java8", "build.gradle") + src = os.path.join( + path.dirname(__file__), "template", "runtime", "java8", "build.gradle" + ) dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, "build.gradle") shutil.copyfile(src, dst) - def __create_dotnet_rule(self): - runtime_path = os.path.join(path.dirname(__file__), "template", "runtime", self.args.runtime) - dst_path = os.path.join(os.getcwd(), rules_dir, self.args.rulename) - for obj in os.listdir(runtime_path): - src = os.path.join(runtime_path, obj) - dst = os.path.join(dst_path, obj) - if os.path.isfile(src): - shutil.copyfile(src, dst) - else: - shutil.copytree(src, dst) - def __print_log_event(self, event): - time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(event["timestamp"] / 1000)) + time_string = time.strftime( + "%Y-%m-%d %H:%M:%S", time.localtime(event["timestamp"] / 1000) + ) rows = 24 columns = 80 - try: - rows, columns = os.popen("stty size", "r").read().split() - except ValueError as e: - # This was probably being run in a headless test environment which had no stty. - print("Using default terminal rows and columns.") + if shutil.which("stty") is not None: + try: + rows, columns = os.popen("stty size", "r").read().split() + except Exception as e: + # This was probably being run in a headless test environment which had no stty. 
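The change above checks `shutil.which("stty")` before shelling out, so headless test environments fall back to the 80x24 default instead of failing. For comparison only (not what the patch does), the standard library can report the terminal size with a built-in fallback and no subprocess at all:

```
import shutil

# Returns an os.terminal_size; the fallback is used when no terminal is attached.
columns, rows = shutil.get_terminal_size(fallback=(80, 24))
line_wrap = columns - 22
```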
+ print("Using default terminal rows and columns.") + else: + print("stty not present -- using default terminal rows and columns.") line_wrap = int(columns) - 22 message_lines = str(event["message"]).splitlines() @@ -3273,7 +3884,11 @@ def __print_log_event(self, event): for line in message_lines: line = line.replace("\t", " ") - formatted_lines.append("\n".join(line[i : i + line_wrap] for i in range(0, len(line), line_wrap))) + formatted_lines.append( + "\n".join( + line[i : i + line_wrap] for i in range(0, len(line), line_wrap) + ) + ) message_string = "\n".join(formatted_lines) message_string = message_string.replace("\n", "\n ") @@ -3326,7 +3941,11 @@ def __get_caller_identity_details(self, my_session): response = my_sts.get_caller_identity() arn_split = response["Arn"].split(":") - return {"account_id": response["Account"], "partition": arn_split[1], "region": arn_split[3]} + return { + "account_id": response["Account"], + "partition": arn_split[1], + "region": arn_split[3], + } def __get_stack_name_from_rule_name(self, rule_name): output = rule_name.replace("_", "") @@ -3341,22 +3960,33 @@ def __get_alphanumeric_rule_name(self, rule_name): def __get_rule_list_for_command(self, Command="deploy"): rule_names = [] if self.args.all: - d = "." for obj_name in os.listdir("."): obj_path = os.path.join(".", obj_name) if os.path.isdir(obj_path) and not obj_name == "rdk": for file_name in os.listdir(obj_path): if obj_name not in rule_names: - if os.path.exists(os.path.join(obj_path, "parameters.json")): + if os.path.exists( + os.path.join(obj_path, "parameters.json") + ): rule_names.append(obj_name) else: if file_name.split(".")[0] == obj_name: rule_names.append(obj_name) if os.path.exists( - os.path.join(obj_path, "src", "main", "java", "com", "rdk", "RuleCode.java") + os.path.join( + obj_path, + "src", + "main", + "java", + "com", + "rdk", + "RuleCode.java", + ) ): rule_names.append(obj_name) - if os.path.exists(os.path.join(obj_path, "RuleCode.cs")): + if os.path.exists( + os.path.join(obj_path, "RuleCode.cs") + ): rule_names.append(obj_name) elif self.args.rulesets: for obj_name in os.listdir("."): @@ -3376,7 +4006,10 @@ def __get_rule_list_for_command(self, Command="deploy"): if os.path.isdir(cleaned_rule_name): rule_names.append(cleaned_rule_name) else: - print('Invalid Option: Specify Rule Name or RuleSet. Run "rdk %s -h" for more info.' % (Command)) + print( + 'Invalid Option: Specify Rule Name or RuleSet. Run "rdk %s -h" for more info.' + % (Command) + ) sys.exit(1) if len(rule_names) == 0: @@ -3387,21 +4020,21 @@ def __get_rule_list_for_command(self, Command="deploy"): for name in rule_names: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." 
) sys.exit(1) return rule_names def __get_rule_parameters(self, rule_name): - params_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, parameter_file_name) + params_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, parameter_file_name + ) try: parameters_file = open(params_file_path, "r") except IOError as e: - print("Failed to open parameters file for rule '{}'".format(rule_name)) + print(f"Failed to open parameters file for rule '{rule_name}'") print(e.message) sys.exit(1) @@ -3410,12 +4043,12 @@ def __get_rule_parameters(self, rule_name): try: my_json = json.load(parameters_file) except ValueError as ve: # includes simplejson.decoder.JSONDecodeError - print("Failed to decode JSON in parameters file for Rule {}".format(rule_name)) + print(f"Failed to decode JSON in parameters file for Rule {rule_name}") print(ve.message) parameters_file.close() sys.exit(1) except Exception as e: - print("Error loading parameters file for Rule {}".format(rule_name)) + print(f"Error loading parameters file for Rule {rule_name}") print(e.message) parameters_file.close() sys.exit(1) @@ -3436,7 +4069,9 @@ def __get_rule_parameters(self, rule_name): return my_json["Parameters"], my_tags def __parse_rule_args(self, is_required): - self.args = get_rule_parser(is_required, self.args.command).parse_args(self.args.command_args, self.args) + self.args = get_rule_parser(is_required, self.args.command).parse_args( + self.args.command_args, self.args + ) if self.args.rulename: if len(self.args.rulename) > 128: @@ -3448,7 +4083,10 @@ def __parse_rule_args(self, is_required): for resource_type in self.args.resource_types.split(","): if resource_type not in accepted_resource_types: resource_type_error = ( - resource_type_error + ' "' + resource_type + '" not found in list of accepted resource types.' + resource_type_error + + ' "' + + resource_type + + '" not found in list of accepted resource types.' ) if resource_type_error: print(resource_type_error) @@ -3459,8 +4097,14 @@ def __parse_rule_args(self, is_required): "Skip-Supported-Resource-Check Flag set (--skip-supported-resource-check), ignoring missing resource type error." ) - if is_required and not self.args.resource_types and not self.args.maximum_frequency: - print("You must specify either a resource type trigger or a maximum frequency.") + if ( + is_required + and not self.args.resource_types + and not self.args.maximum_frequency + ): + print( + "You must specify either a resource type trigger or a maximum frequency." + ) sys.exit(1) if self.args.input_parameters: @@ -3473,16 +4117,20 @@ def __parse_rule_args(self, is_required): if self.args.optional_parameters: try: - optional_params_dict = json.loads(self.args.optional_parameters, strict=False) + optional_params_dict = json.loads( + self.args.optional_parameters, strict=False + ) except Exception as e: - print("Failed to parse optional parameters.") + print(f"Failed to parse optional parameters. 
{repr(e)}") sys.exit(1) if self.args.rulesets: self.args.rulesets = self.args.rulesets.split(",") def __parse_test_args(self): - self.args = get_test_parser(self.args.command).parse_args(self.args.command_args, self.args) + self.args = get_test_parser(self.args.command).parse_args( + self.args.command_args, self.args + ) if self.args.all and self.args.rulename: print("You may specify either specific rules or --all, but not both.") @@ -3494,12 +4142,15 @@ def __parse_test_args(self): return self.args def __parse_deploy_args(self, ForceArgument=False): + self.args = get_deployment_parser(ForceArgument).parse_args( + self.args.command_args, self.args + ) - self.args = get_deployment_parser(ForceArgument).parse_args(self.args.command_args, self.args) - - ### Validate inputs ### + # Validate inputs # if self.args.stack_name and not self.args.functions_only: - print("--stack-name can only be specified when using the --functions-only feature.") + print( + "--stack-name can only be specified when using the --functions-only feature." + ) sys.exit(1) # Make sure we're not exceeding Layer limits @@ -3508,12 +4159,20 @@ def __parse_deploy_args(self, ForceArgument=False): if layer_count > 5: print("You may only specify 5 Lambda Layers.") sys.exit(1) - if self.args.rdklib_layer_arn or self.args.generated_lambda_layer and layer_count > 4: - print("Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers.") + if ( + self.args.rdklib_layer_arn + or self.args.generated_lambda_layer + and layer_count > 4 + ): + print( + "Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers." + ) sys.exit(1) # RDKLib version and RDKLib Layer ARN/Generated RDKLib Layer are mutually exclusive. - if "rdk_lib_version" in self.args and (self.args.rdklib_layer_arn or self.args.generated_lambda_layer): + if "rdk_lib_version" in self.args and ( + self.args.rdklib_layer_arn or self.args.generated_lambda_layer + ): print( "Specify EITHER an RDK Lib version to use the official release OR a specific Layer ARN to use a custom implementation." ) @@ -3521,7 +4180,9 @@ def __parse_deploy_args(self, ForceArgument=False): # RDKLib version and RDKLib Layer ARN/Generated RDKLib Layer are mutually exclusive. if self.args.rdklib_layer_arn and self.args.generated_lambda_layer: - print("Specify EITHER an RDK Lib Layer ARN OR the generated lambda layer flag.") + print( + "Specify EITHER an RDK Lib Layer ARN OR the generated lambda layer flag." + ) sys.exit(1) # Check rule names to make sure none are too long. This is needed to catch Rules created before length constraint was added. @@ -3529,9 +4190,7 @@ def __parse_deploy_args(self, ForceArgument=False): for name in self.args.rulename: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." 
) sys.exit(1) @@ -3542,12 +4201,15 @@ def __parse_deploy_args(self, ForceArgument=False): self.args.rulesets = self.args.rulesets.split(",") def __parse_deploy_organization_args(self, ForceArgument=False): + self.args = get_deployment_organization_parser(ForceArgument).parse_args( + self.args.command_args, self.args + ) - self.args = get_deployment_organization_parser(ForceArgument).parse_args(self.args.command_args, self.args) - - ### Validate inputs ### + # Validate inputs # if self.args.stack_name and not self.args.functions_only: - print("--stack-name can only be specified when using the --functions-only feature.") + print( + "--stack-name can only be specified when using the --functions-only feature." + ) sys.exit(1) # Make sure we're not exceeding Layer limits @@ -3557,7 +4219,9 @@ def __parse_deploy_organization_args(self, ForceArgument=False): print("You may only specify 5 Lambda Layers.") sys.exit(1) if self.args.rdklib_layer_arn and layer_count > 4: - print("Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers.") + print( + "Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers." + ) sys.exit(1) # RDKLib version and RDKLib Layer ARN are mutually exclusive. @@ -3572,9 +4236,7 @@ def __parse_deploy_organization_args(self, ForceArgument=False): for name in self.args.rulename: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." ) sys.exit(1) @@ -3585,17 +4247,16 @@ def __parse_deploy_organization_args(self, ForceArgument=False): self.args.rulesets = self.args.rulesets.split(",") def __parse_export_args(self, ForceArgument=False): - - self.args = get_export_parser(ForceArgument).parse_args(self.args.command_args, self.args) + self.args = get_export_parser(ForceArgument).parse_args( + self.args.command_args, self.args + ) # Check rule names to make sure none are too long. This is needed to catch Rules created before length constraint was added. if self.args.rulename: for name in self.args.rulename: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." 
) sys.exit(1) @@ -3609,35 +4270,14 @@ def __package_function_code(self, rule_name, params): subprocess.call(command, cwd=working_dir) # set source as distribution zip - s3_src = os.path.join(os.getcwd(), rules_dir, rule_name, "build", "distributions", rule_name + ".zip") - elif params["SourceRuntime"] in ["dotnetcore1.0", "dotnetcore2.0"]: - print("Packaging " + rule_name) - working_dir = os.path.join(os.getcwd(), rules_dir, rule_name) - commands = [["dotnet", "restore"]] - - app_runtime = "netcoreapp1.0" - if params["SourceRuntime"] == "dotnetcore2.0": - app_runtime = "netcoreapp2.0" - - commands.append(["dotnet", "lambda", "package", "-c", "Release", "-f", app_runtime]) - - for command in commands: - subprocess.call(command, cwd=working_dir) - - # Remove old zip file if it already exists - package_file_dst = os.path.join(rule_name, rule_name + ".zip") - self.__delete_package_file(package_file_dst) - - # Create new package in temp directory, copy to rule directory - # This copy avoids the archiver trying to include the output zip in itself - s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name, "bin", "Release", app_runtime, "publish") - tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir + s3_src = os.path.join( + os.getcwd(), + rules_dir, + rule_name, + "build", + "distributions", + rule_name + ".zip", ) - if not (os.path.exists(package_file_dst)): - shutil.copy(tmp_src, package_file_dst) - s3_src = os.path.abspath(package_file_dst) - self.__delete_package_file(tmp_src) else: print("Zipping " + rule_name) @@ -3648,7 +4288,9 @@ def __package_function_code(self, rule_name, params): # zip rule code files and upload to s3 bucket s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name) tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir + os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), + "zip", + s3_src_dir, ) if not (os.path.exists(package_file_dst)): shutil.copy(tmp_src, package_file_dst) @@ -3687,7 +4329,9 @@ def __populate_params(self): if self.args.optional_parameters: # As above, but with the optional input parameters. try: - my_optional_params = json.loads(self.args.optional_parameters, strict=False) + my_optional_params = json.loads( + self.args.optional_parameters, strict=False + ) except Exception as e: print( "Error parsing optional input parameter JSON. Make sure your JSON keys and values are enclosed in properly escaped double quotes and your optional-parameters string is enclosed in single quotes." @@ -3699,7 +4343,7 @@ def __populate_params(self): # As above, but with the optional tag key value pairs. try: my_tags = json.loads(self.args.tags, strict=False) - except Exception as e: + except Exception: print( "Error parsing optional tags JSON. Make sure your JSON keys and values are enclosed in properly escaped double quotes and tags string is enclosed in single quotes." 
) @@ -3719,12 +4363,14 @@ def __populate_params(self): ) and not self.args.remediation_action ): - print("Remediation Flags detected but no remediation action (--remediation-action) set") + print( + "Remediation Flags detected but no remediation action (--remediation-action) set" + ) if self.args.remediation_action: try: my_remediation = self.__generate_remediation_params() - except Exception as e: + except Exception: print("Error parsing remediation configuration.") # create config file and place in rule directory @@ -3732,7 +4378,7 @@ def __populate_params(self): "RuleName": self.args.rulename, "Description": self.args.rulename, "SourceRuntime": self.args.runtime, - #'CodeBucket': code_bucket_prefix + account_id, + # 'CodeBucket': code_bucket_prefix + account_id, "CodeKey": self.args.rulename + my_session.region_name + ".zip", "InputParameters": json.dumps(my_input_params), "OptionalParameters": json.dumps(my_optional_params), @@ -3771,7 +4417,9 @@ def __generate_remediation_params(self): ssm_controls = {} if self.args.remediation_concurrent_execution_percent: - ssm_controls["ConcurrentExecutionRatePercentage"] = self.args.remediation_concurrent_execution_percent + ssm_controls[ + "ConcurrentExecutionRatePercentage" + ] = self.args.remediation_concurrent_execution_percent if self.args.remediation_error_rate_percent: ssm_controls["ErrorPercentage"] = self.args.remediation_error_rate_percent @@ -3780,7 +4428,9 @@ def __generate_remediation_params(self): params["ExecutionControls"] = {"SsmControls": ssm_controls} if self.args.auto_remediation_retry_attempts: - params["MaximumAutomaticAttempts"] = self.args.auto_remediation_retry_attempts + params[ + "MaximumAutomaticAttempts" + ] = self.args.auto_remediation_retry_attempts if self.args.remediation_parameters: params["Parameters"] = json.loads(self.args.remediation_parameters) @@ -3801,7 +4451,9 @@ def __generate_remediation_params(self): def __write_params_file(self, rulename, parameters, tags): my_params = {"Version": "1.0", "Parameters": parameters, "Tags": tags} - params_file_path = os.path.join(os.getcwd(), rules_dir, rulename, parameter_file_name) + params_file_path = os.path.join( + os.getcwd(), rules_dir, rulename, parameter_file_name + ) parameters_file = open(params_file_path, "w") json.dump(my_params, parameters_file, indent=2) parameters_file.close() @@ -3814,8 +4466,8 @@ def __wait_for_cfn_stack(self, cfn_client, stackname): response = cfn_client.list_stacks() all_stacks = response["StackSummaries"] - while 'NextToken' in response: - response = cfn_client.list_stacks(NextToken=response['NextToken']) + while "NextToken" in response: + response = cfn_client.list_stacks(NextToken=response["NextToken"]) all_stacks += response["StackSummaries"] for stack in all_stacks: @@ -3833,26 +4485,44 @@ def __wait_for_cfn_stack(self, cfn_client, stackname): # If all stacks have been deleted, clearly we're done! if all_deleted: in_progress = False - print(f"[{my_session.region_name}]: CloudFormation stack operation complete.") + print( + f"[{my_session.region_name}]: CloudFormation stack operation complete." + ) continue else: if "FAILED" in active_stack["StackStatus"]: in_progress = False - print(f"[{my_session.region_name}]: CloudFormation stack operation Failed for " + stackname + ".") + print( + f"[{my_session.region_name}]: CloudFormation stack operation Failed for " + + stackname + + "." 
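The stack-wait helper above pages through `list_stacks` with `NextToken` so stacks beyond the first page are still considered. Isolated, that pagination looks like this (assuming a CloudFormation client created elsewhere):

```
def list_all_stack_summaries(cfn_client):
    """Collect every StackSummary by following NextToken until the API stops returning one."""
    response = cfn_client.list_stacks()
    all_stacks = response["StackSummaries"]
    while "NextToken" in response:
        response = cfn_client.list_stacks(NextToken=response["NextToken"])
        all_stacks += response["StackSummaries"]
    return all_stacks
```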
+ ) if "StackStatusReason" in active_stack: - print(f"[{my_session.region_name}]: Reason: " + active_stack["StackStatusReason"]) + print( + f"[{my_session.region_name}]: Reason: " + + active_stack["StackStatusReason"] + ) elif active_stack["StackStatus"] == "ROLLBACK_COMPLETE": in_progress = False print( - f"[{my_session.region_name}]: CloudFormation stack operation Rolled Back for " + stackname + "." + f"[{my_session.region_name}]: CloudFormation stack operation Rolled Back for " + + stackname + + "." ) if "StackStatusReason" in active_stack: - print(f"[{my_session.region_name}]: Reason: " + active_stack["StackStatusReason"]) + print( + f"[{my_session.region_name}]: Reason: " + + active_stack["StackStatusReason"] + ) elif "COMPLETE" in active_stack["StackStatus"]: in_progress = False - print(f"[{my_session.region_name}]: CloudFormation stack operation complete.") + print( + f"[{my_session.region_name}]: CloudFormation stack operation complete." + ) else: - print(f"[{my_session.region_name}]: Waiting for CloudFormation stack operation to complete...") + print( + f"[{my_session.region_name}]: Waiting for CloudFormation stack operation to complete..." + ) time.sleep(5) def __get_handler(self, rule_name, params): @@ -3865,21 +4535,19 @@ def __get_handler(self, rule_name, params): "python3.8-lib", "python3.9", "python3.9-lib", - "nodejs6.10", - "nodejs8.10", + "python3.10", + "python3.10-lib", ]: return rule_name + ".lambda_handler" elif params["SourceRuntime"] in ["java8"]: return "com.rdk.RuleUtil::handler" - elif params["SourceRuntime"] in ["dotnetcore1.0", "dotnetcore2.0"]: - return "csharp7.0::Rdk.CustomConfigHandler::FunctionHandler" def __get_runtime_string(self, params): if params["SourceRuntime"] in [ - "python3.6-managed", "python3.7-lib", "python3.8-lib", "python3.9-lib", + "python3.10-lib", ]: runtime = params["SourceRuntime"].split("-") return runtime[0] @@ -3896,9 +4564,13 @@ def __get_test_CIs(self, rulename): test_ci_list.append(my_test_ci.get_json()) else: # Check to see if there is a test_ci.json file in the Rule directory - tests_path = os.path.join(os.getcwd(), rules_dir, rulename, test_ci_filename) + tests_path = os.path.join( + os.getcwd(), rules_dir, rulename, test_ci_filename + ) if os.path.exists(tests_path): - print("\tTesting with CI's provided in test_ci.json file. NOT YET IMPLEMENTED") # TODO + print( + "\tTesting with CI's provided in test_ci.json file. NOT YET IMPLEMENTED" + ) # TODO # test_ci_list self._load_cis_from_file(tests_path) else: print("\tTesting with generic CI for configured Resource Type(s)") @@ -3916,7 +4588,8 @@ def __get_lambda_arn_for_stack(self, stack_name): my_cfn = my_session.client("cloudformation") - # Since CFN won't detect changes to the lambda code stored in S3 as a reason to update the stack, we need to manually update the code reference in Lambda once the CFN has run. + # Since CFN won't detect changes to the lambda code stored in S3 as a reason to update the stack, + # we need to manually update the code reference in Lambda once the CFN has run. self.__wait_for_cfn_stack(my_cfn, stack_name) # Lambda function is an output of the stack. @@ -3928,7 +4601,9 @@ def __get_lambda_arn_for_stack(self, stack_name): my_lambda_arn = output["OutputValue"] if my_lambda_arn == "NOTFOUND": - print(f"[{my_session.region_name}]: Could not read CloudFormation stack output to find Lambda function.") + print( + f"[{my_session.region_name}]: Could not read CloudFormation stack output to find Lambda function." 
+ ) sys.exit(1) return my_lambda_arn @@ -3938,27 +4613,27 @@ def __get_lambda_name(self, rule_name, params): lambda_name = params["CustomLambdaName"] if len(lambda_name) > 64: print( - "Error: Found Rule's Lambda function with name over 64 characters: {} \n Recreate the lambda name with a shorter name.".format( - lambda_name - ) + f"Error: Found Rule's Lambda function with name over 64 characters: {lambda_name}." + + "\nRecreate the lambda name with a shorter name." ) sys.exit(1) return lambda_name else: - lambda_name = "RDK-Rule-Function-" + self.__get_stack_name_from_rule_name(rule_name) + lambda_name = "RDK-Rule-Function-" + self.__get_stack_name_from_rule_name( + rule_name + ) if len(lambda_name) > 64: print( - "Error: Found Rule's Lambda function with name over 64 characters: {} \n Recreate the rule with a shorter name or with CustomLambdaName attribute in parameter.json. If you are using 'rdk create', you can add '--custom-lambda-name ' to create your RDK rules".format( - lambda_name - ) + f"Error: Found Rule's Lambda function with name over 64 characters: {lambda_name}." + + "\nRecreate the rule with a shorter name or with CustomLambdaName attribute in parameter.json." + + "\nIf you are using 'rdk create', you can add '--custom-lambda-name ' to create your RDK rules" ) sys.exit(1) return lambda_name def __get_lambda_arn_for_rule(self, rule_name, partition, region, account, params): - return "arn:{}:lambda:{}:{}:function:{}".format( - partition, region, account, self.__get_lambda_name(rule_name, params) - ) + lambda_name = self.__get_lambda_name(rule_name, params) + return f"arn:{partition}:lambda:{region}:{account}:function:{lambda_name}" def __delete_package_file(self, file): try: @@ -3966,7 +4641,9 @@ def __delete_package_file(self, file): except OSError: pass - def __upload_function_code(self, rule_name, params, account_id, my_session, code_bucket_name): + def __upload_function_code( + self, rule_name, params, account_id, my_session, code_bucket_name + ): if params["SourceRuntime"] == "java8": # Do java build and package. 
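The ARN helper above is reduced to a single partition-aware f-string. A standalone sketch with placeholder values (not taken from any real account):

```
def lambda_arn(partition, region, account, function_name):
    return f"arn:{partition}:lambda:{region}:{account}:function:{function_name}"

# Example with illustrative placeholder values:
print(lambda_arn("aws", "us-east-1", "111122223333", "RDK-Rule-Function-MyRule"))
# arn:aws:lambda:us-east-1:111122223333:function:RDK-Rule-Function-MyRule
```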
print(f"[{my_session.region_name}]: Running Gradle Build for " + rule_name) @@ -3976,7 +4653,12 @@ def __upload_function_code(self, rule_name, params, account_id, my_session, code # set source as distribution zip s3_src = os.path.join( - os.getcwd(), rules_dir, rule_name, "build", "distributions", rule_name + my_session.region_name + ".zip" + os.getcwd(), + rules_dir, + rule_name, + "build", + "distributions", + rule_name + my_session.region_name + ".zip", ) s3_dst = "/".join((rule_name, rule_name + ".zip")) @@ -3986,41 +4668,6 @@ def __upload_function_code(self, rule_name, params, account_id, my_session, code my_s3.meta.client.upload_file(s3_src, code_bucket_name, s3_dst) print(f"[{my_session.region_name}]: Upload complete.") - elif params["SourceRuntime"] in ["dotnetcore1.0", "dotnetcore2.0"]: - print("Packaging " + rule_name) - working_dir = os.path.join(os.getcwd(), rules_dir, rule_name) - commands = [["dotnet", "restore"]] - - app_runtime = "netcoreapp1.0" - if params["SourceRuntime"] == "dotnetcore2.0": - app_runtime = "netcoreapp2.0" - - commands.append(["dotnet", "lambda", "package", "-c", "Release", "-f", app_runtime]) - - for command in commands: - subprocess.call(command, cwd=working_dir) - - # Remove old zip file if it already exists - package_file_dst = os.path.join(rule_name, rule_name + ".zip") - self.__delete_package_file(package_file_dst) - - # Create new package in temp directory, copy to rule directory - # This copy avoids the archiver trying to include the output zip in itself - s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name, "bin", "Release", app_runtime, "publish") - tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir - ) - s3_dst = "/".join((rule_name, rule_name + ".zip")) - - my_s3 = my_session.resource("s3") - - print(f"[{my_session.region_name}]: Uploading " + rule_name) - my_s3.meta.client.upload_file(tmp_src, code_bucket_name, s3_dst) - print(f"[{my_session.region_name}]: Upload complete.") - if not (os.path.exists(package_file_dst)): - shutil.copy(tmp_src, package_file_dst) - self.__delete_package_file(tmp_src) - else: print(f"[{my_session.region_name}]: Zipping " + rule_name) # Remove old zip file if it already exists @@ -4031,7 +4678,9 @@ def __upload_function_code(self, rule_name, params, account_id, my_session, code s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name) tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir + os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), + "zip", + s3_src_dir, ) s3_dst = "/".join((rule_name, rule_name + ".zip")) @@ -4058,7 +4707,6 @@ def __create_remediation_cloudformation_block(self, remediation_config): def __create_automation_cloudformation_block(self, ssm_automation, rule_name): print("Generate SSM Resources") - current_working_direcoty = os.getcwd() ssm_json_dir = os.path.join(os.getcwd(), ssm_automation["Document"]) print("Reading SSM JSON From -> " + ssm_json_dir) # params_file_path = os.path.join(os.getcwd(), rules_dir, rulename, parameter_file_name) @@ -4066,19 +4714,28 @@ def __create_automation_cloudformation_block(self, ssm_automation, rule_name): ssm_automation_json = json.loads(ssm_automation_content) ssm_automation_config = { "Type": "AWS::SSM::Document", - "Properties": {"DocumentType": "Automation", "Content": ssm_automation_json}, + "Properties": { + "DocumentType": "Automation", + "Content": ssm_automation_json, + }, } 
return ssm_automation_config def __create_automation_iam_cloudformation_block(self, ssm_automation, rule_name): - - print("Generate IAM Role for SSM Document with these actions", str(ssm_automation["IAM"])) + print( + "Generate IAM Role for SSM Document with these actions", + str(ssm_automation["IAM"]), + ) assume_role_template = { "Version": "2012-10-17", "Statement": [ - {"Effect": "Allow", "Principal": {"Service": "ssm.amazonaws.com"}, "Action": "sts:AssumeRole"} + { + "Effect": "Allow", + "Principal": {"Service": "ssm.amazonaws.com"}, + "Action": "sts:AssumeRole", + } ], } @@ -4086,7 +4743,8 @@ def __create_automation_iam_cloudformation_block(self, ssm_automation, rule_name ssm_automation_iam_role = { "Type": "AWS::IAM::Role", "Properties": { - "Description": "IAM Role to Support Config Remediation for " + rule_name, + "Description": "IAM Role to Support Config Remediation for " + + rule_name, "Path": "/rdk-remediation-role/", # "RoleName": {"Fn::Sub": "" + rule_name + "-Remediation-Role-${AWS::Region}"}, "AssumeRolePolicyDocument": assume_role_template, @@ -4097,11 +4755,21 @@ def __create_automation_iam_cloudformation_block(self, ssm_automation, rule_name "Type": "AWS::IAM::Policy", "Properties": { "PolicyDocument": { - "Statement": [{"Action": ssm_automation["IAM"], "Effect": "Allow", "Resource": "*"}], + "Statement": [ + { + "Action": ssm_automation["IAM"], + "Effect": "Allow", + "Resource": "*", + } + ], "Version": "2012-10-17", }, - "PolicyName": {"Fn::Sub": "" + rule_name + "-Remediation-Policy-${AWS::Region}"}, - "Roles": [{"Ref": self.__get_alphanumeric_rule_name(rule_name + "Role")}], + "PolicyName": { + "Fn::Sub": "" + rule_name + "-Remediation-Policy-${AWS::Region}" + }, + "Roles": [ + {"Ref": self.__get_alphanumeric_rule_name(rule_name + "Role")} + ], }, } @@ -4119,7 +4787,9 @@ def __create_function_cloudformation_template(self): parameters = {} parameters["SourceBucket"] = {} - parameters["SourceBucket"]["Description"] = "Name of the S3 bucket that you have stored the rule zip files in." + parameters["SourceBucket"][ + "Description" + ] = "Name of the S3 bucket that you have stored the rule zip files in." 
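The IAM block generated above boils down to a role trusted by `ssm.amazonaws.com` plus a policy that grants only the actions listed under the rule's `SSMAutomation` `IAM` key. A condensed sketch of those two pieces (the constant and helper names are placeholders):

```
# Trust policy attached to the generated remediation role.
ASSUME_ROLE_FOR_SSM = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {"Service": "ssm.amazonaws.com"},
            "Action": "sts:AssumeRole",
        }
    ],
}

def remediation_policy_statement(iam_actions):
    """Only the actions listed under the rule's SSMAutomation 'IAM' key are granted."""
    return {"Action": iam_actions, "Effect": "Allow", "Resource": "*"}
```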
parameters["SourceBucket"]["Type"] = "String" parameters["SourceBucket"]["MinLength"] = "1" parameters["SourceBucket"]["MaxLength"] = "255" @@ -4134,10 +4804,16 @@ def __create_function_cloudformation_template(self): partition = identity_details["partition"] lambdaRoleArn = "" if self.args.lambda_role_arn: - print(f"[{my_session.region_name}]: Existing IAM Role provided: " + self.args.lambda_role_arn) + print( + f"[{my_session.region_name}]: Existing IAM Role provided: " + + self.args.lambda_role_arn + ) lambdaRoleArn = self.args.lambda_role_arn elif self.args.lambda_role_name: - print(f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + self.args.lambda_role_name) + print( + f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + + self.args.lambda_role_name + ) arn = f"arn:{partition}:iam::{account_id}:role/{self.args.lambda_role_name}" lambdaRoleArn = arn else: @@ -4158,12 +4834,6 @@ def __create_function_cloudformation_template(self): ], } lambda_policy_statements = [ - { - "Sid": "1", - "Action": ["s3:GetObject"], - "Effect": "Allow", - "Resource": {"Fn::Sub": "arn:${AWS::Partition}:s3:::${SourceBucket}/*"}, - }, { "Sid": "2", "Action": [ @@ -4175,9 +4845,24 @@ def __create_function_cloudformation_template(self): "Effect": "Allow", "Resource": "*", }, - {"Sid": "3", "Action": ["config:PutEvaluations"], "Effect": "Allow", "Resource": "*"}, - {"Sid": "4", "Action": ["iam:List*", "iam:Describe*", "iam:Get*"], "Effect": "Allow", "Resource": "*"}, - {"Sid": "5", "Action": ["sts:AssumeRole"], "Effect": "Allow", "Resource": "*"}, + { + "Sid": "3", + "Action": ["config:PutEvaluations"], + "Effect": "Allow", + "Resource": "*", + }, + { + "Sid": "4", + "Action": ["iam:List*", "iam:Get*"], + "Effect": "Allow", + "Resource": "*", + }, + { + "Sid": "5", + "Action": ["sts:AssumeRole"], + "Effect": "Allow", + "Resource": "*", + }, ] if self.args.lambda_subnets and self.args.lambda_security_groups: vpc_policy = { @@ -4194,7 +4879,10 @@ def __create_function_cloudformation_template(self): lambda_role["Properties"]["Policies"] = [ { "PolicyName": "ConfigRulePolicy", - "PolicyDocument": {"Version": "2012-10-17", "Statement": lambda_policy_statements}, + "PolicyDocument": { + "Version": "2012-10-17", + "Statement": lambda_policy_statements, + }, } ] lambda_role["Properties"]["ManagedPolicyArns"] = [ @@ -4215,7 +4903,10 @@ def __create_function_cloudformation_template(self): lambda_function["Type"] = "AWS::Lambda::Function" properties = {} properties["FunctionName"] = self.__get_lambda_name(rule_name, params) - properties["Code"] = {"S3Bucket": {"Ref": "SourceBucket"}, "S3Key": rule_name + "/" + rule_name + ".zip"} + properties["Code"] = { + "S3Bucket": {"Ref": "SourceBucket"}, + "S3Key": rule_name + "/" + rule_name + ".zip", + } properties["Description"] = "Function for AWS Config Rule " + rule_name properties["Handler"] = self.__get_handler(rule_name, params) properties["MemorySize"] = "256" @@ -4248,7 +4939,9 @@ def __create_function_cloudformation_template(self): lambda_permissions["Type"] = "AWS::Lambda::Permission" lambda_permissions["DependsOn"] = alphanum_rule_name + "LambdaFunction" lambda_permissions["Properties"] = { - "FunctionName": {"Fn::GetAtt": [alphanum_rule_name + "LambdaFunction", "Arn"]}, + "FunctionName": { + "Fn::GetAtt": [alphanum_rule_name + "LambdaFunction", "Arn"] + }, "Action": "lambda:InvokeFunction", "Principal": "config.amazonaws.com", } @@ -4260,15 +4953,25 @@ def __create_function_cloudformation_template(self): def __tag_config_rule(self, rule_name, 
cfn_tags, my_session): config_client = my_session.client("config") - config_arn = config_client.describe_config_rules(ConfigRuleNames=[rule_name])["ConfigRules"][0]["ConfigRuleArn"] + config_arn = config_client.describe_config_rules(ConfigRuleNames=[rule_name])[ + "ConfigRules" + ][0]["ConfigRuleArn"] response = config_client.tag_resource(ResourceArn=config_arn, Tags=cfn_tags) return response def __get_lambda_layers(self, my_session, args, params): layers = [] if "SourceRuntime" in params: - if params["SourceRuntime"] in ["python3.7-lib", "python3.8-lib", "python3.9-lib"]: - if hasattr(args, "generated_lambda_layer") and args.generated_lambda_layer: + if params["SourceRuntime"] in [ + "python3.7-lib", + "python3.8-lib", + "python3.9-lib", + "python3.10-lib", + ]: + if ( + hasattr(args, "generated_lambda_layer") + and args.generated_lambda_layer + ): lambda_layer_version = self.__get_existing_lambda_layer( my_session, layer_name=args.custom_layer_name ) @@ -4276,7 +4979,9 @@ def __get_lambda_layers(self, my_session, args, params): print( f"{my_session.region_name} generated-lambda-layer flag received, but layer [{args.custom_layer_name}] not found in {my_session.region_name}. Creating one now" ) - self.__create_new_lambda_layer(my_session, layer_name=args.custom_layer_name) + self.__create_new_lambda_layer( + my_session, layer_name=args.custom_layer_name + ) lambda_layer_version = self.__get_existing_lambda_layer( my_session, layer_name=args.custom_layer_name ) @@ -4285,7 +4990,9 @@ def __get_lambda_layers(self, my_session, args, params): layers.append(args.rdklib_layer_arn) else: rdk_lib_version = RDKLIB_LAYER_VERSION[my_session.region_name] - rdklib_arn = RDKLIB_ARN_STRING.format(region=my_session.region_name, version=rdk_lib_version) + rdklib_arn = RDKLIB_ARN_STRING.format( + region=my_session.region_name, version=rdk_lib_version + ) layers.append(rdklib_arn) return layers @@ -4300,10 +5007,11 @@ def __get_existing_lambda_layer(self, my_session, layer_name="rdklib-layer"): return None def __create_new_lambda_layer(self, my_session, layer_name="rdklib-layer"): - successful_return = None if layer_name == "rdklib-layer": - successful_return = self.__create_new_lambda_layer_serverless_repo(my_session) + successful_return = self.__create_new_lambda_layer_serverless_repo( + my_session + ) # If that doesn't work, create it locally and upload - SAR doesn't support the custom layer name if layer_name != "rdklib-layer" or not successful_return: @@ -4334,7 +5042,9 @@ def __create_new_lambda_layer_serverless_repo(self, my_session): change_set_arn = sar_client.create_cloud_formation_change_set( ApplicationId=RDKLIB_LAYER_SAR_ID, StackName="rdklib" )["ChangeSetId"] - print(f"[{my_session.region_name}]: Creating change set to deploy rdklib-layer") + print( + f"[{my_session.region_name}]: Creating change set to deploy rdklib-layer" + ) code = self.__check_on_change_set(cfn_client, change_set_arn) if code == 1: print( @@ -4342,9 +5052,13 @@ def __create_new_lambda_layer_serverless_repo(self, my_session): ) return 1 if code == -1: - print(f"[{my_session.region_name}]: Error creating change set, attempting to use manual deployment") + print( + f"[{my_session.region_name}]: Error creating change set, attempting to use manual deployment" + ) raise ClientError() - print(f"[{my_session.region_name}]: Executing change set to deploy rdklib-layer") + print( + f"[{my_session.region_name}]: Executing change set to deploy rdklib-layer" + ) cfn_client.execute_change_set(ChangeSetName=change_set_arn) waiter = 
cfn_client.get_waiter(f"stack_{create_type}_complete") waiter.wait(StackName="serverlessrepo-rdklib") @@ -4358,7 +5072,9 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer region = my_session.region_name print(f"[{region}]: Creating new {layer_name}") folder_name = "lib" + str(uuid.uuid4()) - shell_command = f"pip3 install --target python boto3 botocore rdk rdklib future mock" + shell_command = ( + "pip3 install --target python boto3 botocore rdk rdklib future mock" + ) print(f"[{region}]: Installing Packages to {folder_name}/python") try: @@ -4367,7 +5083,7 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer print(e) sys.exit(1) os.chdir(folder_name) - ret = subprocess.run(shell_command, capture_output=True, shell=True) + _ = subprocess.run(shell_command, capture_output=True, shell=True) print(f"[{region}]: Creating rdk_lib_layer.zip") shutil.make_archive(f"rdk_lib_layer", "zip", ".", "python") @@ -4378,12 +5094,17 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer print(f"[{region}]: Creating temporary S3 Bucket") bucket_name = "rdkliblayertemp" + str(uuid.uuid4()) if region != "us-east-1": - s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={"LocationConstraint": region}) + s3_client.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": region}, + ) if region == "us-east-1": s3_client.create_bucket(Bucket=bucket_name) print(f"[{region}]: Uploading rdk_lib_layer.zip to S3") - s3_resource.Bucket(bucket_name).upload_file(f"{folder_name}/rdk_lib_layer.zip", layer_name) + s3_resource.Bucket(bucket_name).upload_file( + f"{folder_name}/rdk_lib_layer.zip", layer_name + ) lambda_client = my_session.client("lambda") @@ -4391,7 +5112,7 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer lambda_client.publish_layer_version( LayerName=layer_name, Content={"S3Bucket": bucket_name, "S3Key": layer_name}, - CompatibleRuntimes=["python3.7", "python3.8", "python3.9"], + CompatibleRuntimes=["python3.7", "python3.8", "python3.9", "python3.10"], ) print(f"[{region}]: Deleting temporary S3 Bucket") @@ -4424,14 +5145,20 @@ def __init__(self, ci_type): ci_file = ci_type.replace("::", "_") + ".json" try: self.ci_json = json.load( - open(os.path.join(path.dirname(__file__), "template", example_ci_dir, ci_file), "r") + open( + os.path.join( + path.dirname(__file__), "template", example_ci_dir, ci_file + ), + "r", + ) ) except FileNotFoundError: + resource_url = "https://github.com/awslabs/aws-config-resource-schema/blob/master/config/properties/resource-types/" print( "No sample CI found for " + ci_type + ", even though it appears to be a supported CI. Please log an issue at https://github.com/awslabs/aws-config-rdk." 
- + "\nLook here: https://github.com/awslabs/aws-config-resource-schema/blob/master/config/properties/resource-types/ for additional info" + + f"\nLook here: {resource_url} for additional info" ) exit(1) diff --git a/rdk/template/configRule.json b/rdk/template/configRule.json index e8fe4fa9..43ab74a5 100644 --- a/rdk/template/configRule.json +++ b/rdk/template/configRule.json @@ -221,14 +221,6 @@ "PolicyDocument": { "Version": "2012-10-17", "Statement": [ - { - "Sid": "1", - "Action": [ - "s3:GetObject" - ], - "Effect": "Allow", - "Resource": { "Fn::Sub": "arn:${AWS::Partition}:s3:::${SourceBucket}/${SourcePath}" } - }, { "Sid": "2", "Action": [ @@ -252,7 +244,6 @@ "Sid": "4", "Action": [ "iam:List*", - "iam:Describe*", "iam:Get*" ], "Effect": "Allow", diff --git a/rdk/template/configRuleOrganization.json b/rdk/template/configRuleOrganization.json index 59d1c58f..52dd506e 100644 --- a/rdk/template/configRuleOrganization.json +++ b/rdk/template/configRuleOrganization.json @@ -199,14 +199,6 @@ "PolicyDocument": { "Version": "2012-10-17", "Statement": [ - { - "Sid": "1", - "Action": [ - "s3:GetObject" - ], - "Effect": "Allow", - "Resource": { "Fn::Sub": "arn:${AWS::Partition}:s3:::${SourceBucket}/${SourcePath}" } - }, { "Sid": "2", "Action": [ @@ -230,7 +222,6 @@ "Sid": "4", "Action": [ "iam:List*", - "iam:Describe*", "iam:Get*" ], "Effect": "Allow", diff --git a/rdk/template/example_ci/AWS_R53_HostedZone.json b/rdk/template/example_ci/AWS_R53_HostedZone.json new file mode 100644 index 00000000..e93a324a --- /dev/null +++ b/rdk/template/example_ci/AWS_R53_HostedZone.json @@ -0,0 +1,39 @@ +{ + "version": "1.3", + "accountId": "123456789012", + "configurationItemCaptureTime": "2023-05-01T18:00:07.672Z", + "configurationItemStatus": "ResourceDiscovered", + "configurationStateId": "1682964007672", + "configurationItemMD5Hash": "", + "arn": "arn:aws:route53:::hostedzone/Z017455410COBZEF0ABCD", + "resourceType": "AWS::Route53::HostedZone", + "resourceId": "Z017455410COBZEF0ABCD", + "resourceName": "testdomain.lab.", + "awsRegion": "us-east-1", + "availabilityZone": "Regional", + "tags": {}, + "relatedEvents": [], + "relationships": [], + "configuration": { + "Id": "Z017455410COBZEF0ABCD", + "HostedZoneConfig": { + "Comment": "This is a test domain" + }, + "Name": "testdomain.lab.", + "NameServers": [ + "ns-1965.awsdns-53.co.uk", + "ns-944.awsdns-54.net", + "ns-1144.awsdns-15.org", + "ns-430.awsdns-53.com" + ], + "VPCs": [], + "HostedZoneTags": [ + { + "Key": "cost_center", + "Value": "payroll" + } + ] + }, + "supplementaryConfiguration": {}, + "resourceTransitionStatus": "None" + } \ No newline at end of file diff --git a/rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json b/rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json new file mode 100644 index 00000000..23b6d759 --- /dev/null +++ b/rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json @@ -0,0 +1,23 @@ +{ + "version": "1.3", + "accountId": "123456789012", + "configurationItemCaptureTime": "2022-05-20T15:53:57.732Z", + "configurationItemStatus": "ResourceDiscovered", + "configurationStateId": "1653062037732", + "configurationItemMD5Hash": "", + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": "123456789012", + "awsRegion": "us-east-1", + "availabilityZone": "Not Applicable", + "tags": {}, + "relatedEvents": [], + "relationships": [], + "configuration": { + "blockPublicAcls": true, + "ignorePublicAcls": true, + "blockPublicPolicy": true, + "restrictPublicBuckets": true + }, + 
"supplementaryConfiguration": {}, + "resourceTransitionStatus": "None" + } \ No newline at end of file diff --git a/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json b/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json index 2846342f..109534b5 100644 --- a/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json +++ b/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json @@ -48,1772 +48,1519 @@ "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2894856", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2896496", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2919355", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2919442", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2920189", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB2934520", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2938066", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2938772", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB2949621", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2954879", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2955164", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB2959626", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2962409", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB2962806", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2965500", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": 
"KB2967917", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2969339", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2971203", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2973351", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2973448", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2975061", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2975719", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2976627", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2977765", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2978041", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2978126", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2984006", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2987107", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2989647", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2989930", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2993100", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2995004", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2995388", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB2996799", - 
"InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2998174", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB2999226", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3000483", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3000850", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3003057", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, February 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3004361", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3004365", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3004545", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3008923", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3012199", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3012702", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013172", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013769", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013791", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013816", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3014442", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3019978", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, February 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3020338", - "InstalledBy": "NT 
AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3021910", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, February 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3021952", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3022345", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3022777", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3023222", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3023266", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3024751", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3024755", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3029603", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3030377", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3030947", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3032359", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3032663", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3033446", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3035126", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3036612", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3037579", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3037924", - "InstalledBy": "NT AUTHORITY -SYSTEM" + 
"InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3038002", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3038314", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3038701", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3041857", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3042085", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3042553", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3044374", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3044673", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045634", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3045685", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045717", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045719", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3045755", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045992", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3045999", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3046017", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3046737", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3048043", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3049563", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054169", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054203", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054256", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054464", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3055323", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3055343", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3055642", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3059316", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3059317", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3060681", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3060793", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3061512", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3063843", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3064209", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3068708", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3071756", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3074228", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, 
September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3074548", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3075220", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3075853", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3077715", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3078071", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3078405", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3078676", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3080042", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3080149", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3082089", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3083325", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3083711", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3083992", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3084135", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3084905", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3086255", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3087038", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3087041", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3087137", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3091297", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3092601", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3092627", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3093983", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3094486", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3095701", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3096433", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3097997", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3098779", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3098785", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3099834", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3100473", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3100773", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3100919", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3100956", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3102429", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3102467", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3102812", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3103616", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3103696", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3103709", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3104002", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3109094", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3109103", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3109976", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3110329", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3112148", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3112336", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3115224", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3118401", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3121255", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3121261", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3121461", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3121918", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3122654", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3122660", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3123242", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3123245", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3123479", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3124275", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3125424", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3126033", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3126434", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3126587", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3126593", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3127226", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3127231", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3128650", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3133043", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3133681", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3133690", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3133924", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3134179", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3134242", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3134814", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3134815", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3135449", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3135456", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3135998", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3137061", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3137725", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3137728", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3138602", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3138615", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3139164", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3139398", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3139914", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3139929", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3140219", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3140234", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3141092", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", 
"Description": "Security Update", "HotFixID": "KB3142036", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3145384", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3145432", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3146604", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3146723", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3146751", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3146963", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3147071", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3148198", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3148851", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3149090", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3149157", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3153704", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3154070", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3155784", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3156016", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3156017", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3156019", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security 
Update", "HotFixID": "KB3156059", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3156418", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3159398", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3160005", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3161561", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3161949", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3161958", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3162343", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3162835", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164024", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164033", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164035", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164294", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3167679", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3169704", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3170377", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3170455", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3172614", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 
2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3172727", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3172729", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3173424", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3174644", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3175024", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3175443", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3175887", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3177108", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3177186", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3177723", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3177725", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3178034", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3178539", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3179574", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, October 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3179948", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, October 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3182203", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3184122", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3184943", - "InstalledBy": "NT AUTHORITY 
-SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3185319", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3185911", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, October 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3185331", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" } ] }, @@ -1922,13 +1669,13 @@ SYSTEM" "tags": {}, "configurationItemVersion": "1.2", "configurationItemCaptureTime": "2016-10-26T19:11:44.151Z", - "configurationStateId": 1477509104151, - "awsAccountId": "123456789012", + "configurationStateId": "1477509104151", + "awsAccountId": "138920347130", "configurationItemStatus": "ResourceDiscovered", "resourceType": "AWS::SSM::ManagedInstanceInventory", "resourceId": "i-07f6b44c44bab9e8e", "resourceName": "", - "ARN": "arn:aws:ssm:us-east-1:123456789012:managed-instance-inventory/i-07f6b44c44bab9e8e", + "ARN": "arn:aws:ssm:us-east-1:138920347130:managed-instance-inventory/i-07f6b44c44bab9e8e", "awsRegion": "us-east-1", "availabilityZone": null, "configurationStateMd5Hash": "f5edb28b271ef50dddb2c5b08a535f14", diff --git a/rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs b/rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs deleted file mode 100644 index d5fefc36..00000000 --- a/rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs +++ /dev/null @@ -1,189 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.IO; -using System.Text; - -using System.Threading.Tasks; - -using Amazon.Lambda.Serialization.Json; -using Amazon.Lambda.Core; - -using Amazon.Lambda.ConfigEvents; -using Amazon.CloudWatchEvents; -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Runtime; -using Amazon.Lambda.Model; -using Newtonsoft.Json.Linq; - -// Assembly attribute to enable the Lambda function's JSON input to be converted into a .NET class. -[assembly: LambdaSerializer(typeof(Amazon.Lambda.Serialization.Json.JsonSerializer))] - -namespace Rdk -{ - public class CustomConfigHandler - { - public const String AWS_REGION_PROPERTY = "AWS_DEFAULT_REGION"; - public const String MESSAGE_TYPE_PROPERTY = "messageType"; - public const String HOST_ID = "hostId"; - public const String PLACEMENT = "placement"; - public const String CONFIGURATION = "configuration"; - public const String IMAGE_ID = "imageId"; - public const String STATUS_PATH = "configurationItemStatus"; - public const String TENANCY = "tenancy"; - public const String RESOURCE_DELETED = "ResourceDeleted"; - public const String RESOURCE_DELETED_NOT_RECORDED = "ResourceDeletedNotRecorded"; - public const String CAPTURE_TIME_PATH = "configurationItemCaptureTime"; - public const String CONFIGURATION_ITEM = "configurationItem"; - public const String RESOURCE_ID = "resourceId"; - public const String RESOURCE_NOT_RECORDED = "ResourceNotRecorded"; - public const String RESOURCE_TYPE = "resourceType"; - - - IAmazonConfigService ConfigService { get; set; } - - /// - /// Default constructor. This constructor is used by Lambda to construct the instance. 
When invoked in a Lambda environment - /// the AWS credentials will come from the IAM role associated with the function and the AWS region will be set to the - /// region the Lambda function is executed in. - /// - public CustomConfigHandler() - { - Console.WriteLine("inside constructor..."); - } - - /// - /// Constructs an instance with a preconfigured S3 client. This can be used for testing the outside of the Lambda environment. - /// - /// - public CustomConfigHandler(IAmazonConfigService configService) - { - this.ConfigService = configService; - } - - /// - /// This method is called for every Lambda invocation. This method takes in an Config event object and can be used - /// to respond to Config notifications. - /// - /// - /// - /// Nothing - public async Task FunctionHandler(ConfigEvent evnt, ILambdaContext context) - { - Console.WriteLine("inside function handler..."); - Amazon.RegionEndpoint region = Amazon.RegionEndpoint.GetBySystemName(System.Environment.GetEnvironmentVariable(AWS_REGION_PROPERTY)); - AmazonConfigServiceClient configServiceClient = new AmazonConfigServiceClient(region); - await DoHandle(evnt, context, configServiceClient); - } - - private async Task DoHandle(ConfigEvent configEvent, ILambdaContext context, AmazonConfigServiceClient configServiceClient) - { - JObject ruleParamsObj; - JObject configItem; - - if (configEvent.RuleParameters != null){ - ruleParamsObj = JObject.Parse(configEvent.RuleParameters.ToString()); - } else { - ruleParamsObj = new JObject(); - } - - JObject invokingEventObj = JObject.Parse(configEvent.InvokingEvent.ToString()); - if(invokingEventObj["configurationItem"] != null){ - configItem = JObject.Parse(invokingEventObj[CONFIGURATION_ITEM].ToString()); - } else { - configItem = new JObject(); - } - - FailForIncompatibleEventTypes(invokingEventObj); - ComplianceType myCompliance = ComplianceType.NOT_APPLICABLE; - - if (!IsEventNotApplicable(configItem, configEvent.EventLeftScope)) - { - myCompliance = RuleCode.EvaluateCompliance(invokingEventObj, ruleParamsObj, context); - } - - // Associates the evaluation result with the AWS account published in the event. 
- Evaluation evaluation = new Evaluation { - ComplianceResourceId = GetResourceId(configItem), - ComplianceResourceType = GetResourceType(configItem), - OrderingTimestamp = GetCiCapturedTime(configItem), - ComplianceType = myCompliance - }; - - await DoPutEvaluations(configServiceClient, configEvent, evaluation); - } - - private String GetResourceType(JObject configItem) - { - return (String) configItem[RESOURCE_TYPE]; - } - - private void FailForIncompatibleEventTypes(JObject invokingEventObj) - { - String messageType = (String) invokingEventObj[MESSAGE_TYPE_PROPERTY]; - if (!IsCompatibleMessageType(messageType)) - { - throw new Exception(String.Format("Events with the message type '{0}' are not evaluated for this Config rule.", messageType)); - } - } - - private String GetResourceId(JObject configItem) - { - return (String) configItem[RESOURCE_ID]; - } - - private DateTime GetCiCapturedTime(JObject configItem) - { - return DateTime.Parse((String) configItem[CAPTURE_TIME_PATH]); - } - - private bool IsCompatibleMessageType(String messageType) - { - return String.Equals(MessageType.ConfigurationItemChangeNotification.ToString(), messageType); - } - - private bool IsEventNotApplicable(JObject configItem, bool eventLeftScope) - { - String status = configItem[STATUS_PATH].ToString(); - return (IsStatusNotApplicable(status) || eventLeftScope); - } - - private bool IsStatusNotApplicable(String status) - { - return String.Equals(RESOURCE_DELETED, status) - || String.Equals(RESOURCE_DELETED_NOT_RECORDED, status) - || String.Equals(RESOURCE_NOT_RECORDED, status); - } - - // Sends the evaluation results to AWS Config. - private async Task DoPutEvaluations(AmazonConfigServiceClient configClient, ConfigEvent configEvent, Evaluation evaluation) - { - Console.WriteLine("inside DoPutEvaluations..."); - PutEvaluationsRequest req = new PutEvaluationsRequest(); - req.Evaluations.Add(evaluation); - req.ResultToken = configEvent.ResultToken; - - - Task taskResp = configClient.PutEvaluationsAsync(req); - PutEvaluationsResponse response = await taskResp; - - // Ends the function execution if any evaluation results are not successfully reported. - if (response.FailedEvaluations.Count > 0) { - throw new Exception(String.Format( - "The following evaluations were not successfully reported to AWS Config: %s", - response.FailedEvaluations)); - } - } - - private DateTime GetDate(String dateString) - { - return DateTime.Parse(dateString, null, System.Globalization.DateTimeStyles.RoundtripKind); - } - - static void Main(string[] args) - { - Console.WriteLine("Hello World!"); - } - } -} diff --git a/rdk/template/runtime/dotnetcore1.0/RuleCode.cs b/rdk/template/runtime/dotnetcore1.0/RuleCode.cs deleted file mode 100755 index 4d376498..00000000 --- a/rdk/template/runtime/dotnetcore1.0/RuleCode.cs +++ /dev/null @@ -1,27 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; - -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Lambda.Core; -using Amazon.Lambda.Model; -using Amazon.Lambda.ConfigEvents; -using Newtonsoft.Json.Linq; - -namespace Rdk -{ - class RuleCode - { - public static ComplianceType EvaluateCompliance(JObject invokingEvent, JObject ruleParameters, ILambdaContext context) - { - context.Logger.LogLine("Beginning Custom Config Rule Evaluation"); - - /* - YOUR CODE GOES HERE! 
- */ - - return ComplianceType.NON_COMPLIANT; - } - } -} diff --git a/rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json b/rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json deleted file mode 100755 index 7cf6db07..00000000 --- a/rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "Information": [ - "This file provides default values for the deployment wizard inside Visual Studio and the AWS Lambda commands added to the .NET Core CLI.", - "To learn more about the Lambda commands with the .NET Core CLI execute the following command at the command line in the project root directory.", - - "dotnet lambda help", - - "All the command line options for the Lambda command can be specified in this file." - ], - - "profile":"default", - "region" : "us-west-2", - "configuration": "Release", - "framework": "netcoreapp1.0", - "function-runtime": "dotnetcore1.0", - "function-memory-size": 256, - "function-timeout": 30, - "function-handler": "csharp7.0::Rdk.CustomConfigHandler::FunctionHandler" -} diff --git a/rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj b/rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj deleted file mode 100644 index 08059c59..00000000 --- a/rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj +++ /dev/null @@ -1,28 +0,0 @@ - - - - netcoreapp1.0 - - - Exe - - - - - - - - - - - - - - - - - - - - - diff --git a/rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs b/rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs deleted file mode 100644 index d5fefc36..00000000 --- a/rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs +++ /dev/null @@ -1,189 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.IO; -using System.Text; - -using System.Threading.Tasks; - -using Amazon.Lambda.Serialization.Json; -using Amazon.Lambda.Core; - -using Amazon.Lambda.ConfigEvents; -using Amazon.CloudWatchEvents; -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Runtime; -using Amazon.Lambda.Model; -using Newtonsoft.Json.Linq; - -// Assembly attribute to enable the Lambda function's JSON input to be converted into a .NET class. -[assembly: LambdaSerializer(typeof(Amazon.Lambda.Serialization.Json.JsonSerializer))] - -namespace Rdk -{ - public class CustomConfigHandler - { - public const String AWS_REGION_PROPERTY = "AWS_DEFAULT_REGION"; - public const String MESSAGE_TYPE_PROPERTY = "messageType"; - public const String HOST_ID = "hostId"; - public const String PLACEMENT = "placement"; - public const String CONFIGURATION = "configuration"; - public const String IMAGE_ID = "imageId"; - public const String STATUS_PATH = "configurationItemStatus"; - public const String TENANCY = "tenancy"; - public const String RESOURCE_DELETED = "ResourceDeleted"; - public const String RESOURCE_DELETED_NOT_RECORDED = "ResourceDeletedNotRecorded"; - public const String CAPTURE_TIME_PATH = "configurationItemCaptureTime"; - public const String CONFIGURATION_ITEM = "configurationItem"; - public const String RESOURCE_ID = "resourceId"; - public const String RESOURCE_NOT_RECORDED = "ResourceNotRecorded"; - public const String RESOURCE_TYPE = "resourceType"; - - - IAmazonConfigService ConfigService { get; set; } - - /// - /// Default constructor. This constructor is used by Lambda to construct the instance. 
When invoked in a Lambda environment - /// the AWS credentials will come from the IAM role associated with the function and the AWS region will be set to the - /// region the Lambda function is executed in. - /// - public CustomConfigHandler() - { - Console.WriteLine("inside constructor..."); - } - - /// - /// Constructs an instance with a preconfigured S3 client. This can be used for testing the outside of the Lambda environment. - /// - /// - public CustomConfigHandler(IAmazonConfigService configService) - { - this.ConfigService = configService; - } - - /// - /// This method is called for every Lambda invocation. This method takes in an Config event object and can be used - /// to respond to Config notifications. - /// - /// - /// - /// Nothing - public async Task FunctionHandler(ConfigEvent evnt, ILambdaContext context) - { - Console.WriteLine("inside function handler..."); - Amazon.RegionEndpoint region = Amazon.RegionEndpoint.GetBySystemName(System.Environment.GetEnvironmentVariable(AWS_REGION_PROPERTY)); - AmazonConfigServiceClient configServiceClient = new AmazonConfigServiceClient(region); - await DoHandle(evnt, context, configServiceClient); - } - - private async Task DoHandle(ConfigEvent configEvent, ILambdaContext context, AmazonConfigServiceClient configServiceClient) - { - JObject ruleParamsObj; - JObject configItem; - - if (configEvent.RuleParameters != null){ - ruleParamsObj = JObject.Parse(configEvent.RuleParameters.ToString()); - } else { - ruleParamsObj = new JObject(); - } - - JObject invokingEventObj = JObject.Parse(configEvent.InvokingEvent.ToString()); - if(invokingEventObj["configurationItem"] != null){ - configItem = JObject.Parse(invokingEventObj[CONFIGURATION_ITEM].ToString()); - } else { - configItem = new JObject(); - } - - FailForIncompatibleEventTypes(invokingEventObj); - ComplianceType myCompliance = ComplianceType.NOT_APPLICABLE; - - if (!IsEventNotApplicable(configItem, configEvent.EventLeftScope)) - { - myCompliance = RuleCode.EvaluateCompliance(invokingEventObj, ruleParamsObj, context); - } - - // Associates the evaluation result with the AWS account published in the event. 
- Evaluation evaluation = new Evaluation { - ComplianceResourceId = GetResourceId(configItem), - ComplianceResourceType = GetResourceType(configItem), - OrderingTimestamp = GetCiCapturedTime(configItem), - ComplianceType = myCompliance - }; - - await DoPutEvaluations(configServiceClient, configEvent, evaluation); - } - - private String GetResourceType(JObject configItem) - { - return (String) configItem[RESOURCE_TYPE]; - } - - private void FailForIncompatibleEventTypes(JObject invokingEventObj) - { - String messageType = (String) invokingEventObj[MESSAGE_TYPE_PROPERTY]; - if (!IsCompatibleMessageType(messageType)) - { - throw new Exception(String.Format("Events with the message type '{0}' are not evaluated for this Config rule.", messageType)); - } - } - - private String GetResourceId(JObject configItem) - { - return (String) configItem[RESOURCE_ID]; - } - - private DateTime GetCiCapturedTime(JObject configItem) - { - return DateTime.Parse((String) configItem[CAPTURE_TIME_PATH]); - } - - private bool IsCompatibleMessageType(String messageType) - { - return String.Equals(MessageType.ConfigurationItemChangeNotification.ToString(), messageType); - } - - private bool IsEventNotApplicable(JObject configItem, bool eventLeftScope) - { - String status = configItem[STATUS_PATH].ToString(); - return (IsStatusNotApplicable(status) || eventLeftScope); - } - - private bool IsStatusNotApplicable(String status) - { - return String.Equals(RESOURCE_DELETED, status) - || String.Equals(RESOURCE_DELETED_NOT_RECORDED, status) - || String.Equals(RESOURCE_NOT_RECORDED, status); - } - - // Sends the evaluation results to AWS Config. - private async Task DoPutEvaluations(AmazonConfigServiceClient configClient, ConfigEvent configEvent, Evaluation evaluation) - { - Console.WriteLine("inside DoPutEvaluations..."); - PutEvaluationsRequest req = new PutEvaluationsRequest(); - req.Evaluations.Add(evaluation); - req.ResultToken = configEvent.ResultToken; - - - Task taskResp = configClient.PutEvaluationsAsync(req); - PutEvaluationsResponse response = await taskResp; - - // Ends the function execution if any evaluation results are not successfully reported. - if (response.FailedEvaluations.Count > 0) { - throw new Exception(String.Format( - "The following evaluations were not successfully reported to AWS Config: %s", - response.FailedEvaluations)); - } - } - - private DateTime GetDate(String dateString) - { - return DateTime.Parse(dateString, null, System.Globalization.DateTimeStyles.RoundtripKind); - } - - static void Main(string[] args) - { - Console.WriteLine("Hello World!"); - } - } -} diff --git a/rdk/template/runtime/dotnetcore2.0/RuleCode.cs b/rdk/template/runtime/dotnetcore2.0/RuleCode.cs deleted file mode 100644 index 4d376498..00000000 --- a/rdk/template/runtime/dotnetcore2.0/RuleCode.cs +++ /dev/null @@ -1,27 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; - -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Lambda.Core; -using Amazon.Lambda.Model; -using Amazon.Lambda.ConfigEvents; -using Newtonsoft.Json.Linq; - -namespace Rdk -{ - class RuleCode - { - public static ComplianceType EvaluateCompliance(JObject invokingEvent, JObject ruleParameters, ILambdaContext context) - { - context.Logger.LogLine("Beginning Custom Config Rule Evaluation"); - - /* - YOUR CODE GOES HERE! 
- */ - - return ComplianceType.NON_COMPLIANT; - } - } -} diff --git a/rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json b/rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json deleted file mode 100644 index 7cf6db07..00000000 --- a/rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "Information": [ - "This file provides default values for the deployment wizard inside Visual Studio and the AWS Lambda commands added to the .NET Core CLI.", - "To learn more about the Lambda commands with the .NET Core CLI execute the following command at the command line in the project root directory.", - - "dotnet lambda help", - - "All the command line options for the Lambda command can be specified in this file." - ], - - "profile":"default", - "region" : "us-west-2", - "configuration": "Release", - "framework": "netcoreapp1.0", - "function-runtime": "dotnetcore1.0", - "function-memory-size": 256, - "function-timeout": 30, - "function-handler": "csharp7.0::Rdk.CustomConfigHandler::FunctionHandler" -} diff --git a/rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj b/rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj deleted file mode 100644 index dfbe6d79..00000000 --- a/rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj +++ /dev/null @@ -1,28 +0,0 @@ - - - - netcoreapp2.0 - - - Exe - - - - - - - - - - - - - - - - - - - - - diff --git a/rdk/template/runtime/nodejs4.3/rule_code.js b/rdk/template/runtime/nodejs4.3/rule_code.js deleted file mode 100644 index a8840f66..00000000 --- a/rdk/template/runtime/nodejs4.3/rule_code.js +++ /dev/null @@ -1,183 +0,0 @@ -'use strict'; - -const aws = require('aws-sdk'); - -const config = new aws.ConfigService(); - -function evaluateCompliance(configurationItem, ruleParameters, callback) { - - /* - ############################### - # Add your custom logic here. # - ############################### - */ - - callback('NOT_APPLICABLE'); -} - -//Boilerplate Code - You should not need to change anything below this comment. -function rule_handler(event, context, callback) { - //console.info(event); - const invokingEvent = JSON.parse(event.invokingEvent); - const configItem = invokingEvent.configurationItem; - const ruleParameters = JSON.parse(event.ruleParameters); - evaluateCompliance(configItem, ruleParameters, function(results){ - console.log(results); - callback(null, results); - }); -} - -// Helper function used to validate input -function checkDefined(reference, referenceName) { - if (!reference) { - throw new Error(`Error: ${referenceName} is not defined`); - } - return reference; -} - -// Check whether the message is OversizedConfigurationItemChangeNotification or not -function isOverSizedChangeNotification(messageType) { - checkDefined(messageType, 'messageType'); - return messageType === 'OversizedConfigurationItemChangeNotification'; -} - -// Check whether the message is a ScheduledNotification or not -function isScheduledNotification(messageType) { - checkDefined(messageType, 'messageType'); - return messageType === 'ScheduledNotification' -} - -// Get configurationItem using getResourceConfigHistory API. 
-function getConfiguration(resourceType, resourceId, configurationCaptureTime, callback) { - config.getResourceConfigHistory({ resourceType, resourceId, laterTime: new Date(configurationCaptureTime), limit: 1 }, (err, data) => { - if (err) { - callback(err, null); - } - const configurationItem = data.configurationItems[0]; - callback(null, configurationItem); - }); -} - -// Convert from the API model to the original invocation model -/*eslint no-param-reassign: ["error", { "props": false }]*/ -function convertApiConfiguration(apiConfiguration) { - apiConfiguration.awsAccountId = apiConfiguration.accountId; - apiConfiguration.ARN = apiConfiguration.arn; - apiConfiguration.configurationStateMd5Hash = apiConfiguration.configurationItemMD5Hash; - apiConfiguration.configurationItemVersion = apiConfiguration.version; - apiConfiguration.configuration = JSON.parse(apiConfiguration.configuration); - if ({}.hasOwnProperty.call(apiConfiguration, 'relationships')) { - for (let i = 0; i < apiConfiguration.relationships.length; i++) { - apiConfiguration.relationships[i].name = apiConfiguration.relationships[i].relationshipName; - } - } - return apiConfiguration; -} - -// Based on the type of message get the configuration item either from configurationItem in the invoking event or using the getResourceConfigHistory API in getConfiguration function. -function getConfigurationItem(invokingEvent, callback) { - checkDefined(invokingEvent, 'invokingEvent'); - if (isOverSizedChangeNotification(invokingEvent.messageType)) { - const configurationItemSummary = checkDefined(invokingEvent.configurationItemSummary, 'configurationItemSummary'); - getConfiguration(configurationItemSummary.resourceType, configurationItemSummary.resourceId, configurationItemSummary.configurationItemCaptureTime, (err, apiConfigurationItem) => { - if (err) { - callback(err); - } - const configurationItem = convertApiConfiguration(apiConfigurationItem); - callback(null, configurationItem); - }); - } else if (isScheduledNotification(invokingEvent.messageType)) { - callback(null, null) - } else { - checkDefined(invokingEvent.configurationItem, 'configurationItem'); - callback(null, invokingEvent.configurationItem); - } -} - -// Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. 
-function isApplicable(configurationItem, event) { - //checkDefined(configurationItem, 'configurationItem'); - checkDefined(event, 'event'); - //const status = configurationItem.configurationItemStatus; - const eventLeftScope = event.eventLeftScope; - //return (status === 'OK' || status === 'ResourceDiscovered') && eventLeftScope === false; - return (eventLeftScope === false); -} - -// This is the handler that's invoked by Lambda -// Most of this code is boilerplate; use as is -exports.lambda_handler = function(event, context, callback) { - checkDefined(event, 'event'); - const invokingEvent = JSON.parse(event.invokingEvent); - const ruleParameters = JSON.parse(event.ruleParameters); - getConfigurationItem(invokingEvent, (err, configurationItem) => { - if (err) { - callback(err); - } - //let compliance = 'NOT_APPLICABLE'; - if (isApplicable(configurationItem, event)) { - invokingEvent.configurationItem = configurationItem; - event.invokingEvent = JSON.stringify(invokingEvent); - rule_handler(event, context, (err, compliance_results) => { - if (err) { - callback(err); - } - //compliance = computedCompliance; - var putEvaluationsRequest = {}; - - // Put together the request that reports the evaluation status - if (typeof compliance_results === 'string' || compliance_results instanceof String){ - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: compliance_results, - OrderingTimestamp: configurationItem.configurationItemCaptureTime - } - ]; - } else if (compliance_results instanceof Array) { - putEvaluationsRequest.Evaluations = []; - - var fields = ['ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp']; - - for (var i = 0; i < compliance_results.length; i++) { - var missing_fields = false; - for (var j = 0; j < fields.length; j++) { - if (!compliance_results[i].hasOwnProperty(fields[j])) { - console.info("Missing " + fields[j] + " from custom evaluation."); - missing_fields = true; - } - } - - if (!missing_fields){ - putEvaluationsRequest.Evaluations.push(compliance_results[i]); - } - } - } else { - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: 'INSUFFICIENT_DATA', - OrderingTimestamp: configurationItem.configurationItemCaptureTime - } - ]; - } - - putEvaluationsRequest.ResultToken = event.resultToken; - - // Invoke the Config API to report the result of the evaluation - config.putEvaluations(putEvaluationsRequest, (error, data) => { - if (error) { - callback(error, null); - } else if (data.FailedEvaluations.length > 0) { - // Ends the function execution if any evaluation results are not successfully reported. - callback(JSON.stringify(data), null); - } else { - callback(null, data); - } - }); - }); - } - }); -}; diff --git a/rdk/template/runtime/nodejs6.10/rule_code.js b/rdk/template/runtime/nodejs6.10/rule_code.js deleted file mode 100644 index bb90065a..00000000 --- a/rdk/template/runtime/nodejs6.10/rule_code.js +++ /dev/null @@ -1,215 +0,0 @@ -"use strict"; - -const aws = require("aws-sdk"); - -const config = new aws.ConfigService(); - -function evaluateCompliance(configurationItem, ruleParameters, callback) { - /* - ############################### - # Add your custom logic here. 
# - ############################### - */ - - callback("NOT_APPLICABLE"); -} - -//Boilerplate Code - You should not need to change anything below this comment. -function rule_handler(event, context, callback) { - //console.info(event); - const invokingEvent = JSON.parse(event.invokingEvent); - const configItem = invokingEvent.configurationItem; - const ruleParameters = JSON.parse(event.ruleParameters); - evaluateCompliance(configItem, ruleParameters, function (results) { - console.log(results); - callback(null, results); - }); -} - -// Helper function used to validate input -function checkDefined(reference, referenceName) { - if (!reference) { - throw new Error(`Error: ${referenceName} is not defined`); - } - return reference; -} - -// Check whether the message is OversizedConfigurationItemChangeNotification or not -function isOverSizedChangeNotification(messageType) { - checkDefined(messageType, "messageType"); - return messageType === "OversizedConfigurationItemChangeNotification"; -} - -// Check whether the message is a ScheduledNotification or not -function isScheduledNotification(messageType) { - checkDefined(messageType, "messageType"); - return messageType === "ScheduledNotification"; -} - -// Get configurationItem using getResourceConfigHistory API. -function getConfiguration( - resourceType, - resourceId, - configurationCaptureTime, - callback -) { - config.getResourceConfigHistory( - { - resourceType, - resourceId, - laterTime: new Date(configurationCaptureTime), - limit: 1, - }, - (err, data) => { - if (err) { - callback(err, null); - } - const configurationItem = data.configurationItems[0]; - callback(null, configurationItem); - } - ); -} - -// Convert from the API model to the original invocation model -/*eslint no-param-reassign: ["error", { "props": false }]*/ -function convertApiConfiguration(apiConfiguration) { - apiConfiguration.awsAccountId = apiConfiguration.accountId; - apiConfiguration.ARN = apiConfiguration.arn; - apiConfiguration.configurationStateMd5Hash = - apiConfiguration.configurationItemMD5Hash; - apiConfiguration.configurationItemVersion = apiConfiguration.version; - apiConfiguration.configuration = JSON.parse(apiConfiguration.configuration); - if ({}.hasOwnProperty.call(apiConfiguration, "relationships")) { - for (let i = 0; i < apiConfiguration.relationships.length; i++) { - apiConfiguration.relationships[i].name = - apiConfiguration.relationships[i].relationshipName; - } - } - return apiConfiguration; -} - -// Based on the type of message get the configuration item either from configurationItem in the invoking event or using the getResourceConfigHistory API in getConfiguration function. 
-function getConfigurationItem(invokingEvent, callback) { - checkDefined(invokingEvent, "invokingEvent"); - if (isOverSizedChangeNotification(invokingEvent.messageType)) { - const configurationItemSummary = checkDefined( - invokingEvent.configurationItemSummary, - "configurationItemSummary" - ); - getConfiguration( - configurationItemSummary.resourceType, - configurationItemSummary.resourceId, - configurationItemSummary.configurationItemCaptureTime, - (err, apiConfigurationItem) => { - if (err) { - callback(err); - } - const configurationItem = convertApiConfiguration(apiConfigurationItem); - callback(null, configurationItem); - } - ); - } else if (isScheduledNotification(invokingEvent.messageType)) { - callback(null, null); - } else { - checkDefined(invokingEvent.configurationItem, "configurationItem"); - callback(null, invokingEvent.configurationItem); - } -} - -// Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. -function isApplicable(configurationItem, event) { - //checkDefined(configurationItem, 'configurationItem'); - checkDefined(event, "event"); - //const status = configurationItem.configurationItemStatus; - const eventLeftScope = event.eventLeftScope; - //return (status === 'OK' || status === 'ResourceDiscovered') && eventLeftScope === false; - return eventLeftScope === false; -} - -// This is the handler that's invoked by Lambda -// Most of this code is boilerplate; use as is -exports.lambda_handler = function (event, context, callback) { - checkDefined(event, "event"); - const invokingEvent = JSON.parse(event.invokingEvent); - const ruleParameters = JSON.parse(event.ruleParameters); - getConfigurationItem(invokingEvent, (err, configurationItem) => { - if (err) { - callback(err); - } - //let compliance = 'NOT_APPLICABLE'; - if (isApplicable(configurationItem, event)) { - invokingEvent.configurationItem = configurationItem; - event.invokingEvent = JSON.stringify(invokingEvent); - rule_handler(event, context, (err, compliance_results) => { - if (err) { - callback(err); - } - //compliance = computedCompliance; - var putEvaluationsRequest = {}; - - // Put together the request that reports the evaluation status - if ( - typeof compliance_results === "string" || - compliance_results instanceof String - ) { - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: compliance_results, - OrderingTimestamp: configurationItem.configurationItemCaptureTime, - }, - ]; - } else if (compliance_results instanceof Array) { - putEvaluationsRequest.Evaluations = []; - - var fields = [ - "ComplianceResourceType", - "ComplianceResourceId", - "ComplianceType", - "OrderingTimestamp", - ]; - - for (var i = 0; i < compliance_results.length; i++) { - var missing_fields = false; - for (var j = 0; j < fields.length; j++) { - if (!compliance_results[i].hasOwnProperty(fields[j])) { - console.info( - "Missing " + fields[j] + " from custom evaluation." 
- ); - missing_fields = true; - } - } - - if (!missing_fields) { - putEvaluationsRequest.Evaluations.push(compliance_results[i]); - } - } - } else { - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: "INSUFFICIENT_DATA", - OrderingTimestamp: configurationItem.configurationItemCaptureTime, - }, - ]; - } - - putEvaluationsRequest.ResultToken = event.resultToken; - - // Invoke the Config API to report the result of the evaluation - config.putEvaluations(putEvaluationsRequest, (error, data) => { - if (error) { - callback(error, null); - } else if (data.FailedEvaluations.length > 0) { - // Ends the function execution if any evaluation results are not successfully reported. - callback(JSON.stringify(data), null); - } else { - callback(null, data); - } - }); - }); - } - }); -}; diff --git a/rdk/template/runtime/python3.10-lib/rule_code.py b/rdk/template/runtime/python3.10-lib/rule_code.py new file mode 100644 index 00000000..90fdebe2 --- /dev/null +++ b/rdk/template/runtime/python3.10-lib/rule_code.py @@ -0,0 +1,25 @@ +from rdklib import Evaluator, Evaluation, ConfigRule, ComplianceType +<%ApplicableResources1%> +class <%RuleName%>(ConfigRule): + def evaluate_change(self, event, client_factory, configuration_item, valid_rule_parameters): + ############################### + # Add your custom logic here. # + ############################### + + return [Evaluation(ComplianceType.NOT_APPLICABLE)] + + #def evaluate_periodic(self, event, client_factory, valid_rule_parameters): + # pass + + def evaluate_parameters(self, rule_parameters): + valid_rule_parameters = rule_parameters + return valid_rule_parameters + + +################################ +# DO NOT MODIFY ANYTHING BELOW # +################################ +def lambda_handler(event, context): + my_rule = <%RuleName%>() + evaluator = Evaluator(my_rule<%ApplicableResources2%>) + return evaluator.handle(event, context) diff --git a/rdk/template/runtime/python3.10-lib/rule_test.py b/rdk/template/runtime/python3.10-lib/rule_test.py new file mode 100644 index 00000000..db0cf30c --- /dev/null +++ b/rdk/template/runtime/python3.10-lib/rule_test.py @@ -0,0 +1,157 @@ +import datetime +import json +import logging +import unittest +from unittest.mock import patch, MagicMock +from botocore.exceptions import ClientError +from rdklib import Evaluation, ComplianceType +import rdklibtest + +############## +# Parameters # +############## + +# Define the default resource to report to Config Rules +# TODO - Replace with your resource type +RESOURCE_TYPE = "AWS::IAM::Role" + +############# +# Main Code # +############# + +MODULE = __import__("check_security_hub_aggregator") +RULE = MODULE.check_security_hub_aggregator() + +CLIENT_FACTORY = MagicMock() + +# example for mocking IAM API calls +IAM_CLIENT_MOCK = MagicMock() +# STS client for getting account ID +STS_CLIENT_MOCK = MagicMock() + + +def mock_get_client(client_name, *args, **kwargs): + if client_name == "iam": + return IAM_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK + raise Exception("Attempting to create an unknown client") + + +@patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) +class ComplianceTest(unittest.TestCase): + rule_parameters = { + "SomeParameterKey": "SomeParameterValue", + "SomeParameterKey2": "SomeParameterValue2", + } + + role_sample_configuration_abridged = {"arn": "some-arn", "roleName": "testrole"} + + 
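Stepping back to the new `python3.10-lib` `rule_code.py` template shown above: it ships `evaluate_change` as a stub that returns `NOT_APPLICABLE`. The sketch below is one hedged illustration of what a filled-in rule might look like; the class name, the `DesiredRoleNamePrefix` parameter, and the role-name check are assumptions made for the example and are not part of the template.

```python
from rdklib import ConfigRule, ComplianceType, Evaluation


class EXAMPLE_FILLED_IN_RULE(ConfigRule):  # stands in for the <%RuleName%> placeholder
    def evaluate_change(self, event, client_factory, configuration_item, valid_rule_parameters):
        # Assumes the full configuration item is passed, shaped like the sample
        # invoking event in the accompanying rule_test.py.
        role_name = configuration_item.get("configuration", {}).get("roleName", "")
        prefix = valid_rule_parameters.get("DesiredRoleNamePrefix", "")
        if role_name.startswith(prefix):
            return [Evaluation(ComplianceType.COMPLIANT)]
        return [
            Evaluation(
                complianceType=ComplianceType.NON_COMPLIANT,
                annotation="Role name does not start with '{}'.".format(prefix),
            )
        ]

    def evaluate_parameters(self, rule_parameters):
        # Reject malformed input before any evaluation runs.
        if not isinstance(rule_parameters.get("DesiredRoleNamePrefix", ""), str):
            raise ValueError("DesiredRoleNamePrefix must be a string")
        return rule_parameters
```

Routing `rule_parameters` through `evaluate_parameters` keeps bad input out of every subsequent evaluation, which is why the template keeps that hook even when it only passes the dictionary through.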
invoking_event_iam_role_sample = { + "configurationItem": { + "relatedEvents": [], + "relationships": [], + "configuration": role_sample_configuration_abridged, + "tags": {}, + "configurationItemCaptureTime": "2018-07-02T03:37:52.418Z", + "awsAccountId": "123456789012", + "configurationItemStatus": "ResourceDiscovered", + "resourceType": "AWS::IAM::Role", + "resourceId": "some-resource-id", + "resourceName": "some-resource-name", + "ARN": "some-arn", + }, + "notificationCreationTime": "2018-07-02T23:05:34.445Z", + "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", + } + + list_roles_response = { + "Roles": [ + { + "Path": "/", + "RoleName": "testrole", + "RoleId": "some-role-id", + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), + "Description": "this is a test role", + "MaxSessionDuration": 123, + "Tags": [ + {"Key": "one_tag", "Value": "its_value"}, + ], + "RoleLastUsed": { + "LastUsedDate": datetime.datetime(2015, 1, 1), + "Region": "us-east-1", + }, + }, + ] + } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} + + def setUp(self): + STS_CLIENT_MOCK.reset_mock() + + def test_sample(self): + self.assertTrue(True) + + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + +if __name__ == "__main__": + unittest.main() diff --git 
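The rule template above ships `evaluate_periodic` commented out, while the accompanying `rule_test.py` exercises it by mocking `list_roles` and `get_caller_identity` through the client factory. A hedged sketch of what enabling it could look like; the account-level check is illustrative only, and `list_roles` pagination is omitted for brevity.

```python
from rdklib import ConfigRule, ComplianceType, Evaluation

ACCOUNT_RESOURCE_TYPE = "AWS::::Account"  # resource type the test expects for periodic results


class EXAMPLE_FILLED_IN_RULE(ConfigRule):  # stands in for the <%RuleName%> placeholder
    def evaluate_periodic(self, event, client_factory, valid_rule_parameters):
        # Build clients through the factory so the unit test's mocks are picked up.
        iam_client = client_factory.build_client("iam")
        sts_client = client_factory.build_client("sts")
        account_id = sts_client.get_caller_identity()["Account"]

        # Illustrative account-level check based on the list_roles output.
        roles = iam_client.list_roles().get("Roles", [])
        compliance = ComplianceType.COMPLIANT if roles else ComplianceType.NOT_APPLICABLE
        return [
            Evaluation(
                complianceType=compliance,
                resourceId=account_id,
                resourceType=ACCOUNT_RESOURCE_TYPE,
            )
        ]
```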
a/rdk/template/runtime/python3.10/rule_code.py b/rdk/template/runtime/python3.10/rule_code.py new file mode 100644 index 00000000..682297b0 --- /dev/null +++ b/rdk/template/runtime/python3.10/rule_code.py @@ -0,0 +1,437 @@ +import json +import sys +import datetime +import boto3 +import botocore + +try: + import liblogging +except ImportError: + pass + +############## +# Parameters # +############## + +# Define the default resource to report to Config Rules +DEFAULT_RESOURCE_TYPE = "AWS::::Account" + +# Set to True to get the lambda to assume the Role attached on the Config Service (useful for cross-account). +ASSUME_ROLE_MODE = False + +# Other parameters (no change needed) +CONFIG_ROLE_TIMEOUT_SECONDS = 900 + +############# +# Main Code # +############# + + +def evaluate_compliance(event, configuration_item, valid_rule_parameters): + """Form the evaluation(s) to be return to Config Rules + + Return either: + None -- when no result needs to be displayed + a string -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE + a dictionary -- the evaluation dictionary, usually built by build_evaluation_from_config_item() + a list of dictionary -- a list of evaluation dictionary , usually built by build_evaluation() + + Keyword arguments: + event -- the event variable given in the lambda handler + configuration_item -- the configurationItem dictionary in the invokingEvent + valid_rule_parameters -- the output of the evaluate_parameters() representing validated parameters of the Config Rule + + Advanced Notes: + 1 -- if a resource is deleted and generate a configuration change with ResourceDeleted status, the Boilerplate code will put a NOT_APPLICABLE on this resource automatically. + 2 -- if a None or a list of dictionary is returned, the old evaluation(s) which are not returned in the new evaluation list are returned as NOT_APPLICABLE by the Boilerplate code + 3 -- if None or an empty string, list or dict is returned, the Boilerplate code will put a "shadow" evaluation to feedback that the evaluation took place properly + """ + + ############################### + # Add your custom logic here. # + ############################### + + return "NOT_APPLICABLE" + + +def evaluate_parameters(rule_parameters): + """Evaluate the rule parameters dictionary validity. Raise a ValueError for invalid parameters. + + Return: + anything suitable for the evaluate_compliance() + + Keyword arguments: + rule_parameters -- the Key/Value dictionary of the Config Rules parameters + """ + valid_rule_parameters = rule_parameters + return valid_rule_parameters + + +#################### +# Helper Functions # +#################### + +# Build an error to be displayed in the logs when the parameter is invalid. +def build_parameters_value_error_response(ex): + """Return an error dictionary when the evaluate_parameters() raises a ValueError. + + Keyword arguments: + ex -- Exception text + """ + return build_error_response( + internal_error_message="Parameter value is invalid", + internal_error_details="An ValueError was raised during the validation of the Parameter value", + customer_error_code="InvalidParameterValueException", + customer_error_message=str(ex), + ) + + +# This gets the client after assuming the Config service role +# either in the same AWS account or cross-account. +def get_client(service, event, region=None): + """Return the service boto client. It should be used instead of directly calling the client. 
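The `evaluate_compliance()` docstring near the top of this plain `python3.10` template spells out the accepted return values: `None`, a compliance string, a single evaluation dict, or a list of evaluation dicts. As one illustrative way to fill in the stub (the S3 check is an assumption for the example; `build_evaluation()` is the helper defined later in this same file, so no extra imports are needed when this lives inside the generated `rule_code.py`):

```python
def evaluate_compliance(event, configuration_item, valid_rule_parameters):
    # Scheduled (periodic) invocation: no configuration item is available,
    # so report at the account level with build_evaluation().
    if not configuration_item:
        return [
            build_evaluation(
                event["accountId"],
                "NOT_APPLICABLE",
                event,
                resource_type="AWS::::Account",
                annotation="Nothing to evaluate on a scheduled run in this example.",
            )
        ]

    # Configuration-change invocation: returning a plain string is enough;
    # the boilerplate wraps it with build_evaluation_from_config_item().
    if configuration_item["resourceType"] != "AWS::S3::Bucket":
        return "NOT_APPLICABLE"
    return "COMPLIANT"
```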
+ + Keyword arguments: + service -- the service name used for calling the boto.client() + event -- the event variable given in the lambda handler + region -- the region where the client is called (default: None) + """ + if not ASSUME_ROLE_MODE: + return boto3.client(service, region) + credentials = get_assume_role_credentials(get_execution_role_arn(event), region) + return boto3.client( + service, + aws_access_key_id=credentials["AccessKeyId"], + aws_secret_access_key=credentials["SecretAccessKey"], + aws_session_token=credentials["SessionToken"], + region_name=region, + ) + + +# This generates an evaluation for config +def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None): + """Form an evaluation as a dictionary. Usually suited to report on scheduled rules. + + Keyword arguments: + resource_id -- the unique id of the resource to report + compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE + event -- the event variable given in the lambda handler + resource_type -- the CloudFormation resource type (or AWS::::Account) to report on the rule (default DEFAULT_RESOURCE_TYPE) + annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer. + """ + eval_cc = {} + if annotation: + eval_cc["Annotation"] = build_annotation(annotation) + eval_cc["ComplianceResourceType"] = resource_type + eval_cc["ComplianceResourceId"] = resource_id + eval_cc["ComplianceType"] = compliance_type + eval_cc["OrderingTimestamp"] = str(json.loads(event["invokingEvent"])["notificationCreationTime"]) + return eval_cc + + +def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None): + """Form an evaluation as a dictionary. Usually suited to report on configuration change rules. + + Keyword arguments: + configuration_item -- the configurationItem dictionary in the invokingEvent + compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE + annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer. 
+ """ + eval_ci = {} + if annotation: + eval_ci["Annotation"] = build_annotation(annotation) + eval_ci["ComplianceResourceType"] = configuration_item["resourceType"] + eval_ci["ComplianceResourceId"] = configuration_item["resourceId"] + eval_ci["ComplianceType"] = compliance_type + eval_ci["OrderingTimestamp"] = configuration_item["configurationItemCaptureTime"] + return eval_ci + + +#################### +# Boilerplate Code # +#################### + +# Get execution role for Lambda function +def get_execution_role_arn(event): + role_arn = None + if "ruleParameters" in event: + rule_params = json.loads(event["ruleParameters"]) + role_name = rule_params.get("ExecutionRoleName") + if role_name: + execution_role_prefix = event["executionRoleArn"].split("/")[0] + role_arn = "{}/{}".format(execution_role_prefix, role_name) + + if not role_arn: + role_arn = event["executionRoleArn"] + + return role_arn + + +# Build annotation within Service constraints +def build_annotation(annotation_string): + if len(annotation_string) > 256: + return annotation_string[:244] + " [truncated]" + return annotation_string + + +# Helper function used to validate input +def check_defined(reference, reference_name): + if not reference: + raise Exception("Error: ", reference_name, "is not defined") + return reference + + +# Check whether the message is OversizedConfigurationItemChangeNotification or not +def is_oversized_changed_notification(message_type): + check_defined(message_type, "messageType") + return message_type == "OversizedConfigurationItemChangeNotification" + + +# Check whether the message is a ScheduledNotification or not. +def is_scheduled_notification(message_type): + check_defined(message_type, "messageType") + return message_type == "ScheduledNotification" + + +# Get configurationItem using getResourceConfigHistory API +# in case of OversizedConfigurationItemChangeNotification +def get_configuration(resource_type, resource_id, configuration_capture_time): + result = AWS_CONFIG_CLIENT.get_resource_config_history( + resourceType=resource_type, resourceId=resource_id, laterTime=configuration_capture_time, limit=1 + ) + configuration_item = result["configurationItems"][0] + return convert_api_configuration(configuration_item) + + +# Convert from the API model to the original invocation model +def convert_api_configuration(configuration_item): + for k, v in configuration_item.items(): + if isinstance(v, datetime.datetime): + configuration_item[k] = str(v) + configuration_item["awsAccountId"] = configuration_item["accountId"] + configuration_item["ARN"] = configuration_item["arn"] + configuration_item["configurationStateMd5Hash"] = configuration_item["configurationItemMD5Hash"] + configuration_item["configurationItemVersion"] = configuration_item["version"] + configuration_item["configuration"] = json.loads(configuration_item["configuration"]) + if "relationships" in configuration_item: + for i in range(len(configuration_item["relationships"])): + configuration_item["relationships"][i]["name"] = configuration_item["relationships"][i]["relationshipName"] + return configuration_item + + +# Based on the type of message get the configuration item +# either from configurationItem in the invoking event +# or using the getResourceConfigHistory API in getConfiguration function. 
+def get_configuration_item(invoking_event): + check_defined(invoking_event, "invokingEvent") + if is_oversized_changed_notification(invoking_event["messageType"]): + configuration_item_summary = check_defined( + invoking_event["configurationItemSummary"], "configurationItemSummary" + ) + return get_configuration( + configuration_item_summary["resourceType"], + configuration_item_summary["resourceId"], + configuration_item_summary["configurationItemCaptureTime"], + ) + if is_scheduled_notification(invoking_event["messageType"]): + return None + return check_defined(invoking_event["configurationItem"], "configurationItem") + + +# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. +def is_applicable(configuration_item, event): + try: + check_defined(configuration_item, "configurationItem") + check_defined(event, "event") + except: + return True + status = configuration_item["configurationItemStatus"] + event_left_scope = event["eventLeftScope"] + if status == "ResourceDeleted": + print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.") + + return status in ("OK", "ResourceDiscovered") and not event_left_scope + + +def get_assume_role_credentials(role_arn, region=None): + sts_client = boto3.client("sts", region) + try: + assume_role_response = sts_client.assume_role( + RoleArn=role_arn, RoleSessionName="configLambdaExecution", DurationSeconds=CONFIG_ROLE_TIMEOUT_SECONDS + ) + if "liblogging" in sys.modules: + liblogging.logSession(role_arn, assume_role_response) + return assume_role_response["Credentials"] + except botocore.exceptions.ClientError as ex: + # Scrub error message for any internal account info leaks + print(str(ex)) + if "AccessDenied" in ex.response["Error"]["Code"]: + ex.response["Error"]["Message"] = "AWS Config does not have permission to assume the IAM role." + else: + ex.response["Error"]["Message"] = "InternalError" + ex.response["Error"]["Code"] = "InternalError" + raise ex + + +# This removes older evaluation (usually useful for periodic rule not reporting on AWS::::Account). 
+def clean_up_old_evaluations(latest_evaluations, event): + + cleaned_evaluations = [] + + old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule( + ConfigRuleName=event["configRuleName"], ComplianceTypes=["COMPLIANT", "NON_COMPLIANT"], Limit=100 + ) + + old_eval_list = [] + + while True: + for old_result in old_eval["EvaluationResults"]: + old_eval_list.append(old_result) + if "NextToken" in old_eval: + next_token = old_eval["NextToken"] + old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule( + ConfigRuleName=event["configRuleName"], + ComplianceTypes=["COMPLIANT", "NON_COMPLIANT"], + Limit=100, + NextToken=next_token, + ) + else: + break + + for old_eval in old_eval_list: + old_resource_id = old_eval["EvaluationResultIdentifier"]["EvaluationResultQualifier"]["ResourceId"] + newer_founded = False + for latest_eval in latest_evaluations: + if old_resource_id == latest_eval["ComplianceResourceId"]: + newer_founded = True + if not newer_founded: + cleaned_evaluations.append(build_evaluation(old_resource_id, "NOT_APPLICABLE", event)) + + return cleaned_evaluations + latest_evaluations + + +def lambda_handler(event, context): + if "liblogging" in sys.modules: + liblogging.logEvent(event) + + global AWS_CONFIG_CLIENT + + # print(event) + check_defined(event, "event") + invoking_event = json.loads(event["invokingEvent"]) + rule_parameters = {} + if "ruleParameters" in event: + rule_parameters = json.loads(event["ruleParameters"]) + + try: + valid_rule_parameters = evaluate_parameters(rule_parameters) + except ValueError as ex: + return build_parameters_value_error_response(ex) + + try: + AWS_CONFIG_CLIENT = get_client("config", event) + if invoking_event["messageType"] in [ + "ConfigurationItemChangeNotification", + "ScheduledNotification", + "OversizedConfigurationItemChangeNotification", + ]: + configuration_item = get_configuration_item(invoking_event) + if is_applicable(configuration_item, event): + compliance_result = evaluate_compliance(event, configuration_item, valid_rule_parameters) + else: + compliance_result = "NOT_APPLICABLE" + else: + return build_internal_error_response("Unexpected message type", str(invoking_event)) + except botocore.exceptions.ClientError as ex: + if is_internal_error(ex): + return build_internal_error_response("Unexpected error while completing API request", str(ex)) + return build_error_response( + "Customer error while making API request", + str(ex), + ex.response["Error"]["Code"], + ex.response["Error"]["Message"], + ) + except ValueError as ex: + return build_internal_error_response(str(ex), str(ex)) + + evaluations = [] + latest_evaluations = [] + + if not compliance_result: + latest_evaluations.append( + build_evaluation(event["accountId"], "NOT_APPLICABLE", event, resource_type="AWS::::Account") + ) + evaluations = clean_up_old_evaluations(latest_evaluations, event) + elif isinstance(compliance_result, str): + if configuration_item: + evaluations.append(build_evaluation_from_config_item(configuration_item, compliance_result)) + else: + evaluations.append( + build_evaluation(event["accountId"], compliance_result, event, resource_type=DEFAULT_RESOURCE_TYPE) + ) + elif isinstance(compliance_result, list): + for evaluation in compliance_result: + missing_fields = False + for field in ("ComplianceResourceType", "ComplianceResourceId", "ComplianceType", "OrderingTimestamp"): + if field not in evaluation: + print("Missing " + field + " from custom evaluation.") + missing_fields = True + + if not missing_fields: + 
latest_evaluations.append(evaluation) + evaluations = clean_up_old_evaluations(latest_evaluations, event) + elif isinstance(compliance_result, dict): + missing_fields = False + for field in ("ComplianceResourceType", "ComplianceResourceId", "ComplianceType", "OrderingTimestamp"): + if field not in compliance_result: + print("Missing " + field + " from custom evaluation.") + missing_fields = True + if not missing_fields: + evaluations.append(compliance_result) + else: + evaluations.append(build_evaluation_from_config_item(configuration_item, "NOT_APPLICABLE")) + + # Put together the request that reports the evaluation status + result_token = event["resultToken"] + test_mode = False + if result_token == "TESTMODE": + # Used solely for RDK test to skip actual put_evaluation API call + test_mode = True + + # Invoke the Config API to report the result of the evaluation + evaluation_copy = [] + evaluation_copy = evaluations[:] + while evaluation_copy: + AWS_CONFIG_CLIENT.put_evaluations( + Evaluations=evaluation_copy[:100], ResultToken=result_token, TestMode=test_mode + ) + del evaluation_copy[:100] + + # Used solely for RDK test to be able to test Lambda function + return evaluations + + +def is_internal_error(exception): + return ( + (not isinstance(exception, botocore.exceptions.ClientError)) + or exception.response["Error"]["Code"].startswith("5") + or "InternalError" in exception.response["Error"]["Code"] + or "ServiceError" in exception.response["Error"]["Code"] + ) + + +def build_internal_error_response(internal_error_message, internal_error_details=None): + return build_error_response(internal_error_message, internal_error_details, "InternalError", "InternalError") + + +def build_error_response( + internal_error_message, internal_error_details=None, customer_error_code=None, customer_error_message=None +): + error_response = { + "internalErrorMessage": internal_error_message, + "internalErrorDetails": internal_error_details, + "customerErrorMessage": customer_error_message, + "customerErrorCode": customer_error_code, + } + print(error_response) + return error_response diff --git a/rdk/template/runtime/python3.10/rule_test.py b/rdk/template/runtime/python3.10/rule_test.py new file mode 100644 index 00000000..e0f8c974 --- /dev/null +++ b/rdk/template/runtime/python3.10/rule_test.py @@ -0,0 +1,177 @@ +import sys +import unittest +from unittest.mock import MagicMock +import botocore + +############## +# Parameters # +############## + +# Define the default resource to report to Config Rules +DEFAULT_RESOURCE_TYPE = "AWS::::Account" + +############# +# Main Code # +############# + +CONFIG_CLIENT_MOCK = MagicMock() +STS_CLIENT_MOCK = MagicMock() + + +class Boto3Mock: + @staticmethod + def client(client_name, *args, **kwargs): + if client_name == "config": + return CONFIG_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK + raise Exception("Attempting to create an unknown client") + + +sys.modules["boto3"] = Boto3Mock() + +RULE = __import__("<%RuleName%>") + + +class ComplianceTest(unittest.TestCase): + + rule_parameters = '{"SomeParameterKey":"SomeParameterValue","SomeParameterKey2":"SomeParameterValue2"}' + + invoking_event_iam_role_sample = 
'{"configurationItem":{"relatedEvents":[],"relationships":[],"configuration":{},"tags":{},"configurationItemCaptureTime":"2018-07-02T03:37:52.418Z","awsAccountId":"123456789012","configurationItemStatus":"ResourceDiscovered","resourceType":"AWS::IAM::Role","resourceId":"some-resource-id","resourceName":"some-resource-name","ARN":"some-arn"},"notificationCreationTime":"2018-07-02T23:05:34.445Z","messageType":"ConfigurationItemChangeNotification"}' + + def setUp(self): + pass + + def test_sample(self): + self.assertTrue(True) + + # def test_sample_2(self): + # RULE.ASSUME_ROLE_MODE = False + # response = RULE.lambda_handler(build_lambda_configurationchange_event(self.invoking_event_iam_role_sample, self.rule_parameters), {}) + # resp_expected = [] + # resp_expected.append(build_expected_response('NOT_APPLICABLE', 'some-resource-id', 'AWS::IAM::Role')) + # assert_successful_evaluation(self, response, resp_expected) + + +#################### +# Helper Functions # +#################### + + +def build_lambda_configurationchange_event(invoking_event, rule_parameters=None): + event_to_return = { + "configRuleName": "myrule", + "executionRoleArn": "roleArn", + "eventLeftScope": False, + "invokingEvent": invoking_event, + "accountId": "123456789012", + "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", + "resultToken": "token", + } + if rule_parameters: + event_to_return["ruleParameters"] = rule_parameters + return event_to_return + + +def build_lambda_scheduled_event(rule_parameters=None): + invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}' + event_to_return = { + "configRuleName": "myrule", + "executionRoleArn": "roleArn", + "eventLeftScope": False, + "invokingEvent": invoking_event, + "accountId": "123456789012", + "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", + "resultToken": "token", + } + if rule_parameters: + event_to_return["ruleParameters"] = rule_parameters + return event_to_return + + +def build_expected_response( + compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None +): + if not annotation: + return { + "ComplianceType": compliance_type, + "ComplianceResourceId": compliance_resource_id, + "ComplianceResourceType": compliance_resource_type, + } + return { + "ComplianceType": compliance_type, + "ComplianceResourceId": compliance_resource_id, + "ComplianceResourceType": compliance_resource_type, + "Annotation": annotation, + } + + +def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1): + if isinstance(response, dict): + test_class.assertEquals(resp_expected["ComplianceResourceType"], response["ComplianceResourceType"]) + test_class.assertEquals(resp_expected["ComplianceResourceId"], response["ComplianceResourceId"]) + test_class.assertEquals(resp_expected["ComplianceType"], response["ComplianceType"]) + test_class.assertTrue(response["OrderingTimestamp"]) + if "Annotation" in resp_expected or "Annotation" in response: + test_class.assertEquals(resp_expected["Annotation"], response["Annotation"]) + elif isinstance(response, list): + test_class.assertEquals(evaluations_count, len(response)) + for i, response_expected in enumerate(resp_expected): + test_class.assertEquals(response_expected["ComplianceResourceType"], response[i]["ComplianceResourceType"]) + test_class.assertEquals(response_expected["ComplianceResourceId"], response[i]["ComplianceResourceId"]) + 
test_class.assertEquals(response_expected["ComplianceType"], response[i]["ComplianceType"]) + test_class.assertTrue(response[i]["OrderingTimestamp"]) + if "Annotation" in response_expected or "Annotation" in response[i]: + test_class.assertEquals(response_expected["Annotation"], response[i]["Annotation"]) + + +def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None): + if customer_error_code: + test_class.assertEqual(customer_error_code, response["customerErrorCode"]) + if customer_error_message: + test_class.assertEqual(customer_error_message, response["customerErrorMessage"]) + test_class.assertTrue(response["customerErrorCode"]) + test_class.assertTrue(response["customerErrorMessage"]) + if "internalErrorMessage" in response: + test_class.assertTrue(response["internalErrorMessage"]) + if "internalErrorDetails" in response: + test_class.assertTrue(response["internalErrorDetails"]) + + +def sts_mock(): + assume_role_response = { + "Credentials": {"AccessKeyId": "string", "SecretAccessKey": "string", "SessionToken": "string"} + } + STS_CLIENT_MOCK.reset_mock(return_value=True) + STS_CLIENT_MOCK.assume_role = MagicMock(return_value=assume_role_response) + + +################## +# Common Testing # +################## + + +class TestStsErrors(unittest.TestCase): + def test_sts_unknown_error(self): + RULE.ASSUME_ROLE_MODE = True + RULE.evaluate_parameters = MagicMock(return_value=True) + STS_CLIENT_MOCK.assume_role = MagicMock( + side_effect=botocore.exceptions.ClientError( + {"Error": {"Code": "unknown-code", "Message": "unknown-message"}}, "operation" + ) + ) + response = RULE.lambda_handler(build_lambda_configurationchange_event("{}"), {}) + assert_customer_error_response(self, response, "InternalError", "InternalError") + + def test_sts_access_denied(self): + RULE.ASSUME_ROLE_MODE = True + RULE.evaluate_parameters = MagicMock(return_value=True) + STS_CLIENT_MOCK.assume_role = MagicMock( + side_effect=botocore.exceptions.ClientError( + {"Error": {"Code": "AccessDenied", "Message": "access-denied"}}, "operation" + ) + ) + response = RULE.lambda_handler(build_lambda_configurationchange_event("{}"), {}) + assert_customer_error_response( + self, response, "AccessDenied", "AWS Config does not have permission to assume the IAM role." 
+ ) diff --git a/rdk/template/runtime/python3.7-lib/rule_test.py b/rdk/template/runtime/python3.7-lib/rule_test.py index 8f11d043..db0cf30c 100644 --- a/rdk/template/runtime/python3.7-lib/rule_test.py +++ b/rdk/template/runtime/python3.7-lib/rule_test.py @@ -4,7 +4,6 @@ import unittest from unittest.mock import patch, MagicMock from botocore.exceptions import ClientError -import rdklib from rdklib import Evaluation, ComplianceType import rdklibtest @@ -20,24 +19,27 @@ # Main Code # ############# -MODULE = __import__("<%RuleName%>") -RULE = MODULE.<%RuleName%>() +MODULE = __import__("check_security_hub_aggregator") +RULE = MODULE.check_security_hub_aggregator() CLIENT_FACTORY = MagicMock() # example for mocking IAM API calls IAM_CLIENT_MOCK = MagicMock() +# STS client for getting account ID +STS_CLIENT_MOCK = MagicMock() def mock_get_client(client_name, *args, **kwargs): if client_name == "iam": return IAM_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK raise Exception("Attempting to create an unknown client") @patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) class ComplianceTest(unittest.TestCase): - rule_parameters = { "SomeParameterKey": "SomeParameterValue", "SomeParameterKey2": "SomeParameterValue2", @@ -61,6 +63,7 @@ class ComplianceTest(unittest.TestCase): }, "notificationCreationTime": "2018-07-02T23:05:34.445Z", "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", } list_roles_response = { @@ -69,74 +72,85 @@ class ComplianceTest(unittest.TestCase): "Path": "/", "RoleName": "testrole", "RoleId": "some-role-id", - "Arn": "arn:aws:iam00000056789012:role/testrole", - "CreateDate": datetime(2015, 1, 1), + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), "Description": "this is a test role", "MaxSessionDuration": 123, "Tags": [ {"Key": "one_tag", "Value": "its_value"}, ], "RoleLastUsed": { - "LastUsedDate": datetime(2015, 1, 1), + "LastUsedDate": datetime.datetime(2015, 1, 1), "Region": "us-east-1", }, }, ] } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} def setUp(self): - pass + STS_CLIENT_MOCK.reset_mock() def test_sample(self): self.assertTrue(True) - # def test_configurationchange_rule(self): - # # Example of how to evaluate a configuration change rule - # response = RULE.evaluate_change( - # event=json.dumps(self.invoking_event_iam_role_sample), - # client_factory=CLIENT_FACTORY, - # configuration_item=self.role_sample_configuration_abridged, - # valid_rule_parameters=json.dumps(self.rule_parameters), - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # annotation="This is a configuration change rule's annotation.", - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("resourceId", None), - # resourceType=RESOURCE_TYPE, - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # rdklib.assert_successful_evaluation(self, response, resp_expected) - - # def test_periodic_rule(self): - # # Example of how to mock the client response for a list_roles API call - # IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) - # # Example of how to evaluate a periodic rule - # response = RULE.evaluate_periodic( - # 
event=rdklibtest.create_test_scheduled_event(self.rule_parameters), - # client_factory=CLIENT_FACTORY, - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("awsAccountId", None), - # resourceType="AWS::::Account", - # annotation="This is a periodic rule's annotation.", - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # rdklib.assert_successful_evaluation(self, response, resp_expected) + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) if __name__ == "__main__": diff --git a/rdk/template/runtime/python3.8-lib/rule_test.py b/rdk/template/runtime/python3.8-lib/rule_test.py index 8f11d043..db0cf30c 100644 --- a/rdk/template/runtime/python3.8-lib/rule_test.py +++ b/rdk/template/runtime/python3.8-lib/rule_test.py @@ -4,7 +4,6 @@ import unittest from unittest.mock import patch, MagicMock from botocore.exceptions import ClientError -import rdklib from rdklib import Evaluation, ComplianceType import rdklibtest @@ -20,24 +19,27 @@ # Main Code # ############# -MODULE = __import__("<%RuleName%>") -RULE = MODULE.<%RuleName%>() +MODULE = __import__("check_security_hub_aggregator") +RULE 
= MODULE.check_security_hub_aggregator() CLIENT_FACTORY = MagicMock() # example for mocking IAM API calls IAM_CLIENT_MOCK = MagicMock() +# STS client for getting account ID +STS_CLIENT_MOCK = MagicMock() def mock_get_client(client_name, *args, **kwargs): if client_name == "iam": return IAM_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK raise Exception("Attempting to create an unknown client") @patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) class ComplianceTest(unittest.TestCase): - rule_parameters = { "SomeParameterKey": "SomeParameterValue", "SomeParameterKey2": "SomeParameterValue2", @@ -61,6 +63,7 @@ class ComplianceTest(unittest.TestCase): }, "notificationCreationTime": "2018-07-02T23:05:34.445Z", "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", } list_roles_response = { @@ -69,74 +72,85 @@ class ComplianceTest(unittest.TestCase): "Path": "/", "RoleName": "testrole", "RoleId": "some-role-id", - "Arn": "arn:aws:iam00000056789012:role/testrole", - "CreateDate": datetime(2015, 1, 1), + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), "Description": "this is a test role", "MaxSessionDuration": 123, "Tags": [ {"Key": "one_tag", "Value": "its_value"}, ], "RoleLastUsed": { - "LastUsedDate": datetime(2015, 1, 1), + "LastUsedDate": datetime.datetime(2015, 1, 1), "Region": "us-east-1", }, }, ] } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} def setUp(self): - pass + STS_CLIENT_MOCK.reset_mock() def test_sample(self): self.assertTrue(True) - # def test_configurationchange_rule(self): - # # Example of how to evaluate a configuration change rule - # response = RULE.evaluate_change( - # event=json.dumps(self.invoking_event_iam_role_sample), - # client_factory=CLIENT_FACTORY, - # configuration_item=self.role_sample_configuration_abridged, - # valid_rule_parameters=json.dumps(self.rule_parameters), - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # annotation="This is a configuration change rule's annotation.", - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("resourceId", None), - # resourceType=RESOURCE_TYPE, - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # rdklib.assert_successful_evaluation(self, response, resp_expected) - - # def test_periodic_rule(self): - # # Example of how to mock the client response for a list_roles API call - # IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) - # # Example of how to evaluate a periodic rule - # response = RULE.evaluate_periodic( - # event=rdklibtest.create_test_scheduled_event(self.rule_parameters), - # client_factory=CLIENT_FACTORY, - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("awsAccountId", None), - # resourceType="AWS::::Account", - # annotation="This is a periodic rule's annotation.", - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # 
rdklib.assert_successful_evaluation(self, response, resp_expected) + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) if __name__ == "__main__": diff --git a/rdk/template/runtime/python3.9-lib/rule_test.py b/rdk/template/runtime/python3.9-lib/rule_test.py index 8f11d043..db0cf30c 100644 --- a/rdk/template/runtime/python3.9-lib/rule_test.py +++ b/rdk/template/runtime/python3.9-lib/rule_test.py @@ -4,7 +4,6 @@ import unittest from unittest.mock import patch, MagicMock from botocore.exceptions import ClientError -import rdklib from rdklib import Evaluation, ComplianceType import rdklibtest @@ -20,24 +19,27 @@ # Main Code # ############# -MODULE = __import__("<%RuleName%>") -RULE = MODULE.<%RuleName%>() +MODULE = __import__("check_security_hub_aggregator") +RULE = MODULE.check_security_hub_aggregator() CLIENT_FACTORY = MagicMock() # example for mocking IAM API calls IAM_CLIENT_MOCK = MagicMock() +# STS client for getting account ID +STS_CLIENT_MOCK = MagicMock() def mock_get_client(client_name, *args, **kwargs): if client_name == "iam": return IAM_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK raise Exception("Attempting to create an unknown client") @patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) class ComplianceTest(unittest.TestCase): - rule_parameters = { "SomeParameterKey": "SomeParameterValue", "SomeParameterKey2": 
"SomeParameterValue2", @@ -61,6 +63,7 @@ class ComplianceTest(unittest.TestCase): }, "notificationCreationTime": "2018-07-02T23:05:34.445Z", "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", } list_roles_response = { @@ -69,74 +72,85 @@ class ComplianceTest(unittest.TestCase): "Path": "/", "RoleName": "testrole", "RoleId": "some-role-id", - "Arn": "arn:aws:iam00000056789012:role/testrole", - "CreateDate": datetime(2015, 1, 1), + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), "Description": "this is a test role", "MaxSessionDuration": 123, "Tags": [ {"Key": "one_tag", "Value": "its_value"}, ], "RoleLastUsed": { - "LastUsedDate": datetime(2015, 1, 1), + "LastUsedDate": datetime.datetime(2015, 1, 1), "Region": "us-east-1", }, }, ] } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} def setUp(self): - pass + STS_CLIENT_MOCK.reset_mock() def test_sample(self): self.assertTrue(True) - # def test_configurationchange_rule(self): - # # Example of how to evaluate a configuration change rule - # response = RULE.evaluate_change( - # event=json.dumps(self.invoking_event_iam_role_sample), - # client_factory=CLIENT_FACTORY, - # configuration_item=self.role_sample_configuration_abridged, - # valid_rule_parameters=json.dumps(self.rule_parameters), - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # annotation="This is a configuration change rule's annotation.", - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("resourceId", None), - # resourceType=RESOURCE_TYPE, - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # rdklib.assert_successful_evaluation(self, response, resp_expected) - - # def test_periodic_rule(self): - # # Example of how to mock the client response for a list_roles API call - # IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) - # # Example of how to evaluate a periodic rule - # response = RULE.evaluate_periodic( - # event=rdklibtest.create_test_scheduled_event(self.rule_parameters), - # client_factory=CLIENT_FACTORY, - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("awsAccountId", None), - # resourceType="AWS::::Account", - # annotation="This is a periodic rule's annotation.", - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # rdklib.assert_successful_evaluation(self, response, resp_expected) + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + 
complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) if __name__ == "__main__": diff --git a/rdk/template/terraform/0.11/config_rule.tf b/rdk/template/terraform/0.11/config_rule.tf index 4df8f241..6e190217 100644 --- a/rdk/template/terraform/0.11/config_rule.tf +++ b/rdk/template/terraform/0.11/config_rule.tf @@ -9,13 +9,6 @@ data "aws_iam_policy" "read_only_access" { } data "aws_iam_policy_document" "config_iam_policy" { - - statement{ - actions=["s3:GetObject"] - resources =["arn:${data.aws_partition.current.partition}:s3:::${var.source_bucket}/${var.rule_name}.zip"] - effect = "Allow" - sid= "1" - } statement{ actions=[ "logs:CreateLogGroup", @@ -36,7 +29,6 @@ data "aws_iam_policy_document" "config_iam_policy" { statement{ actions=[ "iam:List*", - "iam:Describe*", "iam:Get*" ] resources = ["*"] diff --git a/rdk/template/terraform/0.12/config_rule.tf b/rdk/template/terraform/0.12/config_rule.tf index 13e2b6f6..ab384bdc 100644 --- a/rdk/template/terraform/0.12/config_rule.tf +++ b/rdk/template/terraform/0.12/config_rule.tf @@ -10,12 +10,6 @@ data "aws_iam_policy" "read_only_access" { data "aws_iam_policy_document" "config_iam_policy" { - statement{ - actions=["s3:GetObject"] - resources = [format("arn:%s:s3:::%s/%s",data.aws_partition.current.partition,var.source_bucket,local.rule_name_source)] - effect = "Allow" - sid= "1" - } statement{ actions=[ "logs:CreateLogGroup", @@ -36,7 +30,6 @@ data "aws_iam_policy_document" "config_iam_policy" { statement{ actions=[ "iam:List*", - "iam:Describe*", "iam:Get*" ] resources = ["*"] diff --git a/testing/linux-python3-buildspec.yaml b/testing/linux-python3-buildspec.yaml index 74930af5..715d06ac 100644 --- a/testing/linux-python3-buildspec.yaml +++ b/testing/linux-python3-buildspec.yaml @@ -14,7 +14,7 @@ phases: commands: - rdk create-region-set -o test-region - rdk -f test-region.yaml init - - rdk create MFA_ENABLED_RULE --runtime python3.8 --resource-types AWS::IAM::User + - rdk 
create MFA_ENABLED_RULE --runtime python3.10 --resource-types AWS::IAM::User - rdk -f test-region.yaml deploy MFA_ENABLED_RULE - sleep 30 - python3 testing/multi_region_execution_test.py @@ -22,22 +22,26 @@ phases: - rdk -f test-region.yaml undeploy --force MFA_ENABLED_RULE - python3 testing/partition_test.py - rdk init --generate-lambda-layer + - rdk create LP3_TestRule_P310_lib --runtime python3.10-lib --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P39_lib --runtime python3.9-lib --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P38_lib --runtime python3.8-lib --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P37_lib --runtime python3.7-lib --resource-types AWS::EC2::SecurityGroup + - rdk -f test-region.yaml deploy LP3_TestRule_P310_lib --generated-lambda-layer - rdk -f test-region.yaml deploy LP3_TestRule_P39_lib --generated-lambda-layer - rdk -f test-region.yaml deploy LP3_TestRule_P38_lib --generated-lambda-layer - rdk -f test-region.yaml deploy LP3_TestRule_P37_lib --generated-lambda-layer + - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P310_lib - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P39_lib - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P38_lib - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P37_lib + - rdk create LP3_TestRule_P310 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P39 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P38 --runtime python3.8 --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P37 --runtime python3.7 --resource-types AWS::EC2::SecurityGroup - - rdk create LP3_TestRule_P3 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - - rdk create LP3_TestRule_EFSFS --runtime python3.9 --resource-types AWS::EFS::FileSystem - - rdk create LP3_TestRule_ECSTD --runtime python3.7 --resource-types AWS::ECS::TaskDefinition - - rdk create LP3_TestRule_ECSS --runtime python3.9 --resource-types AWS::ECS::Service + - rdk create LP3_TestRule_P3 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup + - rdk create LP3_TestRule_EFSFS --runtime python3.10 --resource-types AWS::EFS::FileSystem + - rdk create LP3_TestRule_ECSTD --runtime python3.10 --resource-types AWS::ECS::TaskDefinition + - rdk create LP3_TestRule_ECSS --runtime python3.10 --resource-types AWS::ECS::Service - rdk modify LP3_TestRule_P3 --input-parameters '{"TestParameter":"TestValue"}' - rdk create LP3_TestRule_P37_Periodic --runtime python3.7 --maximum-frequency One_Hour - rdk create LP3_TestRule_P37lib_Periodic --runtime python3.7-lib --maximum-frequency One_Hour @@ -45,6 +49,8 @@ phases: - rdk create LP3_TestRule_P38lib_Periodic --runtime python3.8-lib --maximum-frequency One_Hour - rdk create LP3_TestRule_P39_Periodic --runtime python3.9 --maximum-frequency One_Hour - rdk create LP3_TestRule_P39lib_Periodic --runtime python3.9-lib --maximum-frequency One_Hour + - rdk create LP3_TestRule_P310_Periodic --runtime python3.10 --maximum-frequency One_Hour + - rdk create LP3_TestRule_P310lib_Periodic --runtime python3.10-lib --maximum-frequency One_Hour - rdk test-local --all - rdk deploy --all - yes | rdk undeploy LP3_TestRule_P3 @@ -54,6 +60,8 @@ phases: - yes | rdk undeploy LP3_TestRule_P38_Periodic - yes | rdk undeploy LP3_TestRule_P39 - yes | rdk undeploy LP3_TestRule_P39_Periodic + - yes | rdk undeploy LP3_TestRule_P310 + - yes | rdk undeploy LP3_TestRule_P310_Periodic - sleep 30 - rdk logs LP3_TestRule_P3 - yes 
| rdk undeploy -a diff --git a/testing/windows-python2-buildspec.yaml b/testing/windows-python2-buildspec.yaml deleted file mode 100644 index 57dd3042..00000000 --- a/testing/windows-python2-buildspec.yaml +++ /dev/null @@ -1,13 +0,0 @@ -version: 0.1 - -phases: - install: - commands: - - apt-get update -y - build: - commands: - - echo Creating Windows build server and running tests - - bash testing/test_windows.sh 2 - post_build: - commands: - - echo Build completed on `date` diff --git a/testing/windows-python3-buildspec.yaml b/testing/windows-python3-buildspec.yaml index 4e385fd6..5c92b963 100644 --- a/testing/windows-python3-buildspec.yaml +++ b/testing/windows-python3-buildspec.yaml @@ -14,35 +14,41 @@ phases: commands: - rdk create-region-set -o test-region - rdk -f test-region.yaml init - - rdk create W_MFA_ENABLED_RULE --runtime python3.8 --resource-types AWS::IAM::User + - rdk create W_MFA_ENABLED_RULE --runtime python3.10 --resource-types AWS::IAM::User - rdk -f test-region.yaml deploy W_MFA_ENABLED_RULE - python testing/win_multi_region_execution_test.py - rdk -f test-region.yaml undeploy --force W_MFA_ENABLED_RULE - python testing/win_partition_test.py - rdk init --generate-lambda-layer + - rdk create WP3_TestRule_P310_lib --runtime python3.10-lib --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P39_lib --runtime python3.9-lib --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P38_lib --runtime python3.8-lib --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P37_lib --runtime python3.7-lib --resource-types AWS::EC2::SecurityGroup + - rdk -f test-region.yaml deploy WP3_TestRule_P310_lib --generated-lambda-layer - rdk -f test-region.yaml deploy WP3_TestRule_P39_lib --generated-lambda-layer - rdk -f test-region.yaml deploy WP3_TestRule_P38_lib --generated-lambda-layer - rdk -f test-region.yaml deploy WP3_TestRule_P37_lib --generated-lambda-layer + - rdk -f test-region.yaml undeploy WP3_TestRule_P310_lib --force - rdk -f test-region.yaml undeploy WP3_TestRule_P39_lib --force - rdk -f test-region.yaml undeploy WP3_TestRule_P38_lib --force - rdk -f test-region.yaml undeploy WP3_TestRule_P37_lib --force + - rdk create WP3_TestRule_P310 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P39 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P38 --runtime python3.8 --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P37 --runtime python3.7 --resource-types AWS::EC2::SecurityGroup - - rdk create WP3_TestRule_P3 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - - rdk create WP3_TestRule_EFSFS --runtime python3.9 --resource-types AWS::EFS::FileSystem - - rdk create WP3_TestRule_ECSTD --runtime python3.7 --resource-types AWS::ECS::TaskDefinition - - rdk create WP3_TestRule_ECSS --runtime python3.9 --resource-types AWS::ECS::Service - - rdk modify WP3_TestRule_P3 --runtime python3.8 + - rdk create WP3_TestRule_P3 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup + - rdk create WP3_TestRule_EFSFS --runtime python3.10 --resource-types AWS::EFS::FileSystem + - rdk create WP3_TestRule_ECSTD --runtime python3.10 --resource-types AWS::ECS::TaskDefinition + - rdk create WP3_TestRule_ECSS --runtime python3.10 --resource-types AWS::ECS::Service + - rdk modify WP3_TestRule_P3 --runtime python3.10 - rdk create WP3_TestRule_P37_Periodic --runtime python3.7 --maximum-frequency One_Hour - rdk create WP3_TestRule_P37lib_Periodic --runtime 
python3.7-lib --maximum-frequency One_Hour - rdk create WP3_TestRule_P38_Periodic --runtime python3.8 --maximum-frequency One_Hour - rdk create WP3_TestRule_P38lib_Periodic --runtime python3.8-lib --maximum-frequency One_Hour - rdk create WP3_TestRule_P39_Periodic --runtime python3.9 --maximum-frequency One_Hour - rdk create WP3_TestRule_P39lib_Periodic --runtime python3.9-lib --maximum-frequency One_Hour + - rdk create WP3_TestRule_P310_Periodic --runtime python3.10 --maximum-frequency One_Hour + - rdk create WP3_TestRule_P310lib_Periodic --runtime python3.10-lib --maximum-frequency One_Hour - rdk test-local --all - rdk deploy --all - rdk undeploy WP3_TestRule_P3 --force @@ -51,7 +57,9 @@ phases: - rdk undeploy WP3_TestRule_P38 --force - rdk undeploy WP3_TestRule_P38_Periodic --force - rdk undeploy WP3_TestRule_P39 --force - - rdk undeploy WP3_TestRule_P39_Periodic --force + - rdk undeploy WP3_TestRule_P39_Periodic --force + - rdk undeploy WP3_TestRule_P310 --force + - rdk undeploy WP3_TestRule_P310_Periodic --force - rdk logs WP3_TestRule_P3 - rdk undeploy -a --force post_build: diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..66a14670 --- /dev/null +++ b/tox.ini @@ -0,0 +1,2 @@ +[flake8] +max-line-length=140 \ No newline at end of file From 15ff29bb816de49bc6d4dee0695cbb76ed80ccdc Mon Sep 17 00:00:00 2001 From: Benjamin Morris <93620006+bmorrissirromb@users.noreply.github.com> Date: Mon, 1 May 2023 14:01:55 -0700 Subject: [PATCH 03/12] reference the correct readthedocs link --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index d5cfe7ec..de8d303a 100644 --- a/README.rst +++ b/README.rst @@ -14,7 +14,7 @@ We greatly appreciate feedback and bug reports at rdk-maintainers@amazon.com! Yo The RDK is designed to support a "Compliance-as-Code" workflow that is intuitive and productive. It abstracts away much of the undifferentiated heavy lifting associated with deploying AWS Config rules backed by custom lambda functions, and provides a streamlined develop-deploy-monitor iterative process. -For complete documentation, including command reference, check out the `ReadTheDocs documentation `_. +For complete documentation, including command reference, check out the `ReadTheDocs documentation `_. Getting Started =============== From b1c544f5e1f646871e113d7ea517254f6cd2171a Mon Sep 17 00:00:00 2001 From: Benjamin Morris Date: Tue, 2 May 2023 08:34:29 -0700 Subject: [PATCH 04/12] new resource types and version bump --- pyproject.toml | 2 +- rdk/__init__.py | 2 +- rdk/rdk.py | 74 ++++++++++++++++++++++++++++++++----------------- 3 files changed, 51 insertions(+), 27 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c3751717..0574f520 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. [tool.poetry] name = "rdk" -version = "0.13.0" +version = "0.14.0" description = "Rule Development Kit CLI for AWS Config" authors = [ "AWS RDK Maintainers ", diff --git a/rdk/__init__.py b/rdk/__init__.py index 93711b5e..a5407534 100644 --- a/rdk/__init__.py +++ b/rdk/__init__.py @@ -6,4 +6,4 @@ # # or in the "license" file accompanying this file. 
This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -MY_VERSION = "0.13.0" +MY_VERSION = "0.14.0" diff --git a/rdk/rdk.py b/rdk/rdk.py index 12052e51..4cd52bbc 100644 --- a/rdk/rdk.py +++ b/rdk/rdk.py @@ -87,8 +87,8 @@ # This need to be update whenever config service supports more resource types # See: https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html accepted_resource_types = [ - "AWS::AccessAnalyzer::Analyzer", "AWS::ACM::Certificate", + "AWS::AccessAnalyzer::Analyzer", "AWS::AmazonMQ::Broker", "AWS::ApiGateway::RestApi", "AWS::ApiGateway::Stage", @@ -96,11 +96,14 @@ "AWS::ApiGatewayV2::Stage", "AWS::AppConfig::Application", "AWS::AppConfig::ConfigurationProfile", + "AWS::AppConfig::DeploymentStrategy", "AWS::AppConfig::Environment", + "AWS::AppFlow::Flow", "AWS::AppStream::DirectoryConfig", "AWS::AppSync::GraphQLApi", - "AWS::Athena::WorkGroup", "AWS::Athena::DataCatalog", + "AWS::Athena::WorkGroup", + "AWS::AuditManager::Assessment", "AWS::AutoScaling::AutoScalingGroup", "AWS::AutoScaling::LaunchConfiguration", "AWS::AutoScaling::ScalingPolicy", @@ -120,6 +123,7 @@ "AWS::CloudFront::StreamingDistribution", "AWS::CloudTrail::Trail", "AWS::CloudWatch::Alarm", + "AWS::CloudWatch::MetricStream", "AWS::CodeBuild::Project", "AWS::CodeDeploy::Application", "AWS::CodeDeploy::DeploymentConfig", @@ -131,6 +135,11 @@ "AWS::Config::ResourceCompliance", "AWS::Connect::PhoneNumber", "AWS::CustomerProfiles::Domain", + "AWS::DMS::Certificate", + "AWS::DMS::EventSubscription", + "AWS::DMS::ReplicationInstance", + "AWS::DMS::ReplicationSubnetGroup", + "AWS::DMS::ReplicationTask", "AWS::DataSync::LocationEFS", "AWS::DataSync::LocationFSxLustre", "AWS::DataSync::LocationFSxWindows", @@ -141,22 +150,20 @@ "AWS::DataSync::LocationSMB", "AWS::DataSync::Task", "AWS::Detective::Graph", + "AWS::DeviceFarm::InstanceProfile", + "AWS::DeviceFarm::Project", "AWS::DeviceFarm::TestGridProject", - "AWS::DMS::Certificate", - "AWS::DMS::EventSubscription", - "AWS::DMS::ReplicationInstance", - "AWS::DMS::ReplicationSubnetGroup", - "AWS::DMS::ReplicationTask", "AWS::DynamoDB::Table", "AWS::EC2::CustomerGateway", "AWS::EC2::DHCPOptions", + "AWS::EC2::EC2Fleet", "AWS::EC2::EIP", "AWS::EC2::EgressOnlyInternetGateway", "AWS::EC2::FlowLog", "AWS::EC2::Host", + "AWS::EC2::IPAM", "AWS::EC2::Instance", "AWS::EC2::InternetGateway", - "AWS::EC2::IPAM", "AWS::EC2::LaunchTemplate", "AWS::EC2::NatGateway", "AWS::EC2::NetworkAcl", @@ -167,6 +174,7 @@ "AWS::EC2::RouteTable", "AWS::EC2::SecurityGroup", "AWS::EC2::Subnet", + "AWS::EC2::SubnetRouteTableAssociation", "AWS::EC2::TrafficMirrorFilter", "AWS::EC2::TrafficMirrorSession", "AWS::EC2::TrafficMirrorTarget", @@ -181,8 +189,9 @@ "AWS::EC2::VPNGateway", "AWS::EC2::Volume", "AWS::ECR::PublicRepository", - "AWS::ECR::Repository", + "AWS::ECR::PullThroughCacheRule", "AWS::ECR::RegistryPolicy", + "AWS::ECR::Repository", "AWS::ECS::Cluster", "AWS::ECS::Service", "AWS::ECS::TaskDefinition", @@ -200,16 +209,16 @@ "AWS::ElasticLoadBalancingV2::Listener", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::ElasticSearch::Domain", - "AWS::Events::Archive", + "AWS::EventSchemas::Discoverer", + "AWS::EventSchemas::Registry", + "AWS::EventSchemas::RegistryPolicy", + "AWS::EventSchemas::Schema", "AWS::Events::ApiDestination", + "AWS::Events::Archive", "AWS::Events::Connection", 
"AWS::Events::Endpoint", "AWS::Events::EventBus", "AWS::Events::Rule", - "AWS::EventSchemas::Discoverer", - "AWS::EventSchemas::Registry", - "AWS::EventSchemas::RegistryPolicy", - "AWS::EventSchemas::Schema", "AWS::FIS::ExperimentTemplate", "AWS::FraudDetector::EntityType", "AWS::FraudDetector::Label", @@ -221,6 +230,7 @@ "AWS::Glue::Classifier", "AWS::Glue::Job", "AWS::Glue::MLTransform", + "AWS::GroundStation::Config", "AWS::GuardDuty::Detector", "AWS::GuardDuty::Filter", "AWS::GuardDuty::IPSet", @@ -230,13 +240,18 @@ "AWS::IAM::Policy", "AWS::IAM::Role", "AWS::IAM::User", + "AWS::IVS::Channel", + "AWS::IVS::PlaybackKeyPair", + "AWS::IVS::RecordingConfiguration", "AWS::ImageBuilder::ContainerRecipe", "AWS::ImageBuilder::DistributionConfiguration", + "AWS::ImageBuilder::ImagePipeline", "AWS::ImageBuilder::InfrastructureConfiguration", "AWS::IoT::AccountAuditConfiguration", "AWS::IoT::Authorizer", "AWS::IoT::CustomMetric", "AWS::IoT::Dimension", + "AWS::IoT::FleetMetric", "AWS::IoT::MitigationAction", "AWS::IoT::Policy", "AWS::IoT::RoleAlias", @@ -257,9 +272,7 @@ "AWS::IoTTwinMaker::Entity", "AWS::IoTTwinMaker::Scene", "AWS::IoTTwinMaker::Workspace", - "AWS::IVS::Channel", - "AWS::IVS::PlaybackKeyPair", - "AWS::IVS::RecordingConfiguration", + "AWS::IoTWireless::ServiceProfile", "AWS::KMS::Alias", "AWS::KMS::Key", "AWS::Kinesis::Stream", @@ -275,14 +288,21 @@ "AWS::Lightsail::StaticIp", "AWS::LookoutMetrics::Alert", "AWS::LookoutVision::Project", + "AWS::MSK::Cluster", "AWS::MediaPackage::PackagingConfiguration", "AWS::MediaPackage::PackagingGroup", - "AWS::MSK::Cluster", "AWS::NetworkFirewall::Firewall", "AWS::NetworkFirewall::FirewallPolicy", "AWS::NetworkFirewall::RuleGroup", + "AWS::NetworkFirewall::TLSInspectionConfiguration", + "AWS::NetworkManager::Device", + "AWS::NetworkManager::GlobalNetwork", + "AWS::NetworkManager::Link", + "AWS::NetworkManager::Site", "AWS::NetworkManager::TransitGatewayRegistration", "AWS::OpenSearch::Domain", + "AWS::Panorama::Package", + "AWS::Pinpoint::App", "AWS::Pinpoint::ApplicationSettings", "AWS::Pinpoint::Segment", "AWS::QLDB::Ledger", @@ -294,12 +314,14 @@ "AWS::RDS::DBSubnetGroup", "AWS::RDS::EventSubscription", "AWS::RDS::GlobalCluster", + "AWS::RUM::AppMonitor", "AWS::Redshift::Cluster", "AWS::Redshift::ClusterParameterGroup", "AWS::Redshift::ClusterSecurityGroup", "AWS::Redshift::ClusterSnapshot", "AWS::Redshift::ClusterSubnetGroup", "AWS::Redshift::EventSubscription", + "AWS::Redshift::ScheduledAction", "AWS::ResilienceHub::ResiliencyPolicy", "AWS::RoboMaker::RobotApplication", "AWS::RoboMaker::RobotApplicationVersion", @@ -310,27 +332,34 @@ "AWS::Route53RecoveryControl::ControlPanel", "AWS::Route53RecoveryControl::RoutingControl", "AWS::Route53RecoveryControl::SafetyRule", - "AWS::Route53RecoveryReadiness::ResourceSet", "AWS::Route53RecoveryReadiness::Cell", "AWS::Route53RecoveryReadiness::ReadinessCheck", "AWS::Route53RecoveryReadiness::RecoveryGroup", + "AWS::Route53RecoveryReadiness::ResourceSet", "AWS::Route53Resolver::FirewallDomainList", + "AWS::Route53Resolver::FirewallRuleGroupAssociation", "AWS::Route53Resolver::ResolverEndpoint", "AWS::Route53Resolver::ResolverRule", "AWS::Route53Resolver::ResolverRuleAssociation", - "AWS::RUM::AppMonitor", "AWS::S3::AccountPublicAccessBlock", "AWS::S3::Bucket", "AWS::S3::MultiRegionAccessPoint", "AWS::S3::StorageLens", + "AWS::SES::ConfigurationSet", + "AWS::SES::ContactList", + "AWS::SES::ReceiptFilter", + "AWS::SES::ReceiptRuleSet", + "AWS::SES::Template", "AWS::SNS::Topic", "AWS::SQS::Queue", 
"AWS::SSM::AssociationCompliance", "AWS::SSM::FileData", "AWS::SSM::ManagedInstanceInventory", "AWS::SSM::PatchCompliance", + "AWS::SageMaker::AppImageConfig", "AWS::SageMaker::CodeRepository", "AWS::SageMaker::EndpointConfig", + "AWS::SageMaker::Image", "AWS::SageMaker::Model", "AWS::SageMaker::NotebookInstance", "AWS::SageMaker::NotebookInstanceLifecycleConfig", @@ -342,11 +371,6 @@ "AWS::ServiceDiscovery::HttpNamespace", "AWS::ServiceDiscovery::PublicDnsNamespace", "AWS::ServiceDiscovery::Service", - "AWS::SES::ConfigurationSet", - "AWS::SES::ContactList", - "AWS::SES::ReceiptFilter", - "AWS::SES::ReceiptRuleSet", - "AWS::SES::Template", "AWS::Shield::Protection", "AWS::ShieldRegional::Protection", "AWS::StepFunctions::Activity", From 42a25259ad1dc25eba71c6b23fb0dde9a7e37598 Mon Sep 17 00:00:00 2001 From: Benjamin Morris <93620006+bmorrissirromb@users.noreply.github.com> Date: Fri, 28 Apr 2023 16:20:25 -0700 Subject: [PATCH 05/12] remove typo in help output for #408 --- rdk/rdk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rdk/rdk.py b/rdk/rdk.py index a8274675..cebffb57 100644 --- a/rdk/rdk.py +++ b/rdk/rdk.py @@ -442,7 +442,7 @@ def get_command_parser(): parser.add_argument( "command", metavar="", - help=f"Command to run. Refer to the usage instructions for each command for more details. Commands are: {rdk_commands})", + help=f"Command to run. Refer to the usage instructions for each command for more details. Commands are: {rdk_commands}", choices=rdk_commands, ) parser.add_argument( From 8844665fdfd53d5c239021e2fbc46606ff1becfc Mon Sep 17 00:00:00 2001 From: Benjamin Morris Date: Mon, 1 May 2023 13:52:35 -0700 Subject: [PATCH 06/12] bulk bugfix commit --- NEW_RUNTIME_PROCESS.md | 31 + README.rst | 365 ++++ pyproject.toml | 3 + rdk-workshop/instructions.md | 2 +- rdk/rdk.py | 1669 ++++++++++++----- rdk/template/configRule.json | 9 - rdk/template/configRuleOrganization.json | 9 - .../example_ci/AWS_R53_HostedZone.json | 39 + .../AWS_S3_AccountPublicAccessBlock.json | 23 + .../AWS_SSM_ManagedInstanceInventory.json | 765 +++----- .../dotnetcore1.0/CustomConfigHandler.cs | 189 -- .../runtime/dotnetcore1.0/RuleCode.cs | 27 - .../aws-lambda-tools-defaults.json | 19 - .../runtime/dotnetcore1.0/csharp7.0.csproj | 28 - .../dotnetcore2.0/CustomConfigHandler.cs | 189 -- .../runtime/dotnetcore2.0/RuleCode.cs | 27 - .../aws-lambda-tools-defaults.json | 19 - .../runtime/dotnetcore2.0/csharp7.0.csproj | 28 - rdk/template/runtime/nodejs4.3/rule_code.js | 183 -- rdk/template/runtime/nodejs6.10/rule_code.js | 215 --- .../runtime/python3.10-lib/rule_code.py | 25 + .../runtime/python3.10-lib/rule_test.py | 157 ++ rdk/template/runtime/python3.10/rule_code.py | 437 +++++ rdk/template/runtime/python3.10/rule_test.py | 177 ++ .../runtime/python3.7-lib/rule_test.py | 124 +- .../runtime/python3.8-lib/rule_test.py | 124 +- .../runtime/python3.9-lib/rule_test.py | 124 +- rdk/template/terraform/0.11/config_rule.tf | 8 - rdk/template/terraform/0.12/config_rule.tf | 7 - testing/linux-python3-buildspec.yaml | 18 +- testing/windows-python2-buildspec.yaml | 13 - testing/windows-python3-buildspec.yaml | 22 +- tox.ini | 2 + 33 files changed, 2949 insertions(+), 2128 deletions(-) create mode 100644 NEW_RUNTIME_PROCESS.md create mode 100644 README.rst create mode 100644 rdk/template/example_ci/AWS_R53_HostedZone.json create mode 100644 rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json delete mode 100644 rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs delete mode 100755 
rdk/template/runtime/dotnetcore1.0/RuleCode.cs delete mode 100755 rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json delete mode 100644 rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj delete mode 100644 rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs delete mode 100644 rdk/template/runtime/dotnetcore2.0/RuleCode.cs delete mode 100644 rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json delete mode 100644 rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj delete mode 100644 rdk/template/runtime/nodejs4.3/rule_code.js delete mode 100644 rdk/template/runtime/nodejs6.10/rule_code.js create mode 100644 rdk/template/runtime/python3.10-lib/rule_code.py create mode 100644 rdk/template/runtime/python3.10-lib/rule_test.py create mode 100644 rdk/template/runtime/python3.10/rule_code.py create mode 100644 rdk/template/runtime/python3.10/rule_test.py delete mode 100644 testing/windows-python2-buildspec.yaml create mode 100644 tox.ini diff --git a/NEW_RUNTIME_PROCESS.md b/NEW_RUNTIME_PROCESS.md new file mode 100644 index 00000000..3961a50c --- /dev/null +++ b/NEW_RUNTIME_PROCESS.md @@ -0,0 +1,31 @@ +# New Runtime Support Process +These instructions document the parts of the repository that need to be updated when support for a new Lambda runtime is added. + +## Update pyproject.toml + +- Add to `classifiers` list: +``` +"Programming Language :: Python :: ," +``` + +- Add to `include` list: +``` +"rdk/template/runtime/python/*", +"rdk/template/runtime/python-lib/*", +``` + +## Update README.rst + +- Update documentation and examples + +## Update getting_started.rst + +- Update examples + +## Update rdk.py + +- Update references to include new version + +## Update Linux and Windows Buildspec files (`testing` folder) + +- Add new test cases for the new version \ No newline at end of file diff --git a/README.rst b/README.rst new file mode 100644 index 00000000..d5cfe7ec --- /dev/null +++ b/README.rst @@ -0,0 +1,365 @@ +rdk +=== +|pypibadge| |downloadsbadge| + + +.. |pypibadge| image:: https://static.pepy.tech/personalized-badge/rdk?period=total&units=international_system&left_color=black&right_color=blue&left_text=downloads + :target: https://pepy.tech/project/rdk +.. |downloadsbadge| image:: https://img.shields.io/pypi/v/rdk + :alt: PyPI + +Rule Development Kit + +We greatly appreciate feedback and bug reports at rdk-maintainers@amazon.com! You may also create an issue on this repo. + +The RDK is designed to support a "Compliance-as-Code" workflow that is intuitive and productive. It abstracts away much of the undifferentiated heavy lifting associated with deploying AWS Config rules backed by custom lambda functions, and provides a streamlined develop-deploy-monitor iterative process. + +For complete documentation, including command reference, check out the `ReadTheDocs documentation `_. + +Getting Started +=============== +Uses python 3.7/3.8/3.9/3.10 and is installed via pip. Requires you to have an AWS account and sufficient permissions to manage the Config service, and to create S3 Buckets, Roles, and Lambda Functions. An AWS IAM Policy Document that describes the minimum necessary permissions can be found at policy/rdk-minimum-permissions.json. 
+ +Under the hood, rdk uses boto3 to make API calls to AWS, so you can set your credentials any way that boto3 recognizes (options 3 through 8 here: http://boto3.readthedocs.io/en/latest/guide/configuration.html) or pass them in with the command-line parameters --profile, --region, --access-key-id, or --secret-access-key + +If you just want to use the RDK, go ahead and install it using pip:: + +$ pip install rdk + +Alternately, if you want to see the code and/or contribute you can clone the git repo, and then from the repo directory use pip to install the package. Use the '-e' flag to generate symlinks so that any edits you make will be reflected when you run the installed package. + +If you are going to author your Lambda functions using Java you will need to have Java 8 and gradle installed. If you are going to author your Lambda functions in C# you will need to have the dotnet CLI and the .NET Core Runtime 1.08 installed. +:: + + $ pip install -e . + +To make sure the rdk is installed correctly, running the package from the command line without any arguments should display help information. + +:: + + $ rdk + usage: rdk [-h] [-p PROFILE] [-k ACCESS_KEY] [-s SECRET_ACCESS_KEY] + [-r REGION] + ... + rdk: error: the following arguments are required: , + + +Usage +===== + +Configure your env +------------------ +To use the RDK, it's recommended to create a directory that will be your working directory. This should be committed to a source code repo, and ideally created as a python virtualenv. In that directory, run the ``init`` command to set up your AWS Config environment. + +:: + + $ rdk init + Running init! + Creating Config bucket config-bucket-780784666283 + Creating IAM role config-role + Waiting for IAM role to propagate + Config Service is ON + Config setup complete. + Creating Code bucket config-rule-code-bucket-780784666283ap-southeast-1 + +Running ``init`` subsequent times will validate your AWS Config setup and re-create any S3 buckets or IAM resources that are needed. + +- If you have config delivery bucket already present in some other AWS account then use **--config-bucket-exists-in-another-account** as argument::: + + $ rdk init --config-bucket-exists-in-another-account +- If you have AWS Organizations/ControlTower Setup in your AWS environment then additionally, use **--control-tower** as argument::: + + $ rdk init --control-tower --config-bucket-exists-in-another-account +- If bucket for custom lambda code is already present in current account then use **--skip-code-bucket-creation** argument::: + + $ rdk init --skip-code-bucket-creation + +- If you want rdk to create/update and upload the rdklib-layer for you, then use **--generate-lambda-layer** argument. In supported regions, rdk will deploy the layer using the Serverless Application Repository, otherwise it will build a local lambda layer archive and upload it for use::: + + $ rdk init --generate-lambda-layer +- If you want rdk to give a custom name to the lambda layer for you, then use **--custom-layer-namer** argument. The Serverless Application Repository currently cannot be used for custom lambda layers.::: + + $ rdk init --generate-lambda-layer --custom-layer-name + +Create Rules +------------ +In your working directory, use the ``create`` command to start creating a new custom rule. You must specify the runtime for the lambda function that will back the Rule, and you can also specify a resource type (or comma-separated list of types) that the Rule will evaluate or a maximum frequency for a periodic rule. 
This will add a new directory for the rule and populate it with several files, including a skeleton of your Lambda code. + +:: + + $ rdk create MyRule --runtime python3.8 --resource-types AWS::EC2::Instance --input-parameters '{"desiredInstanceType":"t2.micro"}' + Running create! + Local Rule files created. + +On Windows it is necessary to escape the double-quotes when specifying input parameters, so the `--input-parameters` argument would instead look something like this:: + + '{\"desiredInstanceType\":\"t2.micro\"}' + +Note that you can create rules that use EITHER resource-types OR maximum-frequency, but not both. We have found that rules that try to be both event-triggered as well as periodic wind up being very complicated and so we do not recommend it as a best practice. + +Edit Rules Locally +--------------------------- +Once you have created the rule, edit the python file in your rule directory (in the above example it would be ``MyRule/MyRule.py``, but may be deeper into the rule directory tree depending on your chosen Lambda runtime) to add whatever logic your Rule requires in the ``evaluate_compliance`` function. You will have access to the CI that was sent by Config, as well as any parameters configured for the Config Rule. Your function should return either a simple compliance status (one of ``COMPLIANT``, ``NONCOMPLIANT``, or ``NOT_APPLICABLE``), or if you're using the python or node runtimes you can return a JSON object with multiple evaluation responses that the RDK will send back to AWS Config. An example would look like:: + + for sg in response['SecurityGroups']: + evaluations.append( + { + 'ComplianceResourceType': 'AWS::EC2::SecurityGroup', + 'ComplianceResourceId': sg['GroupId'], + 'ComplianceType': 'COMPLIANT', + 'Annotation': 'This is an important note.', + 'OrderingTimestamp': str(datetime.datetime.now()) + }) + + + return evaluations + +This is necessary for periodic rules that are not triggered by any CI change (which means the CI that is passed in will be null), and also for attaching annotations to your evaluation results. + +If you want to see what the JSON structure of a CI looks like for creating your logic, you can use + +:: + +$ rdk sample-ci + +to output a formatted JSON document. + +Write and Run Unit Tests +------------------------ +If you are writing Config Rules using either of the Python runtimes there will be a _test.py file deployed along with your Lambda function skeleton. This can be used to write unit tests according to the standard Python unittest framework (documented here: https://docs.python.org/3/library/unittest.html), which can be run using the `test-local` rdk command:: + + $ rdk test-local MyTestRule + Running local test! + Testing MyTestRule + Looking for tests in /Users/mborch/Code/rdk-dev/MyTestRule + + --------------------------------------------------------------------- + + Ran 0 tests in 0.000s + + OK + + +The test file includes setup for the MagicMock library that can be used to stub boto3 API calls if your rule logic will involve making API calls to gather additional information about your AWS environment. For some tips on how to do this, check out this blog post: https://sgillies.net/2017/10/19/mock-is-magic.html + +Modify Rule +----------- +If you need to change the parameters of a Config rule in your working directory you can use the ``modify`` command. Any parameters you specify will overwrite existing values, any that you do not specify will not be changed. 
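To make the unit-test workflow above concrete, here is a minimal, self-contained sketch of the MagicMock pattern the generated test file is set up for. The ``evaluate_sgs`` helper is a stand-in for your rule's evaluation logic, not code that the RDK generates:

::

    import datetime
    import unittest
    from unittest.mock import MagicMock


    def evaluate_sgs(ec2_client):
        # Stand-in for rule logic: one COMPLIANT evaluation per security group.
        evaluations = []
        for sg in ec2_client.describe_security_groups()["SecurityGroups"]:
            evaluations.append(
                {
                    "ComplianceResourceType": "AWS::EC2::SecurityGroup",
                    "ComplianceResourceId": sg["GroupId"],
                    "ComplianceType": "COMPLIANT",
                    "OrderingTimestamp": str(datetime.datetime.now()),
                }
            )
        return evaluations


    class ExampleRuleTest(unittest.TestCase):
        def test_one_evaluation_per_security_group(self):
            # Stub the boto3 client so the test never calls AWS.
            ec2_mock = MagicMock()
            ec2_mock.describe_security_groups.return_value = {
                "SecurityGroups": [{"GroupId": "sg-0123456789abcdef0"}]
            }
            evaluations = evaluate_sgs(ec2_mock)
            self.assertEqual(len(evaluations), 1)
            self.assertEqual(evaluations[0]["ComplianceResourceId"], "sg-0123456789abcdef0")


    if __name__ == "__main__":
        unittest.main()

Returning to the ``modify`` command introduced above, the call looks like this: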
+ +:: + + $ rdk modify MyRule --runtime python3.10 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' + Running modify! + Modified Rule 'MyRule'. Use the `deploy` command to push your changes to AWS. + +Again, on Windows the input parameters would look like:: + + '{\"desiredInstanceType\":\"t2.micro\"}' + +It is worth noting that until you actually call the ``deploy`` command your rule only exists in your working directory, none of the Rule commands discussed thus far actually makes changes to your account. + +Deploy Rule +----------- +Once you have completed your compliance validation code and set your Rule's configuration, you can deploy the Rule to your account using the ``deploy`` command. This will zip up your code (and the other associated code files, if any) into a deployable package (or run a gradle build if you have selected the java8 runtime or run the lambda packaging step from the dotnet CLI if you have selected the dotnetcore1.0 runtime), copy that zip file to S3, and then launch or update a CloudFormation stack that defines your Config Rule, Lambda function, and the necessary permissions and IAM Roles for it to function. Since CloudFormation does not deeply inspect Lambda code objects in S3 to construct its changeset, the ``deploy`` command will also directly update the Lambda function for any subsequent deployments to make sure code changes are propagated correctly. + +:: + + $ rdk deploy MyRule + Running deploy! + Zipping MyRule + Uploading MyRule + Creating CloudFormation Stack for MyRule + Waiting for CloudFormation stack operation to complete... + ... + Waiting for CloudFormation stack operation to complete... + Config deploy complete. + +The exact output will vary depending on Lambda runtime. You can use the --all flag to deploy all of the rules in your working directory. If you used the --generate-lambda-layer flag in rdk init, use the --generated-lambda-layer flag for rdk deploy. + +Deploy Organization Rule +------------------------ +You can also deploy the Rule to your AWS Organization using the ``deploy-organization`` command. +For successful evaluation of custom rules in child accounts, please make sure you do one of the following: + +1. Set ASSUME_ROLE_MODE in Lambda code to True, to get the lambda to assume the Role attached on the Config Service and confirm that the role trusts the master account where the Lambda function is going to be deployed. +2. Set ASSUME_ROLE_MODE in Lambda code to True, to get the lambda to assume a custom role and define an optional parameter with key as ExecutionRoleName and set the value to your custom role name; confirm that the role trusts the master account of the organization where the Lambda function will be deployed. + +:: + + $ rdk deploy-organization MyRule + Running deploy! + Zipping MyRule + Uploading MyRule + Creating CloudFormation Stack for MyRule + Waiting for CloudFormation stack operation to complete... + ... + Waiting for CloudFormation stack operation to complete... + Config deploy complete. + +The exact output will vary depending on Lambda runtime. You can use the --all flag to deploy all of the rules in your working directory. +This command uses 'PutOrganizationConfigRule' API for the rule deployment. If a new account joins an organization, the rule is deployed to that account. When an account leaves an organization, the rule is removed. 
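The ``ASSUME_ROLE_MODE`` behavior described above ultimately comes down to a standard STS assume-role call made from the Lambda function before it evaluates resources in the member account. The sketch below is a simplified illustration of that pattern, not the code the RDK template actually generates; the role ARN is something you would construct from the Config role or your ``ExecutionRoleName`` parameter:

::

    import boto3


    def get_assumed_session(role_arn, session_name="rdk-org-rule-evaluation"):
        # Trade the Lambda's own credentials for temporary credentials in the
        # member account, then build a boto3 session from them.
        credentials = boto3.client("sts").assume_role(
            RoleArn=role_arn,
            RoleSessionName=session_name,
        )["Credentials"]
        return boto3.Session(
            aws_access_key_id=credentials["AccessKeyId"],
            aws_secret_access_key=credentials["SecretAccessKey"],
            aws_session_token=credentials["SessionToken"],
        )

One additional caveat applies to organization rules: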
Deployment of existing organizational AWS Config Rules will only be retried for 7 hours after an account is added to your organization if a recorder is not available. You are expected to create a recorder if one doesn't exist within 7 hours of adding an account to your organization. + +View Logs For Deployed Rule +--------------------------- +Once the Rule has been deployed to AWS you can get the CloudWatch logs associated with your lambda function using the ``logs`` command. + +:: + + $ rdk logs MyRule -n 5 + 2017-11-15 22:59:33 - START RequestId: 96e7639a-ca15-11e7-95a2-b1521890638d Version: $LATEST + 2017-11-15 23:41:13 - REPORT RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Duration: 0.50 ms Billed Duration: 100 ms Memory Size: 256 MB + Max Memory Used: 36 MB + 2017-11-15 23:41:13 - END RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda + 2017-11-15 23:41:13 - Default RDK utility class does not yet support Scheduled Notifications. + 2017-11-15 23:41:13 - START RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Version: $LATEST + +You can use the ``-n`` and ``-f`` command line flags just like the UNIX ``tail`` command to view a larger number of log events and to continuously poll for new events. The latter option can be useful in conjunction with manually initiating Config Evaluations for your deploy Config Rule to make sure it is behaving as expected. + + + +Running the tests +================= + +The `testing` directory contains scripts and buildspec files that I use to run basic functionality tests across a variety of CLI environments (currently Ubuntu linux running python 3.7/3.8/3.9/3.10, and Windows Server running python3.10). If there is interest I can release a CloudFormation template that could be used to build the test environment, let me know if this is something you want! + + +Advanced Features +================= +Cross-Account Deployments +------------------------- +Features have been added to the RDK to facilitate the cross-account deployment pattern that enterprise customers have standardized on for custom Config Rules. A cross-account architecture is one in which the Lambda functions are deployed to a single central "Compliance" account (which may be the same as a central "Security" account), and the Config Rules are deployed to any number of "Satellite" accounts that are used by other teams or departments. This gives the compliance team confidence that their Rule logic cannot be tampered with and makes it much easier for them to modify rule logic without having to go through a complex deployment process to potentially hundreds of AWS accounts. The cross-account pattern uses two advanced RDK features - functions-only deployments and the `create-rule-template` command. + +**Function-Only Deployment** + +By using the `-f` or `--functions-only` flag on the `deploy` command the RDK will deploy only the necessary Lambda Functions, Lambda Execution Role, and Lambda Permissions to the account specified by the execution credentials. It accomplishes this by batching up all of the Lambda function CloudFormation snippets for the selected Rule(s) into a single dynamically generated template and deploy that CloudFormation template. One consequence of this is that subsequent deployments that specify a different set of Rules for the same stack name will update that CloudFormation stack, and any Rules that were included in the first deployment but not in the second will be removed. 
You can use the `--stack-name` parameter to override the default CloudFormation stack name if you need to manage different subsets of your Lambda Functions independently. The intended usage is to deploy the functions for all of the Config rules in the Security/Compliance account, which can be done simply by using `rdk deploy -f --all` from your working directory. + +**`create-rule-template` command** + +This command generates a CloudFormation template that defines the AWS Config rules themselves, along with the Config Role, Config data bucket, Configuration Recorder, and Delivery channel necessary for the Config rules to work in a satellite account. You must specify the file name for the generated template using the `--output-file` or `o` command line flags. The generated template takes a single parameter of the AccountID of the central compliance account that contains the Lambda functions that will back your custom Config Rules. The generated template can be deployed in the desired satellite accounts through any of the means that you can deploy any other CloudFormation template, including the console, the CLI, as a CodePipeline task, or using StackSets. The `create-rule-template` command takes all of the standard arguments for selecting Rules to include in the generated template, including lists of individual Rule names, an `--all` flag, or using the RuleSets feature described below. + +:: + + $ rdk create-rule-template -o remote-rule-template.json --all + Generating CloudFormation template! + CloudFormation template written to remote-rule-template.json + + +Disable the supported resource types check +------------------------------------------ +It is now possible to define a resource type that is not yet supported by rdk. To disable the supported resource check use the optional flag '--skip-supported-resource-check' during the create command. + +:: + + $ rdk create MyRule --runtime python3.8 --resource-types AWS::New::ResourceType --skip-supported-resource-check + 'AWS::New::ResourceType' not found in list of accepted resource types. + Skip-Supported-Resource-Check Flag set (--skip-supported-resource-check), ignoring missing resource type error. + Running create! + Local Rule files created. + +Custom Lambda Function Name +--------------------------- +As of version 0.7.14, instead of defaulting the lambda function names to 'RDK-Rule-Function-' it is possible to customize the name for the Lambda function to any 64 characters string as per Lambda's naming standards using the optional '--custom-lambda-name' flag while performing rdk create. This opens up new features like : + +1. Longer config rule name. +2. Custom lambda function naming as per personal or enterprise standards. + +:: + + $ rdk create MyLongerRuleName --runtime python3.8 --resource-types AWS::EC2::Instance --custom-lambda-name custom-prefix-for-MyLongerRuleName + Running create! + Local Rule files created. + +The above example would create files with config rule name as 'MyLongerRuleName' and lambda function with the name 'custom-prefix-for-MyLongerRuleName' instead of 'RDK-Rule-Function-MyLongerRuleName' + +RuleSets +-------- +New as of version 0.3.11, it is possible to add RuleSet tags to rules that can be used to deploy and test groups of rules together. Rules can belong to multiple RuleSets, and RuleSet membership is stored only in the parameters.json metadata. 
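If you want to inspect RuleSet membership outside of the RDK itself, it is just JSON in each rule's ``parameters.json``. The helper below is purely illustrative (it is not part of the RDK), and it assumes the metadata keeps an optional ``RuleSets`` list under the ``Parameters`` object; check one of your own generated files before relying on that layout:

::

    import json
    from pathlib import Path


    def rules_in_ruleset(working_dir, ruleset):
        # Walk each rule directory and collect the rules tagged with the RuleSet.
        matches = []
        for params_file in Path(working_dir).glob("*/parameters.json"):
            params = json.loads(params_file.read_text()).get("Parameters", {})
            if ruleset in params.get("RuleSets", []):
                matches.append(params_file.parent.name)
        return sorted(matches)


    print(rules_in_ruleset(".", "AnotherRuleSet"))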
The `deploy`, `create-rule-template`, and `test-local` commands are RuleSet-aware such that a RuleSet can be passed in as the target instead of `--all` or a specific named Rule. + +A comma-delimited list of RuleSets can be added to a Rule when you create it (using the `--rulesets` flag), as part of a `modify` command, or using new `ruleset` subcommands to add or remove individual rules from a RuleSet. + +Running `rdk rulesets list` will display a list of the RuleSets currently defined across all of the Rules in the working directory + +:: + + rdk-dev $ rdk rulesets list + RuleSets: AnotherRuleSet MyNewSet + +Naming a specific RuleSet will list all of the Rules that are part of that RuleSet. + +:: + + rdk-dev $ rdk rulesets list AnotherRuleSet + Rules in AnotherRuleSet : RSTest + +Rules can be added to or removed from RuleSets using the `add` and `remove` subcommands: + +:: + + rdk-dev $ rdk rulesets add MyNewSet RSTest + RSTest added to RuleSet MyNewSet + + rdk-dev $ rdk rulesets remove AnotherRuleSet RSTest + RSTest removed from RuleSet AnotherRuleSet + +RuleSets are a convenient way to maintain a single repository of Config Rules that may need to have subsets of them deployed to different environments. For example your development environment may contain some of the Rules that you run in Production but not all of them; RuleSets gives you a way to identify and selectively deploy the appropriate Rules to each environment. + +Managed Rules +------------- +The RDK is able to deploy AWS Managed Rules. + +To do so, create a rule using "rdk create" and provide a valid SourceIdentifier via the --source-identifier CLI option. The list of Managed Rules can be found here: https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html, and note that the Identifier can be obtained by replacing the dashes with underscores and using all capitals (for example, the "guardduty-enabled-centralized" rule has the SourceIdentifier "GUARDDUTY_ENABLED_CENTRALIZED"). Just like custom Rules you will need to specify source events and/or a maximum evaluation frequency, and also pass in any Rule parameters. The resulting Rule directory will contain only the parameters.json file, but using `rdk deploy` or `rdk create-rule-template` can be used to deploy the Managed Rule like any other Custom Rule. + +Deploying Rules Across Multiple Regions +--------------------------------------- +The RDK is able to run init/deploy/undeploy across multiple regions with a `rdk -f -t ` + +If no region group is specified, rdk will deploy to the `default` region set + +To create a sample starter region group, run `rdk create-region-set` to specify the filename, add the `-o ` this will create a region set with the following tests and regions `"default":["us-east-1","us-west-1","eu-north-1","ap-east-1"],"aws-cn-region-set":["cn-north-1","cn-northwest-1"]` + +Using RDK to Generate a Lambda Layer in a region (Python3) +---------------------------------------------------------- +By default `rdk init --generate-lambda-layer` will generate an rdklib lambda layer while running init in whatever region it is run, to force re-generation of the layer, run `rdk init --generate-lambda-layer` again over a region + +To use this generated lambda layer, add the flag `--generated-lambda-layer` when running `rdk deploy`. 
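As for the region-set file used by the multi-region commands above, it is a YAML mapping of set names to region lists (rdk reads it with ``yaml.safe_load``, as the code changes in this patch show). The snippet below simply writes out the default contents mentioned earlier; ``rdk create-region-set`` produces an equivalent starter file, so this is only to show the expected shape:

::

    import yaml

    region_sets = {
        "default": ["us-east-1", "us-west-1", "eu-north-1", "ap-east-1"],
        "aws-cn-region-set": ["cn-north-1", "cn-northwest-1"],
    }

    # Produces the kind of file passed to `rdk -f regions.yaml ...`
    with open("regions.yaml", "w") as f:
        yaml.safe_dump(region_sets, f, default_flow_style=False)

A multi-region deploy can then combine the region file with the layer flag described above.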
For example: `rdk -f regions.yaml deploy LP3_TestRule_P39_lib --generated-lambda-layer` + +If you created layer with a custom name (by running `rdk init --custom-lambda-layer`, add a similar `custom-lambda-layer` flag when running deploy. + +Contributing +============ + +email us at rdk-maintainers@amazon.com if you have any questions. We are happy to help and discuss. + +Contacts +======== +* **Ricky Chau** - `rickychau2780 `_ - *current maintainer* +* **Benjamin Morris** - `bmorrissirromb `_ - *current maintainer* +* **Mark Beacom** - `mbeacom `_ - *current maintainer* +* **Julio Delgado Jr** - `tekdj7 `_ - *current maintainer* + +Past Contributors +================= +* **Michael Borchert** - *Orignal Python version* +* **Jonathan Rault** - *Orignal Design, testing, feedback* +* **Greg Kim and Chris Gutierrez** - *Initial work and CI definitions* +* **Henry Huang** - *Original CFN templates and other code* +* **Santosh Kumar** - *maintainer* +* **Jose Obando** - *maintainer* +* **Jarrett Andrulis** - `jarrettandrulis `_ - *maintainer* +* **Sandeep Batchu** - `batchus `_ - *maintainer* + +License +======= + +This project is licensed under the Apache 2.0 License + +Acknowledgments +=============== + +* the boto3 team makes all of this magic possible. + + +Link +==== + +* to view example of rules built with the RDK: https://github.com/awslabs/aws-config-rules/tree/master/python diff --git a/pyproject.toml b/pyproject.toml index 2f7948b0..9b31317a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,7 @@ classifiers = [ "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", ] include = [ "README.md", @@ -46,6 +47,8 @@ include = [ "rdk/template/runtime/python3.8-lib/*", "rdk/template/runtime/python3.9/*", "rdk/template/runtime/python3.9-lib/*", + "rdk/template/runtime/python3.10/*", + "rdk/template/runtime/python3.10-lib/*", "rdk/template/runtime/dotnetcore1.0/*", "rdk/template/runtime/dotnetcore1.0/bin/*", "rdk/template/runtime/dotnetcore1.0/obj/*", diff --git a/rdk-workshop/instructions.md b/rdk-workshop/instructions.md index 0bbafc2a..210f13c5 100644 --- a/rdk-workshop/instructions.md +++ b/rdk-workshop/instructions.md @@ -113,7 +113,7 @@ Note: It might take up to 2 hours to get the information about the CIS benchmark ## (Optional) Going further 7. Discover all the available [Managed Config Rules](https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html). -8. Navigate to [AWS System Manager Automation Documents](https://eu-west-1.console.aws.amazon.com/systems-manager/documents?region=eu-west-1) to discover all existing remediation actions. +8. Navigate to [AWS System Manager Automation Documents](https://us-east-1.console.aws.amazon.com/systems-manager/documents?region=us-east-1) to discover all existing remediation actions. # Lab 2: Writing Your First Config Rule diff --git a/rdk/rdk.py b/rdk/rdk.py index cebffb57..12052e51 100644 --- a/rdk/rdk.py +++ b/rdk/rdk.py @@ -1,10 +1,16 @@ # Copyright 2017-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +# Licensed under the Apache License, Version 2.0 (the "License"). +# +# You may not use this file except in compliance with the License. 
A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # -# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +# or in the "license" file accompanying this file. +# +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# +# See the License for the specific language governing permissions and limitations under the License. import argparse import base64 import fileinput @@ -69,12 +75,17 @@ "sa-east-1": "5", } -RDKLIB_LAYER_SAR_ID = "arn:aws:serverlessrepo:ap-southeast-1:711761543063:applications/rdklib" +RDKLIB_LAYER_SAR_ID = ( + "arn:aws:serverlessrepo:ap-southeast-1:711761543063:applications/rdklib" +) RDKLIB_ARN_STRING = "arn:aws:lambda:{region}:711761543063:layer:rdklib-layer:{version}" -PARALLEL_COMMAND_THROTTLE_PERIOD = 2 # 2 seconds, used in running commands in parallel over multiple regions +PARALLEL_COMMAND_THROTTLE_PERIOD = ( + 2 # 2 seconds, used in running commands in parallel over multiple regions +) -# this need to be update whenever config service supports more resource types : https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html +# This need to be update whenever config service supports more resource types +# See: https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html accepted_resource_types = [ "AWS::AccessAnalyzer::Analyzer", "AWS::ACM::Certificate", @@ -372,7 +383,9 @@ { "Sid": "REMOTE", "Effect": "Allow", - "Principal": {"AWS": {"Fn::Sub": "arn:${AWS::Partition}:iam::${LambdaAccountId}:root"}}, + "Principal": { + "AWS": {"Fn::Sub": "arn:${AWS::Partition}:iam::${LambdaAccountId}:root"} + }, "Action": "sts:AssumeRole", }, ], @@ -383,7 +396,9 @@ { "Effect": "Allow", "Action": "s3:PutObject*", - "Resource": {"Fn::Sub": "arn:${AWS::Partition}:s3:::${ConfigBucket}/AWSLogs/${AWS::AccountId}/*"}, + "Resource": { + "Fn::Sub": "arn:${AWS::Partition}:s3:::${ConfigBucket}/AWSLogs/${AWS::AccountId}/*" + }, "Condition": {"StringLike": {"s3:x-amz-acl": "bucket-owner-full-control"}}, }, { @@ -404,10 +419,18 @@ def get_command_parser(): # formatter_class=argparse.RawDescriptionHelpFormatter, description="The RDK is a command-line utility for authoring, deploying, and testing custom AWS Config rules." ) - parser.add_argument("-p", "--profile", help="[optional] indicate which Profile to use.") - parser.add_argument("-k", "--access-key-id", help="[optional] Access Key ID to use.") - parser.add_argument("-s", "--secret-access-key", help="[optional] Secret Access Key to use.") - parser.add_argument("-r", "--region", help="Select the region to run the command in.") + parser.add_argument( + "-p", "--profile", help="[optional] indicate which Profile to use." + ) + parser.add_argument( + "-k", "--access-key-id", help="[optional] Access Key ID to use." + ) + parser.add_argument( + "-s", "--secret-access-key", help="[optional] Secret Access Key to use." + ) + parser.add_argument( + "-r", "--region", help="Select the region to run the command in." 
+ ) parser.add_argument( "-f", "--region-file", @@ -452,7 +475,11 @@ def get_command_parser(): help="Run `rdk --help` to see command-specific arguments.", ) parser.add_argument( - "-v", "--version", help="Display the version of this tool", action="version", version="%(prog)s " + MY_VERSION + "-v", + "--version", + help="Display the version of this tool", + action="version", + version="%(prog)s " + MY_VERSION, ) return parser @@ -534,7 +561,9 @@ def get_rule_parser(is_required, command): + command + " the Rule and metadata.", ) - parser.add_argument("rulename", metavar="", help="Rule name to create/modify") + parser.add_argument( + "rulename", metavar="", help="Rule name to create/modify" + ) runtime_group = parser.add_mutually_exclusive_group() runtime_group.add_argument( "-R", @@ -542,7 +571,6 @@ def get_rule_parser(is_required, command): required=False, help="Runtime for lambda function", choices=[ - "nodejs6.10", "java8", "python3.7", "python3.7-lib", @@ -550,16 +578,23 @@ def get_rule_parser(is_required, command): "python3.8-lib", "python3.9", "python3.9-lib", - "dotnetcore1.0", - "dotnetcore2.0", + "python3.10", + "python3.10-lib", ], metavar="", ) runtime_group.add_argument( - "--source-identifier", required=False, help="[optional] Used only for creating Managed Rules." + "--source-identifier", + required=False, + help="[optional] Used only for creating Managed Rules.", ) - parser.add_argument("-l", "--custom-lambda-name", required=False, help="[optional] Provide custom lambda name") - parser.set_defaults(runtime="python3.9-lib") + parser.add_argument( + "-l", + "--custom-lambda-name", + required=False, + help="[optional] Provide custom lambda name", + ) + parser.set_defaults(runtime="python3.10-lib") parser.add_argument( "-r", "--resource-types", @@ -571,17 +606,41 @@ def get_rule_parser(is_required, command): "--maximum-frequency", required=False, help="[optional] Maximum execution frequency for scheduled Rules", - choices=["One_Hour", "Three_Hours", "Six_Hours", "Twelve_Hours", "TwentyFour_Hours"], + choices=[ + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours", + ], + ) + parser.add_argument( + "-i", + "--input-parameters", + help="[optional] JSON for required Config parameters.", + ) + parser.add_argument( + "--optional-parameters", help="[optional] JSON for optional Config parameters." + ) + parser.add_argument( + "--tags", + help="[optional] JSON for tags to be applied to all CFN created resources.", + ) + parser.add_argument( + "-s", + "--rulesets", + required=False, + help="[optional] comma-delimited list of RuleSet names to add this Rule to.", ) - parser.add_argument("-i", "--input-parameters", help="[optional] JSON for required Config parameters.") - parser.add_argument("--optional-parameters", help="[optional] JSON for optional Config parameters.") - parser.add_argument("--tags", help="[optional] JSON for tags to be applied to all CFN created resources.") parser.add_argument( - "-s", "--rulesets", required=False, help="[optional] comma-delimited list of RuleSet names to add this Rule to." + "--remediation-action", + required=False, + help="[optional] SSM document for remediation.", ) - parser.add_argument("--remediation-action", required=False, help="[optional] SSM document for remediation.") parser.add_argument( - "--remediation-action-version", required=False, help="[optional] SSM document version for remediation action." 
+ "--remediation-action-version", + required=False, + help="[optional] SSM document version for remediation action.", ) parser.add_argument( "--auto-remediate", @@ -595,7 +654,9 @@ def get_rule_parser(is_required, command): help="[optional] Number of times to retry automated remediation.", ) parser.add_argument( - "--auto-remediation-retry-time", required=False, help="[optional] Duration of automated remediation retries." + "--auto-remediation-retry-time", + required=False, + help="[optional] Duration of automated remediation retries.", ) parser.add_argument( "--remediation-concurrent-execution-percent", @@ -646,13 +707,27 @@ def get_deployment_parser(ForceArgument=False, Command="deploy"): parser = argparse.ArgumentParser( prog="rdk " + Command, - description="Used to " + Command + " the Config Rule " + direction + " the target account.", + description="Used to " + + Command + + " the Config Rule " + + direction + + " the target account.", + ) + parser.add_argument( + "rulename", + metavar="", + nargs="*", + help="Rule name(s) to deploy. Rule(s) will be pushed to AWS.", + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="All rules in the working directory will be deployed.", ) parser.add_argument( - "rulename", metavar="", nargs="*", help="Rule name(s) to deploy. Rule(s) will be pushed to AWS." + "-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names" ) - parser.add_argument("--all", "-a", action="store_true", help="All rules in the working directory will be deployed.") - parser.add_argument("-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names") parser.add_argument( "-f", "--functions-only", @@ -736,20 +811,36 @@ def get_deployment_parser(ForceArgument=False, Command="deploy"): return parser -def get_deployment_organization_parser(ForceArgument=False, Command="deploy-organization"): +def get_deployment_organization_parser( + ForceArgument=False, Command="deploy-organization" +): direction = "to" if Command == "undeploy": direction = "from" parser = argparse.ArgumentParser( prog="rdk " + Command, - description="Used to " + Command + " the Config Rule " + direction + " the target Organization.", + description="Used to " + + Command + + " the Config Rule " + + direction + + " the target Organization.", ) parser.add_argument( - "rulename", metavar="", nargs="*", help="Rule name(s) to deploy. Rule(s) will be pushed to AWS." + "rulename", + metavar="", + nargs="*", + help="Rule name(s) to deploy. Rule(s) will be pushed to AWS.", + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="All rules in the working directory will be deployed.", + ) + parser.add_argument( + "-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names" ) - parser.add_argument("--all", "-a", action="store_true", help="All rules in the working directory will be deployed.") - parser.add_argument("-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names") parser.add_argument( "-f", "--functions-only", @@ -834,13 +925,25 @@ def get_deployment_organization_parser(ForceArgument=False, Command="deploy-orga def get_export_parser(ForceArgument=False, Command="export"): - parser = argparse.ArgumentParser( - prog="rdk " + Command, description="Used to " + Command + " the Config Rule to terraform file." 
+ prog="rdk " + Command, + description="Used to " + Command + " the Config Rule to terraform file.", + ) + parser.add_argument( + "rulename", + metavar="", + nargs="*", + help="Rule name(s) to export to a file.", + ) + parser.add_argument( + "-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names" + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="All rules in the working directory will be deployed.", ) - parser.add_argument("rulename", metavar="", nargs="*", help="Rule name(s) to export to a file.") - parser.add_argument("-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names") - parser.add_argument("--all", "-a", action="store_true", help="All rules in the working directory will be deployed.") parser.add_argument( "--lambda-layers", required=False, @@ -878,8 +981,16 @@ def get_export_parser(ForceArgument=False, Command="export"): required=False, help="[optional] Lambda Layer ARN that contains the desired rdklib. Note that Lambda Layers are region-specific.", ) - parser.add_argument("-v", "--version", required=True, help="Terraform version", choices=["0.11", "0.12"]) - parser.add_argument("-f", "--format", required=True, help="Export Format", choices=["terraform"]) + parser.add_argument( + "-v", + "--version", + required=True, + help="Terraform version", + choices=["0.11", "0.12"], + ) + parser.add_argument( + "-f", "--format", required=True, help="Export Format", choices=["terraform"] + ) parser.add_argument( "-g", "--generated-lambda-layer", @@ -897,15 +1008,36 @@ def get_export_parser(ForceArgument=False, Command="export"): def get_test_parser(command): - parser = argparse.ArgumentParser(prog="rdk " + command, description="Used to run tests on your Config Rule code.") - parser.add_argument("rulename", metavar="[,,...]", nargs="*", help="Rule name(s) to test") + parser = argparse.ArgumentParser( + prog="rdk " + command, description="Used to run tests on your Config Rule code." + ) parser.add_argument( - "--all", "-a", action="store_true", help="Test will be run against all rules in the working directory." + "rulename", + metavar="[,,...]", + nargs="*", + help="Rule name(s) to test", + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="Test will be run against all rules in the working directory.", + ) + parser.add_argument( + "--test-ci-json", "-j", help="[optional] JSON for test CI for testing." + ) + parser.add_argument( + "--test-ci-types", "-t", help="[optional] CI type to use for testing." 
+ ) + parser.add_argument( + "--verbose", "-v", action="store_true", help="[optional] Enable full log output" + ) + parser.add_argument( + "-s", + "--rulesets", + required=False, + help="[optional] comma-delimited list of RuleSet names", ) - parser.add_argument("--test-ci-json", "-j", help="[optional] JSON for test CI for testing.") - parser.add_argument("--test-ci-types", "-t", help="[optional] CI type to use for testing.") - parser.add_argument("--verbose", "-v", action="store_true", help="[optional] Enable full log output") - parser.add_argument("-s", "--rulesets", required=False, help="[optional] comma-delimited list of RuleSet names") return parser @@ -933,11 +1065,21 @@ def get_logs_parser(): usage="rdk logs [-n/--number NUMBER] [-f/--follow]", description="Displays CloudWatch logs for the Lambda Function for the specified Rule.", ) - parser.add_argument("rulename", metavar="", help="Rule whose logs will be displayed") parser.add_argument( - "-f", "--follow", action="store_true", help="[optional] Continuously poll Lambda logs and write to stdout." + "rulename", metavar="", help="Rule whose logs will be displayed" + ) + parser.add_argument( + "-f", + "--follow", + action="store_true", + help="[optional] Continuously poll Lambda logs and write to stdout.", + ) + parser.add_argument( + "-n", + "--number", + default=3, + help="[optional] Number of previous logged events to display.", ) - parser.add_argument("-n", "--number", default=3, help="[optional] Number of previous logged events to display.") return parser @@ -949,7 +1091,9 @@ def get_rulesets_parser(): ) parser.add_argument("subcommand", help="One of list, add, or remove") parser.add_argument("ruleset", nargs="?", help="Name of RuleSet") - parser.add_argument("rulename", nargs="?", help="Name of Rule to be added or removed") + parser.add_argument( + "rulename", nargs="?", help="Name of Rule to be added or removed" + ) return parser @@ -1004,10 +1148,15 @@ def get_create_rule_template_parser(): def get_create_region_set_parser(): parser = argparse.ArgumentParser( - prog="rdk create-region-set", description="Outputs a YAML region set file for multi-region deployment." 
+ prog="rdk create-region-set", + description="Outputs a YAML region set file for multi-region deployment.", ) parser.add_argument( - "-o", "--output-file", required=False, default="regions", help="Filename of the generated region set file" + "-o", + "--output-file", + required=False, + default="regions", + help="Filename of the generated region set file", ) return parser @@ -1020,7 +1169,9 @@ def parse_region_file(args): region_text = yaml.safe_load(open(args.region_file, "r")) return region_text[region_set] except Exception: - raise SyntaxError(f"Error reading regions: {region_set} in file: {args.region_file}") + raise SyntaxError( + f"Error reading regions: {region_set} in file: {args.region_file}" + ) def run_multi_region(args): @@ -1069,7 +1220,9 @@ def init(self): config_bucket_exists = False if self.args.config_bucket_exists_in_another_account: - print(f"[{my_session.region_name}]: Skipping Config Bucket check due to command line args") + print( + f"[{my_session.region_name}]: Skipping Config Bucket check due to command line args" + ) config_bucket_exists = True config_bucket_name = config_bucket_prefix + "-" + account_id @@ -1082,9 +1235,14 @@ def init(self): control_tower = True if self.args.generate_lambda_layer: - lambda_layer_version = self.__get_existing_lambda_layer(my_session, layer_name=self.args.custom_layer_name) + lambda_layer_version = self.__get_existing_lambda_layer( + my_session, layer_name=self.args.custom_layer_name + ) if lambda_layer_version: - print(f"[{my_session.region_name}]: Found Version: " + lambda_layer_version) + print( + f"[{my_session.region_name}]: Found Version: " + + lambda_layer_version + ) if self.args.generate_lambda_layer: print( f"[{my_session.region_name}]: --generate-lambda-layer Flag received, forcing update of the Lambda Layer in {my_session.region_name}" @@ -1094,8 +1252,12 @@ def init(self): f"[{my_session.region_name}]: Lambda Layer not found in {my_session.region_name}. Creating one now" ) # Try to generate lambda layer with ServerlessAppRepo, manually generate if impossible - self.__create_new_lambda_layer(my_session, layer_name=self.args.custom_layer_name) - lambda_layer_version = self.__get_existing_lambda_layer(my_session, layer_name=self.args.custom_layer_name) + self.__create_new_lambda_layer( + my_session, layer_name=self.args.custom_layer_name + ) + lambda_layer_version = self.__get_existing_lambda_layer( + my_session, layer_name=self.args.custom_layer_name + ) # Check to see if the ConfigRecorder has been created. 
recorders = my_config.describe_configuration_recorders() @@ -1103,13 +1265,18 @@ def init(self): config_recorder_exists = True config_recorder_name = recorders["ConfigurationRecorders"][0]["name"] config_role_arn = recorders["ConfigurationRecorders"][0]["roleARN"] - print(f"[{my_session.region_name}]: Found Config Recorder: " + config_recorder_name) + print( + f"[{my_session.region_name}]: Found Config Recorder: " + + config_recorder_name + ) print(f"[{my_session.region_name}]: Found Config Role: " + config_role_arn) delivery_channels = my_config.describe_delivery_channels() if len(delivery_channels["DeliveryChannels"]) > 0: delivery_channel_exists = True - config_bucket_name = delivery_channels["DeliveryChannels"][0]["s3BucketName"] + config_bucket_name = delivery_channels["DeliveryChannels"][0][ + "s3BucketName" + ] my_s3 = my_session.client("s3") @@ -1119,18 +1286,26 @@ def init(self): bucket_exists = False for bucket in response["Buckets"]: if bucket["Name"] == config_bucket_name: - print(f"[{my_session.region_name}]: Found Bucket: " + config_bucket_name) + print( + f"[{my_session.region_name}]: Found Bucket: " + + config_bucket_name + ) config_bucket_exists = True bucket_exists = True if not bucket_exists: - print(f"[{my_session.region_name}]: Creating Config bucket " + config_bucket_name) + print( + f"[{my_session.region_name}]: Creating Config bucket " + + config_bucket_name + ) if my_session.region_name == "us-east-1": my_s3.create_bucket(Bucket=config_bucket_name) else: my_s3.create_bucket( Bucket=config_bucket_name, - CreateBucketConfiguration={"LocationConstraint": my_session.region_name}, + CreateBucketConfiguration={ + "LocationConstraint": my_session.region_name + }, ) if not config_role_arn: @@ -1149,29 +1324,49 @@ def init(self): elif partition == "aws-cn": partition_url = ".com.cn" assume_role_policy_template = open( - os.path.join(path.dirname(__file__), "template", assume_role_policy_file), "r" + os.path.join( + path.dirname(__file__), "template", assume_role_policy_file + ), + "r", ).read() - assume_role_policy = json.loads(assume_role_policy_template.replace("${PARTITIONURL}", partition_url)) + assume_role_policy = json.loads( + assume_role_policy_template.replace( + "${PARTITIONURL}", partition_url + ) + ) assume_role_policy["Statement"].append( - {"Effect": "Allow", "Principal": {"AWS": str(account_id)}, "Action": "sts:AssumeRole"} + { + "Effect": "Allow", + "Principal": {"AWS": str(account_id)}, + "Action": "sts:AssumeRole", + } ) my_iam.create_role( - RoleName=config_role_name, AssumeRolePolicyDocument=json.dumps(assume_role_policy), Path="/rdk/" + RoleName=config_role_name, + AssumeRolePolicyDocument=json.dumps(assume_role_policy), + Path="/rdk/", ) # attach role policy my_iam.attach_role_policy( - RoleName=config_role_name, PolicyArn="arn:" + partition + ":iam::aws:policy/service-role/AWS_ConfigRole" + RoleName=config_role_name, + PolicyArn="arn:" + + partition + + ":iam::aws:policy/service-role/AWS_ConfigRole", ) my_iam.attach_role_policy( - RoleName=config_role_name, PolicyArn="arn:" + partition + ":iam::aws:policy/ReadOnlyAccess" + RoleName=config_role_name, + PolicyArn="arn:" + partition + ":iam::aws:policy/ReadOnlyAccess", ) policy_template = open( - os.path.join(path.dirname(__file__), "template", delivery_permission_policy_file), "r" + os.path.join( + path.dirname(__file__), "template", delivery_permission_policy_file + ), + "r", ).read() - delivery_permissions_policy = policy_template.replace("${ACCOUNTID}", account_id).replace( - "${PARTITION}", 
partition - ) + delivery_permissions_policy = policy_template.replace( + "${ACCOUNTID}", account_id + ).replace("${PARTITION}", partition) my_iam.put_role_policy( RoleName=config_role_name, PolicyName="ConfigDeliveryPermissions", @@ -1184,30 +1379,42 @@ def init(self): # create or update config recorder if not config_role_arn: - config_role_arn = "arn:" + partition + ":iam::" + account_id + ":role/rdk/config-role" + config_role_arn = ( + "arn:" + partition + ":iam::" + account_id + ":role/rdk/config-role" + ) if not control_tower: my_config.put_configuration_recorder( ConfigurationRecorder={ "name": config_recorder_name, "roleARN": config_role_arn, - "recordingGroup": {"allSupported": True, "includeGlobalResourceTypes": True}, + "recordingGroup": { + "allSupported": True, + "includeGlobalResourceTypes": True, + }, } ) if not delivery_channel_exists: # create delivery channel - print(f"[{my_session.region_name}]: Creating delivery channel to bucket " + config_bucket_name) + print( + f"[{my_session.region_name}]: Creating delivery channel to bucket " + + config_bucket_name + ) my_config.put_delivery_channel( DeliveryChannel={ "name": "default", "s3BucketName": config_bucket_name, - "configSnapshotDeliveryProperties": {"deliveryFrequency": "Six_Hours"}, + "configSnapshotDeliveryProperties": { + "deliveryFrequency": "Six_Hours" + }, } ) # start config recorder - my_config.start_configuration_recorder(ConfigurationRecorderName=config_recorder_name) + my_config.start_configuration_recorder( + ConfigurationRecorderName=config_recorder_name + ) print(f"[{my_session.region_name}]: Config Service is ON") else: print( @@ -1217,26 +1424,39 @@ def init(self): print(f"[{my_session.region_name}]: Config setup complete.") # create code bucket - code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) response = my_s3.list_buckets() bucket_exists = False for bucket in response["Buckets"]: if bucket["Name"] == code_bucket_name: bucket_exists = True - print(f"[{my_session.region_name}]: Found code bucket: " + code_bucket_name) + print( + f"[{my_session.region_name}]: Found code bucket: " + + code_bucket_name + ) if not bucket_exists: if self.args.skip_code_bucket_creation: - print(f"[{my_session.region_name}]: Skipping Code Bucket creation due to command line args") + print( + f"[{my_session.region_name}]: Skipping Code Bucket creation due to command line args" + ) else: - print(f"[{my_session.region_name}]: Creating Code bucket " + code_bucket_name) + print( + f"[{my_session.region_name}]: Creating Code bucket " + + code_bucket_name + ) # Consideration for us-east-1 S3 API if my_session.region_name == "us-east-1": my_s3.create_bucket(Bucket=code_bucket_name) else: my_s3.create_bucket( - Bucket=code_bucket_name, CreateBucketConfiguration={"LocationConstraint": my_session.region_name} + Bucket=code_bucket_name, + CreateBucketConfiguration={ + "LocationConstraint": my_session.region_name + }, ) return 0 @@ -1283,10 +1503,14 @@ def clean(self): try: # First delete the Config Recorder itself. Do we need to stop it first? Let's stop it just to be safe. 
my_config.stop_configuration_recorder( - ConfigurationRecorderName=recorders["ConfigurationRecorders"][0]["name"] + ConfigurationRecorderName=recorders["ConfigurationRecorders"][0][ + "name" + ] ) my_config.delete_configuration_recorder( - ConfigurationRecorderName=recorders["ConfigurationRecorders"][0]["name"] + ConfigurationRecorderName=recorders["ConfigurationRecorders"][0][ + "name" + ] ) except Exception as e: print("Error encountered removing Configuration Recorder: " + str(e)) @@ -1296,13 +1520,21 @@ def clean(self): try: response = iam_client.get_role(RoleName=config_role_name) try: - role_policy_results = iam_client.list_role_policies(RoleName=config_role_name) + role_policy_results = iam_client.list_role_policies( + RoleName=config_role_name + ) for policy_name in role_policy_results["PolicyNames"]: - iam_client.delete_role_policy(RoleName=config_role_name, PolicyName=policy_name) + iam_client.delete_role_policy( + RoleName=config_role_name, PolicyName=policy_name + ) - role_policy_results = iam_client.list_attached_role_policies(RoleName=config_role_name) + role_policy_results = iam_client.list_attached_role_policies( + RoleName=config_role_name + ) for policy in role_policy_results["AttachedPolicies"]: - iam_client.detach_role_policy(RoleName=config_role_name, PolicyArn=policy["PolicyArn"]) + iam_client.detach_role_policy( + RoleName=config_role_name, PolicyArn=policy["PolicyArn"] + ) # Once all policies are detached we should be able to delete the Role. iam_client.delete_role(RoleName=config_role_name) @@ -1315,11 +1547,17 @@ def clean(self): delivery_channels = my_config.describe_delivery_channels() if len(delivery_channels["DeliveryChannels"]) > 0: for delivery_channel in delivery_channels["DeliveryChannels"]: - config_bucket_names.append(delivery_channels["DeliveryChannels"][0]["s3BucketName"]) + config_bucket_names.append( + delivery_channels["DeliveryChannels"][0]["s3BucketName"] + ) try: - my_config.delete_delivery_channel(DeliveryChannelName=delivery_channel["name"]) + my_config.delete_delivery_channel( + DeliveryChannelName=delivery_channel["name"] + ) except Exception as e: - print("Error encountered trying to delete Delivery Channel: " + str(e)) + print( + "Error encountered trying to delete Delivery Channel: " + str(e) + ) if config_bucket_names: # empty and then delete the config bucket. @@ -1353,7 +1591,9 @@ def clean(self): print("Error encountered deleting Functions stack: " + str(e)) # Delete the code bucket, if one exists. 
- code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) try: code_bucket = my_session.resource("s3").Bucket(code_bucket_name) code_bucket.objects.all().delete() @@ -1380,16 +1620,14 @@ def create(self): extension_mapping = { "java8": ".java", - "python3.6-managed": ".py", "python3.7": ".py", "python3.7-lib": ".py", "python3.8": ".py", "python3.8-lib": ".py", "python3.9": ".py", "python3.9-lib": ".py", - "nodejs6.10": ".js", - "dotnetcore1.0": "cs", - "dotnetcore2.0": "cs", + "python3.10": ".py", + "python3.10-lib": ".py", } if self.args.runtime not in extension_mapping: print("rdk does not support that runtime yet.") @@ -1411,8 +1649,6 @@ def create(self): # copy rule template into rule directory if self.args.runtime == "java8": self.__create_java_rule() - elif self.args.runtime in ["dotnetcore1.0", "dotnetcore2.0"]: - self.__create_dotnet_rule() else: src = os.path.join( path.dirname(__file__), @@ -1430,18 +1666,32 @@ def create(self): shutil.copyfile(src, dst) f = fileinput.input(files=dst, inplace=True) for line in f: - if self.args.runtime in ["python3.7-lib", "python3.8-lib", "python3.9-lib"]: + if self.args.runtime in [ + "python3.7-lib", + "python3.8-lib", + "python3.9-lib", + "python3.10-lib", + ]: if self.args.resource_types: applicable_resource_list = "" - for resource_type in self.args.resource_types.split(","): - applicable_resource_list += "'" + resource_type + "', " + for resource_type in self.args.resource_types.split( + "," + ): + applicable_resource_list += ( + "'" + resource_type + "', " + ) print( line.replace("<%RuleName%>", self.args.rulename) .replace( "<%ApplicableResources1%>", - "\nAPPLICABLE_RESOURCES = [" + applicable_resource_list[:-2] + "]\n", + "\nAPPLICABLE_RESOURCES = [" + + applicable_resource_list[:-2] + + "]\n", ) - .replace("<%ApplicableResources2%>", ", APPLICABLE_RESOURCES"), + .replace( + "<%ApplicableResources2%>", + ", APPLICABLE_RESOURCES", + ), end="", ) else: @@ -1452,7 +1702,9 @@ def create(self): end="", ) else: - print(line.replace("<%RuleName%>", self.args.rulename), end="") + print( + line.replace("<%RuleName%>", self.args.rulename), end="" + ) f.close() src = os.path.join( @@ -1467,12 +1719,16 @@ def create(self): os.getcwd(), rules_dir, self.args.rulename, - self.args.rulename + "_test" + extension_mapping[self.args.runtime], + self.args.rulename + + "_test" + + extension_mapping[self.args.runtime], ) shutil.copyfile(src, dst) f = fileinput.input(files=dst, inplace=True) for line in f: - print(line.replace("<%RuleName%>", self.args.rulename), end="") + print( + line.replace("<%RuleName%>", self.args.rulename), end="" + ) f.close() src = os.path.join( @@ -1548,10 +1804,18 @@ def modify(self): self.args.remediation_concurrent_execution_percent = ssm_controls.get( "ConcurrentExecutionRatePercentage", "" ) - self.args.remediation_error_rate_percent = ssm_controls.get("ErrorPercentage", "") - self.args.remediation_parameters = json.dumps(params["Parameters"]) if params.get("Parameters") else None - self.args.auto_remediation_retry_attempts = params.get("MaximumAutomaticAttempts", "") - self.args.auto_remediation_retry_time = params.get("RetryAttemptSeconds", "") + self.args.remediation_error_rate_percent = ssm_controls.get( + "ErrorPercentage", "" + ) + self.args.remediation_parameters = ( + json.dumps(params["Parameters"]) if params.get("Parameters") else None + ) + self.args.auto_remediation_retry_attempts = params.get( + 
"MaximumAutomaticAttempts", "" + ) + self.args.auto_remediation_retry_time = params.get( + "RetryAttemptSeconds", "" + ) self.args.remediation_action = params.get("TargetId", "") self.args.remediation_action_version = params.get("TargetVersion", "") @@ -1562,7 +1826,11 @@ def modify(self): # Write the parameters to a file in the rule directory. self.__populate_params() - print("Modified Rule '" + self.args.rulename + "'. Use the `deploy` command to push your changes to AWS.") + print( + "Modified Rule '" + + self.args.rulename + + "'. Use the `deploy` command to push your changes to AWS." + ) def undeploy(self): self.__parse_deploy_args(ForceArgument=True) @@ -1570,7 +1838,9 @@ def undeploy(self): if not self.args.force: confirmation = False while not confirmation: - my_input = input("Delete specified Rules and Lambda Functions from your AWS Account? (y/N): ") + my_input = input( + "Delete specified Rules and Lambda Functions from your AWS Account? (y/N): " + ) if my_input.lower() == "y": confirmation = True if my_input.lower() == "n" or my_input == "": @@ -1608,7 +1878,9 @@ def undeploy(self): for rule_name in rule_names: try: - cfn_client.delete_stack(StackName=self.__get_stack_name_from_rule_name(rule_name)) + cfn_client.delete_stack( + StackName=self.__get_stack_name_from_rule_name(rule_name) + ) deleted_stacks.append(self.__get_stack_name_from_rule_name(rule_name)) except ClientError as ce: print( @@ -1621,12 +1893,16 @@ def undeploy(self): + str(e) ) - print(f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete.") + print( + f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete." + ) for stack_name in deleted_stacks: self.__wait_for_cfn_stack(cfn_client, stack_name) - print(f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved.") + print( + f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved." + ) print(f"[{my_session.region_name}]: To re-deploy, use the 'deploy' command.") def undeploy_organization(self): @@ -1635,7 +1911,9 @@ def undeploy_organization(self): if not self.args.force: confirmation = False while not confirmation: - my_input = input("Delete specified Rules and Lambda Functions from your Organization? (y/N): ") + my_input = input( + "Delete specified Rules and Lambda Functions from your Organization? (y/N): " + ) if my_input.lower() == "y": confirmation = True if my_input.lower() == "n" or my_input == "": @@ -1673,7 +1951,9 @@ def undeploy_organization(self): for rule_name in rule_names: try: - cfn_client.delete_stack(StackName=self.__get_stack_name_from_rule_name(rule_name)) + cfn_client.delete_stack( + StackName=self.__get_stack_name_from_rule_name(rule_name) + ) deleted_stacks.append(self.__get_stack_name_from_rule_name(rule_name)) except ClientError as ce: print( @@ -1686,13 +1966,19 @@ def undeploy_organization(self): + str(e) ) - print(f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete.") + print( + f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete." + ) for stack_name in deleted_stacks: self.__wait_for_cfn_stack(cfn_client, stack_name) - print(f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved.") - print(f"[{my_session.region_name}]: To re-deploy, use the 'deploy-organization' command.") + print( + f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved." 
+ ) + print( + f"[{my_session.region_name}]: To re-deploy, use the 'deploy-organization' command." + ) def deploy(self): self.__parse_deploy_args() @@ -1713,7 +1999,9 @@ def deploy(self): if self.args.custom_code_bucket: code_bucket_name = self.args.custom_code_bucket else: - code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) # If we're only deploying the Lambda functions (and role + permissions), branch here. Someday the "main" execution path should use the same generated CFN templates for single-account deployment. if self.args.functions_only: @@ -1741,7 +2029,9 @@ def deploy(self): for rule_name in rule_names: rule_params, cfn_tags = self.__get_rule_parameters(rule_name) if "SourceIdentifier" in rule_params: - print(f"[{my_session.region_name}]: Skipping code packaging for Managed Rule.") + print( + f"[{my_session.region_name}]: Skipping code packaging for Managed Rule." + ) else: s3_dst = self.__upload_function_code( rule_name, rule_params, account_id, my_session, code_bucket_name @@ -1754,7 +2044,12 @@ def deploy(self): config = my_s3_client._client_config config.signature_version = botocore.UNSIGNED template_url = boto3.client("s3", config=config).generate_presigned_url( - "get_object", ExpiresIn=0, Params={"Bucket": code_bucket_name, "Key": self.args.stack_name + ".json"} + "get_object", + ExpiresIn=0, + Params={ + "Bucket": code_bucket_name, + "Key": self.args.stack_name + ".json", + }, ) # Check if stack exists. If it does, update it. If it doesn't, create it. @@ -1763,9 +2058,10 @@ def deploy(self): my_stack = my_cfn.describe_stacks(StackName=self.args.stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for Lambda functions.") + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for Lambda functions." + ) try: - cfn_args = { "StackName": self.args.stack_name, "TemplateURL": template_url, @@ -1785,7 +2081,9 @@ def deploy(self): if e.response["Error"]["Code"] == "ValidationError": if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. - print(f"[{my_session.region_name}]: No changes to Config Rule configurations.") + print( + f"[{my_session.region_name}]: No changes to Config Rule configurations." + ) else: # Something unexpected has gone wrong. Emit an error and bail. print(f"[{my_session.region_name}]: {e}") @@ -1797,10 +2095,16 @@ def deploy(self): for rule_name in rule_names: rule_params, cfn_tags = self.__get_rule_parameters(rule_name) my_lambda_arn = self.__get_lambda_arn_for_rule( - rule_name, partition, my_session.region_name, account_id, rule_params + rule_name, + partition, + my_session.region_name, + account_id, + rule_params, ) if "SourceIdentifier" in rule_params: - print(f"[{my_session.region_name}]: Skipping Lambda upload for Managed Rule.") + print( + f"[{my_session.region_name}]: Skipping Lambda upload for Managed Rule." + ) continue print(f"[{my_session.region_name}]: Publishing Lambda code...") @@ -1812,9 +2116,11 @@ def deploy(self): Publish=True, ) print(f"[{my_session.region_name}]: Lambda code updated.") - except ClientError as e: + except ClientError: # If we're in the exception, the stack does not exist and we should create it. 
- print(f"[{my_session.region_name}]: Creating CloudFormation Stack for Lambda Functions.") + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for Lambda Functions." + ) cfn_args = { "StackName": self.args.stack_name, @@ -1850,7 +2156,9 @@ def deploy(self): combined_input_parameters = {} if "InputParameters" in rule_params: - combined_input_parameters.update(json.loads(rule_params["InputParameters"])) + combined_input_parameters.update( + json.loads(rule_params["InputParameters"]) + ) if "OptionalParameters" in rule_params: # Remove empty parameters @@ -1892,65 +2200,112 @@ def deploy(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceIdentifier", "ParameterValue": rule_params["SourceIdentifier"]}, + { + "ParameterKey": "SourceIdentifier", + "ParameterValue": rule_params["SourceIdentifier"], + }, ] my_cfn = my_session.client("cloudformation") if "Remediation" in rule_params: - print(f"[{my_session.region_name}]: Build The CFN Template with Remediation Settings") - cfn_body = os.path.join(path.dirname(__file__), "template", "configManagedRuleWithRemediation.json") + print( + f"[{my_session.region_name}]: Build The CFN Template with Remediation Settings" + ) + cfn_body = os.path.join( + path.dirname(__file__), + "template", + "configManagedRuleWithRemediation.json", + ) template_body = open(cfn_body, "r").read() json_body = json.loads(template_body) - remediation = self.__create_remediation_cloudformation_block(rule_params["Remediation"]) + remediation = self.__create_remediation_cloudformation_block( + rule_params["Remediation"] + ) json_body["Resources"]["Remediation"] = remediation if "SSMAutomation" in rule_params: # Reference the SSM Automation Role Created, if IAM is created - print(f"[{my_session.region_name}]: Building SSM Automation Section") + print( + f"[{my_session.region_name}]: Building SSM Automation Section" + ) ssm_automation = self.__create_automation_cloudformation_block( - rule_params["SSMAutomation"], self.__get_alphanumeric_rule_name(rule_name) + rule_params["SSMAutomation"], + self.__get_alphanumeric_rule_name(rule_name), ) json_body["Resources"][ - self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) ] = ssm_automation if "IAM" in rule_params["SSMAutomation"]: - print(f"[{my_session.region_name}]: Lets Build IAM Role and Policy") + print( + f"[{my_session.region_name}]: Lets Build IAM Role and Policy" + ) # TODO Check For IAM Settings - json_body["Resources"]["Remediation"]["Properties"]["Parameters"]["AutomationAssumeRole"][ - "StaticValue" - ]["Values"] = [ - {"Fn::GetAtt": [self.__get_alphanumeric_rule_name(rule_name + "Role"), "Arn"]} + json_body["Resources"]["Remediation"]["Properties"][ + "Parameters" + ]["AutomationAssumeRole"]["StaticValue"]["Values"] = [ + { + "Fn::GetAtt": [ + self.__get_alphanumeric_rule_name( + rule_name + "Role" + ), + "Arn", + ] + } ] - ssm_iam_role, ssm_iam_policy = self.__create_automation_iam_cloudformation_block( - rule_params["SSMAutomation"], self.__get_alphanumeric_rule_name(rule_name) + ( + ssm_iam_role, + ssm_iam_policy, + ) = self.__create_automation_iam_cloudformation_block( + rule_params["SSMAutomation"], + self.__get_alphanumeric_rule_name(rule_name), ) - json_body["Resources"][self.__get_alphanumeric_rule_name(rule_name + "Role")] = ssm_iam_role + json_body["Resources"][ + self.__get_alphanumeric_rule_name(rule_name + "Role") + ] = 
ssm_iam_role json_body["Resources"][ self.__get_alphanumeric_rule_name(rule_name + "Policy") ] = ssm_iam_policy - print(f"[{my_session.region_name}]: Build Supporting SSM Resources") + print( + f"[{my_session.region_name}]: Build Supporting SSM Resources" + ) resource_depends_on = [ "rdkConfigRule", - self.__get_alphanumeric_rule_name(rule_name + "RemediationAction"), + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ), ] # Builds SSM Document Before Config RUle - json_body["Resources"]["Remediation"]["DependsOn"] = resource_depends_on - json_body["Resources"]["Remediation"]["Properties"]["TargetId"] = { - "Ref": self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") + json_body["Resources"]["Remediation"][ + "DependsOn" + ] = resource_depends_on + json_body["Resources"]["Remediation"]["Properties"][ + "TargetId" + ] = { + "Ref": self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) } try: my_stack_name = self.__get_stack_name_from_rule_name(rule_name) my_stack = my_cfn.describe_stacks(StackName=my_stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for " + + rule_name + ) try: cfn_args = { "StackName": my_stack_name, "TemplateBody": json.dumps(json_body, indent=2), "Parameters": my_params, - "Capabilities": ["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], + "Capabilities": [ + "CAPABILITY_IAM", + "CAPABILITY_NAMED_IAM", + ], } # If no tags key is specified, or if the tags dict is empty @@ -1962,23 +2317,31 @@ def deploy(self): if e.response["Error"]["Code"] == "ValidationError": if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. - print(f"[{my_session.region_name}]: No changes to Config Rule.") + print( + f"[{my_session.region_name}]: No changes to Config Rule." + ) else: # Something unexpected has gone wrong. Emit an error and bail. print(f"[{my_session.region_name}]: {e}") return 1 else: raise - except ClientError as e: + except ClientError: # If we're in the exception, the stack does not exist and we should create it. - print(f"[{my_session.region_name}]: Creating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for " + + rule_name + ) if "Remediation" in rule_params: cfn_args = { "StackName": my_stack_name, "TemplateBody": json.dumps(json_body, indent=2), "Parameters": my_params, - "Capabilities": ["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], + "Capabilities": [ + "CAPABILITY_IAM", + "CAPABILITY_NAMED_IAM", + ], } else: @@ -1999,13 +2362,18 @@ def deploy(self): else: # deploy config rule - cfn_body = os.path.join(path.dirname(__file__), "template", "configManagedRule.json") + cfn_body = os.path.join( + path.dirname(__file__), "template", "configManagedRule.json" + ) try: my_stack_name = self.__get_stack_name_from_rule_name(rule_name) my_stack = my_cfn.describe_stacks(StackName=my_stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for " + + rule_name + ) try: cfn_args = { "StackName": my_stack_name, @@ -2022,7 +2390,9 @@ def deploy(self): if e.response["Error"]["Code"] == "ValidationError": if "No updates are to be performed." 
in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. - print(f"[{my_session.region_name}]: No changes to Config Rule.") + print( + f"[{my_session.region_name}]: No changes to Config Rule." + ) else: # Something unexpected has gone wrong. Emit an error and bail. print(f"[{my_session.region_name}]: {e}") @@ -2031,7 +2401,10 @@ def deploy(self): raise except ClientError as e: # If we're in the exception, the stack does not exist and we should create it. - print(f"[{my_session.region_name}]: Creating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for " + + rule_name + ) cfn_args = { "StackName": my_stack_name, "TemplateBody": open(cfn_body, "r").read(), @@ -2055,20 +2428,31 @@ def deploy(self): print(f"[{my_session.region_name}]: Found Custom Rule.") s3_src = "" - s3_dst = self.__upload_function_code(rule_name, rule_params, account_id, my_session, code_bucket_name) + s3_dst = self.__upload_function_code( + rule_name, rule_params, account_id, my_session, code_bucket_name + ) # create CFN Parameters for Custom Rules lambdaRoleArn = "" if self.args.lambda_role_arn: - print(f"[{my_session.region_name}]: Existing IAM Role provided: " + self.args.lambda_role_arn) + print( + f"[{my_session.region_name}]: Existing IAM Role provided: " + + self.args.lambda_role_arn + ) lambdaRoleArn = self.args.lambda_role_arn elif self.args.lambda_role_name: - print(f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + self.args.lambda_role_name) + print( + f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + + self.args.lambda_role_name + ) arn = f"arn:{partition}:iam::{account_id}:role/{self.args.lambda_role_name}" lambdaRoleArn = arn if self.args.boundary_policy_arn: - print(f"[{my_session.region_name}]: Boundary Policy provided: " + self.args.boundary_policy_arn) + print( + f"[{my_session.region_name}]: Boundary Policy provided: " + + self.args.boundary_policy_arn + ) boundaryPolicyArn = self.args.boundary_policy_arn else: boundaryPolicyArn = "" @@ -2123,8 +2507,14 @@ def deploy(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceHandler", "ParameterValue": self.__get_handler(rule_name, rule_params)}, - {"ParameterKey": "Timeout", "ParameterValue": str(self.args.lambda_timeout)}, + { + "ParameterKey": "SourceHandler", + "ParameterValue": self.__get_handler(rule_name, rule_params), + }, + { + "ParameterKey": "Timeout", + "ParameterValue": str(self.args.lambda_timeout), + }, ] layers = self.__get_lambda_layers(my_session, self.args, rule_params) @@ -2133,55 +2523,89 @@ def deploy(self): layers.extend(additional_layers) if layers: - my_params.append({"ParameterKey": "Layers", "ParameterValue": ",".join(layers)}) + my_params.append( + {"ParameterKey": "Layers", "ParameterValue": ",".join(layers)} + ) if self.args.lambda_security_groups and self.args.lambda_subnets: my_params.append( - {"ParameterKey": "SecurityGroupIds", "ParameterValue": self.args.lambda_security_groups} + { + "ParameterKey": "SecurityGroupIds", + "ParameterValue": self.args.lambda_security_groups, + } + ) + my_params.append( + { + "ParameterKey": "SubnetIds", + "ParameterValue": self.args.lambda_subnets, + } ) - my_params.append({"ParameterKey": "SubnetIds", "ParameterValue": self.args.lambda_subnets}) # create json of CFN template - cfn_body = os.path.join(path.dirname(__file__), "template", "configRule.json") + cfn_body = os.path.join( + 
path.dirname(__file__), "template", "configRule.json" + ) template_body = open(cfn_body, "r").read() json_body = json.loads(template_body) remediation = "" if "Remediation" in rule_params: - remediation = self.__create_remediation_cloudformation_block(rule_params["Remediation"]) + remediation = self.__create_remediation_cloudformation_block( + rule_params["Remediation"] + ) json_body["Resources"]["Remediation"] = remediation if "SSMAutomation" in rule_params: ##AWS needs to build the SSM before the Config Rule resource_depends_on = [ "rdkConfigRule", - self.__get_alphanumeric_rule_name(rule_name + "RemediationAction"), + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ), ] remediation["DependsOn"] = resource_depends_on # Add JSON Reference to SSM Document { "Ref" : "MyEC2Instance" } remediation["Properties"]["TargetId"] = { - "Ref": self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") + "Ref": self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) } if "SSMAutomation" in rule_params: print(f"[{my_session.region_name}]: Building SSM Automation Section") - ssm_automation = self.__create_automation_cloudformation_block(rule_params["SSMAutomation"], rule_name) + ssm_automation = self.__create_automation_cloudformation_block( + rule_params["SSMAutomation"], rule_name + ) json_body["Resources"][ self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") ] = ssm_automation if "IAM" in rule_params["SSMAutomation"]: print("Lets Build IAM Role and Policy") # TODO Check For IAM Settings - json_body["Resources"]["Remediation"]["Properties"]["Parameters"]["AutomationAssumeRole"][ - "StaticValue" - ]["Values"] = [{"Fn::GetAtt": [self.__get_alphanumeric_rule_name(rule_name + "Role"), "Arn"]}] + json_body["Resources"]["Remediation"]["Properties"]["Parameters"][ + "AutomationAssumeRole" + ]["StaticValue"]["Values"] = [ + { + "Fn::GetAtt": [ + self.__get_alphanumeric_rule_name(rule_name + "Role"), + "Arn", + ] + } + ] - ssm_iam_role, ssm_iam_policy = self.__create_automation_iam_cloudformation_block( + ( + ssm_iam_role, + ssm_iam_policy, + ) = self.__create_automation_iam_cloudformation_block( rule_params["SSMAutomation"], rule_name ) - json_body["Resources"][self.__get_alphanumeric_rule_name(rule_name + "Role")] = ssm_iam_role - json_body["Resources"][self.__get_alphanumeric_rule_name(rule_name + "Policy")] = ssm_iam_policy + json_body["Resources"][ + self.__get_alphanumeric_rule_name(rule_name + "Role") + ] = ssm_iam_role + json_body["Resources"][ + self.__get_alphanumeric_rule_name(rule_name + "Policy") + ] = ssm_iam_policy # debugging # print(json.dumps(json_body, indent=2)) @@ -2192,7 +2616,10 @@ def deploy(self): my_stack_name = self.__get_stack_name_from_rule_name(rule_name) my_stack = my_cfn.describe_stacks(StackName=my_stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for " + + rule_name + ) try: cfn_args = { "StackName": my_stack_name, @@ -2208,14 +2635,21 @@ def deploy(self): response = my_cfn.update_stack(**cfn_args) except ClientError as e: if e.response["Error"]["Code"] == "ValidationError": - if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. 
- print(f"[{my_session.region_name}]: No changes to Config Rule.") + print( + f"[{my_session.region_name}]: No changes to Config Rule." + ) else: # Something unexpected has gone wrong. Emit an error and bail. - print(f"[{my_session.region_name}]: Validation Error on CFN\n") - print(f"[{my_session.region_name}]: " + json.dumps(cfn_args) + "\n") + print( + f"[{my_session.region_name}]: Validation Error on CFN\n" + ) + print( + f"[{my_session.region_name}]: " + + json.dumps(cfn_args) + + "\n" + ) print(f"[{my_session.region_name}]: {e}\n") return 1 else: @@ -2226,12 +2660,18 @@ def deploy(self): print(f"[{my_session.region_name}]: Publishing Lambda code...") my_lambda_client = my_session.client("lambda") my_lambda_client.update_function_code( - FunctionName=my_lambda_arn, S3Bucket=code_bucket_name, S3Key=s3_dst, Publish=True + FunctionName=my_lambda_arn, + S3Bucket=code_bucket_name, + S3Key=s3_dst, + Publish=True, ) print(f"[{my_session.region_name}]: Lambda code updated.") except ClientError as e: # If we're in the exception, the stack does not exist and we should create it. - print(f"[{my_session.region_name}]: Creating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for " + + rule_name + ) cfn_args = { "StackName": my_stack_name, "TemplateBody": json.dumps(json_body, indent=2), @@ -2275,7 +2715,9 @@ def deploy_organization(self): if self.args.custom_code_bucket: code_bucket_name = self.args.custom_code_bucket else: - code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) # If we're only deploying the Lambda functions (and role + permissions), branch here. Someday the "main" execution path should use the same generated CFN templates for single-account deployment. 
if self.args.functions_only: @@ -2302,7 +2744,9 @@ def deploy_organization(self): combined_input_parameters = {} if "InputParameters" in rule_params: - combined_input_parameters.update(json.loads(rule_params["InputParameters"])) + combined_input_parameters.update( + json.loads(rule_params["InputParameters"]) + ) if "OptionalParameters" in rule_params: # Remove empty parameters @@ -2344,12 +2788,19 @@ def deploy_organization(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceIdentifier", "ParameterValue": rule_params["SourceIdentifier"]}, + { + "ParameterKey": "SourceIdentifier", + "ParameterValue": rule_params["SourceIdentifier"], + }, ] my_cfn = my_session.client("cloudformation") # deploy config rule - cfn_body = os.path.join(path.dirname(__file__), "template", "configManagedRuleOrganization.json") + cfn_body = os.path.join( + path.dirname(__file__), + "template", + "configManagedRuleOrganization.json", + ) try: my_stack_name = self.__get_stack_name_from_rule_name(rule_name) @@ -2407,7 +2858,9 @@ def deploy_organization(self): print("Found Custom Rule.") s3_src = "" - s3_dst = self.__upload_function_code(rule_name, rule_params, account_id, my_session, code_bucket_name) + s3_dst = self.__upload_function_code( + rule_name, rule_params, account_id, my_session, code_bucket_name + ) # create CFN Parameters for Custom Rules lambdaRoleArn = "" @@ -2415,7 +2868,10 @@ def deploy_organization(self): print("Existing IAM Role provided: " + self.args.lambda_role_arn) lambdaRoleArn = self.args.lambda_role_arn elif self.args.lambda_role_name: - print(f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + self.args.lambda_role_name) + print( + f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + + self.args.lambda_role_name + ) arn = f"arn:{partition}:iam::{account_id}:role/{self.args.lambda_role_name}" lambdaRoleArn = arn @@ -2475,8 +2931,14 @@ def deploy_organization(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceHandler", "ParameterValue": self.__get_handler(rule_name, rule_params)}, - {"ParameterKey": "Timeout", "ParameterValue": str(self.args.lambda_timeout)}, + { + "ParameterKey": "SourceHandler", + "ParameterValue": self.__get_handler(rule_name, rule_params), + }, + { + "ParameterKey": "Timeout", + "ParameterValue": str(self.args.lambda_timeout), + }, ] layers = self.__get_lambda_layers(my_session, self.args, rule_params) @@ -2485,16 +2947,28 @@ def deploy_organization(self): layers.extend(additional_layers) if layers: - my_params.append({"ParameterKey": "Layers", "ParameterValue": ",".join(layers)}) + my_params.append( + {"ParameterKey": "Layers", "ParameterValue": ",".join(layers)} + ) if self.args.lambda_security_groups and self.args.lambda_subnets: my_params.append( - {"ParameterKey": "SecurityGroupIds", "ParameterValue": self.args.lambda_security_groups} + { + "ParameterKey": "SecurityGroupIds", + "ParameterValue": self.args.lambda_security_groups, + } + ) + my_params.append( + { + "ParameterKey": "SubnetIds", + "ParameterValue": self.args.lambda_subnets, + } ) - my_params.append({"ParameterKey": "SubnetIds", "ParameterValue": self.args.lambda_subnets}) # create json of CFN template - cfn_body = os.path.join(path.dirname(__file__), "template", "configRuleOrganization.json") + cfn_body = os.path.join( + path.dirname(__file__), "template", "configRuleOrganization.json" + ) template_body = 
open(cfn_body, "r").read() json_body = json.loads(template_body) @@ -2523,7 +2997,6 @@ def deploy_organization(self): response = my_cfn.update_stack(**cfn_args) except ClientError as e: if e.response["Error"]["Code"] == "ValidationError": - if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. print("No changes to Config Rule.") @@ -2541,7 +3014,10 @@ def deploy_organization(self): print("Publishing Lambda code...") my_lambda_client = my_session.client("lambda") my_lambda_client.update_function_code( - FunctionName=my_lambda_arn, S3Bucket=code_bucket_name, S3Key=s3_dst, Publish=True + FunctionName=my_lambda_arn, + S3Bucket=code_bucket_name, + S3Key=s3_dst, + Publish=True, ) print("Lambda code updated.") except ClientError as e: @@ -2573,7 +3049,6 @@ def deploy_organization(self): return 0 def export(self): - self.__parse_export_args() # get the rule names @@ -2600,7 +3075,9 @@ def export(self): combined_input_parameters = {} if "InputParameters" in rule_params: - combined_input_parameters.update(json.loads(rule_params["InputParameters"])) + combined_input_parameters.update( + json.loads(rule_params["InputParameters"]) + ) if "OptionalParameters" in rule_params: # Remove empty parameters @@ -2654,22 +3131,36 @@ def export(self): "lambda_timeout": str(self.args.lambda_timeout), } - params_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + ".tfvars.json") + params_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, rule_name.lower() + ".tfvars.json" + ) parameters_file = open(params_file_path, "w") json.dump(my_params, parameters_file, indent=4) parameters_file.close() # create json of CFN template print(self.args.format + " version: " + self.args.version) tf_file_body = os.path.join( - path.dirname(__file__), "template", self.args.format, self.args.version, "config_rule.tf" + path.dirname(__file__), + "template", + self.args.format, + self.args.version, + "config_rule.tf", + ) + tf_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_rule.tf" ) - tf_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_rule.tf") shutil.copy(tf_file_body, tf_file_path) variables_file_body = os.path.join( - path.dirname(__file__), "template", self.args.format, self.args.version, "variables.tf" + path.dirname(__file__), + "template", + self.args.format, + self.args.version, + "variables.tf", + ) + variables_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_variables.tf" ) - variables_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_variables.tf") shutil.copy(variables_file_body, variables_file_path) print("Export completed.This will generate three .tf files.") @@ -2691,8 +3182,14 @@ def test_local(self): "python3.8-lib", "python3.9", "python3.9-lib", + "python3.10", + "python3.10-lib", ): - print("Skipping " + rule_name + " - Runtime not supported for local testing.") + print( + "Skipping " + + rule_name + + " - Runtime not supported for local testing." 
+ ) continue print("Testing " + rule_name) @@ -2700,9 +3197,13 @@ def test_local(self): print("Looking for tests in " + test_dir) if args.verbose == True: - results = unittest.TextTestRunner(buffer=False, verbosity=2).run(self.__create_test_suite(test_dir)) + results = unittest.TextTestRunner(buffer=False, verbosity=2).run( + self.__create_test_suite(test_dir) + ) else: - results = unittest.TextTestRunner(buffer=True, verbosity=2).run(self.__create_test_suite(test_dir)) + results = unittest.TextTestRunner(buffer=True, verbosity=2).run( + self.__create_test_suite(test_dir) + ) print(results) @@ -2735,11 +3236,19 @@ def test_remote(self): # Generate test event from templates test_event = json.load( - open(os.path.join(path.dirname(__file__), "template", event_template_filename), "r"), strict=False + open( + os.path.join( + path.dirname(__file__), "template", event_template_filename + ), + "r", + ), + strict=False, ) my_invoking_event = json.loads(test_event["invokingEvent"]) my_invoking_event["configurationItem"] = my_ci - my_invoking_event["notificationCreationTime"] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.000Z") + my_invoking_event[ + "notificationCreationTime" + ] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.000Z") test_event["invokingEvent"] = json.dumps(my_invoking_event) test_event["ruleParameters"] = json.dumps(my_parameters) @@ -2792,11 +3301,15 @@ def logs(self): logGroupName=log_group_name, orderBy="LastEventTime", descending=True, - limit=int(self.args.number), # This is the worst-case scenario if there is only one event per stream + limit=int( + self.args.number + ), # This is the worst-case scenario if there is only one event per stream ) # Sadly we can't just use filter_log_events, since we don't know the timestamps yet and filter_log_events doesn't appear to support ordering. - my_events = self.__get_log_events(cw_logs, log_streams, int(self.args.number)) + my_events = self.__get_log_events( + cw_logs, log_streams, int(self.args.number) + ) latest_timestamp = 0 @@ -2841,8 +3354,12 @@ def logs(self): def rulesets(self): self.args = get_rulesets_parser().parse_args(self.args.command_args, self.args) - if self.args.subcommand in ["add", "remove"] and (not self.args.ruleset or not self.args.rulename): - print("You must specify a ruleset name and a rule for the `add` and `remove` commands.") + if self.args.subcommand in ["add", "remove"] and ( + not self.args.ruleset or not self.args.rulename + ): + print( + "You must specify a ruleset name and a rule for the `add` and `remove` commands." 
+ ) return 1 if self.args.subcommand == "list": @@ -2855,7 +3372,9 @@ def rulesets(self): print("Unknown subcommand.") def create_terraform_template(self): - self.args = get_create_rule_template_parser().parse_args(self.args.command_args, self.args) + self.args = get_create_rule_template_parser().parse_args( + self.args.command_args, self.args + ) if self.args.rulesets: self.args.rulesets = self.args.rulesets.split(",") @@ -2877,7 +3396,9 @@ def create_terraform_template(self): print("CloudFormation template written to " + self.args.output_file) def create_rule_template(self): - self.args = get_create_rule_template_parser().parse_args(self.args.command_args, self.args) + self.args = get_create_rule_template_parser().parse_args( + self.args.command_args, self.args + ) if self.args.rulesets: self.args.rulesets = self.args.rulesets.split(",") @@ -2899,7 +3420,9 @@ def create_rule_template(self): parameters = {} parameters["LambdaAccountId"] = {} - parameters["LambdaAccountId"]["Description"] = "Account ID that contains Lambda functions for Config Rules." + parameters["LambdaAccountId"][ + "Description" + ] = "Account ID that contains Lambda functions for Config Rules." parameters["LambdaAccountId"]["Type"] = "String" parameters["LambdaAccountId"]["MinLength"] = "12" parameters["LambdaAccountId"]["MaxLength"] = "12" @@ -2916,17 +3439,29 @@ def create_rule_template(self): "RoleName": config_role_name, "Path": "/rdk/", "ManagedPolicyArns": [ - {"Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWS_ConfigRole"}, + { + "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWS_ConfigRole" + }, {"Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/ReadOnlyAccess"}, ], "AssumeRolePolicyDocument": CONFIG_ROLE_ASSUME_ROLE_POLICY_DOCUMENT, - "Policies": [{"PolicyName": "DeliveryPermission", "PolicyDocument": CONFIG_ROLE_POLICY_DOCUMENT}], + "Policies": [ + { + "PolicyName": "DeliveryPermission", + "PolicyDocument": CONFIG_ROLE_POLICY_DOCUMENT, + } + ], } # Create Bucket for Config Data resources["ConfigBucket"] = { "Type": "AWS::S3::Bucket", - "Properties": {"BucketName": {"Fn::Sub": config_bucket_prefix + "-${AWS::AccountId}-${AWS::Region}"}}, + "Properties": { + "BucketName": { + "Fn::Sub": config_bucket_prefix + + "-${AWS::AccountId}-${AWS::Region}" + } + }, } # Create ConfigurationRecorder and DeliveryChannel @@ -2935,18 +3470,25 @@ def create_rule_template(self): "Properties": { "Name": "default", "RoleARN": {"Fn::GetAtt": ["ConfigRole", "Arn"]}, - "RecordingGroup": {"AllSupported": True, "IncludeGlobalResourceTypes": True}, + "RecordingGroup": { + "AllSupported": True, + "IncludeGlobalResourceTypes": True, + }, }, } if self.args.config_role_arn: - resources["ConfigurationRecorder"]["Properties"]["RoleARN"] = self.args.config_role_arn + resources["ConfigurationRecorder"]["Properties"][ + "RoleARN" + ] = self.args.config_role_arn resources["DeliveryChannel"] = { "Type": "AWS::Config::DeliveryChannel", "Properties": { "Name": "default", "S3BucketName": {"Ref": "ConfigBucket"}, - "ConfigSnapshotDeliveryProperties": {"DeliveryFrequency": "One_Hour"}, + "ConfigSnapshotDeliveryProperties": { + "DeliveryFrequency": "One_Hour" + }, }, } @@ -2958,7 +3500,10 @@ def create_rule_template(self): for input_param in input_params: cfn_param = {} cfn_param["Description"] = ( - "Pass-through to required Input Parameter " + input_param + " for Config Rule " + rule_name + "Pass-through to required Input Parameter " + + input_param + + " for Config Rule " + + rule_name ) if 
len(str(input_params[input_param]).strip()) == 0: default = "" @@ -2978,17 +3523,24 @@ def create_rule_template(self): for optional_param in optional_params: cfn_param = {} cfn_param["Description"] = ( - "Pass-through to optional Input Parameter " + optional_param + " for Config Rule " + rule_name + "Pass-through to optional Input Parameter " + + optional_param + + " for Config Rule " + + rule_name ) cfn_param["Default"] = optional_params[optional_param] cfn_param["Type"] = "String" - param_name = self.__get_alphanumeric_rule_name(rule_name) + optional_param + param_name = ( + self.__get_alphanumeric_rule_name(rule_name) + optional_param + ) parameters[param_name] = cfn_param optional_parameter_group["Parameters"].append(param_name) - conditions[param_name] = {"Fn::Not": [{"Fn::Equals": ["", {"Ref": param_name}]}]} + conditions[param_name] = { + "Fn::Not": [{"Fn::Equals": ["", {"Ref": param_name}]}] + } config_rule = {} config_rule["Type"] = "AWS::Config::ConfigRule" @@ -3013,7 +3565,10 @@ def create_rule_template(self): # Also add the appropriate event source. source["SourceDetails"].append( - {"EventSource": "aws.config", "MessageType": "ConfigurationItemChangeNotification"} + { + "EventSource": "aws.config", + "MessageType": "ConfigurationItemChangeNotification", + } ) if "SourcePeriodic" in params: source["SourceDetails"].append( @@ -3045,56 +3600,102 @@ def create_rule_template(self): if "InputParameters" in params: for required_param in json.loads(params["InputParameters"]): - cfn_param_name = self.__get_alphanumeric_rule_name(rule_name) + required_param - properties["InputParameters"][required_param] = {"Ref": cfn_param_name} + cfn_param_name = ( + self.__get_alphanumeric_rule_name(rule_name) + required_param + ) + properties["InputParameters"][required_param] = { + "Ref": cfn_param_name + } if "OptionalParameters" in params: for optional_param in json.loads(params["OptionalParameters"]): - cfn_param_name = self.__get_alphanumeric_rule_name(rule_name) + optional_param + cfn_param_name = ( + self.__get_alphanumeric_rule_name(rule_name) + optional_param + ) properties["InputParameters"][optional_param] = { - "Fn::If": [cfn_param_name, {"Ref": cfn_param_name}, {"Ref": "AWS::NoValue"}] + "Fn::If": [ + cfn_param_name, + {"Ref": cfn_param_name}, + {"Ref": "AWS::NoValue"}, + ] } config_rule["Properties"] = properties - config_rule_resource_name = self.__get_alphanumeric_rule_name(rule_name) + "ConfigRule" + config_rule_resource_name = ( + self.__get_alphanumeric_rule_name(rule_name) + "ConfigRule" + ) resources[config_rule_resource_name] = config_rule # If Remediation create the remediation section with potential links to the SSM Details if "Remediation" in params: - remediation = self.__create_remediation_cloudformation_block(params["Remediation"]) + remediation = self.__create_remediation_cloudformation_block( + params["Remediation"] + ) remediation["DependsOn"] = [config_rule_resource_name] if not self.args.rules_only: remediation["DependsOn"].append("ConfigRole") if "SSMAutomation" in params: - ssm_automation = self.__create_automation_cloudformation_block(params["SSMAutomation"], rule_name) + ssm_automation = self.__create_automation_cloudformation_block( + params["SSMAutomation"], rule_name + ) # AWS needs to build the SSM before the Config Rule - remediation["DependsOn"].append(self.__get_alphanumeric_rule_name(rule_name + "RemediationAction")) + remediation["DependsOn"].append( + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) + ) # Add JSON Reference to 
SSM Document { "Ref" : "MyEC2Instance" } remediation["Properties"]["TargetId"] = { - "Ref": self.__get_alphanumeric_rule_name(rule_name) + "RemediationAction" + "Ref": self.__get_alphanumeric_rule_name(rule_name) + + "RemediationAction" } if "IAM" in params["SSMAutomation"]: print("Lets Build IAM Role and Policy For the SSM Document") - ssm_iam_role, ssm_iam_policy = self.__create_automation_iam_cloudformation_block( + ( + ssm_iam_role, + ssm_iam_policy, + ) = self.__create_automation_iam_cloudformation_block( params["SSMAutomation"], rule_name ) - resources[self.__get_alphanumeric_rule_name(rule_name + "Role")] = ssm_iam_role - resources[self.__get_alphanumeric_rule_name(rule_name + "Policy")] = ssm_iam_policy - remediation["Properties"]["Parameters"]["AutomationAssumeRole"]["StaticValue"]["Values"] = [ - {"Fn::GetAtt": [self.__get_alphanumeric_rule_name(rule_name + "Role"), "Arn"]} + resources[ + self.__get_alphanumeric_rule_name(rule_name + "Role") + ] = ssm_iam_role + resources[ + self.__get_alphanumeric_rule_name(rule_name + "Policy") + ] = ssm_iam_policy + remediation["Properties"]["Parameters"]["AutomationAssumeRole"][ + "StaticValue" + ]["Values"] = [ + { + "Fn::GetAtt": [ + self.__get_alphanumeric_rule_name( + rule_name + "Role" + ), + "Arn", + ] + } ] # Override the placeholder to associate the SSM Document Role with newly crafted role - resources[self.__get_alphanumeric_rule_name(rule_name + "RemediationAction")] = ssm_automation - resources[self.__get_alphanumeric_rule_name(rule_name) + "Remediation"] = remediation + resources[ + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) + ] = ssm_automation + resources[ + self.__get_alphanumeric_rule_name(rule_name) + "Remediation" + ] = remediation if tags: tags_str = "" for tag in tags: - tags_str += "Key={},Value={} ".format(tag["Key"], tag["Value"]) - script_for_tag += "aws configservice tag-resource --resources-arn $(aws configservice describe-config-rules --config-rule-names {} --query 'ConfigRules[0].ConfigRuleArn' | tr -d '\"') --tags {} \n".format( - rule_name, tags_str + key = tag["Key"] + val = tag["Value"] + tags_str += f"Key={key},Value={val} " + script_for_tag += ( + "aws configservice tag-resource --resources-arn $(aws configservice describe-config-rules " + + f"--config-rule-names {rule_name} --query 'ConfigRules[0].ConfigRuleArn' | tr -d '\"') --tags {tags_str} \n" ) template["Resources"] = resources @@ -3103,7 +3704,10 @@ def create_rule_template(self): template["Metadata"] = { "AWS::CloudFormation::Interface": { "ParameterGroups": [ - {"Label": {"default": "Lambda Account ID"}, "Parameters": ["LambdaAccountId"]}, + { + "Label": {"default": "Lambda Account ID"}, + "Parameters": ["LambdaAccountId"], + }, required_parameter_group, optional_parameter_group, ], @@ -3120,7 +3724,9 @@ def create_rule_template(self): print("CloudFormation template written to " + self.args.output_file) if script_for_tag: - print("Found tags on config rules. Cloudformation do not support tagging config rule at the moment") + print( + "Found tags on config rules. Cloudformation do not support tagging config rule at the moment" + ) print("Generating script for config rules tags") script_for_tag = "#! 
/bin/bash \n" + script_for_tag if self.args.tag_config_rules_script: @@ -3129,10 +3735,14 @@ def create_rule_template(self): else: print("=========SCRIPT=========") print(script_for_tag) - print("you can use flag [--tag-config-rules-script ] to output the script") + print( + "you can use flag [--tag-config-rules-script ] to output the script" + ) def create_region_set(self): - self.args = get_create_region_set_parser().parse_args(self.args.command_args, self.args) + self.args = get_create_region_set_parser().parse_args( + self.args.command_args, self.args + ) output_file = self.args.output_file output_dict = { "default": ["us-east-1", "us-west-1", "eu-north-1", "ap-southeast-1"], @@ -3184,7 +3794,8 @@ def __list_rulesets(self): rules = [] for obj_name in os.listdir("."): - # print(obj_name) + if obj_name.startswith("."): + continue # Skip hidden items params_file_path = os.path.join(".", obj_name, parameter_file_name) if os.path.isfile(params_file_path): parameters_file = open(params_file_path, "r") @@ -3211,7 +3822,7 @@ def __get_template_dir(self): def __create_test_suite(self, test_dir): tests = [] - for (top, dirs, filenames) in os.walk(test_dir): + for top, dirs, filenames in os.walk(test_dir): for filename in fnmatch.filter(filenames, "*_test.py"): print(filename) sys.path.append(top) @@ -3233,39 +3844,39 @@ def __clean_rule_name(self, rule_name): return output def __create_java_rule(self): - src = os.path.join(path.dirname(__file__), "template", "runtime", "java8", "src") + src = os.path.join( + path.dirname(__file__), "template", "runtime", "java8", "src" + ) dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, "src") shutil.copytree(src, dst) - src = os.path.join(path.dirname(__file__), "template", "runtime", "java8", "jars") + src = os.path.join( + path.dirname(__file__), "template", "runtime", "java8", "jars" + ) dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, "jars") shutil.copytree(src, dst) - src = os.path.join(path.dirname(__file__), "template", "runtime", "java8", "build.gradle") + src = os.path.join( + path.dirname(__file__), "template", "runtime", "java8", "build.gradle" + ) dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, "build.gradle") shutil.copyfile(src, dst) - def __create_dotnet_rule(self): - runtime_path = os.path.join(path.dirname(__file__), "template", "runtime", self.args.runtime) - dst_path = os.path.join(os.getcwd(), rules_dir, self.args.rulename) - for obj in os.listdir(runtime_path): - src = os.path.join(runtime_path, obj) - dst = os.path.join(dst_path, obj) - if os.path.isfile(src): - shutil.copyfile(src, dst) - else: - shutil.copytree(src, dst) - def __print_log_event(self, event): - time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(event["timestamp"] / 1000)) + time_string = time.strftime( + "%Y-%m-%d %H:%M:%S", time.localtime(event["timestamp"] / 1000) + ) rows = 24 columns = 80 - try: - rows, columns = os.popen("stty size", "r").read().split() - except ValueError as e: - # This was probably being run in a headless test environment which had no stty. - print("Using default terminal rows and columns.") + if shutil.which("stty") is not None: + try: + rows, columns = os.popen("stty size", "r").read().split() + except Exception as e: + # This was probably being run in a headless test environment which had no stty. 
+ print("Using default terminal rows and columns.") + else: + print("stty not present -- using default terminal rows and columns.") line_wrap = int(columns) - 22 message_lines = str(event["message"]).splitlines() @@ -3273,7 +3884,11 @@ def __print_log_event(self, event): for line in message_lines: line = line.replace("\t", " ") - formatted_lines.append("\n".join(line[i : i + line_wrap] for i in range(0, len(line), line_wrap))) + formatted_lines.append( + "\n".join( + line[i : i + line_wrap] for i in range(0, len(line), line_wrap) + ) + ) message_string = "\n".join(formatted_lines) message_string = message_string.replace("\n", "\n ") @@ -3326,7 +3941,11 @@ def __get_caller_identity_details(self, my_session): response = my_sts.get_caller_identity() arn_split = response["Arn"].split(":") - return {"account_id": response["Account"], "partition": arn_split[1], "region": arn_split[3]} + return { + "account_id": response["Account"], + "partition": arn_split[1], + "region": arn_split[3], + } def __get_stack_name_from_rule_name(self, rule_name): output = rule_name.replace("_", "") @@ -3341,22 +3960,33 @@ def __get_alphanumeric_rule_name(self, rule_name): def __get_rule_list_for_command(self, Command="deploy"): rule_names = [] if self.args.all: - d = "." for obj_name in os.listdir("."): obj_path = os.path.join(".", obj_name) if os.path.isdir(obj_path) and not obj_name == "rdk": for file_name in os.listdir(obj_path): if obj_name not in rule_names: - if os.path.exists(os.path.join(obj_path, "parameters.json")): + if os.path.exists( + os.path.join(obj_path, "parameters.json") + ): rule_names.append(obj_name) else: if file_name.split(".")[0] == obj_name: rule_names.append(obj_name) if os.path.exists( - os.path.join(obj_path, "src", "main", "java", "com", "rdk", "RuleCode.java") + os.path.join( + obj_path, + "src", + "main", + "java", + "com", + "rdk", + "RuleCode.java", + ) ): rule_names.append(obj_name) - if os.path.exists(os.path.join(obj_path, "RuleCode.cs")): + if os.path.exists( + os.path.join(obj_path, "RuleCode.cs") + ): rule_names.append(obj_name) elif self.args.rulesets: for obj_name in os.listdir("."): @@ -3376,7 +4006,10 @@ def __get_rule_list_for_command(self, Command="deploy"): if os.path.isdir(cleaned_rule_name): rule_names.append(cleaned_rule_name) else: - print('Invalid Option: Specify Rule Name or RuleSet. Run "rdk %s -h" for more info.' % (Command)) + print( + 'Invalid Option: Specify Rule Name or RuleSet. Run "rdk %s -h" for more info.' + % (Command) + ) sys.exit(1) if len(rule_names) == 0: @@ -3387,21 +4020,21 @@ def __get_rule_list_for_command(self, Command="deploy"): for name in rule_names: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." 
) sys.exit(1) return rule_names def __get_rule_parameters(self, rule_name): - params_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, parameter_file_name) + params_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, parameter_file_name + ) try: parameters_file = open(params_file_path, "r") except IOError as e: - print("Failed to open parameters file for rule '{}'".format(rule_name)) + print(f"Failed to open parameters file for rule '{rule_name}'") print(e.message) sys.exit(1) @@ -3410,12 +4043,12 @@ def __get_rule_parameters(self, rule_name): try: my_json = json.load(parameters_file) except ValueError as ve: # includes simplejson.decoder.JSONDecodeError - print("Failed to decode JSON in parameters file for Rule {}".format(rule_name)) + print(f"Failed to decode JSON in parameters file for Rule {rule_name}") print(ve.message) parameters_file.close() sys.exit(1) except Exception as e: - print("Error loading parameters file for Rule {}".format(rule_name)) + print(f"Error loading parameters file for Rule {rule_name}") print(e.message) parameters_file.close() sys.exit(1) @@ -3436,7 +4069,9 @@ def __get_rule_parameters(self, rule_name): return my_json["Parameters"], my_tags def __parse_rule_args(self, is_required): - self.args = get_rule_parser(is_required, self.args.command).parse_args(self.args.command_args, self.args) + self.args = get_rule_parser(is_required, self.args.command).parse_args( + self.args.command_args, self.args + ) if self.args.rulename: if len(self.args.rulename) > 128: @@ -3448,7 +4083,10 @@ def __parse_rule_args(self, is_required): for resource_type in self.args.resource_types.split(","): if resource_type not in accepted_resource_types: resource_type_error = ( - resource_type_error + ' "' + resource_type + '" not found in list of accepted resource types.' + resource_type_error + + ' "' + + resource_type + + '" not found in list of accepted resource types.' ) if resource_type_error: print(resource_type_error) @@ -3459,8 +4097,14 @@ def __parse_rule_args(self, is_required): "Skip-Supported-Resource-Check Flag set (--skip-supported-resource-check), ignoring missing resource type error." ) - if is_required and not self.args.resource_types and not self.args.maximum_frequency: - print("You must specify either a resource type trigger or a maximum frequency.") + if ( + is_required + and not self.args.resource_types + and not self.args.maximum_frequency + ): + print( + "You must specify either a resource type trigger or a maximum frequency." + ) sys.exit(1) if self.args.input_parameters: @@ -3473,16 +4117,20 @@ def __parse_rule_args(self, is_required): if self.args.optional_parameters: try: - optional_params_dict = json.loads(self.args.optional_parameters, strict=False) + optional_params_dict = json.loads( + self.args.optional_parameters, strict=False + ) except Exception as e: - print("Failed to parse optional parameters.") + print(f"Failed to parse optional parameters. 
{repr(e)}") sys.exit(1) if self.args.rulesets: self.args.rulesets = self.args.rulesets.split(",") def __parse_test_args(self): - self.args = get_test_parser(self.args.command).parse_args(self.args.command_args, self.args) + self.args = get_test_parser(self.args.command).parse_args( + self.args.command_args, self.args + ) if self.args.all and self.args.rulename: print("You may specify either specific rules or --all, but not both.") @@ -3494,12 +4142,15 @@ def __parse_test_args(self): return self.args def __parse_deploy_args(self, ForceArgument=False): + self.args = get_deployment_parser(ForceArgument).parse_args( + self.args.command_args, self.args + ) - self.args = get_deployment_parser(ForceArgument).parse_args(self.args.command_args, self.args) - - ### Validate inputs ### + # Validate inputs # if self.args.stack_name and not self.args.functions_only: - print("--stack-name can only be specified when using the --functions-only feature.") + print( + "--stack-name can only be specified when using the --functions-only feature." + ) sys.exit(1) # Make sure we're not exceeding Layer limits @@ -3508,12 +4159,20 @@ def __parse_deploy_args(self, ForceArgument=False): if layer_count > 5: print("You may only specify 5 Lambda Layers.") sys.exit(1) - if self.args.rdklib_layer_arn or self.args.generated_lambda_layer and layer_count > 4: - print("Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers.") + if ( + self.args.rdklib_layer_arn + or self.args.generated_lambda_layer + and layer_count > 4 + ): + print( + "Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers." + ) sys.exit(1) # RDKLib version and RDKLib Layer ARN/Generated RDKLib Layer are mutually exclusive. - if "rdk_lib_version" in self.args and (self.args.rdklib_layer_arn or self.args.generated_lambda_layer): + if "rdk_lib_version" in self.args and ( + self.args.rdklib_layer_arn or self.args.generated_lambda_layer + ): print( "Specify EITHER an RDK Lib version to use the official release OR a specific Layer ARN to use a custom implementation." ) @@ -3521,7 +4180,9 @@ def __parse_deploy_args(self, ForceArgument=False): # RDKLib version and RDKLib Layer ARN/Generated RDKLib Layer are mutually exclusive. if self.args.rdklib_layer_arn and self.args.generated_lambda_layer: - print("Specify EITHER an RDK Lib Layer ARN OR the generated lambda layer flag.") + print( + "Specify EITHER an RDK Lib Layer ARN OR the generated lambda layer flag." + ) sys.exit(1) # Check rule names to make sure none are too long. This is needed to catch Rules created before length constraint was added. @@ -3529,9 +4190,7 @@ def __parse_deploy_args(self, ForceArgument=False): for name in self.args.rulename: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." 
) sys.exit(1) @@ -3542,12 +4201,15 @@ def __parse_deploy_args(self, ForceArgument=False): self.args.rulesets = self.args.rulesets.split(",") def __parse_deploy_organization_args(self, ForceArgument=False): + self.args = get_deployment_organization_parser(ForceArgument).parse_args( + self.args.command_args, self.args + ) - self.args = get_deployment_organization_parser(ForceArgument).parse_args(self.args.command_args, self.args) - - ### Validate inputs ### + # Validate inputs # if self.args.stack_name and not self.args.functions_only: - print("--stack-name can only be specified when using the --functions-only feature.") + print( + "--stack-name can only be specified when using the --functions-only feature." + ) sys.exit(1) # Make sure we're not exceeding Layer limits @@ -3557,7 +4219,9 @@ def __parse_deploy_organization_args(self, ForceArgument=False): print("You may only specify 5 Lambda Layers.") sys.exit(1) if self.args.rdklib_layer_arn and layer_count > 4: - print("Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers.") + print( + "Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers." + ) sys.exit(1) # RDKLib version and RDKLib Layer ARN are mutually exclusive. @@ -3572,9 +4236,7 @@ def __parse_deploy_organization_args(self, ForceArgument=False): for name in self.args.rulename: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." ) sys.exit(1) @@ -3585,17 +4247,16 @@ def __parse_deploy_organization_args(self, ForceArgument=False): self.args.rulesets = self.args.rulesets.split(",") def __parse_export_args(self, ForceArgument=False): - - self.args = get_export_parser(ForceArgument).parse_args(self.args.command_args, self.args) + self.args = get_export_parser(ForceArgument).parse_args( + self.args.command_args, self.args + ) # Check rule names to make sure none are too long. This is needed to catch Rules created before length constraint was added. if self.args.rulename: for name in self.args.rulename: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." 
) sys.exit(1) @@ -3609,35 +4270,14 @@ def __package_function_code(self, rule_name, params): subprocess.call(command, cwd=working_dir) # set source as distribution zip - s3_src = os.path.join(os.getcwd(), rules_dir, rule_name, "build", "distributions", rule_name + ".zip") - elif params["SourceRuntime"] in ["dotnetcore1.0", "dotnetcore2.0"]: - print("Packaging " + rule_name) - working_dir = os.path.join(os.getcwd(), rules_dir, rule_name) - commands = [["dotnet", "restore"]] - - app_runtime = "netcoreapp1.0" - if params["SourceRuntime"] == "dotnetcore2.0": - app_runtime = "netcoreapp2.0" - - commands.append(["dotnet", "lambda", "package", "-c", "Release", "-f", app_runtime]) - - for command in commands: - subprocess.call(command, cwd=working_dir) - - # Remove old zip file if it already exists - package_file_dst = os.path.join(rule_name, rule_name + ".zip") - self.__delete_package_file(package_file_dst) - - # Create new package in temp directory, copy to rule directory - # This copy avoids the archiver trying to include the output zip in itself - s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name, "bin", "Release", app_runtime, "publish") - tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir + s3_src = os.path.join( + os.getcwd(), + rules_dir, + rule_name, + "build", + "distributions", + rule_name + ".zip", ) - if not (os.path.exists(package_file_dst)): - shutil.copy(tmp_src, package_file_dst) - s3_src = os.path.abspath(package_file_dst) - self.__delete_package_file(tmp_src) else: print("Zipping " + rule_name) @@ -3648,7 +4288,9 @@ def __package_function_code(self, rule_name, params): # zip rule code files and upload to s3 bucket s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name) tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir + os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), + "zip", + s3_src_dir, ) if not (os.path.exists(package_file_dst)): shutil.copy(tmp_src, package_file_dst) @@ -3687,7 +4329,9 @@ def __populate_params(self): if self.args.optional_parameters: # As above, but with the optional input parameters. try: - my_optional_params = json.loads(self.args.optional_parameters, strict=False) + my_optional_params = json.loads( + self.args.optional_parameters, strict=False + ) except Exception as e: print( "Error parsing optional input parameter JSON. Make sure your JSON keys and values are enclosed in properly escaped double quotes and your optional-parameters string is enclosed in single quotes." @@ -3699,7 +4343,7 @@ def __populate_params(self): # As above, but with the optional tag key value pairs. try: my_tags = json.loads(self.args.tags, strict=False) - except Exception as e: + except Exception: print( "Error parsing optional tags JSON. Make sure your JSON keys and values are enclosed in properly escaped double quotes and tags string is enclosed in single quotes." 
) @@ -3719,12 +4363,14 @@ def __populate_params(self): ) and not self.args.remediation_action ): - print("Remediation Flags detected but no remediation action (--remediation-action) set") + print( + "Remediation Flags detected but no remediation action (--remediation-action) set" + ) if self.args.remediation_action: try: my_remediation = self.__generate_remediation_params() - except Exception as e: + except Exception: print("Error parsing remediation configuration.") # create config file and place in rule directory @@ -3732,7 +4378,7 @@ def __populate_params(self): "RuleName": self.args.rulename, "Description": self.args.rulename, "SourceRuntime": self.args.runtime, - #'CodeBucket': code_bucket_prefix + account_id, + # 'CodeBucket': code_bucket_prefix + account_id, "CodeKey": self.args.rulename + my_session.region_name + ".zip", "InputParameters": json.dumps(my_input_params), "OptionalParameters": json.dumps(my_optional_params), @@ -3771,7 +4417,9 @@ def __generate_remediation_params(self): ssm_controls = {} if self.args.remediation_concurrent_execution_percent: - ssm_controls["ConcurrentExecutionRatePercentage"] = self.args.remediation_concurrent_execution_percent + ssm_controls[ + "ConcurrentExecutionRatePercentage" + ] = self.args.remediation_concurrent_execution_percent if self.args.remediation_error_rate_percent: ssm_controls["ErrorPercentage"] = self.args.remediation_error_rate_percent @@ -3780,7 +4428,9 @@ def __generate_remediation_params(self): params["ExecutionControls"] = {"SsmControls": ssm_controls} if self.args.auto_remediation_retry_attempts: - params["MaximumAutomaticAttempts"] = self.args.auto_remediation_retry_attempts + params[ + "MaximumAutomaticAttempts" + ] = self.args.auto_remediation_retry_attempts if self.args.remediation_parameters: params["Parameters"] = json.loads(self.args.remediation_parameters) @@ -3801,7 +4451,9 @@ def __generate_remediation_params(self): def __write_params_file(self, rulename, parameters, tags): my_params = {"Version": "1.0", "Parameters": parameters, "Tags": tags} - params_file_path = os.path.join(os.getcwd(), rules_dir, rulename, parameter_file_name) + params_file_path = os.path.join( + os.getcwd(), rules_dir, rulename, parameter_file_name + ) parameters_file = open(params_file_path, "w") json.dump(my_params, parameters_file, indent=2) parameters_file.close() @@ -3814,8 +4466,8 @@ def __wait_for_cfn_stack(self, cfn_client, stackname): response = cfn_client.list_stacks() all_stacks = response["StackSummaries"] - while 'NextToken' in response: - response = cfn_client.list_stacks(NextToken=response['NextToken']) + while "NextToken" in response: + response = cfn_client.list_stacks(NextToken=response["NextToken"]) all_stacks += response["StackSummaries"] for stack in all_stacks: @@ -3833,26 +4485,44 @@ def __wait_for_cfn_stack(self, cfn_client, stackname): # If all stacks have been deleted, clearly we're done! if all_deleted: in_progress = False - print(f"[{my_session.region_name}]: CloudFormation stack operation complete.") + print( + f"[{my_session.region_name}]: CloudFormation stack operation complete." + ) continue else: if "FAILED" in active_stack["StackStatus"]: in_progress = False - print(f"[{my_session.region_name}]: CloudFormation stack operation Failed for " + stackname + ".") + print( + f"[{my_session.region_name}]: CloudFormation stack operation Failed for " + + stackname + + "." 
+ ) if "StackStatusReason" in active_stack: - print(f"[{my_session.region_name}]: Reason: " + active_stack["StackStatusReason"]) + print( + f"[{my_session.region_name}]: Reason: " + + active_stack["StackStatusReason"] + ) elif active_stack["StackStatus"] == "ROLLBACK_COMPLETE": in_progress = False print( - f"[{my_session.region_name}]: CloudFormation stack operation Rolled Back for " + stackname + "." + f"[{my_session.region_name}]: CloudFormation stack operation Rolled Back for " + + stackname + + "." ) if "StackStatusReason" in active_stack: - print(f"[{my_session.region_name}]: Reason: " + active_stack["StackStatusReason"]) + print( + f"[{my_session.region_name}]: Reason: " + + active_stack["StackStatusReason"] + ) elif "COMPLETE" in active_stack["StackStatus"]: in_progress = False - print(f"[{my_session.region_name}]: CloudFormation stack operation complete.") + print( + f"[{my_session.region_name}]: CloudFormation stack operation complete." + ) else: - print(f"[{my_session.region_name}]: Waiting for CloudFormation stack operation to complete...") + print( + f"[{my_session.region_name}]: Waiting for CloudFormation stack operation to complete..." + ) time.sleep(5) def __get_handler(self, rule_name, params): @@ -3865,21 +4535,19 @@ def __get_handler(self, rule_name, params): "python3.8-lib", "python3.9", "python3.9-lib", - "nodejs6.10", - "nodejs8.10", + "python3.10", + "python3.10-lib", ]: return rule_name + ".lambda_handler" elif params["SourceRuntime"] in ["java8"]: return "com.rdk.RuleUtil::handler" - elif params["SourceRuntime"] in ["dotnetcore1.0", "dotnetcore2.0"]: - return "csharp7.0::Rdk.CustomConfigHandler::FunctionHandler" def __get_runtime_string(self, params): if params["SourceRuntime"] in [ - "python3.6-managed", "python3.7-lib", "python3.8-lib", "python3.9-lib", + "python3.10-lib", ]: runtime = params["SourceRuntime"].split("-") return runtime[0] @@ -3896,9 +4564,13 @@ def __get_test_CIs(self, rulename): test_ci_list.append(my_test_ci.get_json()) else: # Check to see if there is a test_ci.json file in the Rule directory - tests_path = os.path.join(os.getcwd(), rules_dir, rulename, test_ci_filename) + tests_path = os.path.join( + os.getcwd(), rules_dir, rulename, test_ci_filename + ) if os.path.exists(tests_path): - print("\tTesting with CI's provided in test_ci.json file. NOT YET IMPLEMENTED") # TODO + print( + "\tTesting with CI's provided in test_ci.json file. NOT YET IMPLEMENTED" + ) # TODO # test_ci_list self._load_cis_from_file(tests_path) else: print("\tTesting with generic CI for configured Resource Type(s)") @@ -3916,7 +4588,8 @@ def __get_lambda_arn_for_stack(self, stack_name): my_cfn = my_session.client("cloudformation") - # Since CFN won't detect changes to the lambda code stored in S3 as a reason to update the stack, we need to manually update the code reference in Lambda once the CFN has run. + # Since CFN won't detect changes to the lambda code stored in S3 as a reason to update the stack, + # we need to manually update the code reference in Lambda once the CFN has run. self.__wait_for_cfn_stack(my_cfn, stack_name) # Lambda function is an output of the stack. @@ -3928,7 +4601,9 @@ def __get_lambda_arn_for_stack(self, stack_name): my_lambda_arn = output["OutputValue"] if my_lambda_arn == "NOTFOUND": - print(f"[{my_session.region_name}]: Could not read CloudFormation stack output to find Lambda function.") + print( + f"[{my_session.region_name}]: Could not read CloudFormation stack output to find Lambda function." 
+ ) sys.exit(1) return my_lambda_arn @@ -3938,27 +4613,27 @@ def __get_lambda_name(self, rule_name, params): lambda_name = params["CustomLambdaName"] if len(lambda_name) > 64: print( - "Error: Found Rule's Lambda function with name over 64 characters: {} \n Recreate the lambda name with a shorter name.".format( - lambda_name - ) + f"Error: Found Rule's Lambda function with name over 64 characters: {lambda_name}." + + "\nRecreate the lambda name with a shorter name." ) sys.exit(1) return lambda_name else: - lambda_name = "RDK-Rule-Function-" + self.__get_stack_name_from_rule_name(rule_name) + lambda_name = "RDK-Rule-Function-" + self.__get_stack_name_from_rule_name( + rule_name + ) if len(lambda_name) > 64: print( - "Error: Found Rule's Lambda function with name over 64 characters: {} \n Recreate the rule with a shorter name or with CustomLambdaName attribute in parameter.json. If you are using 'rdk create', you can add '--custom-lambda-name ' to create your RDK rules".format( - lambda_name - ) + f"Error: Found Rule's Lambda function with name over 64 characters: {lambda_name}." + + "\nRecreate the rule with a shorter name or with CustomLambdaName attribute in parameter.json." + + "\nIf you are using 'rdk create', you can add '--custom-lambda-name ' to create your RDK rules" ) sys.exit(1) return lambda_name def __get_lambda_arn_for_rule(self, rule_name, partition, region, account, params): - return "arn:{}:lambda:{}:{}:function:{}".format( - partition, region, account, self.__get_lambda_name(rule_name, params) - ) + lambda_name = self.__get_lambda_name(rule_name, params) + return f"arn:{partition}:lambda:{region}:{account}:function:{lambda_name}" def __delete_package_file(self, file): try: @@ -3966,7 +4641,9 @@ def __delete_package_file(self, file): except OSError: pass - def __upload_function_code(self, rule_name, params, account_id, my_session, code_bucket_name): + def __upload_function_code( + self, rule_name, params, account_id, my_session, code_bucket_name + ): if params["SourceRuntime"] == "java8": # Do java build and package. 
print(f"[{my_session.region_name}]: Running Gradle Build for " + rule_name) @@ -3976,7 +4653,12 @@ def __upload_function_code(self, rule_name, params, account_id, my_session, code # set source as distribution zip s3_src = os.path.join( - os.getcwd(), rules_dir, rule_name, "build", "distributions", rule_name + my_session.region_name + ".zip" + os.getcwd(), + rules_dir, + rule_name, + "build", + "distributions", + rule_name + my_session.region_name + ".zip", ) s3_dst = "/".join((rule_name, rule_name + ".zip")) @@ -3986,41 +4668,6 @@ def __upload_function_code(self, rule_name, params, account_id, my_session, code my_s3.meta.client.upload_file(s3_src, code_bucket_name, s3_dst) print(f"[{my_session.region_name}]: Upload complete.") - elif params["SourceRuntime"] in ["dotnetcore1.0", "dotnetcore2.0"]: - print("Packaging " + rule_name) - working_dir = os.path.join(os.getcwd(), rules_dir, rule_name) - commands = [["dotnet", "restore"]] - - app_runtime = "netcoreapp1.0" - if params["SourceRuntime"] == "dotnetcore2.0": - app_runtime = "netcoreapp2.0" - - commands.append(["dotnet", "lambda", "package", "-c", "Release", "-f", app_runtime]) - - for command in commands: - subprocess.call(command, cwd=working_dir) - - # Remove old zip file if it already exists - package_file_dst = os.path.join(rule_name, rule_name + ".zip") - self.__delete_package_file(package_file_dst) - - # Create new package in temp directory, copy to rule directory - # This copy avoids the archiver trying to include the output zip in itself - s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name, "bin", "Release", app_runtime, "publish") - tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir - ) - s3_dst = "/".join((rule_name, rule_name + ".zip")) - - my_s3 = my_session.resource("s3") - - print(f"[{my_session.region_name}]: Uploading " + rule_name) - my_s3.meta.client.upload_file(tmp_src, code_bucket_name, s3_dst) - print(f"[{my_session.region_name}]: Upload complete.") - if not (os.path.exists(package_file_dst)): - shutil.copy(tmp_src, package_file_dst) - self.__delete_package_file(tmp_src) - else: print(f"[{my_session.region_name}]: Zipping " + rule_name) # Remove old zip file if it already exists @@ -4031,7 +4678,9 @@ def __upload_function_code(self, rule_name, params, account_id, my_session, code s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name) tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir + os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), + "zip", + s3_src_dir, ) s3_dst = "/".join((rule_name, rule_name + ".zip")) @@ -4058,7 +4707,6 @@ def __create_remediation_cloudformation_block(self, remediation_config): def __create_automation_cloudformation_block(self, ssm_automation, rule_name): print("Generate SSM Resources") - current_working_direcoty = os.getcwd() ssm_json_dir = os.path.join(os.getcwd(), ssm_automation["Document"]) print("Reading SSM JSON From -> " + ssm_json_dir) # params_file_path = os.path.join(os.getcwd(), rules_dir, rulename, parameter_file_name) @@ -4066,19 +4714,28 @@ def __create_automation_cloudformation_block(self, ssm_automation, rule_name): ssm_automation_json = json.loads(ssm_automation_content) ssm_automation_config = { "Type": "AWS::SSM::Document", - "Properties": {"DocumentType": "Automation", "Content": ssm_automation_json}, + "Properties": { + "DocumentType": "Automation", + "Content": ssm_automation_json, + }, } 
return ssm_automation_config def __create_automation_iam_cloudformation_block(self, ssm_automation, rule_name): - - print("Generate IAM Role for SSM Document with these actions", str(ssm_automation["IAM"])) + print( + "Generate IAM Role for SSM Document with these actions", + str(ssm_automation["IAM"]), + ) assume_role_template = { "Version": "2012-10-17", "Statement": [ - {"Effect": "Allow", "Principal": {"Service": "ssm.amazonaws.com"}, "Action": "sts:AssumeRole"} + { + "Effect": "Allow", + "Principal": {"Service": "ssm.amazonaws.com"}, + "Action": "sts:AssumeRole", + } ], } @@ -4086,7 +4743,8 @@ def __create_automation_iam_cloudformation_block(self, ssm_automation, rule_name ssm_automation_iam_role = { "Type": "AWS::IAM::Role", "Properties": { - "Description": "IAM Role to Support Config Remediation for " + rule_name, + "Description": "IAM Role to Support Config Remediation for " + + rule_name, "Path": "/rdk-remediation-role/", # "RoleName": {"Fn::Sub": "" + rule_name + "-Remediation-Role-${AWS::Region}"}, "AssumeRolePolicyDocument": assume_role_template, @@ -4097,11 +4755,21 @@ def __create_automation_iam_cloudformation_block(self, ssm_automation, rule_name "Type": "AWS::IAM::Policy", "Properties": { "PolicyDocument": { - "Statement": [{"Action": ssm_automation["IAM"], "Effect": "Allow", "Resource": "*"}], + "Statement": [ + { + "Action": ssm_automation["IAM"], + "Effect": "Allow", + "Resource": "*", + } + ], "Version": "2012-10-17", }, - "PolicyName": {"Fn::Sub": "" + rule_name + "-Remediation-Policy-${AWS::Region}"}, - "Roles": [{"Ref": self.__get_alphanumeric_rule_name(rule_name + "Role")}], + "PolicyName": { + "Fn::Sub": "" + rule_name + "-Remediation-Policy-${AWS::Region}" + }, + "Roles": [ + {"Ref": self.__get_alphanumeric_rule_name(rule_name + "Role")} + ], }, } @@ -4119,7 +4787,9 @@ def __create_function_cloudformation_template(self): parameters = {} parameters["SourceBucket"] = {} - parameters["SourceBucket"]["Description"] = "Name of the S3 bucket that you have stored the rule zip files in." + parameters["SourceBucket"][ + "Description" + ] = "Name of the S3 bucket that you have stored the rule zip files in." 
parameters["SourceBucket"]["Type"] = "String" parameters["SourceBucket"]["MinLength"] = "1" parameters["SourceBucket"]["MaxLength"] = "255" @@ -4134,10 +4804,16 @@ def __create_function_cloudformation_template(self): partition = identity_details["partition"] lambdaRoleArn = "" if self.args.lambda_role_arn: - print(f"[{my_session.region_name}]: Existing IAM Role provided: " + self.args.lambda_role_arn) + print( + f"[{my_session.region_name}]: Existing IAM Role provided: " + + self.args.lambda_role_arn + ) lambdaRoleArn = self.args.lambda_role_arn elif self.args.lambda_role_name: - print(f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + self.args.lambda_role_name) + print( + f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + + self.args.lambda_role_name + ) arn = f"arn:{partition}:iam::{account_id}:role/{self.args.lambda_role_name}" lambdaRoleArn = arn else: @@ -4158,12 +4834,6 @@ def __create_function_cloudformation_template(self): ], } lambda_policy_statements = [ - { - "Sid": "1", - "Action": ["s3:GetObject"], - "Effect": "Allow", - "Resource": {"Fn::Sub": "arn:${AWS::Partition}:s3:::${SourceBucket}/*"}, - }, { "Sid": "2", "Action": [ @@ -4175,9 +4845,24 @@ def __create_function_cloudformation_template(self): "Effect": "Allow", "Resource": "*", }, - {"Sid": "3", "Action": ["config:PutEvaluations"], "Effect": "Allow", "Resource": "*"}, - {"Sid": "4", "Action": ["iam:List*", "iam:Describe*", "iam:Get*"], "Effect": "Allow", "Resource": "*"}, - {"Sid": "5", "Action": ["sts:AssumeRole"], "Effect": "Allow", "Resource": "*"}, + { + "Sid": "3", + "Action": ["config:PutEvaluations"], + "Effect": "Allow", + "Resource": "*", + }, + { + "Sid": "4", + "Action": ["iam:List*", "iam:Get*"], + "Effect": "Allow", + "Resource": "*", + }, + { + "Sid": "5", + "Action": ["sts:AssumeRole"], + "Effect": "Allow", + "Resource": "*", + }, ] if self.args.lambda_subnets and self.args.lambda_security_groups: vpc_policy = { @@ -4194,7 +4879,10 @@ def __create_function_cloudformation_template(self): lambda_role["Properties"]["Policies"] = [ { "PolicyName": "ConfigRulePolicy", - "PolicyDocument": {"Version": "2012-10-17", "Statement": lambda_policy_statements}, + "PolicyDocument": { + "Version": "2012-10-17", + "Statement": lambda_policy_statements, + }, } ] lambda_role["Properties"]["ManagedPolicyArns"] = [ @@ -4215,7 +4903,10 @@ def __create_function_cloudformation_template(self): lambda_function["Type"] = "AWS::Lambda::Function" properties = {} properties["FunctionName"] = self.__get_lambda_name(rule_name, params) - properties["Code"] = {"S3Bucket": {"Ref": "SourceBucket"}, "S3Key": rule_name + "/" + rule_name + ".zip"} + properties["Code"] = { + "S3Bucket": {"Ref": "SourceBucket"}, + "S3Key": rule_name + "/" + rule_name + ".zip", + } properties["Description"] = "Function for AWS Config Rule " + rule_name properties["Handler"] = self.__get_handler(rule_name, params) properties["MemorySize"] = "256" @@ -4248,7 +4939,9 @@ def __create_function_cloudformation_template(self): lambda_permissions["Type"] = "AWS::Lambda::Permission" lambda_permissions["DependsOn"] = alphanum_rule_name + "LambdaFunction" lambda_permissions["Properties"] = { - "FunctionName": {"Fn::GetAtt": [alphanum_rule_name + "LambdaFunction", "Arn"]}, + "FunctionName": { + "Fn::GetAtt": [alphanum_rule_name + "LambdaFunction", "Arn"] + }, "Action": "lambda:InvokeFunction", "Principal": "config.amazonaws.com", } @@ -4260,15 +4953,25 @@ def __create_function_cloudformation_template(self): def __tag_config_rule(self, rule_name, 
cfn_tags, my_session): config_client = my_session.client("config") - config_arn = config_client.describe_config_rules(ConfigRuleNames=[rule_name])["ConfigRules"][0]["ConfigRuleArn"] + config_arn = config_client.describe_config_rules(ConfigRuleNames=[rule_name])[ + "ConfigRules" + ][0]["ConfigRuleArn"] response = config_client.tag_resource(ResourceArn=config_arn, Tags=cfn_tags) return response def __get_lambda_layers(self, my_session, args, params): layers = [] if "SourceRuntime" in params: - if params["SourceRuntime"] in ["python3.7-lib", "python3.8-lib", "python3.9-lib"]: - if hasattr(args, "generated_lambda_layer") and args.generated_lambda_layer: + if params["SourceRuntime"] in [ + "python3.7-lib", + "python3.8-lib", + "python3.9-lib", + "python3.10-lib", + ]: + if ( + hasattr(args, "generated_lambda_layer") + and args.generated_lambda_layer + ): lambda_layer_version = self.__get_existing_lambda_layer( my_session, layer_name=args.custom_layer_name ) @@ -4276,7 +4979,9 @@ def __get_lambda_layers(self, my_session, args, params): print( f"{my_session.region_name} generated-lambda-layer flag received, but layer [{args.custom_layer_name}] not found in {my_session.region_name}. Creating one now" ) - self.__create_new_lambda_layer(my_session, layer_name=args.custom_layer_name) + self.__create_new_lambda_layer( + my_session, layer_name=args.custom_layer_name + ) lambda_layer_version = self.__get_existing_lambda_layer( my_session, layer_name=args.custom_layer_name ) @@ -4285,7 +4990,9 @@ def __get_lambda_layers(self, my_session, args, params): layers.append(args.rdklib_layer_arn) else: rdk_lib_version = RDKLIB_LAYER_VERSION[my_session.region_name] - rdklib_arn = RDKLIB_ARN_STRING.format(region=my_session.region_name, version=rdk_lib_version) + rdklib_arn = RDKLIB_ARN_STRING.format( + region=my_session.region_name, version=rdk_lib_version + ) layers.append(rdklib_arn) return layers @@ -4300,10 +5007,11 @@ def __get_existing_lambda_layer(self, my_session, layer_name="rdklib-layer"): return None def __create_new_lambda_layer(self, my_session, layer_name="rdklib-layer"): - successful_return = None if layer_name == "rdklib-layer": - successful_return = self.__create_new_lambda_layer_serverless_repo(my_session) + successful_return = self.__create_new_lambda_layer_serverless_repo( + my_session + ) # If that doesn't work, create it locally and upload - SAR doesn't support the custom layer name if layer_name != "rdklib-layer" or not successful_return: @@ -4334,7 +5042,9 @@ def __create_new_lambda_layer_serverless_repo(self, my_session): change_set_arn = sar_client.create_cloud_formation_change_set( ApplicationId=RDKLIB_LAYER_SAR_ID, StackName="rdklib" )["ChangeSetId"] - print(f"[{my_session.region_name}]: Creating change set to deploy rdklib-layer") + print( + f"[{my_session.region_name}]: Creating change set to deploy rdklib-layer" + ) code = self.__check_on_change_set(cfn_client, change_set_arn) if code == 1: print( @@ -4342,9 +5052,13 @@ def __create_new_lambda_layer_serverless_repo(self, my_session): ) return 1 if code == -1: - print(f"[{my_session.region_name}]: Error creating change set, attempting to use manual deployment") + print( + f"[{my_session.region_name}]: Error creating change set, attempting to use manual deployment" + ) raise ClientError() - print(f"[{my_session.region_name}]: Executing change set to deploy rdklib-layer") + print( + f"[{my_session.region_name}]: Executing change set to deploy rdklib-layer" + ) cfn_client.execute_change_set(ChangeSetName=change_set_arn) waiter = 
cfn_client.get_waiter(f"stack_{create_type}_complete") waiter.wait(StackName="serverlessrepo-rdklib") @@ -4358,7 +5072,9 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer region = my_session.region_name print(f"[{region}]: Creating new {layer_name}") folder_name = "lib" + str(uuid.uuid4()) - shell_command = f"pip3 install --target python boto3 botocore rdk rdklib future mock" + shell_command = ( + "pip3 install --target python boto3 botocore rdk rdklib future mock" + ) print(f"[{region}]: Installing Packages to {folder_name}/python") try: @@ -4367,7 +5083,7 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer print(e) sys.exit(1) os.chdir(folder_name) - ret = subprocess.run(shell_command, capture_output=True, shell=True) + _ = subprocess.run(shell_command, capture_output=True, shell=True) print(f"[{region}]: Creating rdk_lib_layer.zip") shutil.make_archive(f"rdk_lib_layer", "zip", ".", "python") @@ -4378,12 +5094,17 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer print(f"[{region}]: Creating temporary S3 Bucket") bucket_name = "rdkliblayertemp" + str(uuid.uuid4()) if region != "us-east-1": - s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={"LocationConstraint": region}) + s3_client.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": region}, + ) if region == "us-east-1": s3_client.create_bucket(Bucket=bucket_name) print(f"[{region}]: Uploading rdk_lib_layer.zip to S3") - s3_resource.Bucket(bucket_name).upload_file(f"{folder_name}/rdk_lib_layer.zip", layer_name) + s3_resource.Bucket(bucket_name).upload_file( + f"{folder_name}/rdk_lib_layer.zip", layer_name + ) lambda_client = my_session.client("lambda") @@ -4391,7 +5112,7 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer lambda_client.publish_layer_version( LayerName=layer_name, Content={"S3Bucket": bucket_name, "S3Key": layer_name}, - CompatibleRuntimes=["python3.7", "python3.8", "python3.9"], + CompatibleRuntimes=["python3.7", "python3.8", "python3.9", "python3.10"], ) print(f"[{region}]: Deleting temporary S3 Bucket") @@ -4424,14 +5145,20 @@ def __init__(self, ci_type): ci_file = ci_type.replace("::", "_") + ".json" try: self.ci_json = json.load( - open(os.path.join(path.dirname(__file__), "template", example_ci_dir, ci_file), "r") + open( + os.path.join( + path.dirname(__file__), "template", example_ci_dir, ci_file + ), + "r", + ) ) except FileNotFoundError: + resource_url = "https://github.com/awslabs/aws-config-resource-schema/blob/master/config/properties/resource-types/" print( "No sample CI found for " + ci_type + ", even though it appears to be a supported CI. Please log an issue at https://github.com/awslabs/aws-config-rdk." 
- + "\nLook here: https://github.com/awslabs/aws-config-resource-schema/blob/master/config/properties/resource-types/ for additional info" + + f"\nLook here: {resource_url} for additional info" ) exit(1) diff --git a/rdk/template/configRule.json b/rdk/template/configRule.json index e8fe4fa9..43ab74a5 100644 --- a/rdk/template/configRule.json +++ b/rdk/template/configRule.json @@ -221,14 +221,6 @@ "PolicyDocument": { "Version": "2012-10-17", "Statement": [ - { - "Sid": "1", - "Action": [ - "s3:GetObject" - ], - "Effect": "Allow", - "Resource": { "Fn::Sub": "arn:${AWS::Partition}:s3:::${SourceBucket}/${SourcePath}" } - }, { "Sid": "2", "Action": [ @@ -252,7 +244,6 @@ "Sid": "4", "Action": [ "iam:List*", - "iam:Describe*", "iam:Get*" ], "Effect": "Allow", diff --git a/rdk/template/configRuleOrganization.json b/rdk/template/configRuleOrganization.json index 59d1c58f..52dd506e 100644 --- a/rdk/template/configRuleOrganization.json +++ b/rdk/template/configRuleOrganization.json @@ -199,14 +199,6 @@ "PolicyDocument": { "Version": "2012-10-17", "Statement": [ - { - "Sid": "1", - "Action": [ - "s3:GetObject" - ], - "Effect": "Allow", - "Resource": { "Fn::Sub": "arn:${AWS::Partition}:s3:::${SourceBucket}/${SourcePath}" } - }, { "Sid": "2", "Action": [ @@ -230,7 +222,6 @@ "Sid": "4", "Action": [ "iam:List*", - "iam:Describe*", "iam:Get*" ], "Effect": "Allow", diff --git a/rdk/template/example_ci/AWS_R53_HostedZone.json b/rdk/template/example_ci/AWS_R53_HostedZone.json new file mode 100644 index 00000000..e93a324a --- /dev/null +++ b/rdk/template/example_ci/AWS_R53_HostedZone.json @@ -0,0 +1,39 @@ +{ + "version": "1.3", + "accountId": "123456789012", + "configurationItemCaptureTime": "2023-05-01T18:00:07.672Z", + "configurationItemStatus": "ResourceDiscovered", + "configurationStateId": "1682964007672", + "configurationItemMD5Hash": "", + "arn": "arn:aws:route53:::hostedzone/Z017455410COBZEF0ABCD", + "resourceType": "AWS::Route53::HostedZone", + "resourceId": "Z017455410COBZEF0ABCD", + "resourceName": "testdomain.lab.", + "awsRegion": "us-east-1", + "availabilityZone": "Regional", + "tags": {}, + "relatedEvents": [], + "relationships": [], + "configuration": { + "Id": "Z017455410COBZEF0ABCD", + "HostedZoneConfig": { + "Comment": "This is a test domain" + }, + "Name": "testdomain.lab.", + "NameServers": [ + "ns-1965.awsdns-53.co.uk", + "ns-944.awsdns-54.net", + "ns-1144.awsdns-15.org", + "ns-430.awsdns-53.com" + ], + "VPCs": [], + "HostedZoneTags": [ + { + "Key": "cost_center", + "Value": "payroll" + } + ] + }, + "supplementaryConfiguration": {}, + "resourceTransitionStatus": "None" + } \ No newline at end of file diff --git a/rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json b/rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json new file mode 100644 index 00000000..23b6d759 --- /dev/null +++ b/rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json @@ -0,0 +1,23 @@ +{ + "version": "1.3", + "accountId": "123456789012", + "configurationItemCaptureTime": "2022-05-20T15:53:57.732Z", + "configurationItemStatus": "ResourceDiscovered", + "configurationStateId": "1653062037732", + "configurationItemMD5Hash": "", + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": "123456789012", + "awsRegion": "us-east-1", + "availabilityZone": "Not Applicable", + "tags": {}, + "relatedEvents": [], + "relationships": [], + "configuration": { + "blockPublicAcls": true, + "ignorePublicAcls": true, + "blockPublicPolicy": true, + "restrictPublicBuckets": true + }, + 
"supplementaryConfiguration": {}, + "resourceTransitionStatus": "None" + } \ No newline at end of file diff --git a/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json b/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json index 2846342f..109534b5 100644 --- a/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json +++ b/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json @@ -48,1772 +48,1519 @@ "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2894856", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2896496", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2919355", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2919442", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2920189", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB2934520", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2938066", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2938772", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB2949621", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2954879", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2955164", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB2959626", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2962409", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB2962806", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2965500", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": 
"KB2967917", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2969339", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2971203", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2973351", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2973448", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2975061", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2975719", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2976627", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2977765", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2978041", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2978126", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2984006", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2987107", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2989647", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2989930", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2993100", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2995004", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2995388", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB2996799", - 
"InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2998174", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB2999226", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3000483", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3000850", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3003057", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, February 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3004361", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3004365", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3004545", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3008923", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3012199", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3012702", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013172", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013769", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013791", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013816", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3014442", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3019978", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, February 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3020338", - "InstalledBy": "NT 
AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3021910", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, February 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3021952", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3022345", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3022777", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3023222", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3023266", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3024751", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3024755", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3029603", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3030377", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3030947", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3032359", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3032663", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3033446", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3035126", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3036612", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3037579", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3037924", - "InstalledBy": "NT AUTHORITY -SYSTEM" + 
"InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3038002", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3038314", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3038701", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3041857", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3042085", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3042553", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3044374", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3044673", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045634", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3045685", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045717", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045719", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3045755", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045992", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3045999", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3046017", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3046737", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3048043", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3049563", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054169", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054203", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054256", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054464", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3055323", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3055343", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3055642", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3059316", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3059317", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3060681", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3060793", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3061512", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3063843", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3064209", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3068708", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3071756", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3074228", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, 
September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3074548", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3075220", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3075853", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3077715", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3078071", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3078405", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3078676", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3080042", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3080149", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3082089", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3083325", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3083711", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3083992", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3084135", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3084905", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3086255", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3087038", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3087041", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3087137", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3091297", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3092601", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3092627", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3093983", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3094486", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3095701", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3096433", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3097997", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3098779", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3098785", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3099834", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3100473", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3100773", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3100919", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3100956", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3102429", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3102467", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3102812", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3103616", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3103696", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3103709", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3104002", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3109094", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3109103", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3109976", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3110329", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3112148", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3112336", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3115224", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3118401", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3121255", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3121261", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3121461", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3121918", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3122654", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3122660", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3123242", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3123245", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3123479", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3124275", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3125424", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3126033", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3126434", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3126587", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3126593", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3127226", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3127231", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3128650", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3133043", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3133681", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3133690", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3133924", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3134179", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3134242", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3134814", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3134815", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3135449", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3135456", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3135998", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3137061", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3137725", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3137728", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3138602", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3138615", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3139164", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3139398", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3139914", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3139929", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3140219", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3140234", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3141092", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", 
"Description": "Security Update", "HotFixID": "KB3142036", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3145384", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3145432", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3146604", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3146723", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3146751", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3146963", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3147071", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3148198", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3148851", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3149090", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3149157", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3153704", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3154070", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3155784", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3156016", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3156017", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3156019", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security 
Update", "HotFixID": "KB3156059", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3156418", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3159398", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3160005", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3161561", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3161949", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3161958", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3162343", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3162835", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164024", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164033", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164035", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164294", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3167679", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3169704", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3170377", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3170455", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3172614", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 
2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3172727", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3172729", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3173424", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3174644", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3175024", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3175443", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3175887", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3177108", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3177186", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3177723", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3177725", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3178034", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3178539", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3179574", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, October 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3179948", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, October 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3182203", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3184122", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3184943", - "InstalledBy": "NT AUTHORITY 
-SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3185319", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3185911", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, October 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3185331", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" } ] }, @@ -1922,13 +1669,13 @@ SYSTEM" "tags": {}, "configurationItemVersion": "1.2", "configurationItemCaptureTime": "2016-10-26T19:11:44.151Z", - "configurationStateId": 1477509104151, - "awsAccountId": "123456789012", + "configurationStateId": "1477509104151", + "awsAccountId": "138920347130", "configurationItemStatus": "ResourceDiscovered", "resourceType": "AWS::SSM::ManagedInstanceInventory", "resourceId": "i-07f6b44c44bab9e8e", "resourceName": "", - "ARN": "arn:aws:ssm:us-east-1:123456789012:managed-instance-inventory/i-07f6b44c44bab9e8e", + "ARN": "arn:aws:ssm:us-east-1:138920347130:managed-instance-inventory/i-07f6b44c44bab9e8e", "awsRegion": "us-east-1", "availabilityZone": null, "configurationStateMd5Hash": "f5edb28b271ef50dddb2c5b08a535f14", diff --git a/rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs b/rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs deleted file mode 100644 index d5fefc36..00000000 --- a/rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs +++ /dev/null @@ -1,189 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.IO; -using System.Text; - -using System.Threading.Tasks; - -using Amazon.Lambda.Serialization.Json; -using Amazon.Lambda.Core; - -using Amazon.Lambda.ConfigEvents; -using Amazon.CloudWatchEvents; -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Runtime; -using Amazon.Lambda.Model; -using Newtonsoft.Json.Linq; - -// Assembly attribute to enable the Lambda function's JSON input to be converted into a .NET class. -[assembly: LambdaSerializer(typeof(Amazon.Lambda.Serialization.Json.JsonSerializer))] - -namespace Rdk -{ - public class CustomConfigHandler - { - public const String AWS_REGION_PROPERTY = "AWS_DEFAULT_REGION"; - public const String MESSAGE_TYPE_PROPERTY = "messageType"; - public const String HOST_ID = "hostId"; - public const String PLACEMENT = "placement"; - public const String CONFIGURATION = "configuration"; - public const String IMAGE_ID = "imageId"; - public const String STATUS_PATH = "configurationItemStatus"; - public const String TENANCY = "tenancy"; - public const String RESOURCE_DELETED = "ResourceDeleted"; - public const String RESOURCE_DELETED_NOT_RECORDED = "ResourceDeletedNotRecorded"; - public const String CAPTURE_TIME_PATH = "configurationItemCaptureTime"; - public const String CONFIGURATION_ITEM = "configurationItem"; - public const String RESOURCE_ID = "resourceId"; - public const String RESOURCE_NOT_RECORDED = "ResourceNotRecorded"; - public const String RESOURCE_TYPE = "resourceType"; - - - IAmazonConfigService ConfigService { get; set; } - - /// - /// Default constructor. This constructor is used by Lambda to construct the instance. 
When invoked in a Lambda environment - /// the AWS credentials will come from the IAM role associated with the function and the AWS region will be set to the - /// region the Lambda function is executed in. - /// - public CustomConfigHandler() - { - Console.WriteLine("inside constructor..."); - } - - /// - /// Constructs an instance with a preconfigured S3 client. This can be used for testing the outside of the Lambda environment. - /// - /// - public CustomConfigHandler(IAmazonConfigService configService) - { - this.ConfigService = configService; - } - - /// - /// This method is called for every Lambda invocation. This method takes in an Config event object and can be used - /// to respond to Config notifications. - /// - /// - /// - /// Nothing - public async Task FunctionHandler(ConfigEvent evnt, ILambdaContext context) - { - Console.WriteLine("inside function handler..."); - Amazon.RegionEndpoint region = Amazon.RegionEndpoint.GetBySystemName(System.Environment.GetEnvironmentVariable(AWS_REGION_PROPERTY)); - AmazonConfigServiceClient configServiceClient = new AmazonConfigServiceClient(region); - await DoHandle(evnt, context, configServiceClient); - } - - private async Task DoHandle(ConfigEvent configEvent, ILambdaContext context, AmazonConfigServiceClient configServiceClient) - { - JObject ruleParamsObj; - JObject configItem; - - if (configEvent.RuleParameters != null){ - ruleParamsObj = JObject.Parse(configEvent.RuleParameters.ToString()); - } else { - ruleParamsObj = new JObject(); - } - - JObject invokingEventObj = JObject.Parse(configEvent.InvokingEvent.ToString()); - if(invokingEventObj["configurationItem"] != null){ - configItem = JObject.Parse(invokingEventObj[CONFIGURATION_ITEM].ToString()); - } else { - configItem = new JObject(); - } - - FailForIncompatibleEventTypes(invokingEventObj); - ComplianceType myCompliance = ComplianceType.NOT_APPLICABLE; - - if (!IsEventNotApplicable(configItem, configEvent.EventLeftScope)) - { - myCompliance = RuleCode.EvaluateCompliance(invokingEventObj, ruleParamsObj, context); - } - - // Associates the evaluation result with the AWS account published in the event. 
- Evaluation evaluation = new Evaluation { - ComplianceResourceId = GetResourceId(configItem), - ComplianceResourceType = GetResourceType(configItem), - OrderingTimestamp = GetCiCapturedTime(configItem), - ComplianceType = myCompliance - }; - - await DoPutEvaluations(configServiceClient, configEvent, evaluation); - } - - private String GetResourceType(JObject configItem) - { - return (String) configItem[RESOURCE_TYPE]; - } - - private void FailForIncompatibleEventTypes(JObject invokingEventObj) - { - String messageType = (String) invokingEventObj[MESSAGE_TYPE_PROPERTY]; - if (!IsCompatibleMessageType(messageType)) - { - throw new Exception(String.Format("Events with the message type '{0}' are not evaluated for this Config rule.", messageType)); - } - } - - private String GetResourceId(JObject configItem) - { - return (String) configItem[RESOURCE_ID]; - } - - private DateTime GetCiCapturedTime(JObject configItem) - { - return DateTime.Parse((String) configItem[CAPTURE_TIME_PATH]); - } - - private bool IsCompatibleMessageType(String messageType) - { - return String.Equals(MessageType.ConfigurationItemChangeNotification.ToString(), messageType); - } - - private bool IsEventNotApplicable(JObject configItem, bool eventLeftScope) - { - String status = configItem[STATUS_PATH].ToString(); - return (IsStatusNotApplicable(status) || eventLeftScope); - } - - private bool IsStatusNotApplicable(String status) - { - return String.Equals(RESOURCE_DELETED, status) - || String.Equals(RESOURCE_DELETED_NOT_RECORDED, status) - || String.Equals(RESOURCE_NOT_RECORDED, status); - } - - // Sends the evaluation results to AWS Config. - private async Task DoPutEvaluations(AmazonConfigServiceClient configClient, ConfigEvent configEvent, Evaluation evaluation) - { - Console.WriteLine("inside DoPutEvaluations..."); - PutEvaluationsRequest req = new PutEvaluationsRequest(); - req.Evaluations.Add(evaluation); - req.ResultToken = configEvent.ResultToken; - - - Task taskResp = configClient.PutEvaluationsAsync(req); - PutEvaluationsResponse response = await taskResp; - - // Ends the function execution if any evaluation results are not successfully reported. - if (response.FailedEvaluations.Count > 0) { - throw new Exception(String.Format( - "The following evaluations were not successfully reported to AWS Config: %s", - response.FailedEvaluations)); - } - } - - private DateTime GetDate(String dateString) - { - return DateTime.Parse(dateString, null, System.Globalization.DateTimeStyles.RoundtripKind); - } - - static void Main(string[] args) - { - Console.WriteLine("Hello World!"); - } - } -} diff --git a/rdk/template/runtime/dotnetcore1.0/RuleCode.cs b/rdk/template/runtime/dotnetcore1.0/RuleCode.cs deleted file mode 100755 index 4d376498..00000000 --- a/rdk/template/runtime/dotnetcore1.0/RuleCode.cs +++ /dev/null @@ -1,27 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; - -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Lambda.Core; -using Amazon.Lambda.Model; -using Amazon.Lambda.ConfigEvents; -using Newtonsoft.Json.Linq; - -namespace Rdk -{ - class RuleCode - { - public static ComplianceType EvaluateCompliance(JObject invokingEvent, JObject ruleParameters, ILambdaContext context) - { - context.Logger.LogLine("Beginning Custom Config Rule Evaluation"); - - /* - YOUR CODE GOES HERE! 
- */ - - return ComplianceType.NON_COMPLIANT; - } - } -} diff --git a/rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json b/rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json deleted file mode 100755 index 7cf6db07..00000000 --- a/rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "Information": [ - "This file provides default values for the deployment wizard inside Visual Studio and the AWS Lambda commands added to the .NET Core CLI.", - "To learn more about the Lambda commands with the .NET Core CLI execute the following command at the command line in the project root directory.", - - "dotnet lambda help", - - "All the command line options for the Lambda command can be specified in this file." - ], - - "profile":"default", - "region" : "us-west-2", - "configuration": "Release", - "framework": "netcoreapp1.0", - "function-runtime": "dotnetcore1.0", - "function-memory-size": 256, - "function-timeout": 30, - "function-handler": "csharp7.0::Rdk.CustomConfigHandler::FunctionHandler" -} diff --git a/rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj b/rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj deleted file mode 100644 index 08059c59..00000000 --- a/rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj +++ /dev/null @@ -1,28 +0,0 @@ - - - - netcoreapp1.0 - - - Exe - - - - - - - - - - - - - - - - - - - - - diff --git a/rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs b/rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs deleted file mode 100644 index d5fefc36..00000000 --- a/rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs +++ /dev/null @@ -1,189 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.IO; -using System.Text; - -using System.Threading.Tasks; - -using Amazon.Lambda.Serialization.Json; -using Amazon.Lambda.Core; - -using Amazon.Lambda.ConfigEvents; -using Amazon.CloudWatchEvents; -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Runtime; -using Amazon.Lambda.Model; -using Newtonsoft.Json.Linq; - -// Assembly attribute to enable the Lambda function's JSON input to be converted into a .NET class. -[assembly: LambdaSerializer(typeof(Amazon.Lambda.Serialization.Json.JsonSerializer))] - -namespace Rdk -{ - public class CustomConfigHandler - { - public const String AWS_REGION_PROPERTY = "AWS_DEFAULT_REGION"; - public const String MESSAGE_TYPE_PROPERTY = "messageType"; - public const String HOST_ID = "hostId"; - public const String PLACEMENT = "placement"; - public const String CONFIGURATION = "configuration"; - public const String IMAGE_ID = "imageId"; - public const String STATUS_PATH = "configurationItemStatus"; - public const String TENANCY = "tenancy"; - public const String RESOURCE_DELETED = "ResourceDeleted"; - public const String RESOURCE_DELETED_NOT_RECORDED = "ResourceDeletedNotRecorded"; - public const String CAPTURE_TIME_PATH = "configurationItemCaptureTime"; - public const String CONFIGURATION_ITEM = "configurationItem"; - public const String RESOURCE_ID = "resourceId"; - public const String RESOURCE_NOT_RECORDED = "ResourceNotRecorded"; - public const String RESOURCE_TYPE = "resourceType"; - - - IAmazonConfigService ConfigService { get; set; } - - /// - /// Default constructor. This constructor is used by Lambda to construct the instance. 
When invoked in a Lambda environment - /// the AWS credentials will come from the IAM role associated with the function and the AWS region will be set to the - /// region the Lambda function is executed in. - /// - public CustomConfigHandler() - { - Console.WriteLine("inside constructor..."); - } - - /// - /// Constructs an instance with a preconfigured S3 client. This can be used for testing the outside of the Lambda environment. - /// - /// - public CustomConfigHandler(IAmazonConfigService configService) - { - this.ConfigService = configService; - } - - /// - /// This method is called for every Lambda invocation. This method takes in an Config event object and can be used - /// to respond to Config notifications. - /// - /// - /// - /// Nothing - public async Task FunctionHandler(ConfigEvent evnt, ILambdaContext context) - { - Console.WriteLine("inside function handler..."); - Amazon.RegionEndpoint region = Amazon.RegionEndpoint.GetBySystemName(System.Environment.GetEnvironmentVariable(AWS_REGION_PROPERTY)); - AmazonConfigServiceClient configServiceClient = new AmazonConfigServiceClient(region); - await DoHandle(evnt, context, configServiceClient); - } - - private async Task DoHandle(ConfigEvent configEvent, ILambdaContext context, AmazonConfigServiceClient configServiceClient) - { - JObject ruleParamsObj; - JObject configItem; - - if (configEvent.RuleParameters != null){ - ruleParamsObj = JObject.Parse(configEvent.RuleParameters.ToString()); - } else { - ruleParamsObj = new JObject(); - } - - JObject invokingEventObj = JObject.Parse(configEvent.InvokingEvent.ToString()); - if(invokingEventObj["configurationItem"] != null){ - configItem = JObject.Parse(invokingEventObj[CONFIGURATION_ITEM].ToString()); - } else { - configItem = new JObject(); - } - - FailForIncompatibleEventTypes(invokingEventObj); - ComplianceType myCompliance = ComplianceType.NOT_APPLICABLE; - - if (!IsEventNotApplicable(configItem, configEvent.EventLeftScope)) - { - myCompliance = RuleCode.EvaluateCompliance(invokingEventObj, ruleParamsObj, context); - } - - // Associates the evaluation result with the AWS account published in the event. 
- Evaluation evaluation = new Evaluation { - ComplianceResourceId = GetResourceId(configItem), - ComplianceResourceType = GetResourceType(configItem), - OrderingTimestamp = GetCiCapturedTime(configItem), - ComplianceType = myCompliance - }; - - await DoPutEvaluations(configServiceClient, configEvent, evaluation); - } - - private String GetResourceType(JObject configItem) - { - return (String) configItem[RESOURCE_TYPE]; - } - - private void FailForIncompatibleEventTypes(JObject invokingEventObj) - { - String messageType = (String) invokingEventObj[MESSAGE_TYPE_PROPERTY]; - if (!IsCompatibleMessageType(messageType)) - { - throw new Exception(String.Format("Events with the message type '{0}' are not evaluated for this Config rule.", messageType)); - } - } - - private String GetResourceId(JObject configItem) - { - return (String) configItem[RESOURCE_ID]; - } - - private DateTime GetCiCapturedTime(JObject configItem) - { - return DateTime.Parse((String) configItem[CAPTURE_TIME_PATH]); - } - - private bool IsCompatibleMessageType(String messageType) - { - return String.Equals(MessageType.ConfigurationItemChangeNotification.ToString(), messageType); - } - - private bool IsEventNotApplicable(JObject configItem, bool eventLeftScope) - { - String status = configItem[STATUS_PATH].ToString(); - return (IsStatusNotApplicable(status) || eventLeftScope); - } - - private bool IsStatusNotApplicable(String status) - { - return String.Equals(RESOURCE_DELETED, status) - || String.Equals(RESOURCE_DELETED_NOT_RECORDED, status) - || String.Equals(RESOURCE_NOT_RECORDED, status); - } - - // Sends the evaluation results to AWS Config. - private async Task DoPutEvaluations(AmazonConfigServiceClient configClient, ConfigEvent configEvent, Evaluation evaluation) - { - Console.WriteLine("inside DoPutEvaluations..."); - PutEvaluationsRequest req = new PutEvaluationsRequest(); - req.Evaluations.Add(evaluation); - req.ResultToken = configEvent.ResultToken; - - - Task taskResp = configClient.PutEvaluationsAsync(req); - PutEvaluationsResponse response = await taskResp; - - // Ends the function execution if any evaluation results are not successfully reported. - if (response.FailedEvaluations.Count > 0) { - throw new Exception(String.Format( - "The following evaluations were not successfully reported to AWS Config: %s", - response.FailedEvaluations)); - } - } - - private DateTime GetDate(String dateString) - { - return DateTime.Parse(dateString, null, System.Globalization.DateTimeStyles.RoundtripKind); - } - - static void Main(string[] args) - { - Console.WriteLine("Hello World!"); - } - } -} diff --git a/rdk/template/runtime/dotnetcore2.0/RuleCode.cs b/rdk/template/runtime/dotnetcore2.0/RuleCode.cs deleted file mode 100644 index 4d376498..00000000 --- a/rdk/template/runtime/dotnetcore2.0/RuleCode.cs +++ /dev/null @@ -1,27 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; - -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Lambda.Core; -using Amazon.Lambda.Model; -using Amazon.Lambda.ConfigEvents; -using Newtonsoft.Json.Linq; - -namespace Rdk -{ - class RuleCode - { - public static ComplianceType EvaluateCompliance(JObject invokingEvent, JObject ruleParameters, ILambdaContext context) - { - context.Logger.LogLine("Beginning Custom Config Rule Evaluation"); - - /* - YOUR CODE GOES HERE! 
- */ - - return ComplianceType.NON_COMPLIANT; - } - } -} diff --git a/rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json b/rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json deleted file mode 100644 index 7cf6db07..00000000 --- a/rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "Information": [ - "This file provides default values for the deployment wizard inside Visual Studio and the AWS Lambda commands added to the .NET Core CLI.", - "To learn more about the Lambda commands with the .NET Core CLI execute the following command at the command line in the project root directory.", - - "dotnet lambda help", - - "All the command line options for the Lambda command can be specified in this file." - ], - - "profile":"default", - "region" : "us-west-2", - "configuration": "Release", - "framework": "netcoreapp1.0", - "function-runtime": "dotnetcore1.0", - "function-memory-size": 256, - "function-timeout": 30, - "function-handler": "csharp7.0::Rdk.CustomConfigHandler::FunctionHandler" -} diff --git a/rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj b/rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj deleted file mode 100644 index dfbe6d79..00000000 --- a/rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj +++ /dev/null @@ -1,28 +0,0 @@ - - - - netcoreapp2.0 - - - Exe - - - - - - - - - - - - - - - - - - - - - diff --git a/rdk/template/runtime/nodejs4.3/rule_code.js b/rdk/template/runtime/nodejs4.3/rule_code.js deleted file mode 100644 index a8840f66..00000000 --- a/rdk/template/runtime/nodejs4.3/rule_code.js +++ /dev/null @@ -1,183 +0,0 @@ -'use strict'; - -const aws = require('aws-sdk'); - -const config = new aws.ConfigService(); - -function evaluateCompliance(configurationItem, ruleParameters, callback) { - - /* - ############################### - # Add your custom logic here. # - ############################### - */ - - callback('NOT_APPLICABLE'); -} - -//Boilerplate Code - You should not need to change anything below this comment. -function rule_handler(event, context, callback) { - //console.info(event); - const invokingEvent = JSON.parse(event.invokingEvent); - const configItem = invokingEvent.configurationItem; - const ruleParameters = JSON.parse(event.ruleParameters); - evaluateCompliance(configItem, ruleParameters, function(results){ - console.log(results); - callback(null, results); - }); -} - -// Helper function used to validate input -function checkDefined(reference, referenceName) { - if (!reference) { - throw new Error(`Error: ${referenceName} is not defined`); - } - return reference; -} - -// Check whether the message is OversizedConfigurationItemChangeNotification or not -function isOverSizedChangeNotification(messageType) { - checkDefined(messageType, 'messageType'); - return messageType === 'OversizedConfigurationItemChangeNotification'; -} - -// Check whether the message is a ScheduledNotification or not -function isScheduledNotification(messageType) { - checkDefined(messageType, 'messageType'); - return messageType === 'ScheduledNotification' -} - -// Get configurationItem using getResourceConfigHistory API. 
-function getConfiguration(resourceType, resourceId, configurationCaptureTime, callback) { - config.getResourceConfigHistory({ resourceType, resourceId, laterTime: new Date(configurationCaptureTime), limit: 1 }, (err, data) => { - if (err) { - callback(err, null); - } - const configurationItem = data.configurationItems[0]; - callback(null, configurationItem); - }); -} - -// Convert from the API model to the original invocation model -/*eslint no-param-reassign: ["error", { "props": false }]*/ -function convertApiConfiguration(apiConfiguration) { - apiConfiguration.awsAccountId = apiConfiguration.accountId; - apiConfiguration.ARN = apiConfiguration.arn; - apiConfiguration.configurationStateMd5Hash = apiConfiguration.configurationItemMD5Hash; - apiConfiguration.configurationItemVersion = apiConfiguration.version; - apiConfiguration.configuration = JSON.parse(apiConfiguration.configuration); - if ({}.hasOwnProperty.call(apiConfiguration, 'relationships')) { - for (let i = 0; i < apiConfiguration.relationships.length; i++) { - apiConfiguration.relationships[i].name = apiConfiguration.relationships[i].relationshipName; - } - } - return apiConfiguration; -} - -// Based on the type of message get the configuration item either from configurationItem in the invoking event or using the getResourceConfigHistory API in getConfiguration function. -function getConfigurationItem(invokingEvent, callback) { - checkDefined(invokingEvent, 'invokingEvent'); - if (isOverSizedChangeNotification(invokingEvent.messageType)) { - const configurationItemSummary = checkDefined(invokingEvent.configurationItemSummary, 'configurationItemSummary'); - getConfiguration(configurationItemSummary.resourceType, configurationItemSummary.resourceId, configurationItemSummary.configurationItemCaptureTime, (err, apiConfigurationItem) => { - if (err) { - callback(err); - } - const configurationItem = convertApiConfiguration(apiConfigurationItem); - callback(null, configurationItem); - }); - } else if (isScheduledNotification(invokingEvent.messageType)) { - callback(null, null) - } else { - checkDefined(invokingEvent.configurationItem, 'configurationItem'); - callback(null, invokingEvent.configurationItem); - } -} - -// Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. 
-function isApplicable(configurationItem, event) { - //checkDefined(configurationItem, 'configurationItem'); - checkDefined(event, 'event'); - //const status = configurationItem.configurationItemStatus; - const eventLeftScope = event.eventLeftScope; - //return (status === 'OK' || status === 'ResourceDiscovered') && eventLeftScope === false; - return (eventLeftScope === false); -} - -// This is the handler that's invoked by Lambda -// Most of this code is boilerplate; use as is -exports.lambda_handler = function(event, context, callback) { - checkDefined(event, 'event'); - const invokingEvent = JSON.parse(event.invokingEvent); - const ruleParameters = JSON.parse(event.ruleParameters); - getConfigurationItem(invokingEvent, (err, configurationItem) => { - if (err) { - callback(err); - } - //let compliance = 'NOT_APPLICABLE'; - if (isApplicable(configurationItem, event)) { - invokingEvent.configurationItem = configurationItem; - event.invokingEvent = JSON.stringify(invokingEvent); - rule_handler(event, context, (err, compliance_results) => { - if (err) { - callback(err); - } - //compliance = computedCompliance; - var putEvaluationsRequest = {}; - - // Put together the request that reports the evaluation status - if (typeof compliance_results === 'string' || compliance_results instanceof String){ - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: compliance_results, - OrderingTimestamp: configurationItem.configurationItemCaptureTime - } - ]; - } else if (compliance_results instanceof Array) { - putEvaluationsRequest.Evaluations = []; - - var fields = ['ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp']; - - for (var i = 0; i < compliance_results.length; i++) { - var missing_fields = false; - for (var j = 0; j < fields.length; j++) { - if (!compliance_results[i].hasOwnProperty(fields[j])) { - console.info("Missing " + fields[j] + " from custom evaluation."); - missing_fields = true; - } - } - - if (!missing_fields){ - putEvaluationsRequest.Evaluations.push(compliance_results[i]); - } - } - } else { - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: 'INSUFFICIENT_DATA', - OrderingTimestamp: configurationItem.configurationItemCaptureTime - } - ]; - } - - putEvaluationsRequest.ResultToken = event.resultToken; - - // Invoke the Config API to report the result of the evaluation - config.putEvaluations(putEvaluationsRequest, (error, data) => { - if (error) { - callback(error, null); - } else if (data.FailedEvaluations.length > 0) { - // Ends the function execution if any evaluation results are not successfully reported. - callback(JSON.stringify(data), null); - } else { - callback(null, data); - } - }); - }); - } - }); -}; diff --git a/rdk/template/runtime/nodejs6.10/rule_code.js b/rdk/template/runtime/nodejs6.10/rule_code.js deleted file mode 100644 index bb90065a..00000000 --- a/rdk/template/runtime/nodejs6.10/rule_code.js +++ /dev/null @@ -1,215 +0,0 @@ -"use strict"; - -const aws = require("aws-sdk"); - -const config = new aws.ConfigService(); - -function evaluateCompliance(configurationItem, ruleParameters, callback) { - /* - ############################### - # Add your custom logic here. 
# - ############################### - */ - - callback("NOT_APPLICABLE"); -} - -//Boilerplate Code - You should not need to change anything below this comment. -function rule_handler(event, context, callback) { - //console.info(event); - const invokingEvent = JSON.parse(event.invokingEvent); - const configItem = invokingEvent.configurationItem; - const ruleParameters = JSON.parse(event.ruleParameters); - evaluateCompliance(configItem, ruleParameters, function (results) { - console.log(results); - callback(null, results); - }); -} - -// Helper function used to validate input -function checkDefined(reference, referenceName) { - if (!reference) { - throw new Error(`Error: ${referenceName} is not defined`); - } - return reference; -} - -// Check whether the message is OversizedConfigurationItemChangeNotification or not -function isOverSizedChangeNotification(messageType) { - checkDefined(messageType, "messageType"); - return messageType === "OversizedConfigurationItemChangeNotification"; -} - -// Check whether the message is a ScheduledNotification or not -function isScheduledNotification(messageType) { - checkDefined(messageType, "messageType"); - return messageType === "ScheduledNotification"; -} - -// Get configurationItem using getResourceConfigHistory API. -function getConfiguration( - resourceType, - resourceId, - configurationCaptureTime, - callback -) { - config.getResourceConfigHistory( - { - resourceType, - resourceId, - laterTime: new Date(configurationCaptureTime), - limit: 1, - }, - (err, data) => { - if (err) { - callback(err, null); - } - const configurationItem = data.configurationItems[0]; - callback(null, configurationItem); - } - ); -} - -// Convert from the API model to the original invocation model -/*eslint no-param-reassign: ["error", { "props": false }]*/ -function convertApiConfiguration(apiConfiguration) { - apiConfiguration.awsAccountId = apiConfiguration.accountId; - apiConfiguration.ARN = apiConfiguration.arn; - apiConfiguration.configurationStateMd5Hash = - apiConfiguration.configurationItemMD5Hash; - apiConfiguration.configurationItemVersion = apiConfiguration.version; - apiConfiguration.configuration = JSON.parse(apiConfiguration.configuration); - if ({}.hasOwnProperty.call(apiConfiguration, "relationships")) { - for (let i = 0; i < apiConfiguration.relationships.length; i++) { - apiConfiguration.relationships[i].name = - apiConfiguration.relationships[i].relationshipName; - } - } - return apiConfiguration; -} - -// Based on the type of message get the configuration item either from configurationItem in the invoking event or using the getResourceConfigHistory API in getConfiguration function. 
-function getConfigurationItem(invokingEvent, callback) { - checkDefined(invokingEvent, "invokingEvent"); - if (isOverSizedChangeNotification(invokingEvent.messageType)) { - const configurationItemSummary = checkDefined( - invokingEvent.configurationItemSummary, - "configurationItemSummary" - ); - getConfiguration( - configurationItemSummary.resourceType, - configurationItemSummary.resourceId, - configurationItemSummary.configurationItemCaptureTime, - (err, apiConfigurationItem) => { - if (err) { - callback(err); - } - const configurationItem = convertApiConfiguration(apiConfigurationItem); - callback(null, configurationItem); - } - ); - } else if (isScheduledNotification(invokingEvent.messageType)) { - callback(null, null); - } else { - checkDefined(invokingEvent.configurationItem, "configurationItem"); - callback(null, invokingEvent.configurationItem); - } -} - -// Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. -function isApplicable(configurationItem, event) { - //checkDefined(configurationItem, 'configurationItem'); - checkDefined(event, "event"); - //const status = configurationItem.configurationItemStatus; - const eventLeftScope = event.eventLeftScope; - //return (status === 'OK' || status === 'ResourceDiscovered') && eventLeftScope === false; - return eventLeftScope === false; -} - -// This is the handler that's invoked by Lambda -// Most of this code is boilerplate; use as is -exports.lambda_handler = function (event, context, callback) { - checkDefined(event, "event"); - const invokingEvent = JSON.parse(event.invokingEvent); - const ruleParameters = JSON.parse(event.ruleParameters); - getConfigurationItem(invokingEvent, (err, configurationItem) => { - if (err) { - callback(err); - } - //let compliance = 'NOT_APPLICABLE'; - if (isApplicable(configurationItem, event)) { - invokingEvent.configurationItem = configurationItem; - event.invokingEvent = JSON.stringify(invokingEvent); - rule_handler(event, context, (err, compliance_results) => { - if (err) { - callback(err); - } - //compliance = computedCompliance; - var putEvaluationsRequest = {}; - - // Put together the request that reports the evaluation status - if ( - typeof compliance_results === "string" || - compliance_results instanceof String - ) { - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: compliance_results, - OrderingTimestamp: configurationItem.configurationItemCaptureTime, - }, - ]; - } else if (compliance_results instanceof Array) { - putEvaluationsRequest.Evaluations = []; - - var fields = [ - "ComplianceResourceType", - "ComplianceResourceId", - "ComplianceType", - "OrderingTimestamp", - ]; - - for (var i = 0; i < compliance_results.length; i++) { - var missing_fields = false; - for (var j = 0; j < fields.length; j++) { - if (!compliance_results[i].hasOwnProperty(fields[j])) { - console.info( - "Missing " + fields[j] + " from custom evaluation." 
- ); - missing_fields = true; - } - } - - if (!missing_fields) { - putEvaluationsRequest.Evaluations.push(compliance_results[i]); - } - } - } else { - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: "INSUFFICIENT_DATA", - OrderingTimestamp: configurationItem.configurationItemCaptureTime, - }, - ]; - } - - putEvaluationsRequest.ResultToken = event.resultToken; - - // Invoke the Config API to report the result of the evaluation - config.putEvaluations(putEvaluationsRequest, (error, data) => { - if (error) { - callback(error, null); - } else if (data.FailedEvaluations.length > 0) { - // Ends the function execution if any evaluation results are not successfully reported. - callback(JSON.stringify(data), null); - } else { - callback(null, data); - } - }); - }); - } - }); -}; diff --git a/rdk/template/runtime/python3.10-lib/rule_code.py b/rdk/template/runtime/python3.10-lib/rule_code.py new file mode 100644 index 00000000..90fdebe2 --- /dev/null +++ b/rdk/template/runtime/python3.10-lib/rule_code.py @@ -0,0 +1,25 @@ +from rdklib import Evaluator, Evaluation, ConfigRule, ComplianceType +<%ApplicableResources1%> +class <%RuleName%>(ConfigRule): + def evaluate_change(self, event, client_factory, configuration_item, valid_rule_parameters): + ############################### + # Add your custom logic here. # + ############################### + + return [Evaluation(ComplianceType.NOT_APPLICABLE)] + + #def evaluate_periodic(self, event, client_factory, valid_rule_parameters): + # pass + + def evaluate_parameters(self, rule_parameters): + valid_rule_parameters = rule_parameters + return valid_rule_parameters + + +################################ +# DO NOT MODIFY ANYTHING BELOW # +################################ +def lambda_handler(event, context): + my_rule = <%RuleName%>() + evaluator = Evaluator(my_rule<%ApplicableResources2%>) + return evaluator.handle(event, context) diff --git a/rdk/template/runtime/python3.10-lib/rule_test.py b/rdk/template/runtime/python3.10-lib/rule_test.py new file mode 100644 index 00000000..db0cf30c --- /dev/null +++ b/rdk/template/runtime/python3.10-lib/rule_test.py @@ -0,0 +1,157 @@ +import datetime +import json +import logging +import unittest +from unittest.mock import patch, MagicMock +from botocore.exceptions import ClientError +from rdklib import Evaluation, ComplianceType +import rdklibtest + +############## +# Parameters # +############## + +# Define the default resource to report to Config Rules +# TODO - Replace with your resource type +RESOURCE_TYPE = "AWS::IAM::Role" + +############# +# Main Code # +############# + +MODULE = __import__("check_security_hub_aggregator") +RULE = MODULE.check_security_hub_aggregator() + +CLIENT_FACTORY = MagicMock() + +# example for mocking IAM API calls +IAM_CLIENT_MOCK = MagicMock() +# STS client for getting account ID +STS_CLIENT_MOCK = MagicMock() + + +def mock_get_client(client_name, *args, **kwargs): + if client_name == "iam": + return IAM_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK + raise Exception("Attempting to create an unknown client") + + +@patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) +class ComplianceTest(unittest.TestCase): + rule_parameters = { + "SomeParameterKey": "SomeParameterValue", + "SomeParameterKey2": "SomeParameterValue2", + } + + role_sample_configuration_abridged = {"arn": "some-arn", "roleName": "testrole"} + + 
invoking_event_iam_role_sample = { + "configurationItem": { + "relatedEvents": [], + "relationships": [], + "configuration": role_sample_configuration_abridged, + "tags": {}, + "configurationItemCaptureTime": "2018-07-02T03:37:52.418Z", + "awsAccountId": "123456789012", + "configurationItemStatus": "ResourceDiscovered", + "resourceType": "AWS::IAM::Role", + "resourceId": "some-resource-id", + "resourceName": "some-resource-name", + "ARN": "some-arn", + }, + "notificationCreationTime": "2018-07-02T23:05:34.445Z", + "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", + } + + list_roles_response = { + "Roles": [ + { + "Path": "/", + "RoleName": "testrole", + "RoleId": "some-role-id", + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), + "Description": "this is a test role", + "MaxSessionDuration": 123, + "Tags": [ + {"Key": "one_tag", "Value": "its_value"}, + ], + "RoleLastUsed": { + "LastUsedDate": datetime.datetime(2015, 1, 1), + "Region": "us-east-1", + }, + }, + ] + } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} + + def setUp(self): + STS_CLIENT_MOCK.reset_mock() + + def test_sample(self): + self.assertTrue(True) + + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/rdk/template/runtime/python3.10/rule_code.py b/rdk/template/runtime/python3.10/rule_code.py new file mode 100644 index 00000000..682297b0 --- /dev/null +++ b/rdk/template/runtime/python3.10/rule_code.py @@ -0,0 +1,437 @@ +import json +import sys +import datetime +import boto3 +import botocore + +try: + import liblogging +except ImportError: + pass + +############## +# Parameters # +############## + +# Define the default resource to report to Config Rules +DEFAULT_RESOURCE_TYPE = "AWS::::Account" + +# Set to True to get the lambda to assume the Role attached on the Config Service (useful for cross-account). +ASSUME_ROLE_MODE = False + +# Other parameters (no change needed) +CONFIG_ROLE_TIMEOUT_SECONDS = 900 + +############# +# Main Code # +############# + + +def evaluate_compliance(event, configuration_item, valid_rule_parameters): + """Form the evaluation(s) to be return to Config Rules + + Return either: + None -- when no result needs to be displayed + a string -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE + a dictionary -- the evaluation dictionary, usually built by build_evaluation_from_config_item() + a list of dictionary -- a list of evaluation dictionary , usually built by build_evaluation() + + Keyword arguments: + event -- the event variable given in the lambda handler + configuration_item -- the configurationItem dictionary in the invokingEvent + valid_rule_parameters -- the output of the evaluate_parameters() representing validated parameters of the Config Rule + + Advanced Notes: + 1 -- if a resource is deleted and generate a configuration change with ResourceDeleted status, the Boilerplate code will put a NOT_APPLICABLE on this resource automatically. + 2 -- if a None or a list of dictionary is returned, the old evaluation(s) which are not returned in the new evaluation list are returned as NOT_APPLICABLE by the Boilerplate code + 3 -- if None or an empty string, list or dict is returned, the Boilerplate code will put a "shadow" evaluation to feedback that the evaluation took place properly + """ + + ############################### + # Add your custom logic here. # + ############################### + + return "NOT_APPLICABLE" + + +def evaluate_parameters(rule_parameters): + """Evaluate the rule parameters dictionary validity. Raise a ValueError for invalid parameters. + + Return: + anything suitable for the evaluate_compliance() + + Keyword arguments: + rule_parameters -- the Key/Value dictionary of the Config Rules parameters + """ + valid_rule_parameters = rule_parameters + return valid_rule_parameters + + +#################### +# Helper Functions # +#################### + +# Build an error to be displayed in the logs when the parameter is invalid. +def build_parameters_value_error_response(ex): + """Return an error dictionary when the evaluate_parameters() raises a ValueError. + + Keyword arguments: + ex -- Exception text + """ + return build_error_response( + internal_error_message="Parameter value is invalid", + internal_error_details="An ValueError was raised during the validation of the Parameter value", + customer_error_code="InvalidParameterValueException", + customer_error_message=str(ex), + ) + + +# This gets the client after assuming the Config service role +# either in the same AWS account or cross-account. +def get_client(service, event, region=None): + """Return the service boto client. It should be used instead of directly calling the client. 
+ + Keyword arguments: + service -- the service name used for calling the boto.client() + event -- the event variable given in the lambda handler + region -- the region where the client is called (default: None) + """ + if not ASSUME_ROLE_MODE: + return boto3.client(service, region) + credentials = get_assume_role_credentials(get_execution_role_arn(event), region) + return boto3.client( + service, + aws_access_key_id=credentials["AccessKeyId"], + aws_secret_access_key=credentials["SecretAccessKey"], + aws_session_token=credentials["SessionToken"], + region_name=region, + ) + + +# This generates an evaluation for config +def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None): + """Form an evaluation as a dictionary. Usually suited to report on scheduled rules. + + Keyword arguments: + resource_id -- the unique id of the resource to report + compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE + event -- the event variable given in the lambda handler + resource_type -- the CloudFormation resource type (or AWS::::Account) to report on the rule (default DEFAULT_RESOURCE_TYPE) + annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer. + """ + eval_cc = {} + if annotation: + eval_cc["Annotation"] = build_annotation(annotation) + eval_cc["ComplianceResourceType"] = resource_type + eval_cc["ComplianceResourceId"] = resource_id + eval_cc["ComplianceType"] = compliance_type + eval_cc["OrderingTimestamp"] = str(json.loads(event["invokingEvent"])["notificationCreationTime"]) + return eval_cc + + +def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None): + """Form an evaluation as a dictionary. Usually suited to report on configuration change rules. + + Keyword arguments: + configuration_item -- the configurationItem dictionary in the invokingEvent + compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE + annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer. 
+ """ + eval_ci = {} + if annotation: + eval_ci["Annotation"] = build_annotation(annotation) + eval_ci["ComplianceResourceType"] = configuration_item["resourceType"] + eval_ci["ComplianceResourceId"] = configuration_item["resourceId"] + eval_ci["ComplianceType"] = compliance_type + eval_ci["OrderingTimestamp"] = configuration_item["configurationItemCaptureTime"] + return eval_ci + + +#################### +# Boilerplate Code # +#################### + +# Get execution role for Lambda function +def get_execution_role_arn(event): + role_arn = None + if "ruleParameters" in event: + rule_params = json.loads(event["ruleParameters"]) + role_name = rule_params.get("ExecutionRoleName") + if role_name: + execution_role_prefix = event["executionRoleArn"].split("/")[0] + role_arn = "{}/{}".format(execution_role_prefix, role_name) + + if not role_arn: + role_arn = event["executionRoleArn"] + + return role_arn + + +# Build annotation within Service constraints +def build_annotation(annotation_string): + if len(annotation_string) > 256: + return annotation_string[:244] + " [truncated]" + return annotation_string + + +# Helper function used to validate input +def check_defined(reference, reference_name): + if not reference: + raise Exception("Error: ", reference_name, "is not defined") + return reference + + +# Check whether the message is OversizedConfigurationItemChangeNotification or not +def is_oversized_changed_notification(message_type): + check_defined(message_type, "messageType") + return message_type == "OversizedConfigurationItemChangeNotification" + + +# Check whether the message is a ScheduledNotification or not. +def is_scheduled_notification(message_type): + check_defined(message_type, "messageType") + return message_type == "ScheduledNotification" + + +# Get configurationItem using getResourceConfigHistory API +# in case of OversizedConfigurationItemChangeNotification +def get_configuration(resource_type, resource_id, configuration_capture_time): + result = AWS_CONFIG_CLIENT.get_resource_config_history( + resourceType=resource_type, resourceId=resource_id, laterTime=configuration_capture_time, limit=1 + ) + configuration_item = result["configurationItems"][0] + return convert_api_configuration(configuration_item) + + +# Convert from the API model to the original invocation model +def convert_api_configuration(configuration_item): + for k, v in configuration_item.items(): + if isinstance(v, datetime.datetime): + configuration_item[k] = str(v) + configuration_item["awsAccountId"] = configuration_item["accountId"] + configuration_item["ARN"] = configuration_item["arn"] + configuration_item["configurationStateMd5Hash"] = configuration_item["configurationItemMD5Hash"] + configuration_item["configurationItemVersion"] = configuration_item["version"] + configuration_item["configuration"] = json.loads(configuration_item["configuration"]) + if "relationships" in configuration_item: + for i in range(len(configuration_item["relationships"])): + configuration_item["relationships"][i]["name"] = configuration_item["relationships"][i]["relationshipName"] + return configuration_item + + +# Based on the type of message get the configuration item +# either from configurationItem in the invoking event +# or using the getResourceConfigHistory API in getConfiguration function. 
+def get_configuration_item(invoking_event): + check_defined(invoking_event, "invokingEvent") + if is_oversized_changed_notification(invoking_event["messageType"]): + configuration_item_summary = check_defined( + invoking_event["configurationItemSummary"], "configurationItemSummary" + ) + return get_configuration( + configuration_item_summary["resourceType"], + configuration_item_summary["resourceId"], + configuration_item_summary["configurationItemCaptureTime"], + ) + if is_scheduled_notification(invoking_event["messageType"]): + return None + return check_defined(invoking_event["configurationItem"], "configurationItem") + + +# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. +def is_applicable(configuration_item, event): + try: + check_defined(configuration_item, "configurationItem") + check_defined(event, "event") + except: + return True + status = configuration_item["configurationItemStatus"] + event_left_scope = event["eventLeftScope"] + if status == "ResourceDeleted": + print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.") + + return status in ("OK", "ResourceDiscovered") and not event_left_scope + + +def get_assume_role_credentials(role_arn, region=None): + sts_client = boto3.client("sts", region) + try: + assume_role_response = sts_client.assume_role( + RoleArn=role_arn, RoleSessionName="configLambdaExecution", DurationSeconds=CONFIG_ROLE_TIMEOUT_SECONDS + ) + if "liblogging" in sys.modules: + liblogging.logSession(role_arn, assume_role_response) + return assume_role_response["Credentials"] + except botocore.exceptions.ClientError as ex: + # Scrub error message for any internal account info leaks + print(str(ex)) + if "AccessDenied" in ex.response["Error"]["Code"]: + ex.response["Error"]["Message"] = "AWS Config does not have permission to assume the IAM role." + else: + ex.response["Error"]["Message"] = "InternalError" + ex.response["Error"]["Code"] = "InternalError" + raise ex + + +# This removes older evaluation (usually useful for periodic rule not reporting on AWS::::Account). 
+def clean_up_old_evaluations(latest_evaluations, event): + + cleaned_evaluations = [] + + old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule( + ConfigRuleName=event["configRuleName"], ComplianceTypes=["COMPLIANT", "NON_COMPLIANT"], Limit=100 + ) + + old_eval_list = [] + + while True: + for old_result in old_eval["EvaluationResults"]: + old_eval_list.append(old_result) + if "NextToken" in old_eval: + next_token = old_eval["NextToken"] + old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule( + ConfigRuleName=event["configRuleName"], + ComplianceTypes=["COMPLIANT", "NON_COMPLIANT"], + Limit=100, + NextToken=next_token, + ) + else: + break + + for old_eval in old_eval_list: + old_resource_id = old_eval["EvaluationResultIdentifier"]["EvaluationResultQualifier"]["ResourceId"] + newer_founded = False + for latest_eval in latest_evaluations: + if old_resource_id == latest_eval["ComplianceResourceId"]: + newer_founded = True + if not newer_founded: + cleaned_evaluations.append(build_evaluation(old_resource_id, "NOT_APPLICABLE", event)) + + return cleaned_evaluations + latest_evaluations + + +def lambda_handler(event, context): + if "liblogging" in sys.modules: + liblogging.logEvent(event) + + global AWS_CONFIG_CLIENT + + # print(event) + check_defined(event, "event") + invoking_event = json.loads(event["invokingEvent"]) + rule_parameters = {} + if "ruleParameters" in event: + rule_parameters = json.loads(event["ruleParameters"]) + + try: + valid_rule_parameters = evaluate_parameters(rule_parameters) + except ValueError as ex: + return build_parameters_value_error_response(ex) + + try: + AWS_CONFIG_CLIENT = get_client("config", event) + if invoking_event["messageType"] in [ + "ConfigurationItemChangeNotification", + "ScheduledNotification", + "OversizedConfigurationItemChangeNotification", + ]: + configuration_item = get_configuration_item(invoking_event) + if is_applicable(configuration_item, event): + compliance_result = evaluate_compliance(event, configuration_item, valid_rule_parameters) + else: + compliance_result = "NOT_APPLICABLE" + else: + return build_internal_error_response("Unexpected message type", str(invoking_event)) + except botocore.exceptions.ClientError as ex: + if is_internal_error(ex): + return build_internal_error_response("Unexpected error while completing API request", str(ex)) + return build_error_response( + "Customer error while making API request", + str(ex), + ex.response["Error"]["Code"], + ex.response["Error"]["Message"], + ) + except ValueError as ex: + return build_internal_error_response(str(ex), str(ex)) + + evaluations = [] + latest_evaluations = [] + + if not compliance_result: + latest_evaluations.append( + build_evaluation(event["accountId"], "NOT_APPLICABLE", event, resource_type="AWS::::Account") + ) + evaluations = clean_up_old_evaluations(latest_evaluations, event) + elif isinstance(compliance_result, str): + if configuration_item: + evaluations.append(build_evaluation_from_config_item(configuration_item, compliance_result)) + else: + evaluations.append( + build_evaluation(event["accountId"], compliance_result, event, resource_type=DEFAULT_RESOURCE_TYPE) + ) + elif isinstance(compliance_result, list): + for evaluation in compliance_result: + missing_fields = False + for field in ("ComplianceResourceType", "ComplianceResourceId", "ComplianceType", "OrderingTimestamp"): + if field not in evaluation: + print("Missing " + field + " from custom evaluation.") + missing_fields = True + + if not missing_fields: + 
latest_evaluations.append(evaluation) + evaluations = clean_up_old_evaluations(latest_evaluations, event) + elif isinstance(compliance_result, dict): + missing_fields = False + for field in ("ComplianceResourceType", "ComplianceResourceId", "ComplianceType", "OrderingTimestamp"): + if field not in compliance_result: + print("Missing " + field + " from custom evaluation.") + missing_fields = True + if not missing_fields: + evaluations.append(compliance_result) + else: + evaluations.append(build_evaluation_from_config_item(configuration_item, "NOT_APPLICABLE")) + + # Put together the request that reports the evaluation status + result_token = event["resultToken"] + test_mode = False + if result_token == "TESTMODE": + # Used solely for RDK test to skip actual put_evaluation API call + test_mode = True + + # Invoke the Config API to report the result of the evaluation + evaluation_copy = [] + evaluation_copy = evaluations[:] + while evaluation_copy: + AWS_CONFIG_CLIENT.put_evaluations( + Evaluations=evaluation_copy[:100], ResultToken=result_token, TestMode=test_mode + ) + del evaluation_copy[:100] + + # Used solely for RDK test to be able to test Lambda function + return evaluations + + +def is_internal_error(exception): + return ( + (not isinstance(exception, botocore.exceptions.ClientError)) + or exception.response["Error"]["Code"].startswith("5") + or "InternalError" in exception.response["Error"]["Code"] + or "ServiceError" in exception.response["Error"]["Code"] + ) + + +def build_internal_error_response(internal_error_message, internal_error_details=None): + return build_error_response(internal_error_message, internal_error_details, "InternalError", "InternalError") + + +def build_error_response( + internal_error_message, internal_error_details=None, customer_error_code=None, customer_error_message=None +): + error_response = { + "internalErrorMessage": internal_error_message, + "internalErrorDetails": internal_error_details, + "customerErrorMessage": customer_error_message, + "customerErrorCode": customer_error_code, + } + print(error_response) + return error_response diff --git a/rdk/template/runtime/python3.10/rule_test.py b/rdk/template/runtime/python3.10/rule_test.py new file mode 100644 index 00000000..e0f8c974 --- /dev/null +++ b/rdk/template/runtime/python3.10/rule_test.py @@ -0,0 +1,177 @@ +import sys +import unittest +from unittest.mock import MagicMock +import botocore + +############## +# Parameters # +############## + +# Define the default resource to report to Config Rules +DEFAULT_RESOURCE_TYPE = "AWS::::Account" + +############# +# Main Code # +############# + +CONFIG_CLIENT_MOCK = MagicMock() +STS_CLIENT_MOCK = MagicMock() + + +class Boto3Mock: + @staticmethod + def client(client_name, *args, **kwargs): + if client_name == "config": + return CONFIG_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK + raise Exception("Attempting to create an unknown client") + + +sys.modules["boto3"] = Boto3Mock() + +RULE = __import__("<%RuleName%>") + + +class ComplianceTest(unittest.TestCase): + + rule_parameters = '{"SomeParameterKey":"SomeParameterValue","SomeParameterKey2":"SomeParameterValue2"}' + + invoking_event_iam_role_sample = 
'{"configurationItem":{"relatedEvents":[],"relationships":[],"configuration":{},"tags":{},"configurationItemCaptureTime":"2018-07-02T03:37:52.418Z","awsAccountId":"123456789012","configurationItemStatus":"ResourceDiscovered","resourceType":"AWS::IAM::Role","resourceId":"some-resource-id","resourceName":"some-resource-name","ARN":"some-arn"},"notificationCreationTime":"2018-07-02T23:05:34.445Z","messageType":"ConfigurationItemChangeNotification"}' + + def setUp(self): + pass + + def test_sample(self): + self.assertTrue(True) + + # def test_sample_2(self): + # RULE.ASSUME_ROLE_MODE = False + # response = RULE.lambda_handler(build_lambda_configurationchange_event(self.invoking_event_iam_role_sample, self.rule_parameters), {}) + # resp_expected = [] + # resp_expected.append(build_expected_response('NOT_APPLICABLE', 'some-resource-id', 'AWS::IAM::Role')) + # assert_successful_evaluation(self, response, resp_expected) + + +#################### +# Helper Functions # +#################### + + +def build_lambda_configurationchange_event(invoking_event, rule_parameters=None): + event_to_return = { + "configRuleName": "myrule", + "executionRoleArn": "roleArn", + "eventLeftScope": False, + "invokingEvent": invoking_event, + "accountId": "123456789012", + "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", + "resultToken": "token", + } + if rule_parameters: + event_to_return["ruleParameters"] = rule_parameters + return event_to_return + + +def build_lambda_scheduled_event(rule_parameters=None): + invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}' + event_to_return = { + "configRuleName": "myrule", + "executionRoleArn": "roleArn", + "eventLeftScope": False, + "invokingEvent": invoking_event, + "accountId": "123456789012", + "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", + "resultToken": "token", + } + if rule_parameters: + event_to_return["ruleParameters"] = rule_parameters + return event_to_return + + +def build_expected_response( + compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None +): + if not annotation: + return { + "ComplianceType": compliance_type, + "ComplianceResourceId": compliance_resource_id, + "ComplianceResourceType": compliance_resource_type, + } + return { + "ComplianceType": compliance_type, + "ComplianceResourceId": compliance_resource_id, + "ComplianceResourceType": compliance_resource_type, + "Annotation": annotation, + } + + +def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1): + if isinstance(response, dict): + test_class.assertEquals(resp_expected["ComplianceResourceType"], response["ComplianceResourceType"]) + test_class.assertEquals(resp_expected["ComplianceResourceId"], response["ComplianceResourceId"]) + test_class.assertEquals(resp_expected["ComplianceType"], response["ComplianceType"]) + test_class.assertTrue(response["OrderingTimestamp"]) + if "Annotation" in resp_expected or "Annotation" in response: + test_class.assertEquals(resp_expected["Annotation"], response["Annotation"]) + elif isinstance(response, list): + test_class.assertEquals(evaluations_count, len(response)) + for i, response_expected in enumerate(resp_expected): + test_class.assertEquals(response_expected["ComplianceResourceType"], response[i]["ComplianceResourceType"]) + test_class.assertEquals(response_expected["ComplianceResourceId"], response[i]["ComplianceResourceId"]) + 
test_class.assertEquals(response_expected["ComplianceType"], response[i]["ComplianceType"]) + test_class.assertTrue(response[i]["OrderingTimestamp"]) + if "Annotation" in response_expected or "Annotation" in response[i]: + test_class.assertEquals(response_expected["Annotation"], response[i]["Annotation"]) + + +def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None): + if customer_error_code: + test_class.assertEqual(customer_error_code, response["customerErrorCode"]) + if customer_error_message: + test_class.assertEqual(customer_error_message, response["customerErrorMessage"]) + test_class.assertTrue(response["customerErrorCode"]) + test_class.assertTrue(response["customerErrorMessage"]) + if "internalErrorMessage" in response: + test_class.assertTrue(response["internalErrorMessage"]) + if "internalErrorDetails" in response: + test_class.assertTrue(response["internalErrorDetails"]) + + +def sts_mock(): + assume_role_response = { + "Credentials": {"AccessKeyId": "string", "SecretAccessKey": "string", "SessionToken": "string"} + } + STS_CLIENT_MOCK.reset_mock(return_value=True) + STS_CLIENT_MOCK.assume_role = MagicMock(return_value=assume_role_response) + + +################## +# Common Testing # +################## + + +class TestStsErrors(unittest.TestCase): + def test_sts_unknown_error(self): + RULE.ASSUME_ROLE_MODE = True + RULE.evaluate_parameters = MagicMock(return_value=True) + STS_CLIENT_MOCK.assume_role = MagicMock( + side_effect=botocore.exceptions.ClientError( + {"Error": {"Code": "unknown-code", "Message": "unknown-message"}}, "operation" + ) + ) + response = RULE.lambda_handler(build_lambda_configurationchange_event("{}"), {}) + assert_customer_error_response(self, response, "InternalError", "InternalError") + + def test_sts_access_denied(self): + RULE.ASSUME_ROLE_MODE = True + RULE.evaluate_parameters = MagicMock(return_value=True) + STS_CLIENT_MOCK.assume_role = MagicMock( + side_effect=botocore.exceptions.ClientError( + {"Error": {"Code": "AccessDenied", "Message": "access-denied"}}, "operation" + ) + ) + response = RULE.lambda_handler(build_lambda_configurationchange_event("{}"), {}) + assert_customer_error_response( + self, response, "AccessDenied", "AWS Config does not have permission to assume the IAM role." 
+ ) diff --git a/rdk/template/runtime/python3.7-lib/rule_test.py b/rdk/template/runtime/python3.7-lib/rule_test.py index 8f11d043..db0cf30c 100644 --- a/rdk/template/runtime/python3.7-lib/rule_test.py +++ b/rdk/template/runtime/python3.7-lib/rule_test.py @@ -4,7 +4,6 @@ import unittest from unittest.mock import patch, MagicMock from botocore.exceptions import ClientError -import rdklib from rdklib import Evaluation, ComplianceType import rdklibtest @@ -20,24 +19,27 @@ # Main Code # ############# -MODULE = __import__("<%RuleName%>") -RULE = MODULE.<%RuleName%>() +MODULE = __import__("check_security_hub_aggregator") +RULE = MODULE.check_security_hub_aggregator() CLIENT_FACTORY = MagicMock() # example for mocking IAM API calls IAM_CLIENT_MOCK = MagicMock() +# STS client for getting account ID +STS_CLIENT_MOCK = MagicMock() def mock_get_client(client_name, *args, **kwargs): if client_name == "iam": return IAM_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK raise Exception("Attempting to create an unknown client") @patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) class ComplianceTest(unittest.TestCase): - rule_parameters = { "SomeParameterKey": "SomeParameterValue", "SomeParameterKey2": "SomeParameterValue2", @@ -61,6 +63,7 @@ class ComplianceTest(unittest.TestCase): }, "notificationCreationTime": "2018-07-02T23:05:34.445Z", "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", } list_roles_response = { @@ -69,74 +72,85 @@ class ComplianceTest(unittest.TestCase): "Path": "/", "RoleName": "testrole", "RoleId": "some-role-id", - "Arn": "arn:aws:iam00000056789012:role/testrole", - "CreateDate": datetime(2015, 1, 1), + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), "Description": "this is a test role", "MaxSessionDuration": 123, "Tags": [ {"Key": "one_tag", "Value": "its_value"}, ], "RoleLastUsed": { - "LastUsedDate": datetime(2015, 1, 1), + "LastUsedDate": datetime.datetime(2015, 1, 1), "Region": "us-east-1", }, }, ] } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} def setUp(self): - pass + STS_CLIENT_MOCK.reset_mock() def test_sample(self): self.assertTrue(True) - # def test_configurationchange_rule(self): - # # Example of how to evaluate a configuration change rule - # response = RULE.evaluate_change( - # event=json.dumps(self.invoking_event_iam_role_sample), - # client_factory=CLIENT_FACTORY, - # configuration_item=self.role_sample_configuration_abridged, - # valid_rule_parameters=json.dumps(self.rule_parameters), - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # annotation="This is a configuration change rule's annotation.", - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("resourceId", None), - # resourceType=RESOURCE_TYPE, - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # rdklib.assert_successful_evaluation(self, response, resp_expected) - - # def test_periodic_rule(self): - # # Example of how to mock the client response for a list_roles API call - # IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) - # # Example of how to evaluate a periodic rule - # response = RULE.evaluate_periodic( - # 
event=rdklibtest.create_test_scheduled_event(self.rule_parameters), - # client_factory=CLIENT_FACTORY, - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("awsAccountId", None), - # resourceType="AWS::::Account", - # annotation="This is a periodic rule's annotation.", - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # rdklib.assert_successful_evaluation(self, response, resp_expected) + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) if __name__ == "__main__": diff --git a/rdk/template/runtime/python3.8-lib/rule_test.py b/rdk/template/runtime/python3.8-lib/rule_test.py index 8f11d043..db0cf30c 100644 --- a/rdk/template/runtime/python3.8-lib/rule_test.py +++ b/rdk/template/runtime/python3.8-lib/rule_test.py @@ -4,7 +4,6 @@ import unittest from unittest.mock import patch, MagicMock from botocore.exceptions import ClientError -import rdklib from rdklib import Evaluation, ComplianceType import rdklibtest @@ -20,24 +19,27 @@ # Main Code # ############# -MODULE = __import__("<%RuleName%>") -RULE = MODULE.<%RuleName%>() +MODULE = __import__("check_security_hub_aggregator") +RULE 
= MODULE.check_security_hub_aggregator() CLIENT_FACTORY = MagicMock() # example for mocking IAM API calls IAM_CLIENT_MOCK = MagicMock() +# STS client for getting account ID +STS_CLIENT_MOCK = MagicMock() def mock_get_client(client_name, *args, **kwargs): if client_name == "iam": return IAM_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK raise Exception("Attempting to create an unknown client") @patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) class ComplianceTest(unittest.TestCase): - rule_parameters = { "SomeParameterKey": "SomeParameterValue", "SomeParameterKey2": "SomeParameterValue2", @@ -61,6 +63,7 @@ class ComplianceTest(unittest.TestCase): }, "notificationCreationTime": "2018-07-02T23:05:34.445Z", "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", } list_roles_response = { @@ -69,74 +72,85 @@ class ComplianceTest(unittest.TestCase): "Path": "/", "RoleName": "testrole", "RoleId": "some-role-id", - "Arn": "arn:aws:iam00000056789012:role/testrole", - "CreateDate": datetime(2015, 1, 1), + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), "Description": "this is a test role", "MaxSessionDuration": 123, "Tags": [ {"Key": "one_tag", "Value": "its_value"}, ], "RoleLastUsed": { - "LastUsedDate": datetime(2015, 1, 1), + "LastUsedDate": datetime.datetime(2015, 1, 1), "Region": "us-east-1", }, }, ] } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} def setUp(self): - pass + STS_CLIENT_MOCK.reset_mock() def test_sample(self): self.assertTrue(True) - # def test_configurationchange_rule(self): - # # Example of how to evaluate a configuration change rule - # response = RULE.evaluate_change( - # event=json.dumps(self.invoking_event_iam_role_sample), - # client_factory=CLIENT_FACTORY, - # configuration_item=self.role_sample_configuration_abridged, - # valid_rule_parameters=json.dumps(self.rule_parameters), - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # annotation="This is a configuration change rule's annotation.", - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("resourceId", None), - # resourceType=RESOURCE_TYPE, - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # rdklib.assert_successful_evaluation(self, response, resp_expected) - - # def test_periodic_rule(self): - # # Example of how to mock the client response for a list_roles API call - # IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) - # # Example of how to evaluate a periodic rule - # response = RULE.evaluate_periodic( - # event=rdklibtest.create_test_scheduled_event(self.rule_parameters), - # client_factory=CLIENT_FACTORY, - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("awsAccountId", None), - # resourceType="AWS::::Account", - # annotation="This is a periodic rule's annotation.", - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # 
rdklib.assert_successful_evaluation(self, response, resp_expected) + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) if __name__ == "__main__": diff --git a/rdk/template/runtime/python3.9-lib/rule_test.py b/rdk/template/runtime/python3.9-lib/rule_test.py index 8f11d043..db0cf30c 100644 --- a/rdk/template/runtime/python3.9-lib/rule_test.py +++ b/rdk/template/runtime/python3.9-lib/rule_test.py @@ -4,7 +4,6 @@ import unittest from unittest.mock import patch, MagicMock from botocore.exceptions import ClientError -import rdklib from rdklib import Evaluation, ComplianceType import rdklibtest @@ -20,24 +19,27 @@ # Main Code # ############# -MODULE = __import__("<%RuleName%>") -RULE = MODULE.<%RuleName%>() +MODULE = __import__("check_security_hub_aggregator") +RULE = MODULE.check_security_hub_aggregator() CLIENT_FACTORY = MagicMock() # example for mocking IAM API calls IAM_CLIENT_MOCK = MagicMock() +# STS client for getting account ID +STS_CLIENT_MOCK = MagicMock() def mock_get_client(client_name, *args, **kwargs): if client_name == "iam": return IAM_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK raise Exception("Attempting to create an unknown client") @patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) class ComplianceTest(unittest.TestCase): - rule_parameters = { "SomeParameterKey": "SomeParameterValue", "SomeParameterKey2": 
"SomeParameterValue2", @@ -61,6 +63,7 @@ class ComplianceTest(unittest.TestCase): }, "notificationCreationTime": "2018-07-02T23:05:34.445Z", "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", } list_roles_response = { @@ -69,74 +72,85 @@ class ComplianceTest(unittest.TestCase): "Path": "/", "RoleName": "testrole", "RoleId": "some-role-id", - "Arn": "arn:aws:iam00000056789012:role/testrole", - "CreateDate": datetime(2015, 1, 1), + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), "Description": "this is a test role", "MaxSessionDuration": 123, "Tags": [ {"Key": "one_tag", "Value": "its_value"}, ], "RoleLastUsed": { - "LastUsedDate": datetime(2015, 1, 1), + "LastUsedDate": datetime.datetime(2015, 1, 1), "Region": "us-east-1", }, }, ] } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} def setUp(self): - pass + STS_CLIENT_MOCK.reset_mock() def test_sample(self): self.assertTrue(True) - # def test_configurationchange_rule(self): - # # Example of how to evaluate a configuration change rule - # response = RULE.evaluate_change( - # event=json.dumps(self.invoking_event_iam_role_sample), - # client_factory=CLIENT_FACTORY, - # configuration_item=self.role_sample_configuration_abridged, - # valid_rule_parameters=json.dumps(self.rule_parameters), - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # annotation="This is a configuration change rule's annotation.", - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("resourceId", None), - # resourceType=RESOURCE_TYPE, - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # rdklib.assert_successful_evaluation(self, response, resp_expected) - - # def test_periodic_rule(self): - # # Example of how to mock the client response for a list_roles API call - # IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) - # # Example of how to evaluate a periodic rule - # response = RULE.evaluate_periodic( - # event=rdklibtest.create_test_scheduled_event(self.rule_parameters), - # client_factory=CLIENT_FACTORY, - # ) - # resp_expected = [] - # resp_expected.append( - # Evaluation( - # complianceType=ComplianceType.NOT_APPLICABLE, - # resourceId=self.invoking_event_iam_role_sample.get( - # "configurationItem", {} - # ).get("awsAccountId", None), - # resourceType="AWS::::Account", - # annotation="This is a periodic rule's annotation.", - # ) - # ) - # if vars(response[0]) != vars(resp_expected[0]): - # logging.warning(f"Actual response: {vars(response[0])}") - # logging.warning(f"Expected response: {vars(resp_expected[0])}") - # rdklib.assert_successful_evaluation(self, response, resp_expected) + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + 
complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) if __name__ == "__main__": diff --git a/rdk/template/terraform/0.11/config_rule.tf b/rdk/template/terraform/0.11/config_rule.tf index 4df8f241..6e190217 100644 --- a/rdk/template/terraform/0.11/config_rule.tf +++ b/rdk/template/terraform/0.11/config_rule.tf @@ -9,13 +9,6 @@ data "aws_iam_policy" "read_only_access" { } data "aws_iam_policy_document" "config_iam_policy" { - - statement{ - actions=["s3:GetObject"] - resources =["arn:${data.aws_partition.current.partition}:s3:::${var.source_bucket}/${var.rule_name}.zip"] - effect = "Allow" - sid= "1" - } statement{ actions=[ "logs:CreateLogGroup", @@ -36,7 +29,6 @@ data "aws_iam_policy_document" "config_iam_policy" { statement{ actions=[ "iam:List*", - "iam:Describe*", "iam:Get*" ] resources = ["*"] diff --git a/rdk/template/terraform/0.12/config_rule.tf b/rdk/template/terraform/0.12/config_rule.tf index 13e2b6f6..ab384bdc 100644 --- a/rdk/template/terraform/0.12/config_rule.tf +++ b/rdk/template/terraform/0.12/config_rule.tf @@ -10,12 +10,6 @@ data "aws_iam_policy" "read_only_access" { data "aws_iam_policy_document" "config_iam_policy" { - statement{ - actions=["s3:GetObject"] - resources = [format("arn:%s:s3:::%s/%s",data.aws_partition.current.partition,var.source_bucket,local.rule_name_source)] - effect = "Allow" - sid= "1" - } statement{ actions=[ "logs:CreateLogGroup", @@ -36,7 +30,6 @@ data "aws_iam_policy_document" "config_iam_policy" { statement{ actions=[ "iam:List*", - "iam:Describe*", "iam:Get*" ] resources = ["*"] diff --git a/testing/linux-python3-buildspec.yaml b/testing/linux-python3-buildspec.yaml index 74930af5..715d06ac 100644 --- a/testing/linux-python3-buildspec.yaml +++ b/testing/linux-python3-buildspec.yaml @@ -14,7 +14,7 @@ phases: commands: - rdk create-region-set -o test-region - rdk -f test-region.yaml init - - rdk create MFA_ENABLED_RULE --runtime python3.8 --resource-types AWS::IAM::User + - rdk 
create MFA_ENABLED_RULE --runtime python3.10 --resource-types AWS::IAM::User - rdk -f test-region.yaml deploy MFA_ENABLED_RULE - sleep 30 - python3 testing/multi_region_execution_test.py @@ -22,22 +22,26 @@ phases: - rdk -f test-region.yaml undeploy --force MFA_ENABLED_RULE - python3 testing/partition_test.py - rdk init --generate-lambda-layer + - rdk create LP3_TestRule_P310_lib --runtime python3.10-lib --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P39_lib --runtime python3.9-lib --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P38_lib --runtime python3.8-lib --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P37_lib --runtime python3.7-lib --resource-types AWS::EC2::SecurityGroup + - rdk -f test-region.yaml deploy LP3_TestRule_P310_lib --generated-lambda-layer - rdk -f test-region.yaml deploy LP3_TestRule_P39_lib --generated-lambda-layer - rdk -f test-region.yaml deploy LP3_TestRule_P38_lib --generated-lambda-layer - rdk -f test-region.yaml deploy LP3_TestRule_P37_lib --generated-lambda-layer + - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P310_lib - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P39_lib - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P38_lib - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P37_lib + - rdk create LP3_TestRule_P310 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P39 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P38 --runtime python3.8 --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P37 --runtime python3.7 --resource-types AWS::EC2::SecurityGroup - - rdk create LP3_TestRule_P3 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - - rdk create LP3_TestRule_EFSFS --runtime python3.9 --resource-types AWS::EFS::FileSystem - - rdk create LP3_TestRule_ECSTD --runtime python3.7 --resource-types AWS::ECS::TaskDefinition - - rdk create LP3_TestRule_ECSS --runtime python3.9 --resource-types AWS::ECS::Service + - rdk create LP3_TestRule_P3 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup + - rdk create LP3_TestRule_EFSFS --runtime python3.10 --resource-types AWS::EFS::FileSystem + - rdk create LP3_TestRule_ECSTD --runtime python3.10 --resource-types AWS::ECS::TaskDefinition + - rdk create LP3_TestRule_ECSS --runtime python3.10 --resource-types AWS::ECS::Service - rdk modify LP3_TestRule_P3 --input-parameters '{"TestParameter":"TestValue"}' - rdk create LP3_TestRule_P37_Periodic --runtime python3.7 --maximum-frequency One_Hour - rdk create LP3_TestRule_P37lib_Periodic --runtime python3.7-lib --maximum-frequency One_Hour @@ -45,6 +49,8 @@ phases: - rdk create LP3_TestRule_P38lib_Periodic --runtime python3.8-lib --maximum-frequency One_Hour - rdk create LP3_TestRule_P39_Periodic --runtime python3.9 --maximum-frequency One_Hour - rdk create LP3_TestRule_P39lib_Periodic --runtime python3.9-lib --maximum-frequency One_Hour + - rdk create LP3_TestRule_P310_Periodic --runtime python3.10 --maximum-frequency One_Hour + - rdk create LP3_TestRule_P310lib_Periodic --runtime python3.10-lib --maximum-frequency One_Hour - rdk test-local --all - rdk deploy --all - yes | rdk undeploy LP3_TestRule_P3 @@ -54,6 +60,8 @@ phases: - yes | rdk undeploy LP3_TestRule_P38_Periodic - yes | rdk undeploy LP3_TestRule_P39 - yes | rdk undeploy LP3_TestRule_P39_Periodic + - yes | rdk undeploy LP3_TestRule_P310 + - yes | rdk undeploy LP3_TestRule_P310_Periodic - sleep 30 - rdk logs LP3_TestRule_P3 - yes 
| rdk undeploy -a diff --git a/testing/windows-python2-buildspec.yaml b/testing/windows-python2-buildspec.yaml deleted file mode 100644 index 57dd3042..00000000 --- a/testing/windows-python2-buildspec.yaml +++ /dev/null @@ -1,13 +0,0 @@ -version: 0.1 - -phases: - install: - commands: - - apt-get update -y - build: - commands: - - echo Creating Windows build server and running tests - - bash testing/test_windows.sh 2 - post_build: - commands: - - echo Build completed on `date` diff --git a/testing/windows-python3-buildspec.yaml b/testing/windows-python3-buildspec.yaml index 4e385fd6..5c92b963 100644 --- a/testing/windows-python3-buildspec.yaml +++ b/testing/windows-python3-buildspec.yaml @@ -14,35 +14,41 @@ phases: commands: - rdk create-region-set -o test-region - rdk -f test-region.yaml init - - rdk create W_MFA_ENABLED_RULE --runtime python3.8 --resource-types AWS::IAM::User + - rdk create W_MFA_ENABLED_RULE --runtime python3.10 --resource-types AWS::IAM::User - rdk -f test-region.yaml deploy W_MFA_ENABLED_RULE - python testing/win_multi_region_execution_test.py - rdk -f test-region.yaml undeploy --force W_MFA_ENABLED_RULE - python testing/win_partition_test.py - rdk init --generate-lambda-layer + - rdk create WP3_TestRule_P310_lib --runtime python3.10-lib --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P39_lib --runtime python3.9-lib --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P38_lib --runtime python3.8-lib --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P37_lib --runtime python3.7-lib --resource-types AWS::EC2::SecurityGroup + - rdk -f test-region.yaml deploy WP3_TestRule_P310_lib --generated-lambda-layer - rdk -f test-region.yaml deploy WP3_TestRule_P39_lib --generated-lambda-layer - rdk -f test-region.yaml deploy WP3_TestRule_P38_lib --generated-lambda-layer - rdk -f test-region.yaml deploy WP3_TestRule_P37_lib --generated-lambda-layer + - rdk -f test-region.yaml undeploy WP3_TestRule_P310_lib --force - rdk -f test-region.yaml undeploy WP3_TestRule_P39_lib --force - rdk -f test-region.yaml undeploy WP3_TestRule_P38_lib --force - rdk -f test-region.yaml undeploy WP3_TestRule_P37_lib --force + - rdk create WP3_TestRule_P310 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P39 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P38 --runtime python3.8 --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P37 --runtime python3.7 --resource-types AWS::EC2::SecurityGroup - - rdk create WP3_TestRule_P3 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - - rdk create WP3_TestRule_EFSFS --runtime python3.9 --resource-types AWS::EFS::FileSystem - - rdk create WP3_TestRule_ECSTD --runtime python3.7 --resource-types AWS::ECS::TaskDefinition - - rdk create WP3_TestRule_ECSS --runtime python3.9 --resource-types AWS::ECS::Service - - rdk modify WP3_TestRule_P3 --runtime python3.8 + - rdk create WP3_TestRule_P3 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup + - rdk create WP3_TestRule_EFSFS --runtime python3.10 --resource-types AWS::EFS::FileSystem + - rdk create WP3_TestRule_ECSTD --runtime python3.10 --resource-types AWS::ECS::TaskDefinition + - rdk create WP3_TestRule_ECSS --runtime python3.10 --resource-types AWS::ECS::Service + - rdk modify WP3_TestRule_P3 --runtime python3.10 - rdk create WP3_TestRule_P37_Periodic --runtime python3.7 --maximum-frequency One_Hour - rdk create WP3_TestRule_P37lib_Periodic --runtime 
python3.7-lib --maximum-frequency One_Hour - rdk create WP3_TestRule_P38_Periodic --runtime python3.8 --maximum-frequency One_Hour - rdk create WP3_TestRule_P38lib_Periodic --runtime python3.8-lib --maximum-frequency One_Hour - rdk create WP3_TestRule_P39_Periodic --runtime python3.9 --maximum-frequency One_Hour - rdk create WP3_TestRule_P39lib_Periodic --runtime python3.9-lib --maximum-frequency One_Hour + - rdk create WP3_TestRule_P310_Periodic --runtime python3.10 --maximum-frequency One_Hour + - rdk create WP3_TestRule_P310lib_Periodic --runtime python3.10-lib --maximum-frequency One_Hour - rdk test-local --all - rdk deploy --all - rdk undeploy WP3_TestRule_P3 --force @@ -51,7 +57,9 @@ phases: - rdk undeploy WP3_TestRule_P38 --force - rdk undeploy WP3_TestRule_P38_Periodic --force - rdk undeploy WP3_TestRule_P39 --force - - rdk undeploy WP3_TestRule_P39_Periodic --force + - rdk undeploy WP3_TestRule_P39_Periodic --force + - rdk undeploy WP3_TestRule_P310 --force + - rdk undeploy WP3_TestRule_P310_Periodic --force - rdk logs WP3_TestRule_P3 - rdk undeploy -a --force post_build: diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..66a14670 --- /dev/null +++ b/tox.ini @@ -0,0 +1,2 @@ +[flake8] +max-line-length=140 \ No newline at end of file From f71583a8bdd44a817d3929533fa8143621805c35 Mon Sep 17 00:00:00 2001 From: Benjamin Morris Date: Tue, 2 May 2023 08:34:29 -0700 Subject: [PATCH 07/12] new resource types and version bump --- rdk/rdk.py | 74 ++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 25 deletions(-) diff --git a/rdk/rdk.py b/rdk/rdk.py index 12052e51..4cd52bbc 100644 --- a/rdk/rdk.py +++ b/rdk/rdk.py @@ -87,8 +87,8 @@ # This need to be update whenever config service supports more resource types # See: https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html accepted_resource_types = [ - "AWS::AccessAnalyzer::Analyzer", "AWS::ACM::Certificate", + "AWS::AccessAnalyzer::Analyzer", "AWS::AmazonMQ::Broker", "AWS::ApiGateway::RestApi", "AWS::ApiGateway::Stage", @@ -96,11 +96,14 @@ "AWS::ApiGatewayV2::Stage", "AWS::AppConfig::Application", "AWS::AppConfig::ConfigurationProfile", + "AWS::AppConfig::DeploymentStrategy", "AWS::AppConfig::Environment", + "AWS::AppFlow::Flow", "AWS::AppStream::DirectoryConfig", "AWS::AppSync::GraphQLApi", - "AWS::Athena::WorkGroup", "AWS::Athena::DataCatalog", + "AWS::Athena::WorkGroup", + "AWS::AuditManager::Assessment", "AWS::AutoScaling::AutoScalingGroup", "AWS::AutoScaling::LaunchConfiguration", "AWS::AutoScaling::ScalingPolicy", @@ -120,6 +123,7 @@ "AWS::CloudFront::StreamingDistribution", "AWS::CloudTrail::Trail", "AWS::CloudWatch::Alarm", + "AWS::CloudWatch::MetricStream", "AWS::CodeBuild::Project", "AWS::CodeDeploy::Application", "AWS::CodeDeploy::DeploymentConfig", @@ -131,6 +135,11 @@ "AWS::Config::ResourceCompliance", "AWS::Connect::PhoneNumber", "AWS::CustomerProfiles::Domain", + "AWS::DMS::Certificate", + "AWS::DMS::EventSubscription", + "AWS::DMS::ReplicationInstance", + "AWS::DMS::ReplicationSubnetGroup", + "AWS::DMS::ReplicationTask", "AWS::DataSync::LocationEFS", "AWS::DataSync::LocationFSxLustre", "AWS::DataSync::LocationFSxWindows", @@ -141,22 +150,20 @@ "AWS::DataSync::LocationSMB", "AWS::DataSync::Task", "AWS::Detective::Graph", + "AWS::DeviceFarm::InstanceProfile", + "AWS::DeviceFarm::Project", "AWS::DeviceFarm::TestGridProject", - "AWS::DMS::Certificate", - "AWS::DMS::EventSubscription", - "AWS::DMS::ReplicationInstance", - 
"AWS::DMS::ReplicationSubnetGroup", - "AWS::DMS::ReplicationTask", "AWS::DynamoDB::Table", "AWS::EC2::CustomerGateway", "AWS::EC2::DHCPOptions", + "AWS::EC2::EC2Fleet", "AWS::EC2::EIP", "AWS::EC2::EgressOnlyInternetGateway", "AWS::EC2::FlowLog", "AWS::EC2::Host", + "AWS::EC2::IPAM", "AWS::EC2::Instance", "AWS::EC2::InternetGateway", - "AWS::EC2::IPAM", "AWS::EC2::LaunchTemplate", "AWS::EC2::NatGateway", "AWS::EC2::NetworkAcl", @@ -167,6 +174,7 @@ "AWS::EC2::RouteTable", "AWS::EC2::SecurityGroup", "AWS::EC2::Subnet", + "AWS::EC2::SubnetRouteTableAssociation", "AWS::EC2::TrafficMirrorFilter", "AWS::EC2::TrafficMirrorSession", "AWS::EC2::TrafficMirrorTarget", @@ -181,8 +189,9 @@ "AWS::EC2::VPNGateway", "AWS::EC2::Volume", "AWS::ECR::PublicRepository", - "AWS::ECR::Repository", + "AWS::ECR::PullThroughCacheRule", "AWS::ECR::RegistryPolicy", + "AWS::ECR::Repository", "AWS::ECS::Cluster", "AWS::ECS::Service", "AWS::ECS::TaskDefinition", @@ -200,16 +209,16 @@ "AWS::ElasticLoadBalancingV2::Listener", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::ElasticSearch::Domain", - "AWS::Events::Archive", + "AWS::EventSchemas::Discoverer", + "AWS::EventSchemas::Registry", + "AWS::EventSchemas::RegistryPolicy", + "AWS::EventSchemas::Schema", "AWS::Events::ApiDestination", + "AWS::Events::Archive", "AWS::Events::Connection", "AWS::Events::Endpoint", "AWS::Events::EventBus", "AWS::Events::Rule", - "AWS::EventSchemas::Discoverer", - "AWS::EventSchemas::Registry", - "AWS::EventSchemas::RegistryPolicy", - "AWS::EventSchemas::Schema", "AWS::FIS::ExperimentTemplate", "AWS::FraudDetector::EntityType", "AWS::FraudDetector::Label", @@ -221,6 +230,7 @@ "AWS::Glue::Classifier", "AWS::Glue::Job", "AWS::Glue::MLTransform", + "AWS::GroundStation::Config", "AWS::GuardDuty::Detector", "AWS::GuardDuty::Filter", "AWS::GuardDuty::IPSet", @@ -230,13 +240,18 @@ "AWS::IAM::Policy", "AWS::IAM::Role", "AWS::IAM::User", + "AWS::IVS::Channel", + "AWS::IVS::PlaybackKeyPair", + "AWS::IVS::RecordingConfiguration", "AWS::ImageBuilder::ContainerRecipe", "AWS::ImageBuilder::DistributionConfiguration", + "AWS::ImageBuilder::ImagePipeline", "AWS::ImageBuilder::InfrastructureConfiguration", "AWS::IoT::AccountAuditConfiguration", "AWS::IoT::Authorizer", "AWS::IoT::CustomMetric", "AWS::IoT::Dimension", + "AWS::IoT::FleetMetric", "AWS::IoT::MitigationAction", "AWS::IoT::Policy", "AWS::IoT::RoleAlias", @@ -257,9 +272,7 @@ "AWS::IoTTwinMaker::Entity", "AWS::IoTTwinMaker::Scene", "AWS::IoTTwinMaker::Workspace", - "AWS::IVS::Channel", - "AWS::IVS::PlaybackKeyPair", - "AWS::IVS::RecordingConfiguration", + "AWS::IoTWireless::ServiceProfile", "AWS::KMS::Alias", "AWS::KMS::Key", "AWS::Kinesis::Stream", @@ -275,14 +288,21 @@ "AWS::Lightsail::StaticIp", "AWS::LookoutMetrics::Alert", "AWS::LookoutVision::Project", + "AWS::MSK::Cluster", "AWS::MediaPackage::PackagingConfiguration", "AWS::MediaPackage::PackagingGroup", - "AWS::MSK::Cluster", "AWS::NetworkFirewall::Firewall", "AWS::NetworkFirewall::FirewallPolicy", "AWS::NetworkFirewall::RuleGroup", + "AWS::NetworkFirewall::TLSInspectionConfiguration", + "AWS::NetworkManager::Device", + "AWS::NetworkManager::GlobalNetwork", + "AWS::NetworkManager::Link", + "AWS::NetworkManager::Site", "AWS::NetworkManager::TransitGatewayRegistration", "AWS::OpenSearch::Domain", + "AWS::Panorama::Package", + "AWS::Pinpoint::App", "AWS::Pinpoint::ApplicationSettings", "AWS::Pinpoint::Segment", "AWS::QLDB::Ledger", @@ -294,12 +314,14 @@ "AWS::RDS::DBSubnetGroup", "AWS::RDS::EventSubscription", "AWS::RDS::GlobalCluster", + 
"AWS::RUM::AppMonitor", "AWS::Redshift::Cluster", "AWS::Redshift::ClusterParameterGroup", "AWS::Redshift::ClusterSecurityGroup", "AWS::Redshift::ClusterSnapshot", "AWS::Redshift::ClusterSubnetGroup", "AWS::Redshift::EventSubscription", + "AWS::Redshift::ScheduledAction", "AWS::ResilienceHub::ResiliencyPolicy", "AWS::RoboMaker::RobotApplication", "AWS::RoboMaker::RobotApplicationVersion", @@ -310,27 +332,34 @@ "AWS::Route53RecoveryControl::ControlPanel", "AWS::Route53RecoveryControl::RoutingControl", "AWS::Route53RecoveryControl::SafetyRule", - "AWS::Route53RecoveryReadiness::ResourceSet", "AWS::Route53RecoveryReadiness::Cell", "AWS::Route53RecoveryReadiness::ReadinessCheck", "AWS::Route53RecoveryReadiness::RecoveryGroup", + "AWS::Route53RecoveryReadiness::ResourceSet", "AWS::Route53Resolver::FirewallDomainList", + "AWS::Route53Resolver::FirewallRuleGroupAssociation", "AWS::Route53Resolver::ResolverEndpoint", "AWS::Route53Resolver::ResolverRule", "AWS::Route53Resolver::ResolverRuleAssociation", - "AWS::RUM::AppMonitor", "AWS::S3::AccountPublicAccessBlock", "AWS::S3::Bucket", "AWS::S3::MultiRegionAccessPoint", "AWS::S3::StorageLens", + "AWS::SES::ConfigurationSet", + "AWS::SES::ContactList", + "AWS::SES::ReceiptFilter", + "AWS::SES::ReceiptRuleSet", + "AWS::SES::Template", "AWS::SNS::Topic", "AWS::SQS::Queue", "AWS::SSM::AssociationCompliance", "AWS::SSM::FileData", "AWS::SSM::ManagedInstanceInventory", "AWS::SSM::PatchCompliance", + "AWS::SageMaker::AppImageConfig", "AWS::SageMaker::CodeRepository", "AWS::SageMaker::EndpointConfig", + "AWS::SageMaker::Image", "AWS::SageMaker::Model", "AWS::SageMaker::NotebookInstance", "AWS::SageMaker::NotebookInstanceLifecycleConfig", @@ -342,11 +371,6 @@ "AWS::ServiceDiscovery::HttpNamespace", "AWS::ServiceDiscovery::PublicDnsNamespace", "AWS::ServiceDiscovery::Service", - "AWS::SES::ConfigurationSet", - "AWS::SES::ContactList", - "AWS::SES::ReceiptFilter", - "AWS::SES::ReceiptRuleSet", - "AWS::SES::Template", "AWS::Shield::Protection", "AWS::ShieldRegional::Protection", "AWS::StepFunctions::Activity", From cb48bb1d1e008989f85fb19e30528cef0a7aa58c Mon Sep 17 00:00:00 2001 From: Benjamin Morris <93620006+bmorrissirromb@users.noreply.github.com> Date: Mon, 1 May 2023 14:01:55 -0700 Subject: [PATCH 08/12] reference the correct readthedocs link --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index d5cfe7ec..de8d303a 100644 --- a/README.rst +++ b/README.rst @@ -14,7 +14,7 @@ We greatly appreciate feedback and bug reports at rdk-maintainers@amazon.com! Yo The RDK is designed to support a "Compliance-as-Code" workflow that is intuitive and productive. It abstracts away much of the undifferentiated heavy lifting associated with deploying AWS Config rules backed by custom lambda functions, and provides a streamlined develop-deploy-monitor iterative process. -For complete documentation, including command reference, check out the `ReadTheDocs documentation `_. +For complete documentation, including command reference, check out the `ReadTheDocs documentation `_. 
Getting Started =============== From c3390a96ecb93e87ae391ae579144634168cb2ec Mon Sep 17 00:00:00 2001 From: Benjamin Morris Date: Wed, 24 May 2023 15:31:48 -0700 Subject: [PATCH 09/12] resolve conflicts from 0.14.0 --- NOTICE.txt | 2 +- README.md | 56 +-- README.rst | 365 ------------------- NEW_RUNTIME_PROCESS.md => developer_notes.md | 29 +- pyproject.toml | 2 +- rdk/__init__.py | 2 +- tox.ini | 2 - 7 files changed, 47 insertions(+), 411 deletions(-) delete mode 100644 README.rst rename NEW_RUNTIME_PROCESS.md => developer_notes.md (52%) delete mode 100644 tox.ini diff --git a/NOTICE.txt b/NOTICE.txt index 5bad47f4..433f02db 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,2 +1,2 @@ rdk -Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2017-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/README.md b/README.md index 8cc1c093..f7d851c5 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ For complete documentation, including command reference, check out the ## Getting Started -Uses python 3.7/3.8/3.9 and is installed via pip. Requires you to have +Uses Python 3.7+ and is installed via pip. Requires you to have an AWS account and sufficient permissions to manage the Config service, and to create S3 Buckets, Roles, and Lambda Functions. An AWS IAM Policy Document that describes the minimum necessary permissions can be found @@ -126,7 +126,7 @@ rule and populate it with several files, including a skeleton of your Lambda code. ```bash -rdk create MyRule --runtime python3.8 --resource-types AWS::EC2::Instance --input-parameters '{"desiredInstanceType":"t2.micro"}' +rdk create MyRule --runtime python3.10 --resource-types AWS::EC2::Instance --input-parameters '{"desiredInstanceType":"t2.micro"}' Running create! Local Rule files created. ``` @@ -220,7 +220,7 @@ will overwrite existing values, any that you do not specify will not be changed. ```bash -rdk modify MyRule --runtime python3.9 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' +rdk modify MyRule --runtime python3.10 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' Running modify! Modified Rule 'MyRule'. Use the `deploy` command to push your changes to AWS. ``` @@ -239,7 +239,7 @@ Once you have completed your compliance validation code and set your Rule's configuration, you can deploy the Rule to your account using the `deploy` command. This will zip up your code (and the other associated code files, if any) into a deployable package (or run a gradle build if -you have selected the java8 runtime or run the lambda packaging step +you have selected the java8 runtime or run the Lambda packaging step from the dotnet CLI if you have selected the dotnetcore1.0 runtime), copy that zip file to S3, and then launch or update a CloudFormation stack that defines your Config Rule, Lambda function, and the necessary @@ -272,8 +272,8 @@ You can also deploy the Rule to your AWS Organization using the `deploy-organization` command. For successful evaluation of custom rules in child accounts, please make sure you do one of the following: -1. Set ASSUME_ROLE_MODE in Lambda code to True, to get the lambda to assume the Role attached on the Config Service and confirm that the role trusts the master account where the Lambda function is going to be deployed. -2. 
Set ASSUME_ROLE_MODE in Lambda code to True, to get the lambda to assume a custom role and define an optional parameter with key as ExecutionRoleName and set the value to your custom role name; confirm that the role trusts the master account of the organization where the Lambda function will be deployed. +1. Set ASSUME_ROLE_MODE in Lambda code to True, to get the Lambda to assume the Role attached on the Config Service and confirm that the role trusts the master account where the Lambda function is going to be deployed. +2. Set ASSUME_ROLE_MODE in Lambda code to True, to get the Lambda to assume a custom role and define an optional parameter with key as ExecutionRoleName and set the value to your custom role name; confirm that the role trusts the master account of the organization where the Lambda function will be deployed. ```bash rdk deploy-organization MyRule @@ -300,7 +300,7 @@ doesn't exist within 7 hours of adding an account to your organization. ### View Logs For Deployed Rule Once the Rule has been deployed to AWS you can get the CloudWatch logs -associated with your lambda function using the `logs` command. +associated with your Lambda function using the `logs` command. ```bash rdk logs MyRule -n 5 @@ -321,8 +321,8 @@ make sure it is behaving as expected. The `testing` directory contains scripts and buildspec files that I use to run basic functionality tests across a variety of CLI environments -(currently Ubuntu linux running python 3.7/3.8/3.9, and Windows Server -running python3.9). If there is interest I can release a CloudFormation +(currently Ubuntu Linux running Python 3.7/3.8/3.9/3.10, and Windows Server +running Python 3.10). If there is interest I can release a CloudFormation template that could be used to build the test environment, let me know if this is something you want! @@ -340,10 +340,10 @@ are used by other teams or departments. This gives the compliance team confidence that their rule logic cannot be tampered with and makes it much easier for them to modify rule logic without having to go through a complex deployment process to potentially hundreds of AWS accounts. The -cross-account pattern uses two advanced RDK features +cross-account pattern uses two advanced RDK features: -- Functions-only deployment -- create-rule-template command +- `--functions-only` (`-f`) deployment +- `create-rule-template` command #### Functions-Only Deployment @@ -369,8 +369,8 @@ This command generates a CloudFormation template that defines the AWS Config rules themselves, along with the Config Role, Config data bucket, Configuration Recorder, and Delivery channel necessary for the Config rules to work in a satellite account. You must specify the file name for -the generated template using the [--output-file]{.title-ref} or -[o]{.title-ref} command line flags. The generated template takes a +the generated template using the `--output-file` or +`-o` command line flags. The generated template takes a single parameter of the AccountID of the central compliance account that contains the Lambda functions that will back your custom Config Rules. The generated template can be deployed in the desired satellite accounts @@ -394,7 +394,7 @@ by rdk. To disable the supported resource check use the optional flag '--skip-supported-resource-check' during the create command. 
```bash -rdk create MyRule --runtime python3.8 --resource-types AWS::New::ResourceType --skip-supported-resource-check +rdk create MyRule --runtime python3.10 --resource-types AWS::New::ResourceType --skip-supported-resource-check 'AWS::New::ResourceType' not found in list of accepted resource types. Skip-Supported-Resource-Check Flag set (--skip-supported-resource-check), ignoring missing resource type error. Running create! @@ -413,7 +413,7 @@ performing `rdk create`. This opens up new features like : 2. Custom lambda function naming as per personal or enterprise standards. ```bash -rdk create MyLongerRuleName --runtime python3.8 --resource-types AWS::EC2::Instance --custom-lambda-name custom-prefix-for-MyLongerRuleName +rdk create MyLongerRuleName --runtime python3.10 --resource-types AWS::EC2::Instance --custom-lambda-name custom-prefix-for-MyLongerRuleName Running create! Local Rule files created. ``` @@ -533,21 +533,21 @@ are happy to help and discuss. ## Contacts -- **Benjamin Morris** - [bmorrissirromb](https://github.com/bmorrissirromb) - *current maintainer* -- **Julio Delgado Jr** - [tekdj7](https://github.com/tekdj7) - *current maintainer* +- **Benjamin Morris** - [bmorrissirromb](https://github.com/bmorrissirromb) - _current maintainer_ +- **Julio Delgado Jr** - [tekdj7](https://github.com/tekdj7) - _current maintainer_ ## Past Contributors -- **Michael Borchert** - *Original Python version* -- **Jonathan Rault** - *Original Design, testing, feedback* -- **Greg Kim and Chris Gutierrez** - *Initial work and CI definitions* -- **Henry Huang** - *Original CFN templates and other code* -- **Santosh Kumar** - *maintainer* -- **Jose Obando** - *maintainer* -- **Jarrett Andrulis** - [jarrettandrulis](https://github.com/jarrettandrulis) - *maintainer* -- **Sandeep Batchu** - [batchus](https://github.com/batchus) - *maintainer* -- **Mark Beacom** - [mbeacom](https://github.com/mbeacom) - *maintainer* -- **Ricky Chau** - [rickychau2780](https://github.com/rickychau2780) - *maintainer* +- **Michael Borchert** - _Original Python version_ +- **Jonathan Rault** - _Original Design, testing, feedback_ +- **Greg Kim and Chris Gutierrez** - _Initial work and CI definitions_ +- **Henry Huang** - _Original CFN templates and other code_ +- **Santosh Kumar** - _maintainer_ +- **Jose Obando** - _maintainer_ +- **Jarrett Andrulis** - [jarrettandrulis](https://github.com/jarrettandrulis) - _maintainer_ +- **Sandeep Batchu** - [batchus](https://github.com/batchus) - _maintainer_ +- **Mark Beacom** - [mbeacom](https://github.com/mbeacom) - _maintainer_ +- **Ricky Chau** - [rickychau2780](https://github.com/rickychau2780) - _maintainer_ ## License diff --git a/README.rst b/README.rst deleted file mode 100644 index de8d303a..00000000 --- a/README.rst +++ /dev/null @@ -1,365 +0,0 @@ -rdk -=== -|pypibadge| |downloadsbadge| - - -.. |pypibadge| image:: https://static.pepy.tech/personalized-badge/rdk?period=total&units=international_system&left_color=black&right_color=blue&left_text=downloads - :target: https://pepy.tech/project/rdk -.. |downloadsbadge| image:: https://img.shields.io/pypi/v/rdk - :alt: PyPI - -Rule Development Kit - -We greatly appreciate feedback and bug reports at rdk-maintainers@amazon.com! You may also create an issue on this repo. - -The RDK is designed to support a "Compliance-as-Code" workflow that is intuitive and productive. 
It abstracts away much of the undifferentiated heavy lifting associated with deploying AWS Config rules backed by custom lambda functions, and provides a streamlined develop-deploy-monitor iterative process. - -For complete documentation, including command reference, check out the `ReadTheDocs documentation `_. - -Getting Started -=============== -Uses python 3.7/3.8/3.9/3.10 and is installed via pip. Requires you to have an AWS account and sufficient permissions to manage the Config service, and to create S3 Buckets, Roles, and Lambda Functions. An AWS IAM Policy Document that describes the minimum necessary permissions can be found at policy/rdk-minimum-permissions.json. - -Under the hood, rdk uses boto3 to make API calls to AWS, so you can set your credentials any way that boto3 recognizes (options 3 through 8 here: http://boto3.readthedocs.io/en/latest/guide/configuration.html) or pass them in with the command-line parameters --profile, --region, --access-key-id, or --secret-access-key - -If you just want to use the RDK, go ahead and install it using pip:: - -$ pip install rdk - -Alternately, if you want to see the code and/or contribute you can clone the git repo, and then from the repo directory use pip to install the package. Use the '-e' flag to generate symlinks so that any edits you make will be reflected when you run the installed package. - -If you are going to author your Lambda functions using Java you will need to have Java 8 and gradle installed. If you are going to author your Lambda functions in C# you will need to have the dotnet CLI and the .NET Core Runtime 1.08 installed. -:: - - $ pip install -e . - -To make sure the rdk is installed correctly, running the package from the command line without any arguments should display help information. - -:: - - $ rdk - usage: rdk [-h] [-p PROFILE] [-k ACCESS_KEY] [-s SECRET_ACCESS_KEY] - [-r REGION] - ... - rdk: error: the following arguments are required: , - - -Usage -===== - -Configure your env ------------------- -To use the RDK, it's recommended to create a directory that will be your working directory. This should be committed to a source code repo, and ideally created as a python virtualenv. In that directory, run the ``init`` command to set up your AWS Config environment. - -:: - - $ rdk init - Running init! - Creating Config bucket config-bucket-780784666283 - Creating IAM role config-role - Waiting for IAM role to propagate - Config Service is ON - Config setup complete. - Creating Code bucket config-rule-code-bucket-780784666283ap-southeast-1 - -Running ``init`` subsequent times will validate your AWS Config setup and re-create any S3 buckets or IAM resources that are needed. - -- If you have config delivery bucket already present in some other AWS account then use **--config-bucket-exists-in-another-account** as argument::: - - $ rdk init --config-bucket-exists-in-another-account -- If you have AWS Organizations/ControlTower Setup in your AWS environment then additionally, use **--control-tower** as argument::: - - $ rdk init --control-tower --config-bucket-exists-in-another-account -- If bucket for custom lambda code is already present in current account then use **--skip-code-bucket-creation** argument::: - - $ rdk init --skip-code-bucket-creation - -- If you want rdk to create/update and upload the rdklib-layer for you, then use **--generate-lambda-layer** argument. 
In supported regions, rdk will deploy the layer using the Serverless Application Repository, otherwise it will build a local lambda layer archive and upload it for use::: - - $ rdk init --generate-lambda-layer -- If you want rdk to give a custom name to the lambda layer for you, then use **--custom-layer-namer** argument. The Serverless Application Repository currently cannot be used for custom lambda layers.::: - - $ rdk init --generate-lambda-layer --custom-layer-name - -Create Rules ------------- -In your working directory, use the ``create`` command to start creating a new custom rule. You must specify the runtime for the lambda function that will back the Rule, and you can also specify a resource type (or comma-separated list of types) that the Rule will evaluate or a maximum frequency for a periodic rule. This will add a new directory for the rule and populate it with several files, including a skeleton of your Lambda code. - -:: - - $ rdk create MyRule --runtime python3.8 --resource-types AWS::EC2::Instance --input-parameters '{"desiredInstanceType":"t2.micro"}' - Running create! - Local Rule files created. - -On Windows it is necessary to escape the double-quotes when specifying input parameters, so the `--input-parameters` argument would instead look something like this:: - - '{\"desiredInstanceType\":\"t2.micro\"}' - -Note that you can create rules that use EITHER resource-types OR maximum-frequency, but not both. We have found that rules that try to be both event-triggered as well as periodic wind up being very complicated and so we do not recommend it as a best practice. - -Edit Rules Locally ---------------------------- -Once you have created the rule, edit the python file in your rule directory (in the above example it would be ``MyRule/MyRule.py``, but may be deeper into the rule directory tree depending on your chosen Lambda runtime) to add whatever logic your Rule requires in the ``evaluate_compliance`` function. You will have access to the CI that was sent by Config, as well as any parameters configured for the Config Rule. Your function should return either a simple compliance status (one of ``COMPLIANT``, ``NONCOMPLIANT``, or ``NOT_APPLICABLE``), or if you're using the python or node runtimes you can return a JSON object with multiple evaluation responses that the RDK will send back to AWS Config. An example would look like:: - - for sg in response['SecurityGroups']: - evaluations.append( - { - 'ComplianceResourceType': 'AWS::EC2::SecurityGroup', - 'ComplianceResourceId': sg['GroupId'], - 'ComplianceType': 'COMPLIANT', - 'Annotation': 'This is an important note.', - 'OrderingTimestamp': str(datetime.datetime.now()) - }) - - - return evaluations - -This is necessary for periodic rules that are not triggered by any CI change (which means the CI that is passed in will be null), and also for attaching annotations to your evaluation results. - -If you want to see what the JSON structure of a CI looks like for creating your logic, you can use - -:: - -$ rdk sample-ci - -to output a formatted JSON document. - -Write and Run Unit Tests ------------------------- -If you are writing Config Rules using either of the Python runtimes there will be a _test.py file deployed along with your Lambda function skeleton. This can be used to write unit tests according to the standard Python unittest framework (documented here: https://docs.python.org/3/library/unittest.html), which can be run using the `test-local` rdk command:: - - $ rdk test-local MyTestRule - Running local test! 
- Testing MyTestRule - Looking for tests in /Users/mborch/Code/rdk-dev/MyTestRule - - --------------------------------------------------------------------- - - Ran 0 tests in 0.000s - - OK - - -The test file includes setup for the MagicMock library that can be used to stub boto3 API calls if your rule logic will involve making API calls to gather additional information about your AWS environment. For some tips on how to do this, check out this blog post: https://sgillies.net/2017/10/19/mock-is-magic.html - -Modify Rule ------------ -If you need to change the parameters of a Config rule in your working directory you can use the ``modify`` command. Any parameters you specify will overwrite existing values, any that you do not specify will not be changed. - -:: - - $ rdk modify MyRule --runtime python3.10 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' - Running modify! - Modified Rule 'MyRule'. Use the `deploy` command to push your changes to AWS. - -Again, on Windows the input parameters would look like:: - - '{\"desiredInstanceType\":\"t2.micro\"}' - -It is worth noting that until you actually call the ``deploy`` command your rule only exists in your working directory, none of the Rule commands discussed thus far actually makes changes to your account. - -Deploy Rule ------------ -Once you have completed your compliance validation code and set your Rule's configuration, you can deploy the Rule to your account using the ``deploy`` command. This will zip up your code (and the other associated code files, if any) into a deployable package (or run a gradle build if you have selected the java8 runtime or run the lambda packaging step from the dotnet CLI if you have selected the dotnetcore1.0 runtime), copy that zip file to S3, and then launch or update a CloudFormation stack that defines your Config Rule, Lambda function, and the necessary permissions and IAM Roles for it to function. Since CloudFormation does not deeply inspect Lambda code objects in S3 to construct its changeset, the ``deploy`` command will also directly update the Lambda function for any subsequent deployments to make sure code changes are propagated correctly. - -:: - - $ rdk deploy MyRule - Running deploy! - Zipping MyRule - Uploading MyRule - Creating CloudFormation Stack for MyRule - Waiting for CloudFormation stack operation to complete... - ... - Waiting for CloudFormation stack operation to complete... - Config deploy complete. - -The exact output will vary depending on Lambda runtime. You can use the --all flag to deploy all of the rules in your working directory. If you used the --generate-lambda-layer flag in rdk init, use the --generated-lambda-layer flag for rdk deploy. - -Deploy Organization Rule ------------------------- -You can also deploy the Rule to your AWS Organization using the ``deploy-organization`` command. -For successful evaluation of custom rules in child accounts, please make sure you do one of the following: - -1. Set ASSUME_ROLE_MODE in Lambda code to True, to get the lambda to assume the Role attached on the Config Service and confirm that the role trusts the master account where the Lambda function is going to be deployed. -2. Set ASSUME_ROLE_MODE in Lambda code to True, to get the lambda to assume a custom role and define an optional parameter with key as ExecutionRoleName and set the value to your custom role name; confirm that the role trusts the master account of the organization where the Lambda function will be deployed. 
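The ASSUME_ROLE_MODE options above come down to the rule's Lambda code calling STS before it builds its boto3 clients. The snippet below is a minimal sketch of that pattern under stated assumptions: it is not the exact helper shipped in the RDK runtime templates, and the role ARN shown is a placeholder that you would build from the child account ID and either the Config service role or your ExecutionRoleName value.

```python
# Minimal sketch of the assume-role pattern implied by ASSUME_ROLE_MODE.
# Illustrative only; not the RDK runtime template's actual helper.
from typing import Optional

import boto3


def get_client(service: str, role_arn: Optional[str] = None):
    """Return a boto3 client, optionally using credentials from an assumed role."""
    if not role_arn:
        # No role supplied: fall back to the Lambda execution role's own credentials.
        return boto3.client(service)
    credentials = boto3.client("sts").assume_role(
        RoleArn=role_arn,
        RoleSessionName="rdk-custom-rule",
    )["Credentials"]
    return boto3.client(
        service,
        aws_access_key_id=credentials["AccessKeyId"],
        aws_secret_access_key=credentials["SecretAccessKey"],
        aws_session_token=credentials["SessionToken"],
    )


# Hypothetical usage: query AWS Config in a child account via an assumed role.
# config = get_client("config", "arn:aws:iam::210987654321:role/ChildAccountConfigRole")
```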
- -:: - - $ rdk deploy-organization MyRule - Running deploy! - Zipping MyRule - Uploading MyRule - Creating CloudFormation Stack for MyRule - Waiting for CloudFormation stack operation to complete... - ... - Waiting for CloudFormation stack operation to complete... - Config deploy complete. - -The exact output will vary depending on Lambda runtime. You can use the --all flag to deploy all of the rules in your working directory. -This command uses 'PutOrganizationConfigRule' API for the rule deployment. If a new account joins an organization, the rule is deployed to that account. When an account leaves an organization, the rule is removed. Deployment of existing organizational AWS Config Rules will only be retried for 7 hours after an account is added to your organization if a recorder is not available. You are expected to create a recorder if one doesn't exist within 7 hours of adding an account to your organization. - -View Logs For Deployed Rule ---------------------------- -Once the Rule has been deployed to AWS you can get the CloudWatch logs associated with your lambda function using the ``logs`` command. - -:: - - $ rdk logs MyRule -n 5 - 2017-11-15 22:59:33 - START RequestId: 96e7639a-ca15-11e7-95a2-b1521890638d Version: $LATEST - 2017-11-15 23:41:13 - REPORT RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Duration: 0.50 ms Billed Duration: 100 ms Memory Size: 256 MB - Max Memory Used: 36 MB - 2017-11-15 23:41:13 - END RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda - 2017-11-15 23:41:13 - Default RDK utility class does not yet support Scheduled Notifications. - 2017-11-15 23:41:13 - START RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Version: $LATEST - -You can use the ``-n`` and ``-f`` command line flags just like the UNIX ``tail`` command to view a larger number of log events and to continuously poll for new events. The latter option can be useful in conjunction with manually initiating Config Evaluations for your deploy Config Rule to make sure it is behaving as expected. - - - -Running the tests -================= - -The `testing` directory contains scripts and buildspec files that I use to run basic functionality tests across a variety of CLI environments (currently Ubuntu linux running python 3.7/3.8/3.9/3.10, and Windows Server running python3.10). If there is interest I can release a CloudFormation template that could be used to build the test environment, let me know if this is something you want! - - -Advanced Features -================= -Cross-Account Deployments -------------------------- -Features have been added to the RDK to facilitate the cross-account deployment pattern that enterprise customers have standardized on for custom Config Rules. A cross-account architecture is one in which the Lambda functions are deployed to a single central "Compliance" account (which may be the same as a central "Security" account), and the Config Rules are deployed to any number of "Satellite" accounts that are used by other teams or departments. This gives the compliance team confidence that their Rule logic cannot be tampered with and makes it much easier for them to modify rule logic without having to go through a complex deployment process to potentially hundreds of AWS accounts. The cross-account pattern uses two advanced RDK features - functions-only deployments and the `create-rule-template` command. 
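To make the satellite-account half of this pattern concrete, here is a hedged boto3 sketch that deploys a template produced by `create-rule-template` (described below) into a satellite account. The stack name is arbitrary, and the parameter key shown as `LambdaAccountId` is an assumption, so check it against the Parameters section of the template you generate.

```python
# Hypothetical satellite-account deployment of an RDK-generated rule template.
# Assumes `rdk create-rule-template -o remote-rule-template.json --all` was run
# in the compliance account and that current credentials target the satellite account.
import boto3


def deploy_rule_template(
    template_path: str,
    compliance_account_id: str,
    stack_name: str = "rdk-satellite-config-rules",
) -> str:
    with open(template_path) as handle:
        template_body = handle.read()

    cloudformation = boto3.client("cloudformation")
    response = cloudformation.create_stack(
        StackName=stack_name,
        TemplateBody=template_body,
        # "LambdaAccountId" is a placeholder; use the parameter name from your template.
        Parameters=[{"ParameterKey": "LambdaAccountId", "ParameterValue": compliance_account_id}],
        # The generated template creates IAM resources such as the Config role.
        Capabilities=["CAPABILITY_IAM"],
    )
    return response["StackId"]


if __name__ == "__main__":
    print(deploy_rule_template("remote-rule-template.json", "111111111111"))
```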
- -**Function-Only Deployment** - -By using the `-f` or `--functions-only` flag on the `deploy` command the RDK will deploy only the necessary Lambda Functions, Lambda Execution Role, and Lambda Permissions to the account specified by the execution credentials. It accomplishes this by batching up all of the Lambda function CloudFormation snippets for the selected Rule(s) into a single dynamically generated template and deploy that CloudFormation template. One consequence of this is that subsequent deployments that specify a different set of Rules for the same stack name will update that CloudFormation stack, and any Rules that were included in the first deployment but not in the second will be removed. You can use the `--stack-name` parameter to override the default CloudFormation stack name if you need to manage different subsets of your Lambda Functions independently. The intended usage is to deploy the functions for all of the Config rules in the Security/Compliance account, which can be done simply by using `rdk deploy -f --all` from your working directory. - -**`create-rule-template` command** - -This command generates a CloudFormation template that defines the AWS Config rules themselves, along with the Config Role, Config data bucket, Configuration Recorder, and Delivery channel necessary for the Config rules to work in a satellite account. You must specify the file name for the generated template using the `--output-file` or `o` command line flags. The generated template takes a single parameter of the AccountID of the central compliance account that contains the Lambda functions that will back your custom Config Rules. The generated template can be deployed in the desired satellite accounts through any of the means that you can deploy any other CloudFormation template, including the console, the CLI, as a CodePipeline task, or using StackSets. The `create-rule-template` command takes all of the standard arguments for selecting Rules to include in the generated template, including lists of individual Rule names, an `--all` flag, or using the RuleSets feature described below. - -:: - - $ rdk create-rule-template -o remote-rule-template.json --all - Generating CloudFormation template! - CloudFormation template written to remote-rule-template.json - - -Disable the supported resource types check ------------------------------------------- -It is now possible to define a resource type that is not yet supported by rdk. To disable the supported resource check use the optional flag '--skip-supported-resource-check' during the create command. - -:: - - $ rdk create MyRule --runtime python3.8 --resource-types AWS::New::ResourceType --skip-supported-resource-check - 'AWS::New::ResourceType' not found in list of accepted resource types. - Skip-Supported-Resource-Check Flag set (--skip-supported-resource-check), ignoring missing resource type error. - Running create! - Local Rule files created. - -Custom Lambda Function Name ---------------------------- -As of version 0.7.14, instead of defaulting the lambda function names to 'RDK-Rule-Function-' it is possible to customize the name for the Lambda function to any 64 characters string as per Lambda's naming standards using the optional '--custom-lambda-name' flag while performing rdk create. This opens up new features like : - -1. Longer config rule name. -2. Custom lambda function naming as per personal or enterprise standards. 
- -:: - - $ rdk create MyLongerRuleName --runtime python3.8 --resource-types AWS::EC2::Instance --custom-lambda-name custom-prefix-for-MyLongerRuleName - Running create! - Local Rule files created. - -The above example would create files with config rule name as 'MyLongerRuleName' and lambda function with the name 'custom-prefix-for-MyLongerRuleName' instead of 'RDK-Rule-Function-MyLongerRuleName' - -RuleSets --------- -New as of version 0.3.11, it is possible to add RuleSet tags to rules that can be used to deploy and test groups of rules together. Rules can belong to multiple RuleSets, and RuleSet membership is stored only in the parameters.json metadata. The `deploy`, `create-rule-template`, and `test-local` commands are RuleSet-aware such that a RuleSet can be passed in as the target instead of `--all` or a specific named Rule. - -A comma-delimited list of RuleSets can be added to a Rule when you create it (using the `--rulesets` flag), as part of a `modify` command, or using new `ruleset` subcommands to add or remove individual rules from a RuleSet. - -Running `rdk rulesets list` will display a list of the RuleSets currently defined across all of the Rules in the working directory - -:: - - rdk-dev $ rdk rulesets list - RuleSets: AnotherRuleSet MyNewSet - -Naming a specific RuleSet will list all of the Rules that are part of that RuleSet. - -:: - - rdk-dev $ rdk rulesets list AnotherRuleSet - Rules in AnotherRuleSet : RSTest - -Rules can be added to or removed from RuleSets using the `add` and `remove` subcommands: - -:: - - rdk-dev $ rdk rulesets add MyNewSet RSTest - RSTest added to RuleSet MyNewSet - - rdk-dev $ rdk rulesets remove AnotherRuleSet RSTest - RSTest removed from RuleSet AnotherRuleSet - -RuleSets are a convenient way to maintain a single repository of Config Rules that may need to have subsets of them deployed to different environments. For example your development environment may contain some of the Rules that you run in Production but not all of them; RuleSets gives you a way to identify and selectively deploy the appropriate Rules to each environment. - -Managed Rules -------------- -The RDK is able to deploy AWS Managed Rules. - -To do so, create a rule using "rdk create" and provide a valid SourceIdentifier via the --source-identifier CLI option. The list of Managed Rules can be found here: https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html, and note that the Identifier can be obtained by replacing the dashes with underscores and using all capitals (for example, the "guardduty-enabled-centralized" rule has the SourceIdentifier "GUARDDUTY_ENABLED_CENTRALIZED"). Just like custom Rules you will need to specify source events and/or a maximum evaluation frequency, and also pass in any Rule parameters. The resulting Rule directory will contain only the parameters.json file, but using `rdk deploy` or `rdk create-rule-template` can be used to deploy the Managed Rule like any other Custom Rule. 
- -Deploying Rules Across Multiple Regions ---------------------------------------- -The RDK is able to run init/deploy/undeploy across multiple regions with a `rdk -f -t ` - -If no region group is specified, rdk will deploy to the `default` region set - -To create a sample starter region group, run `rdk create-region-set` to specify the filename, add the `-o ` this will create a region set with the following tests and regions `"default":["us-east-1","us-west-1","eu-north-1","ap-east-1"],"aws-cn-region-set":["cn-north-1","cn-northwest-1"]` - -Using RDK to Generate a Lambda Layer in a region (Python3) ----------------------------------------------------------- -By default `rdk init --generate-lambda-layer` will generate an rdklib lambda layer while running init in whatever region it is run, to force re-generation of the layer, run `rdk init --generate-lambda-layer` again over a region - -To use this generated lambda layer, add the flag `--generated-lambda-layer` when running `rdk deploy`. For example: `rdk -f regions.yaml deploy LP3_TestRule_P39_lib --generated-lambda-layer` - -If you created layer with a custom name (by running `rdk init --custom-lambda-layer`, add a similar `custom-lambda-layer` flag when running deploy. - -Contributing -============ - -email us at rdk-maintainers@amazon.com if you have any questions. We are happy to help and discuss. - -Contacts -======== -* **Ricky Chau** - `rickychau2780 `_ - *current maintainer* -* **Benjamin Morris** - `bmorrissirromb `_ - *current maintainer* -* **Mark Beacom** - `mbeacom `_ - *current maintainer* -* **Julio Delgado Jr** - `tekdj7 `_ - *current maintainer* - -Past Contributors -================= -* **Michael Borchert** - *Orignal Python version* -* **Jonathan Rault** - *Orignal Design, testing, feedback* -* **Greg Kim and Chris Gutierrez** - *Initial work and CI definitions* -* **Henry Huang** - *Original CFN templates and other code* -* **Santosh Kumar** - *maintainer* -* **Jose Obando** - *maintainer* -* **Jarrett Andrulis** - `jarrettandrulis `_ - *maintainer* -* **Sandeep Batchu** - `batchus `_ - *maintainer* - -License -======= - -This project is licensed under the Apache 2.0 License - -Acknowledgments -=============== - -* the boto3 team makes all of this magic possible. - - -Link -==== - -* to view example of rules built with the RDK: https://github.com/awslabs/aws-config-rules/tree/master/python diff --git a/NEW_RUNTIME_PROCESS.md b/developer_notes.md similarity index 52% rename from NEW_RUNTIME_PROCESS.md rename to developer_notes.md index 3961a50c..4702f5e6 100644 --- a/NEW_RUNTIME_PROCESS.md +++ b/developer_notes.md @@ -1,31 +1,34 @@ -# New Runtime Support Process +# Developer Notes + +These notes are intended to help RDK developers update the repository consistently. + +## New Runtime Support Process + These instructions document the parts of the repository that need to be updated when support for a new Lambda runtime is added. 
-## Update pyproject.toml +### Update pyproject.toml - Add to `classifiers` list: -``` + +```yaml "Programming Language :: Python :: ," ``` -- Add to `include` list: -``` +- Add to `include` list: + +```yaml "rdk/template/runtime/python/*", "rdk/template/runtime/python-lib/*", ``` -## Update README.rst +### Update README.md - Update documentation and examples -## Update getting_started.rst - -- Update examples - -## Update rdk.py +### Update rdk.py - Update references to include new version -## Update Linux and Windows Buildspec files (`testing` folder) +### Update Linux and Windows Buildspec files (`testing` folder) -- Add new test cases for the new version \ No newline at end of file +- Add new test cases for the new version diff --git a/pyproject.toml b/pyproject.toml index 9b31317a..6a15747f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. [tool.poetry] name = "rdk" -version = "0.14.0" +version = "0.15.0" description = "Rule Development Kit CLI for AWS Config" authors = [ "AWS RDK Maintainers ", diff --git a/rdk/__init__.py b/rdk/__init__.py index a5407534..d03245c4 100644 --- a/rdk/__init__.py +++ b/rdk/__init__.py @@ -6,4 +6,4 @@ # # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -MY_VERSION = "0.14.0" +MY_VERSION = "0.15.0" diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 66a14670..00000000 --- a/tox.ini +++ /dev/null @@ -1,2 +0,0 @@ -[flake8] -max-line-length=140 \ No newline at end of file From 4f172c45d5d2fc75b70fa55227f34c0788573a7e Mon Sep 17 00:00:00 2001 From: Benjamin Morris Date: Wed, 24 May 2023 15:34:18 -0700 Subject: [PATCH 10/12] resolve conflicts from 0.14.0 --- NEW_RUNTIME_PROCESS.md | 31 ---- README.rst | 365 --------------------------------------- docs/getting_started.rst | 262 ---------------------------- 3 files changed, 658 deletions(-) delete mode 100644 NEW_RUNTIME_PROCESS.md delete mode 100644 README.rst delete mode 100644 docs/getting_started.rst diff --git a/NEW_RUNTIME_PROCESS.md b/NEW_RUNTIME_PROCESS.md deleted file mode 100644 index 3961a50c..00000000 --- a/NEW_RUNTIME_PROCESS.md +++ /dev/null @@ -1,31 +0,0 @@ -# New Runtime Support Process -These instructions document the parts of the repository that need to be updated when support for a new Lambda runtime is added. - -## Update pyproject.toml - -- Add to `classifiers` list: -``` -"Programming Language :: Python :: ," -``` - -- Add to `include` list: -``` -"rdk/template/runtime/python/*", -"rdk/template/runtime/python-lib/*", -``` - -## Update README.rst - -- Update documentation and examples - -## Update getting_started.rst - -- Update examples - -## Update rdk.py - -- Update references to include new version - -## Update Linux and Windows Buildspec files (`testing` folder) - -- Add new test cases for the new version \ No newline at end of file diff --git a/README.rst b/README.rst deleted file mode 100644 index de8d303a..00000000 --- a/README.rst +++ /dev/null @@ -1,365 +0,0 @@ -rdk -=== -|pypibadge| |downloadsbadge| - - -.. 
|pypibadge| image:: https://static.pepy.tech/personalized-badge/rdk?period=total&units=international_system&left_color=black&right_color=blue&left_text=downloads - :target: https://pepy.tech/project/rdk -.. |downloadsbadge| image:: https://img.shields.io/pypi/v/rdk - :alt: PyPI - -Rule Development Kit - -We greatly appreciate feedback and bug reports at rdk-maintainers@amazon.com! You may also create an issue on this repo. - -The RDK is designed to support a "Compliance-as-Code" workflow that is intuitive and productive. It abstracts away much of the undifferentiated heavy lifting associated with deploying AWS Config rules backed by custom lambda functions, and provides a streamlined develop-deploy-monitor iterative process. - -For complete documentation, including command reference, check out the `ReadTheDocs documentation `_. - -Getting Started -=============== -Uses python 3.7/3.8/3.9/3.10 and is installed via pip. Requires you to have an AWS account and sufficient permissions to manage the Config service, and to create S3 Buckets, Roles, and Lambda Functions. An AWS IAM Policy Document that describes the minimum necessary permissions can be found at policy/rdk-minimum-permissions.json. - -Under the hood, rdk uses boto3 to make API calls to AWS, so you can set your credentials any way that boto3 recognizes (options 3 through 8 here: http://boto3.readthedocs.io/en/latest/guide/configuration.html) or pass them in with the command-line parameters --profile, --region, --access-key-id, or --secret-access-key - -If you just want to use the RDK, go ahead and install it using pip:: - -$ pip install rdk - -Alternately, if you want to see the code and/or contribute you can clone the git repo, and then from the repo directory use pip to install the package. Use the '-e' flag to generate symlinks so that any edits you make will be reflected when you run the installed package. - -If you are going to author your Lambda functions using Java you will need to have Java 8 and gradle installed. If you are going to author your Lambda functions in C# you will need to have the dotnet CLI and the .NET Core Runtime 1.08 installed. -:: - - $ pip install -e . - -To make sure the rdk is installed correctly, running the package from the command line without any arguments should display help information. - -:: - - $ rdk - usage: rdk [-h] [-p PROFILE] [-k ACCESS_KEY] [-s SECRET_ACCESS_KEY] - [-r REGION] - ... - rdk: error: the following arguments are required: , - - -Usage -===== - -Configure your env ------------------- -To use the RDK, it's recommended to create a directory that will be your working directory. This should be committed to a source code repo, and ideally created as a python virtualenv. In that directory, run the ``init`` command to set up your AWS Config environment. - -:: - - $ rdk init - Running init! - Creating Config bucket config-bucket-780784666283 - Creating IAM role config-role - Waiting for IAM role to propagate - Config Service is ON - Config setup complete. - Creating Code bucket config-rule-code-bucket-780784666283ap-southeast-1 - -Running ``init`` subsequent times will validate your AWS Config setup and re-create any S3 buckets or IAM resources that are needed. 
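If you want to check the account state yourself before or after running ``init``, the short boto3 sketch below performs a similar sanity check. It is illustrative only and is not the validation logic that ``rdk init`` actually runs.

```python
# Rough, standalone check of the pieces that `rdk init` looks after.
# Illustrative only; not the RDK's implementation.
import boto3


def config_setup_looks_complete() -> bool:
    config = boto3.client("config")
    recorders = config.describe_configuration_recorders()["ConfigurationRecorders"]
    channels = config.describe_delivery_channels()["DeliveryChannels"]
    statuses = config.describe_configuration_recorder_status()["ConfigurationRecordersStatus"]
    # All three must exist, and at least one recorder must actually be recording.
    return bool(recorders) and bool(channels) and any(s.get("recording") for s in statuses)


if __name__ == "__main__":
    print("Config setup complete." if config_setup_looks_complete() else "Config setup incomplete.")
```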
- -- If you have config delivery bucket already present in some other AWS account then use **--config-bucket-exists-in-another-account** as argument::: - - $ rdk init --config-bucket-exists-in-another-account -- If you have AWS Organizations/ControlTower Setup in your AWS environment then additionally, use **--control-tower** as argument::: - - $ rdk init --control-tower --config-bucket-exists-in-another-account -- If bucket for custom lambda code is already present in current account then use **--skip-code-bucket-creation** argument::: - - $ rdk init --skip-code-bucket-creation - -- If you want rdk to create/update and upload the rdklib-layer for you, then use **--generate-lambda-layer** argument. In supported regions, rdk will deploy the layer using the Serverless Application Repository, otherwise it will build a local lambda layer archive and upload it for use::: - - $ rdk init --generate-lambda-layer -- If you want rdk to give a custom name to the lambda layer for you, then use **--custom-layer-namer** argument. The Serverless Application Repository currently cannot be used for custom lambda layers.::: - - $ rdk init --generate-lambda-layer --custom-layer-name - -Create Rules ------------- -In your working directory, use the ``create`` command to start creating a new custom rule. You must specify the runtime for the lambda function that will back the Rule, and you can also specify a resource type (or comma-separated list of types) that the Rule will evaluate or a maximum frequency for a periodic rule. This will add a new directory for the rule and populate it with several files, including a skeleton of your Lambda code. - -:: - - $ rdk create MyRule --runtime python3.8 --resource-types AWS::EC2::Instance --input-parameters '{"desiredInstanceType":"t2.micro"}' - Running create! - Local Rule files created. - -On Windows it is necessary to escape the double-quotes when specifying input parameters, so the `--input-parameters` argument would instead look something like this:: - - '{\"desiredInstanceType\":\"t2.micro\"}' - -Note that you can create rules that use EITHER resource-types OR maximum-frequency, but not both. We have found that rules that try to be both event-triggered as well as periodic wind up being very complicated and so we do not recommend it as a best practice. - -Edit Rules Locally ---------------------------- -Once you have created the rule, edit the python file in your rule directory (in the above example it would be ``MyRule/MyRule.py``, but may be deeper into the rule directory tree depending on your chosen Lambda runtime) to add whatever logic your Rule requires in the ``evaluate_compliance`` function. You will have access to the CI that was sent by Config, as well as any parameters configured for the Config Rule. Your function should return either a simple compliance status (one of ``COMPLIANT``, ``NONCOMPLIANT``, or ``NOT_APPLICABLE``), or if you're using the python or node runtimes you can return a JSON object with multiple evaluation responses that the RDK will send back to AWS Config. 
An example would look like:: - - for sg in response['SecurityGroups']: - evaluations.append( - { - 'ComplianceResourceType': 'AWS::EC2::SecurityGroup', - 'ComplianceResourceId': sg['GroupId'], - 'ComplianceType': 'COMPLIANT', - 'Annotation': 'This is an important note.', - 'OrderingTimestamp': str(datetime.datetime.now()) - }) - - - return evaluations - -This is necessary for periodic rules that are not triggered by any CI change (which means the CI that is passed in will be null), and also for attaching annotations to your evaluation results. - -If you want to see what the JSON structure of a CI looks like for creating your logic, you can use - -:: - -$ rdk sample-ci - -to output a formatted JSON document. - -Write and Run Unit Tests ------------------------- -If you are writing Config Rules using either of the Python runtimes there will be a _test.py file deployed along with your Lambda function skeleton. This can be used to write unit tests according to the standard Python unittest framework (documented here: https://docs.python.org/3/library/unittest.html), which can be run using the `test-local` rdk command:: - - $ rdk test-local MyTestRule - Running local test! - Testing MyTestRule - Looking for tests in /Users/mborch/Code/rdk-dev/MyTestRule - - --------------------------------------------------------------------- - - Ran 0 tests in 0.000s - - OK - - -The test file includes setup for the MagicMock library that can be used to stub boto3 API calls if your rule logic will involve making API calls to gather additional information about your AWS environment. For some tips on how to do this, check out this blog post: https://sgillies.net/2017/10/19/mock-is-magic.html - -Modify Rule ------------ -If you need to change the parameters of a Config rule in your working directory you can use the ``modify`` command. Any parameters you specify will overwrite existing values, any that you do not specify will not be changed. - -:: - - $ rdk modify MyRule --runtime python3.10 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' - Running modify! - Modified Rule 'MyRule'. Use the `deploy` command to push your changes to AWS. - -Again, on Windows the input parameters would look like:: - - '{\"desiredInstanceType\":\"t2.micro\"}' - -It is worth noting that until you actually call the ``deploy`` command your rule only exists in your working directory, none of the Rule commands discussed thus far actually makes changes to your account. - -Deploy Rule ------------ -Once you have completed your compliance validation code and set your Rule's configuration, you can deploy the Rule to your account using the ``deploy`` command. This will zip up your code (and the other associated code files, if any) into a deployable package (or run a gradle build if you have selected the java8 runtime or run the lambda packaging step from the dotnet CLI if you have selected the dotnetcore1.0 runtime), copy that zip file to S3, and then launch or update a CloudFormation stack that defines your Config Rule, Lambda function, and the necessary permissions and IAM Roles for it to function. Since CloudFormation does not deeply inspect Lambda code objects in S3 to construct its changeset, the ``deploy`` command will also directly update the Lambda function for any subsequent deployments to make sure code changes are propagated correctly. - -:: - - $ rdk deploy MyRule - Running deploy! 
- Zipping MyRule - Uploading MyRule - Creating CloudFormation Stack for MyRule - Waiting for CloudFormation stack operation to complete... - ... - Waiting for CloudFormation stack operation to complete... - Config deploy complete. - -The exact output will vary depending on Lambda runtime. You can use the --all flag to deploy all of the rules in your working directory. If you used the --generate-lambda-layer flag in rdk init, use the --generated-lambda-layer flag for rdk deploy. - -Deploy Organization Rule ------------------------- -You can also deploy the Rule to your AWS Organization using the ``deploy-organization`` command. -For successful evaluation of custom rules in child accounts, please make sure you do one of the following: - -1. Set ASSUME_ROLE_MODE in Lambda code to True, to get the lambda to assume the Role attached on the Config Service and confirm that the role trusts the master account where the Lambda function is going to be deployed. -2. Set ASSUME_ROLE_MODE in Lambda code to True, to get the lambda to assume a custom role and define an optional parameter with key as ExecutionRoleName and set the value to your custom role name; confirm that the role trusts the master account of the organization where the Lambda function will be deployed. - -:: - - $ rdk deploy-organization MyRule - Running deploy! - Zipping MyRule - Uploading MyRule - Creating CloudFormation Stack for MyRule - Waiting for CloudFormation stack operation to complete... - ... - Waiting for CloudFormation stack operation to complete... - Config deploy complete. - -The exact output will vary depending on Lambda runtime. You can use the --all flag to deploy all of the rules in your working directory. -This command uses 'PutOrganizationConfigRule' API for the rule deployment. If a new account joins an organization, the rule is deployed to that account. When an account leaves an organization, the rule is removed. Deployment of existing organizational AWS Config Rules will only be retried for 7 hours after an account is added to your organization if a recorder is not available. You are expected to create a recorder if one doesn't exist within 7 hours of adding an account to your organization. - -View Logs For Deployed Rule ---------------------------- -Once the Rule has been deployed to AWS you can get the CloudWatch logs associated with your lambda function using the ``logs`` command. - -:: - - $ rdk logs MyRule -n 5 - 2017-11-15 22:59:33 - START RequestId: 96e7639a-ca15-11e7-95a2-b1521890638d Version: $LATEST - 2017-11-15 23:41:13 - REPORT RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Duration: 0.50 ms Billed Duration: 100 ms Memory Size: 256 MB - Max Memory Used: 36 MB - 2017-11-15 23:41:13 - END RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda - 2017-11-15 23:41:13 - Default RDK utility class does not yet support Scheduled Notifications. - 2017-11-15 23:41:13 - START RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Version: $LATEST - -You can use the ``-n`` and ``-f`` command line flags just like the UNIX ``tail`` command to view a larger number of log events and to continuously poll for new events. The latter option can be useful in conjunction with manually initiating Config Evaluations for your deploy Config Rule to make sure it is behaving as expected. 
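Manually initiating an evaluation, as mentioned above, can be done from the AWS console or with a small script like the sketch below. The rule name is a placeholder and the script is not part of the RDK CLI; it simply pairs an on-demand evaluation with a read of the resulting compliance state.

```python
# Hypothetical companion to `rdk logs MyRule -f`: trigger an on-demand evaluation
# and print the rule's current compliance state. Not part of the RDK CLI.
import boto3


def trigger_and_check(rule_name: str) -> None:
    config = boto3.client("config")
    # Ask AWS Config to re-run the rule on demand (repeated calls for the same rule are throttled).
    config.start_config_rules_evaluation(ConfigRuleNames=[rule_name])
    # Compliance results update once the backing Lambda finishes evaluating.
    for item in config.describe_compliance_by_config_rule(ConfigRuleNames=[rule_name])["ComplianceByConfigRules"]:
        print(item["ConfigRuleName"], item.get("Compliance", {}).get("ComplianceType", "UNKNOWN"))


if __name__ == "__main__":
    trigger_and_check("MyRule")
```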
- - - -Running the tests -================= - -The `testing` directory contains scripts and buildspec files that I use to run basic functionality tests across a variety of CLI environments (currently Ubuntu linux running python 3.7/3.8/3.9/3.10, and Windows Server running python3.10). If there is interest I can release a CloudFormation template that could be used to build the test environment, let me know if this is something you want! - - -Advanced Features -================= -Cross-Account Deployments -------------------------- -Features have been added to the RDK to facilitate the cross-account deployment pattern that enterprise customers have standardized on for custom Config Rules. A cross-account architecture is one in which the Lambda functions are deployed to a single central "Compliance" account (which may be the same as a central "Security" account), and the Config Rules are deployed to any number of "Satellite" accounts that are used by other teams or departments. This gives the compliance team confidence that their Rule logic cannot be tampered with and makes it much easier for them to modify rule logic without having to go through a complex deployment process to potentially hundreds of AWS accounts. The cross-account pattern uses two advanced RDK features - functions-only deployments and the `create-rule-template` command. - -**Function-Only Deployment** - -By using the `-f` or `--functions-only` flag on the `deploy` command the RDK will deploy only the necessary Lambda Functions, Lambda Execution Role, and Lambda Permissions to the account specified by the execution credentials. It accomplishes this by batching up all of the Lambda function CloudFormation snippets for the selected Rule(s) into a single dynamically generated template and deploy that CloudFormation template. One consequence of this is that subsequent deployments that specify a different set of Rules for the same stack name will update that CloudFormation stack, and any Rules that were included in the first deployment but not in the second will be removed. You can use the `--stack-name` parameter to override the default CloudFormation stack name if you need to manage different subsets of your Lambda Functions independently. The intended usage is to deploy the functions for all of the Config rules in the Security/Compliance account, which can be done simply by using `rdk deploy -f --all` from your working directory. - -**`create-rule-template` command** - -This command generates a CloudFormation template that defines the AWS Config rules themselves, along with the Config Role, Config data bucket, Configuration Recorder, and Delivery channel necessary for the Config rules to work in a satellite account. You must specify the file name for the generated template using the `--output-file` or `o` command line flags. The generated template takes a single parameter of the AccountID of the central compliance account that contains the Lambda functions that will back your custom Config Rules. The generated template can be deployed in the desired satellite accounts through any of the means that you can deploy any other CloudFormation template, including the console, the CLI, as a CodePipeline task, or using StackSets. The `create-rule-template` command takes all of the standard arguments for selecting Rules to include in the generated template, including lists of individual Rule names, an `--all` flag, or using the RuleSets feature described below. 
- -:: - - $ rdk create-rule-template -o remote-rule-template.json --all - Generating CloudFormation template! - CloudFormation template written to remote-rule-template.json - - -Disable the supported resource types check ------------------------------------------- -It is now possible to define a resource type that is not yet supported by rdk. To disable the supported resource check use the optional flag '--skip-supported-resource-check' during the create command. - -:: - - $ rdk create MyRule --runtime python3.8 --resource-types AWS::New::ResourceType --skip-supported-resource-check - 'AWS::New::ResourceType' not found in list of accepted resource types. - Skip-Supported-Resource-Check Flag set (--skip-supported-resource-check), ignoring missing resource type error. - Running create! - Local Rule files created. - -Custom Lambda Function Name ---------------------------- -As of version 0.7.14, instead of defaulting the lambda function names to 'RDK-Rule-Function-' it is possible to customize the name for the Lambda function to any 64 characters string as per Lambda's naming standards using the optional '--custom-lambda-name' flag while performing rdk create. This opens up new features like : - -1. Longer config rule name. -2. Custom lambda function naming as per personal or enterprise standards. - -:: - - $ rdk create MyLongerRuleName --runtime python3.8 --resource-types AWS::EC2::Instance --custom-lambda-name custom-prefix-for-MyLongerRuleName - Running create! - Local Rule files created. - -The above example would create files with config rule name as 'MyLongerRuleName' and lambda function with the name 'custom-prefix-for-MyLongerRuleName' instead of 'RDK-Rule-Function-MyLongerRuleName' - -RuleSets --------- -New as of version 0.3.11, it is possible to add RuleSet tags to rules that can be used to deploy and test groups of rules together. Rules can belong to multiple RuleSets, and RuleSet membership is stored only in the parameters.json metadata. The `deploy`, `create-rule-template`, and `test-local` commands are RuleSet-aware such that a RuleSet can be passed in as the target instead of `--all` or a specific named Rule. - -A comma-delimited list of RuleSets can be added to a Rule when you create it (using the `--rulesets` flag), as part of a `modify` command, or using new `ruleset` subcommands to add or remove individual rules from a RuleSet. - -Running `rdk rulesets list` will display a list of the RuleSets currently defined across all of the Rules in the working directory - -:: - - rdk-dev $ rdk rulesets list - RuleSets: AnotherRuleSet MyNewSet - -Naming a specific RuleSet will list all of the Rules that are part of that RuleSet. - -:: - - rdk-dev $ rdk rulesets list AnotherRuleSet - Rules in AnotherRuleSet : RSTest - -Rules can be added to or removed from RuleSets using the `add` and `remove` subcommands: - -:: - - rdk-dev $ rdk rulesets add MyNewSet RSTest - RSTest added to RuleSet MyNewSet - - rdk-dev $ rdk rulesets remove AnotherRuleSet RSTest - RSTest removed from RuleSet AnotherRuleSet - -RuleSets are a convenient way to maintain a single repository of Config Rules that may need to have subsets of them deployed to different environments. For example your development environment may contain some of the Rules that you run in Production but not all of them; RuleSets gives you a way to identify and selectively deploy the appropriate Rules to each environment. - -Managed Rules -------------- -The RDK is able to deploy AWS Managed Rules. 
- -To do so, create a rule using "rdk create" and provide a valid SourceIdentifier via the --source-identifier CLI option. The list of Managed Rules can be found here: https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html, and note that the Identifier can be obtained by replacing the dashes with underscores and using all capitals (for example, the "guardduty-enabled-centralized" rule has the SourceIdentifier "GUARDDUTY_ENABLED_CENTRALIZED"). Just like custom Rules you will need to specify source events and/or a maximum evaluation frequency, and also pass in any Rule parameters. The resulting Rule directory will contain only the parameters.json file, but using `rdk deploy` or `rdk create-rule-template` can be used to deploy the Managed Rule like any other Custom Rule. - -Deploying Rules Across Multiple Regions ---------------------------------------- -The RDK is able to run init/deploy/undeploy across multiple regions with a `rdk -f -t ` - -If no region group is specified, rdk will deploy to the `default` region set - -To create a sample starter region group, run `rdk create-region-set` to specify the filename, add the `-o ` this will create a region set with the following tests and regions `"default":["us-east-1","us-west-1","eu-north-1","ap-east-1"],"aws-cn-region-set":["cn-north-1","cn-northwest-1"]` - -Using RDK to Generate a Lambda Layer in a region (Python3) ----------------------------------------------------------- -By default `rdk init --generate-lambda-layer` will generate an rdklib lambda layer while running init in whatever region it is run, to force re-generation of the layer, run `rdk init --generate-lambda-layer` again over a region - -To use this generated lambda layer, add the flag `--generated-lambda-layer` when running `rdk deploy`. For example: `rdk -f regions.yaml deploy LP3_TestRule_P39_lib --generated-lambda-layer` - -If you created layer with a custom name (by running `rdk init --custom-lambda-layer`, add a similar `custom-lambda-layer` flag when running deploy. - -Contributing -============ - -email us at rdk-maintainers@amazon.com if you have any questions. We are happy to help and discuss. - -Contacts -======== -* **Ricky Chau** - `rickychau2780 `_ - *current maintainer* -* **Benjamin Morris** - `bmorrissirromb `_ - *current maintainer* -* **Mark Beacom** - `mbeacom `_ - *current maintainer* -* **Julio Delgado Jr** - `tekdj7 `_ - *current maintainer* - -Past Contributors -================= -* **Michael Borchert** - *Orignal Python version* -* **Jonathan Rault** - *Orignal Design, testing, feedback* -* **Greg Kim and Chris Gutierrez** - *Initial work and CI definitions* -* **Henry Huang** - *Original CFN templates and other code* -* **Santosh Kumar** - *maintainer* -* **Jose Obando** - *maintainer* -* **Jarrett Andrulis** - `jarrettandrulis `_ - *maintainer* -* **Sandeep Batchu** - `batchus `_ - *maintainer* - -License -======= - -This project is licensed under the Apache 2.0 License - -Acknowledgments -=============== - -* the boto3 team makes all of this magic possible. - - -Link -==== - -* to view example of rules built with the RDK: https://github.com/awslabs/aws-config-rules/tree/master/python diff --git a/docs/getting_started.rst b/docs/getting_started.rst deleted file mode 100644 index a66052ed..00000000 --- a/docs/getting_started.rst +++ /dev/null @@ -1,262 +0,0 @@ -Getting Started -=============== - -Let's get started using the RDK! - -Prerequisites -------------- - -RDK uses python 3.7+. 
You will need to have an AWS account and sufficient permissions to manage the Config service, and to create and manage S3 Buckets, Roles, and Lambda Functions. An AWS IAM Policy Document that describes the minimum necessary permissions can be found `here `_ on github. - -Under the hood, rdk uses boto3 to make API calls to AWS, so you can set your credentials any way that boto3 recognizes (options 3 through 8 in the `boto docs here `_ ) or pass them in with the command-line parameters --profile, --region, --access-key-id, or --secret-access-key - -.. _permissions: http://www.python.org/ - -Installation ------------- - -If you just want to use the RDK, go ahead and install it using pip:: - -$ pip install rdk - -Alternately, if you want to see the code and/or contribute you can clone the `git repo `_ , and then from the repo directory use pip to install the package. Use the '-e' flag to generate symlinks so that any edits you make will be reflected when you run the installed package. - -If you are going to author your Lambda functions using Java you will need to have Java 8 and gradle installed. If you are going to author your Lambda functions in C# you will need to have the dotnet CLI and the .NET Core Runtime 1.08 installed. -:: - - $ pip install -e . - -To make sure the rdk is installed correctly, running the package from the command line without any arguments should display help information. - -:: - - $ rdk - usage: rdk [-h] [-p PROFILE] [-k ACCESS_KEY] [-s SECRET_ACCESS_KEY] - [-r REGION] - ... - rdk: error: the following arguments are required: , - - -Usage ------ - -Configure your env -~~~~~~~~~~~~~~~~~~ -To use the RDK, it's recommended to create a directory that will be your working directory. This should be committed to a source code repo, and ideally created as a python virtualenv. In that directory, run the ``init`` command to set up your AWS Config environment. - -:: - - $ rdk init - Running init! - Creating Config bucket config-bucket-780784666283 - Creating IAM role config-role - Waiting for IAM role to propagate - Config Service is ON - Config setup complete. - Creating Code bucket config-rule-code-bucket-780784666283ap-southeast-1 - -Running ``init`` subsequent times will validate your AWS Config setup and re-create any S3 buckets or IAM resources that are needed. - -Create Rules -~~~~~~~~~~~~ -In your working directory, use the ``create`` command to start creating a new custom rule. You must specify the runtime for the lambda function that will back the Rule, and you can also specify a resource type (or comma-separated list of types) that the Rule will evaluate or a maximum frequency for a periodic rule. This will add a new directory for the rule and populate it with several files, including a skeleton of your Lambda code. - -:: - - $ rdk create MyRule --runtime python3.8 --resource-types AWS::EC2::Instance --input-parameters '{"desiredInstanceType":"t2.micro"}' - Running create! - Local Rule files created. - -On Windows it is necessary to escape the double-quotes when specifying input parameters, so the `--input-parameters` argument would instead look something like this:: - - '{\"desiredInstanceType\":\"t2.micro\"}' - -Note that you can create rules that use EITHER resource-types OR maximum-frequency, but not both. We have found that rules that try to be both event-triggered as well as periodic wind up being very complicated and so we do not recommend it as a best practice. 
- -Edit Rules Locally -~~~~~~~~~~~~~~~~~~ -Once you have created the rule, edit the python file in your rule directory (in the above example it would be ``MyRule/MyRule.py``, but may be deeper into the rule directory tree depending on your chosen Lambda runtime) to add whatever logic your Rule requires in the ``evaluate_compliance`` function. You will have access to the CI that was sent by Config, as well as any parameters configured for the Config Rule. Your function should return either a simple compliance status (one of ``COMPLIANT``, ``NONCOMPLIANT``, or ``NOT_APPLICABLE``), or if you're using the python or node runtimes you can return a JSON object with multiple evaluation responses that the RDK will send back to AWS Config. An example would look like:: - - for sg in response['SecurityGroups']: - evaluations.append( - { - 'ComplianceResourceType': 'AWS::EC2::SecurityGroup', - 'ComplianceResourceId': sg['GroupId'], - 'ComplianceType': 'COMPLIANT', - 'Annotation': 'This is an important note.', - 'OrderingTimestamp': str(datetime.datetime.now()) - }) - - - return evaluations - -This is necessary for periodic rules that are not triggered by any CI change (which means the CI that is passed in will be null), and also for attaching annotations to your evaluation results. - -If you want to see what the JSON structure of a CI looks like for creating your logic, you can use - -:: - -$ rdk sample-ci - -to output a formatted JSON document. - -Write and Run Unit Tests -~~~~~~~~~~~~~~~~~~~~~~~~ -If you are writing Config Rules using either of the Python runtimes there will be a _test.py file deployed along with your Lambda function skeleton. This can be used to write unit tests according to the standard Python unittest framework (documented here: https://docs.python.org/3/library/unittest.html), which can be run using the `test-local` rdk command:: - - $ rdk test-local MyTestRule - Running local test! - Testing MyTestRule - Looking for tests in /Users/mborch/Code/rdk-dev/MyTestRule - - --------------------------------------------------------------------- - - Ran 0 tests in 0.000s - - OK - - -The test file includes setup for the MagicMock library that can be used to stub boto3 API calls if your rule logic will involve making API calls to gather additional information about your AWS environment. For some tips on how to do this, check out this blog post: https://sgillies.net/2017/10/19/mock-is-magic.html - -Modify Rule -~~~~~~~~~~~ -If you need to change the parameters of a Config rule in your working directory you can use the ``modify`` command. Any parameters you specify will overwrite existing values, any that you do not specify will not be changed. - -:: - - $ rdk modify MyRule --runtime python3.10 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' - Running modify! - Modified Rule 'MyRule'. Use the `deploy` command to push your changes to AWS. - -Again, on Windows the input parameters would look like:: - - '{\"desiredInstanceType\":\"t2.micro\"}' - -It is worth noting that until you actually call the ``deploy`` command your rule only exists in your working directory, none of the Rule commands discussed thus far actually makes changes to your account. - -Deploy Rule -~~~~~~~~~~~ -Once you have completed your compliance validation code and set your Rule's configuration, you can deploy the Rule to your account using the ``deploy`` command. 
This will zip up your code (and the other associated code files, if any) into a deployable package (or run a gradle build if you have selected the java8 runtime or run the lambda packaging step from the dotnet CLI if you have selected the dotnetcore1.0 runtime), copy that zip file to S3, and then launch or update a CloudFormation stack that defines your Config Rule, Lambda function, and the necessary permissions and IAM Roles for it to function. Since CloudFormation does not deeply inspect Lambda code objects in S3 to construct its changeset, the ``deploy`` command will also directly update the Lambda function for any subsequent deployments to make sure code changes are propagated correctly. - -:: - - $ rdk deploy MyRule - Running deploy! - Zipping MyRule - Uploading MyRule - Creating CloudFormation Stack for MyRule - Waiting for CloudFormation stack operation to complete... - ... - Waiting for CloudFormation stack operation to complete... - Config deploy complete. - -The exact output will vary depending on Lambda runtime. You can use the --all flag to deploy all of the rules in your working directory. - -View Logs For Deployed Rule -~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Once the Rule has been deployed to AWS you can get the CloudWatch logs associated with your lambda function using the ``logs`` command. - -:: - - $ rdk logs MyRule -n 5 - 2017-11-15 22:59:33 - START RequestId: 96e7639a-ca15-11e7-95a2-b1521890638d Version: $LATEST - 2017-11-15 23:41:13 - REPORT RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Duration: 0.50 ms Billed Duration: 100 ms Memory Size: 256 MB - Max Memory Used: 36 MB - 2017-11-15 23:41:13 - END RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda - 2017-11-15 23:41:13 - Default RDK utility class does not yet support Scheduled Notifications. - 2017-11-15 23:41:13 - START RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Version: $LATEST - -You can use the ``-n`` and ``-f`` command line flags just like the UNIX ``tail`` command to view a larger number of log events and to continuously poll for new events. The latter option can be useful in conjunction with manually initiating Config Evaluations for your deploy Config Rule to make sure it is behaving as expected. - - -Advanced Features ------------------ -Cross-Account Deployments -~~~~~~~~~~~~~~~~~~~~~~~~~ -Features have been added to the RDK to facilitate the cross-account deployment pattern that enterprise customers have standardized on for custom Config Rules. A cross-account architecture is one in which the Lambda functions are deployed to a single central "Compliance" account (which may be the same as a central "Security" account), and the Config Rules are deployed to any number of "Satellite" accounts that are used by other teams or departments. This gives the compliance team confidence that their Rule logic cannot be tampered with and makes it much easier for them to modify rule logic without having to go through a complex deployment process to potentially hundreds of AWS accounts. The cross-account pattern uses two advanced RDK features - functions-only deployments and the `create-rule-template` command. - -**Function-Only Deployment** - -By using the `-f` or `--functions-only` flag on the `deploy` command the RDK will deploy only the necessary Lambda Functions, Lambda Execution Role, and Lambda Permissions to the account specified by the execution credentials. 
It accomplishes this by batching up all of the Lambda function CloudFormation snippets for the selected Rule(s) into a single dynamically generated template and deploy that CloudFormation template. One consequence of this is that subsequent deployments that specify a different set of Rules for the same stack name will update that CloudFormation stack, and any Rules that were included in the first deployment but not in the second will be removed. You can use the `--stack-name` parameter to override the default CloudFormation stack name if you need to manage different subsets of your Lambda Functions independently. The intended usage is to deploy the functions for all of the Config rules in the Security/Compliance account, which can be done simply by using `rdk deploy -f --all` from your working directory. - -**`create-rule-template` command** - -This command generates a CloudFormation template that defines the AWS Config rules themselves, along with the Config Role, Config data bucket, Configuration Recorder, and Delivery channel necessary for the Config rules to work in a satellite account. You must specify the file name for the generated template using the `--output-file` or `o` command line flags. The generated template takes a single parameter of the AccountID of the central compliance account that contains the Lambda functions that will back your custom Config Rules. The generated template can be deployed in the desired satellite accounts through any of the means that you can deploy any other CloudFormation template, including the console, the CLI, as a CodePipeline task, or using StackSets. The `create-rule-template` command takes all of the standard arguments for selecting Rules to include in the generated template, including lists of individual Rule names, an `--all` flag, or using the RuleSets feature described below. - -:: - - $ rdk create-rule-template -o remote-rule-template.json --all - Generating CloudFormation template! - CloudFormation template written to remote-rule-template.json - - -RuleSets -~~~~~~~~ -New as of version 0.3.11, it is possible to add RuleSet tags to rules that can be used to deploy and test groups of rules together. Rules can belong to multiple RuleSets, and RuleSet membership is stored only in the parameters.json metadata. The `deploy`, `create-rule-template`, and `test-local` commands are RuleSet-aware such that a RuleSet can be passed in as the target instead of `--all` or a specific named Rule. - -A comma-delimited list of RuleSets can be added to a Rule when you create it (using the `--rulesets` flag), as part of a `modify` command, or using new `ruleset` subcommands to add or remove individual rules from a RuleSet. - -Running `rdk rulesets list` will display a list of the RuleSets currently defined across all of the Rules in the working directory - -:: - - rdk-dev $ rdk rulesets list - RuleSets: AnotherRuleSet MyNewSet - -Naming a specific RuleSet will list all of the Rules that are part of that RuleSet. - -:: - - rdk-dev $ rdk rulesets list AnotherRuleSet - Rules in AnotherRuleSet : RSTest - -Rules can be added to or removed from RuleSets using the `add` and `remove` subcommands: - -:: - - rdk-dev $ rdk rulesets add MyNewSet RSTest - RSTest added to RuleSet MyNewSet - - rdk-dev $ rdk rulesets remove AnotherRuleSet RSTest - RSTest removed from RuleSet AnotherRuleSet - -RuleSets are a convenient way to maintain a single repository of Config Rules that may need to have subsets of them deployed to different environments. 
For example your development environment may contain some of the Rules that you run in Production but not all of them; RuleSets gives you a way to identify and selectively deploy the appropriate Rules to each environment. - - -Region Sets -~~~~~~~~~~~ -`rdk init`, `rdk deploy`, and `rdk undeploy` subcommands now support running across multiple regions in parallel using region sets defined in a yaml file. - -To run a subcommand with a region set, pass in the region set yaml file and the specific region set to run through. - -:: - - $ rdk -f regions.yaml --region-set region-set-1 undeploy CUSTOM_RULE - Deleting rules in the following regions: ['sa-east-1', 'us-east-1']. - Delete specified Rules and Lambda Functions from your AWS Account? (y/N): y - [sa-east-1] Running un-deploy! - [us-east-1] Running un-deploy! - [us-east-1] Rule removal initiated. Waiting for Stack Deletion to complete. - [sa-east-1] Rule removal initiated. Waiting for Stack Deletion to complete. - [us-east-1] CloudFormation stack operation complete. - [us-east-1] Rule removal complete, but local files have been preserved. - [us-east-1] To re-deploy, use the 'deploy' command. - [sa-east-1] CloudFormation stack operation complete. - [sa-east-1] Rule removal complete, but local files have been preserved. - [sa-east-1] To re-deploy, use the 'deploy' command. - -Example region set file: - -:: - - default: - - us-west-1 - - us-west-2 - region-set-1: - - sa-east-1 - - us-east-1 - region-set-2: - - ap-southeast-1 - - eu-central-1 - - sa-east-1 - - us-east-1 From 14d5e3a3b71d7da860f3cbf3d453d418f81cc832 Mon Sep 17 00:00:00 2001 From: Benjamin Morris Date: Thu, 25 May 2023 08:44:41 -0700 Subject: [PATCH 11/12] little bugfixes --- .github/actions/dep-setup/action.yml | 63 + .github/workflows/publish.yaml | 50 + .github/workflows/validate.yaml | 78 + MANIFEST.in | 29 - NOTICE.txt | 2 +- README.md | 562 +++++ README.rst | 355 --- developer_notes.md | 34 + docs/Makefile | 20 - docs/_static/argparse.css | 13 - docs/commands/clean.md | 7 + docs/commands/create-rule-template.md | 28 + docs/commands/create.md | 7 + docs/commands/deploy.md | 47 + docs/commands/export.md | 19 + docs/commands/init.md | 27 + docs/commands/logs.md | 14 + docs/commands/modify.md | 7 + docs/commands/rulesets.md | 27 + docs/commands/sample-ci.md | 17 + docs/commands/test-local.md | 9 + docs/commands/undeploy.md | 10 + docs/conf.py | 181 -- docs/getting_started.rst | 262 --- docs/index.md | 1 + docs/index.rst | 24 - docs/introduction.rst | 7 - docs/legacy-docs.md | 378 ++++ docs/make.bat | 36 - docs/reference/clean.rst | 10 - docs/reference/create-rule-template.rst | 22 - docs/reference/create.rst | 10 - docs/reference/deploy.rst | 25 - docs/reference/export.rst | 22 - docs/reference/init.rst | 28 - docs/reference/logs.rst | 12 - docs/reference/modify.rst | 10 - docs/reference/rulesets.rst | 16 - docs/reference/sample-ci.rst | 18 - docs/reference/test-local.rst | 10 - docs/reference/undeploy.rst | 12 - docs/references.rst | 21 - docs/requirements.txt | 305 ++- mkdocs.yml | 17 + poetry.lock | 1903 +++++++++++++++++ pyproject.toml | 138 ++ rdk-workshop/WorkshopSetup.yaml | 6 +- rdk-workshop/instructions.md | 36 +- rdk/__init__.py | 2 +- rdk/rdk.py | 1856 ++++++++++++---- rdk/template/configRule.json | 9 - rdk/template/configRuleOrganization.json | 9 - .../example_ci/AWS_R53_HostedZone.json | 39 + .../AWS_S3_AccountPublicAccessBlock.json | 23 + .../AWS_SSM_ManagedInstanceInventory.json | 765 +++---- .../dotnetcore1.0/CustomConfigHandler.cs | 189 -- 
.../runtime/dotnetcore1.0/RuleCode.cs | 27 - .../aws-lambda-tools-defaults.json | 19 - .../runtime/dotnetcore1.0/csharp7.0.csproj | 28 - .../dotnetcore2.0/CustomConfigHandler.cs | 189 -- .../runtime/dotnetcore2.0/RuleCode.cs | 27 - .../aws-lambda-tools-defaults.json | 19 - .../runtime/dotnetcore2.0/csharp7.0.csproj | 28 - rdk/template/runtime/nodejs4.3/rule_code.js | 183 -- rdk/template/runtime/nodejs6.10/rule_code.js | 215 -- .../runtime/python3.10-lib/rule_code.py | 25 + .../runtime/python3.10-lib/rule_test.py | 157 ++ rdk/template/runtime/python3.10/rule_code.py | 437 ++++ rdk/template/runtime/python3.10/rule_test.py | 177 ++ .../managed-rule-code/rule_code.py | 168 -- .../managed-rule-code/rule_util.py | 147 -- .../SIMPLE_DELEGATE_PREPROD_TESTING.json | 28 - .../tst/managed-rule-code/sample_test.py | 9 - .../runtime/python3.7-lib/rule_test.py | 142 +- rdk/template/runtime/python3.7/rule_code.py | 4 +- .../runtime/python3.8-lib/rule_test.py | 142 +- rdk/template/runtime/python3.8/rule_code.py | 4 +- .../runtime/python3.9-lib/rule_test.py | 142 +- rdk/template/runtime/python3.9/rule_code.py | 4 +- rdk/template/terraform/0.11/config_rule.tf | 8 - rdk/template/terraform/0.12/config_rule.tf | 7 - setup.py | 47 - testing/linux-python3-buildspec.yaml | 18 +- testing/windows-python2-buildspec.yaml | 13 - testing/windows-python3-buildspec.yaml | 22 +- tox.ini | 2 + 86 files changed, 6679 insertions(+), 3586 deletions(-) create mode 100644 .github/actions/dep-setup/action.yml create mode 100644 .github/workflows/publish.yaml create mode 100644 .github/workflows/validate.yaml delete mode 100644 MANIFEST.in create mode 100644 README.md delete mode 100644 README.rst create mode 100644 developer_notes.md delete mode 100644 docs/Makefile delete mode 100644 docs/_static/argparse.css create mode 100644 docs/commands/clean.md create mode 100644 docs/commands/create-rule-template.md create mode 100644 docs/commands/create.md create mode 100644 docs/commands/deploy.md create mode 100644 docs/commands/export.md create mode 100644 docs/commands/init.md create mode 100644 docs/commands/logs.md create mode 100644 docs/commands/modify.md create mode 100644 docs/commands/rulesets.md create mode 100644 docs/commands/sample-ci.md create mode 100644 docs/commands/test-local.md create mode 100644 docs/commands/undeploy.md delete mode 100644 docs/conf.py delete mode 100644 docs/getting_started.rst create mode 120000 docs/index.md delete mode 100644 docs/index.rst delete mode 100644 docs/introduction.rst create mode 100644 docs/legacy-docs.md delete mode 100644 docs/make.bat delete mode 100644 docs/reference/clean.rst delete mode 100644 docs/reference/create-rule-template.rst delete mode 100644 docs/reference/create.rst delete mode 100644 docs/reference/deploy.rst delete mode 100644 docs/reference/export.rst delete mode 100644 docs/reference/init.rst delete mode 100644 docs/reference/logs.rst delete mode 100644 docs/reference/modify.rst delete mode 100644 docs/reference/rulesets.rst delete mode 100644 docs/reference/sample-ci.rst delete mode 100644 docs/reference/test-local.rst delete mode 100644 docs/reference/undeploy.rst delete mode 100644 docs/references.rst create mode 100644 mkdocs.yml create mode 100644 poetry.lock create mode 100644 pyproject.toml create mode 100644 rdk/template/example_ci/AWS_R53_HostedZone.json create mode 100644 rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json delete mode 100644 rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs delete mode 100755 
rdk/template/runtime/dotnetcore1.0/RuleCode.cs delete mode 100755 rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json delete mode 100644 rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj delete mode 100644 rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs delete mode 100644 rdk/template/runtime/dotnetcore2.0/RuleCode.cs delete mode 100644 rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json delete mode 100644 rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj delete mode 100644 rdk/template/runtime/nodejs4.3/rule_code.js delete mode 100644 rdk/template/runtime/nodejs6.10/rule_code.js create mode 100644 rdk/template/runtime/python3.10-lib/rule_code.py create mode 100644 rdk/template/runtime/python3.10-lib/rule_test.py create mode 100644 rdk/template/runtime/python3.10/rule_code.py create mode 100644 rdk/template/runtime/python3.10/rule_test.py delete mode 100644 rdk/template/runtime/python3.6-managed/managed-rule-code/rule_code.py delete mode 100644 rdk/template/runtime/python3.6-managed/managed-rule-code/rule_util.py delete mode 100644 rdk/template/runtime/python3.6-managed/managed-rule-definitions/SIMPLE_DELEGATE_PREPROD_TESTING.json delete mode 100644 rdk/template/runtime/python3.6-managed/tst/managed-rule-code/sample_test.py delete mode 100644 setup.py delete mode 100644 testing/windows-python2-buildspec.yaml create mode 100644 tox.ini diff --git a/.github/actions/dep-setup/action.yml b/.github/actions/dep-setup/action.yml new file mode 100644 index 00000000..8e5cc609 --- /dev/null +++ b/.github/actions/dep-setup/action.yml @@ -0,0 +1,63 @@ +name: Dependency Setup +description: 'Action to setup the runtime environment for CI jobs.' + +inputs: + python-version: + description: 'The Python version to be used during setup' + required: true + +runs: + using: "composite" + steps: + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: '${{ inputs.python-version }}' + + - name: Cache Poetry + id: cache-poetry + uses: actions/cache@v3 + with: + path: ${{github.workspace}}/.poetry + key: poetry-self-${{ hashFiles('.github/workflows/*.yml') }} + restore-keys: poetry-self- + + - name: Install Poetry + if: steps.cache-poetry.outputs.cache-hit != 'true' + shell: bash + run: | + export POETRY_HOME=${{github.workspace}}/.poetry + curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/install-poetry.py -O + python install-poetry.py --preview + rm install-poetry.py + + - name: Add Poetry to $PATH + shell: bash + run: echo "${{github.workspace}}/.poetry/bin" >> $GITHUB_PATH + + - name: Add poethepoet plugin + shell: bash + run: poetry self add 'poethepoet[poetry_plugin]' + + - name: Poetry Version + shell: bash + run: poetry --version + + - name: Check pyproject.toml validity + shell: bash + run: poetry check --no-interaction + + - name: Cache Dependencies + id: cache-deps + uses: actions/cache@v3 + with: + path: ${{github.workspace}}/.venv + key: poetry-deps-${{ hashFiles('**/poetry.lock') }} + restore-keys: poetry-deps- + + - name: Install Deps + if: steps.cache-deps.cache-hit != 'true' + shell: bash + run: | + poetry config virtualenvs.in-project true + poetry install --no-interaction diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml new file mode 100644 index 00000000..abcc2d89 --- /dev/null +++ b/.github/workflows/publish.yaml @@ -0,0 +1,50 @@ +name: 'Publish Release' + +on: + push: + tags: + - '*' + +jobs: + publish: + name: Publish Release + runs-on: ubuntu-latest + steps: + - name: Checkout 
Source + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup Dependencies + uses: './.github/actions/dep-setup' + with: + python-version: '3.10' + + - name: Run Safety Check + run: poetry poe safety + + - name: Get Python Module Version + run: | + MODULE_VERSION=$(poetry version --short) + echo "MODULE_VERSION=$MODULE_VERSION" >> $GITHUB_ENV + + - name: Verify Versions Match + run: | + TAG_VERSION=$(git describe HEAD --tags --abbrev=0) + echo "Git Tag Version: $TAG_VERSION" + echo "Python Module Version: $MODULE_VERSION" + if [[ "$TAG_VERSION" != "$MODULE_VERSION" ]]; then exit 1; fi + + - name: Publish to PyPi + run: poetry publish --build + env: + POETRY_PYPI_TOKEN_PYPI: ${{ secrets.POETRY_PYPI_TOKEN_PYPI }} + + - name: Release + uses: softprops/action-gh-release@v1 + with: + discussion_category_name: announcements + generate_release_notes: true + files: | + dist/rdk-${{env.MODULE_VERSION}}-py3-none-any.whl + dist/rdk-${{env.MODULE_VERSION}}.tar.gz diff --git a/.github/workflows/validate.yaml b/.github/workflows/validate.yaml new file mode 100644 index 00000000..c3684525 --- /dev/null +++ b/.github/workflows/validate.yaml @@ -0,0 +1,78 @@ +name: 'Validation' + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + ## TODO: Enable this once the repo is totally formatted to standard. + # lint-style: + # name: Linting and Styling + # runs-on: ubuntu-latest + # steps: + # - name: Checkout Source + # uses: actions/checkout@v3 + # with: + # fetch-depth: 0 + + # - name: Setup Dependencies + # uses: './.github/actions/dep-setup' + # with: + # python-version: '3.10' + + # - name: Run Styling Enforcement + # shell: bash + # run: poetry poe check + + # # TODO: As soon as the repo is in a state to enable this, we'll do so. + # - name: Run Style Linting Enforcement + # shell: bash + # run: poetry poe lint + + ## TODO: Enable unit tests via GH Actions when unit tests are fixed and migrated to pytest. 
+ # unit-tests: + # name: Run Unit Tests + # strategy: + # matrix: + # version: ['3.7', '3.8', '3.9', '3.10', '3.11'] + # os: [ubuntu-latest] + # runs-on: ${{ matrix.os }} + # steps: + # - name: Checkout Source + # uses: actions/checkout@v3 + # with: + # fetch-depth: 0 + + # - name: Setup Dependencies + # uses: './.github/actions/dep-setup' + # with: + # python-version: '${{ matrix.version }}' + + # - name: Run Tests + # shell: bash + # run: poetry poe test + + # - name: Codecov + # uses: codecov/codecov-action@v3 + + security: + name: Run Security Checks + runs-on: ubuntu-latest + steps: + - name: Checkout Source + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup Dependencies + uses: './.github/actions/dep-setup' + with: + python-version: '3.10' + + - name: Run Security Checks + shell: bash + run: poetry poe safety diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 9fc445cc..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,29 +0,0 @@ -include README.rst -include rdk/template/* -include rdk/template/terraform/* -include rdk/template/terraform/0.11/* -include rdk/template/terraform/0.12/* -include rdk/template/example_ci/* -include rdk/template/runtime/* -include rdk/template/runtime/java8/* -include rdk/template/runtime/java8/jars/* -include rdk/template/runtime/java8/src/main/java/com/rdk/* -include rdk/template/runtime/nodejs4.3/* -include rdk/template/runtime/python3.7/* -include rdk/template/runtime/python3.7-lib/* -include rdk/template/runtime/python3.8/* -include rdk/template/runtime/python3.8-lib/* -include rdk/template/runtime/python3.9/* -include rdk/template/runtime/python3.9-lib/* -include rdk/template/runtime/dotnetcore1.0/* -include rdk/template/runtime/dotnetcore1.0/bin/* -include rdk/template/runtime/dotnetcore1.0/obj/* -include rdk/template/runtime/dotnetcore1.0/obj/Debug/* -include rdk/template/runtime/dotnetcore1.0/obj/Release/netcoreapp1.0/* -include rdk/template/runtime/dotnetcore1.0/obj/Release/netcoreapp2.0/* -include rdk/template/runtime/dotnetcore2.0/* -include rdk/template/runtime/dotnetcore2.0/bin/* -include rdk/template/runtime/dotnetcore2.0/obj/* -include rdk/template/runtime/dotnetcore2.0/obj/Debug/* -include rdk/template/runtime/dotnetcore2.0/obj/Release/netcoreapp1.0/* -include rdk/template/runtime/dotnetcore2.0/obj/Release/netcoreapp2.0/* diff --git a/NOTICE.txt b/NOTICE.txt index 5bad47f4..433f02db 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,2 +1,2 @@ rdk -Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2017-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/README.md b/README.md new file mode 100644 index 00000000..f7d851c5 --- /dev/null +++ b/README.md @@ -0,0 +1,562 @@ +# AWS RDK + +[![pypibadge](https://static.pepy.tech/personalized-badge/rdk?period=total&units=international_system&left_color=black&right_color=blue&left_text=downloads)](https://pepy.tech/project/rdk) +![PyPI](https://img.shields.io/pypi/v/rdk) + +AWS Config Rules Development Kit + +We greatly appreciate feedback and bug reports at +! You may also create an issue on this repo. + +The RDK is designed to support a "Compliance-as-Code" workflow that is +intuitive and productive. It abstracts away much of the undifferentiated +heavy lifting associated with deploying AWS Config rules backed by +custom lambda functions, and provides a streamlined +develop-deploy-monitor iterative process. 
+ +For complete documentation, including command reference, check out the +[ReadTheDocs documentation](https://aws-config-rdk.readthedocs.io/). + +## Getting Started + +Uses Python 3.7+ and is installed via pip. Requires you to have +an AWS account and sufficient permissions to manage the Config service, +and to create S3 Buckets, Roles, and Lambda Functions. An AWS IAM Policy +Document that describes the minimum necessary permissions can be found +at `policy/rdk-minimum-permissions.json`. + +Under the hood, rdk uses boto3 to make API calls to AWS, so you can set +your credentials any way that boto3 recognizes (options 3 through 8 +[here](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#guide-credentials)) +or pass them in with the command-line parameters `--profile`, +`--region`, `--access-key-id`, or `--secret-access-key` + +If you just want to use the RDK, go ahead and install it using pip. + +```bash +pip install rdk +``` + +Alternately, if you want to see the code and/or contribute you can clone +the git repo, and then from the repo directory use pip to install the +package. Use the `-e` flag to generate symlinks so that any edits you +make will be reflected when you run the installed package. + +If you are going to author your Lambda functions using Java you will +need to have Java 8 and gradle installed. If you are going to author +your Lambda functions in C# you will need to have the dotnet CLI and the +.NET Core Runtime 1.08 installed. + +```bash +pip install -e . +``` + +To make sure the rdk is installed correctly, running the package from +the command line without any arguments should display help information. + +```bash +rdk +usage: rdk [-h] [-p PROFILE] [-k ACCESS_KEY_ID] [-s SECRET_ACCESS_KEY] + [-r REGION] [-f REGION_FILE] [--region-set REGION_SET] + [-v] ... +rdk: error: the following arguments are required: , +``` + +## Usage + +### Configure your env + +To use the RDK, it's recommended to create a directory that will be +your working directory. This should be committed to a source code repo, +and ideally created as a python virtualenv. In that directory, run the +`init` command to set up your AWS Config environment. + +```bash +rdk init +Running init! +Creating Config bucket config-bucket-780784666283 +Creating IAM role config-role +Waiting for IAM role to propagate +Config Service is ON +Config setup complete. +Creating Code bucket config-rule-code-bucket-780784666283ap-southeast-1 +``` + +Running `init` subsequent times will validate your AWS Config setup and +re-create any S3 buckets or IAM resources that are needed. + +- If you have config delivery bucket already present in some other AWS account then use `--config-bucket-exists-in-another-account` as argument. + +```bash +rdk init --config-bucket-exists-in-another-account +``` + +- If you have AWS Organizations/ControlTower Setup in your AWS environment then additionally, use `--control-tower` as argument. + +```bash +rdk init --control-tower --config-bucket-exists-in-another-account +``` + +- If bucket for custom lambda code is already present in current account then use `--skip-code-bucket-creation` argument. + +```bash +rdk init --skip-code-bucket-creation +``` + +- If you want rdk to create/update and upload the rdklib-layer for you, then use `--generate-lambda-layer` argument. In supported regions, rdk will deploy the layer using the Serverless Application Repository, otherwise it will build a local lambda layer archive and upload it for use. 
+ +```bash +rdk init --generate-lambda-layer +``` + +- If you want rdk to give a custom name to the lambda layer for you, then use `--custom-layer-namer` argument. The Serverless Application Repository currently cannot be used for custom lambda layers. + +```bash +rdk init --generate-lambda-layer --custom-layer-name +``` + +## Create Rules + +In your working directory, use the `create` command to start creating a +new custom rule. You must specify the runtime for the lambda function +that will back the Rule, and you can also specify a resource type (or +comma-separated list of types) that the Rule will evaluate or a maximum +frequency for a periodic rule. This will add a new directory for the +rule and populate it with several files, including a skeleton of your +Lambda code. + +```bash +rdk create MyRule --runtime python3.10 --resource-types AWS::EC2::Instance --input-parameters '{"desiredInstanceType":"t2.micro"}' +Running create! +Local Rule files created. +``` + +On Windows it is necessary to escape the double-quotes when specifying +input parameters, so the `--input-parameters` argument would instead +look something like this: + +`'{\"desiredInstanceType\":\"t2.micro\"}'` + +Note that you can create rules that use EITHER resource-types OR +maximum-frequency, but not both. We have found that rules that try to be +both event-triggered as well as periodic wind up being very complicated +and so we do not recommend it as a best practice. + +### Edit Rules Locally + +Once you have created the rule, edit the python file in your rule +directory (in the above example it would be `MyRule/MyRule.py`, but may +be deeper into the rule directory tree depending on your chosen Lambda +runtime) to add whatever logic your Rule requires in the +`evaluate_compliance` function. You will have access to the CI that was +sent by Config, as well as any parameters configured for the Config +Rule. Your function should return either a simple compliance status (one +of `COMPLIANT`, `NON_COMPLIANT`, or `NOT_APPLICABLE`), or if you're +using the python or node runtimes you can return a JSON object with +multiple evaluation responses that the RDK will send back to AWS Config. + +An example would look like: + +```python +for sg in response['SecurityGroups']: + evaluations.append( + { + 'ComplianceResourceType': 'AWS::EC2::SecurityGroup', + 'ComplianceResourceId': sg['GroupId'], + 'ComplianceType': 'COMPLIANT', + 'Annotation': 'This is an important note.', + 'OrderingTimestamp': str(datetime.datetime.now()) + }) +return evaluations +``` + +This is necessary for periodic rules that are not triggered by any CI +change (which means the CI that is passed in will be null), and also for +attaching annotations to your evaluation results. + +If you want to see what the JSON structure of a CI looks like for +creating your logic, you can use + +```bash +rdk sample-ci +``` + +to output a formatted JSON document. + +### Write and Run Unit Tests + +If you are writing Config Rules using either of the Python runtimes +there will be a `_test.py` file deployed along with your +Lambda function skeleton. This can be used to write unit tests according +to the standard Python unittest framework (documented +[here](https://docs.python.org/3/library/unittest.html)), which can be +run using the `test-local` rdk command: + +```bash +rdk test-local MyTestRule +Running local test! 
+Testing MyTestRule +Looking for tests in /Users/mborch/Code/rdk-dev/MyTestRule + +--------------------------------------------------------------------- + +Ran 0 tests in 0.000s + +OK + +``` + +The test file includes setup for the MagicMock library that can be used +to stub boto3 API calls if your rule logic will involve making API calls +to gather additional information about your AWS environment. For some +tips on how to do this, check out this blog post: +[Mock Is Magic](https://sgillies.net/2017/10/19/mock-is-magic.html) + +### Modify Rule + +If you need to change the parameters of a Config rule in your working +directory you can use the `modify` command. Any parameters you specify +will overwrite existing values, any that you do not specify will not be +changed. + +```bash +rdk modify MyRule --runtime python3.10 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' +Running modify! +Modified Rule 'MyRule'. Use the `deploy` command to push your changes to AWS. +``` + +Again, on Windows the input parameters would look like: + +`'{\"desiredInstanceType\":\"t2.micro\"}'` + +It is worth noting that until you actually call the `deploy` command +your rule only exists in your working directory, none of the Rule +commands discussed thus far actually makes changes to your account. + +### Deploy Rule + +Once you have completed your compliance validation code and set your +Rule's configuration, you can deploy the Rule to your account using the +`deploy` command. This will zip up your code (and the other associated +code files, if any) into a deployable package (or run a gradle build if +you have selected the java8 runtime or run the Lambda packaging step +from the dotnet CLI if you have selected the dotnetcore1.0 runtime), +copy that zip file to S3, and then launch or update a CloudFormation +stack that defines your Config Rule, Lambda function, and the necessary +permissions and IAM Roles for it to function. Since CloudFormation does +not deeply inspect Lambda code objects in S3 to construct its changeset, +the `deploy` command will also directly update the Lambda function for +any subsequent deployments to make sure code changes are propagated +correctly. + +```bash +rdk deploy MyRule +Running deploy! +Zipping MyRule +Uploading MyRule +Creating CloudFormation Stack for MyRule +Waiting for CloudFormation stack operation to complete... +... +Waiting for CloudFormation stack operation to complete... +Config deploy complete. +``` + +The exact output will vary depending on Lambda runtime. You can use the +`--all` flag to deploy all of the rules in your working directory. If +you used the `--generate-lambda-layer` flag in rdk init, use the +`--generated-lambda-layer` flag for rdk deploy. + +### Deploy Organization Rule + +You can also deploy the Rule to your AWS Organization using the +`deploy-organization` command. For successful evaluation of custom rules +in child accounts, please make sure you do one of the following: + +1. Set ASSUME_ROLE_MODE in Lambda code to True, to get the Lambda to assume the Role attached on the Config Service and confirm that the role trusts the master account where the Lambda function is going to be deployed. +2. Set ASSUME_ROLE_MODE in Lambda code to True, to get the Lambda to assume a custom role and define an optional parameter with key as ExecutionRoleName and set the value to your custom role name; confirm that the role trusts the master account of the organization where the Lambda function will be deployed. 
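For orientation, the assume-role pattern that the ASSUME_ROLE_MODE switch enables looks roughly like the sketch below. This is a simplified, hypothetical version (the generated rule code uses its own helper names and adds error handling), shown only to make the trust-relationship requirement above concrete:

```python
import boto3

ASSUME_ROLE_MODE = True  # toggled in the generated rule code


def get_client(service, role_arn):
    """Return a boto3 client, assuming role_arn when ASSUME_ROLE_MODE is on."""
    if not ASSUME_ROLE_MODE:
        return boto3.client(service)
    credentials = boto3.client("sts").assume_role(
        RoleArn=role_arn,
        RoleSessionName="ConfigRuleEvaluation",
    )["Credentials"]
    return boto3.client(
        service,
        aws_access_key_id=credentials["AccessKeyId"],
        aws_secret_access_key=credentials["SecretAccessKey"],
        aws_session_token=credentials["SessionToken"],
    )
```

The assumed role must trust the account that hosts the Lambda function, which is why both options above call out the trust relationship.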
+ +```bash +rdk deploy-organization MyRule +Running deploy! +Zipping MyRule +Uploading MyRule +Creating CloudFormation Stack for MyRule +Waiting for CloudFormation stack operation to complete... +... +Waiting for CloudFormation stack operation to complete... +Config deploy complete. +``` + +The exact output will vary depending on Lambda runtime. You can use the +`--all` flag to deploy all of the rules in your working directory. This +command uses `PutOrganizationConfigRule` API for the rule deployment. If +a new account joins an organization, the rule is deployed to that +account. When an account leaves an organization, the rule is removed. +Deployment of existing organizational AWS Config Rules will only be +retried for 7 hours after an account is added to your organization if a +recorder is not available. You are expected to create a recorder if one +doesn't exist within 7 hours of adding an account to your organization. + +### View Logs For Deployed Rule + +Once the Rule has been deployed to AWS you can get the CloudWatch logs +associated with your Lambda function using the `logs` command. + +```bash +rdk logs MyRule -n 5 +2017-11-15 22:59:33 - START RequestId: 96e7639a-ca15-11e7-95a2-b1521890638d Version: $LATEST +2017-11-15 23:41:13 - REPORT RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Duration: 0.50 ms Billed Duration: 100 ms Memory Size: 256 MB Max Memory Used: 36 MB +2017-11-15 23:41:13 - END RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda +2017-11-15 23:41:13 - Default RDK utility class does not yet support Scheduled Notifications. +2017-11-15 23:41:13 - START RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Version: $LATEST +``` + +You can use the `-n` and `-f` command line flags just like the UNIX +`tail` command to view a larger number of log events and to continuously +poll for new events. The latter option can be useful in conjunction with +manually initiating Config Evaluations for your deploy Config Rule to +make sure it is behaving as expected. + +## Running the tests + +The `testing` directory contains scripts and buildspec files that I use +to run basic functionality tests across a variety of CLI environments +(currently Ubuntu Linux running Python 3.7/3.8/3.9/3.10, and Windows Server +running Python 3.10). If there is interest I can release a CloudFormation +template that could be used to build the test environment, let me know +if this is something you want! + +## Advanced Features + +### Cross-Account Deployments + +Features have been added to the RDK to facilitate the cross-account +deployment pattern that enterprise customers have standardized for +custom Config Rules. A cross-account architecture is one in which the +Lambda functions are deployed to a single central "Compliance" account +(which may be the same as a central "Security" account), and the +Config Rules are deployed to any number of "Satellite" accounts that +are used by other teams or departments. This gives the compliance team +confidence that their rule logic cannot be tampered with and makes it +much easier for them to modify rule logic without having to go through a +complex deployment process to potentially hundreds of AWS accounts. 
The +cross-account pattern uses two advanced RDK features: + +- `--functions-only` (`-f`) deployment +- `create-rule-template` command + +#### Functions-Only Deployment + +By using the `-f` or `--functions-only` flag on the `deploy` command the +RDK will deploy only the necessary Lambda Functions, Lambda Execution +Role, and Lambda Permissions to the account specified by the execution +credentials. It accomplishes this by batching up all of the Lambda +function CloudFormation snippets for the selected Rule(s) into a single +dynamically generated template and deploy that CloudFormation template. +One consequence of this is that subsequent deployments that specify a +different set of rules for the same stack name will update that +CloudFormation stack, and any Rules that were included in the first +deployment but not in the second will be removed. You can use the +`--stack-name` parameter to override the default CloudFormation stack +name if you need to manage different subsets of your Lambda Functions +independently. The intended usage is to deploy the functions for all of +the Config rules in the Security/Compliance account, which can be done +simply by using `rdk deploy -f --all` from your working directory. + +#### create-rule-template command + +This command generates a CloudFormation template that defines the AWS +Config rules themselves, along with the Config Role, Config data bucket, +Configuration Recorder, and Delivery channel necessary for the Config +rules to work in a satellite account. You must specify the file name for +the generated template using the `--output-file` or +`-o` command line flags. The generated template takes a +single parameter of the AccountID of the central compliance account that +contains the Lambda functions that will back your custom Config Rules. +The generated template can be deployed in the desired satellite accounts +through any of the means that you can deploy any other CloudFormation +template, including the console, the CLI, as a CodePipeline task, or +using StackSets. The `create-rule-template` command takes all of the +standard arguments for selecting Rules to include in the generated +template, including lists of individual Rule names, an `--all` flag, or +using the RuleSets feature described below. + +```bash +rdk create-rule-template -o remote-rule-template.json --all +Generating CloudFormation template! +CloudFormation template written to remote-rule-template.json +``` + +### Disable the supported resource types check + +It is now possible to define a resource type that is not yet supported +by rdk. To disable the supported resource check use the optional flag +'--skip-supported-resource-check' during the create command. + +```bash +rdk create MyRule --runtime python3.10 --resource-types AWS::New::ResourceType --skip-supported-resource-check +'AWS::New::ResourceType' not found in list of accepted resource types. +Skip-Supported-Resource-Check Flag set (--skip-supported-resource-check), ignoring missing resource type error. +Running create! +Local Rule files created. +``` + +### Custom Lambda Function Name + +As of version 0.7.14, instead of defaulting the lambda function names to +`RDK-Rule-Function-` it is possible to customize the name for +the Lambda function to any 64 characters string as per Lambda's naming +standards using the optional `--custom-lambda-name` flag while +performing `rdk create`. This opens up new features like : + +1. Longer config rule name. +2. Custom lambda function naming as per personal or enterprise standards. 
+
+```bash
+rdk create MyLongerRuleName --runtime python3.10 --resource-types AWS::EC2::Instance --custom-lambda-name custom-prefix-for-MyLongerRuleName
+Running create!
+Local Rule files created.
+```
+
+The above example would create files with config rule name as
+`MyLongerRuleName` and lambda function with the name
+`custom-prefix-for-MyLongerRuleName` instead of
+`RDK-Rule-Function-MyLongerRuleName`
+
+### RuleSets
+
+New as of version 0.3.11, it is possible to add RuleSet tags to rules
+that can be used to deploy and test groups of rules together. Rules can
+belong to multiple RuleSets, and RuleSet membership is stored only in
+the parameters.json metadata. The `deploy`,
+`create-rule-template`, and `test-local`
+commands are RuleSet-aware such that a RuleSet can be passed in as the
+target instead of `--all` or a specific named Rule.
+
+A comma-delimited list of RuleSets can be added to a Rule when you
+create it (using the `--rulesets` flag), as part of a `modify` command,
+or using new `ruleset` subcommands to add or remove individual rules
+from a RuleSet.
+
+Running `rdk rulesets list` will display a list of the RuleSets
+currently defined across all of the Rules in the working directory.
+
+```bash
+rdk rulesets list
+RuleSets: AnotherRuleSet MyNewSet
+```
+
+Naming a specific RuleSet will list all of the Rules that are part of
+that RuleSet.
+
+```bash
+rdk rulesets list AnotherRuleSet
+Rules in AnotherRuleSet : RSTest
+```
+
+Rules can be added to or removed from RuleSets using the `add` and
+`remove` subcommands:
+
+```bash
+rdk rulesets add MyNewSet RSTest
+RSTest added to RuleSet MyNewSet
+
+rdk rulesets remove AnotherRuleSet RSTest
+RSTest removed from RuleSet AnotherRuleSet
+```
+
+RuleSets are a convenient way to maintain a single repository of Config
+Rules that may need to have subsets of them deployed to different
+environments. For example, your development environment may contain some
+of the Rules that you run in Production but not all of them; RuleSets
+gives you a way to identify and selectively deploy the appropriate Rules
+to each environment.
+
+### Managed Rules
+
+The RDK is able to deploy AWS Managed Rules.
+
+To do so, create a rule using `rdk create` and provide a valid
+SourceIdentifier via the `--source-identifier` CLI option. The list of
+Managed Rules can be found
+[here](https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html),
+and note that the Identifier can be obtained by replacing the dashes
+with underscores and using all capitals (for example, the
+"guardduty-enabled-centralized" rule has the SourceIdentifier
+"GUARDDUTY_ENABLED_CENTRALIZED"). Just like custom Rules you will need
+to specify source events and/or a maximum evaluation frequency, and also
+pass in any Rule parameters. The resulting Rule directory will contain
+only the parameters.json file, but `rdk deploy` or
+`rdk create-rule-template` can be used to deploy the Managed Rule like
+any other Custom Rule.
+
+### Deploying Rules Across Multiple Regions
+
+The RDK is able to run init/deploy/undeploy across multiple regions with
+a `rdk -f -t `
+
+If no region group is specified, rdk will deploy to the `default` region
+set.
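Conceptually, a region set is just a named list of regions that a single rdk invocation fans out across. The sketch below is purely illustrative (it is not how rdk implements the fan-out internally, and it assumes PyYAML is installed); it only shows how a `regions.yaml` file, a mapping of set names to region lists, can be read and iterated per region:

```python
import boto3
import yaml  # assumption: PyYAML is available

# Load the region set file and pick one named group of regions.
with open("regions.yaml") as f:
    region_sets = yaml.safe_load(f)

for region in region_sets["default"]:
    # One session per region -- the same idea rdk applies to
    # init/deploy/undeploy when a region set is selected.
    config_client = boto3.Session(region_name=region).client("config")
    rules = config_client.describe_config_rules()["ConfigRules"]
    print(region, [rule["ConfigRuleName"] for rule in rules])
```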
+
+
+To create a sample starter region group, run `rdk create-region-set` to
+specify the filename, add the `-o `; this
+will create a region set with the following tests and regions
+`"default":["us-east-1","us-west-1","eu-north-1","ap-east-1"],"aws-cn-region-set":["cn-north-1","cn-northwest-1"]`
+
+### Using RDK to Generate a Lambda Layer in a region (Python3)
+
+By default, `rdk init --generate-lambda-layer` will generate an rdklib
+lambda layer while running init in whatever region it is run. To force
+re-generation of the layer, run `rdk init --generate-lambda-layer` again
+in that region.
+
+To use this generated lambda layer, add the flag
+`--generated-lambda-layer` when running `rdk deploy`. For example:
+`rdk -f regions.yaml deploy LP3_TestRule_P39_lib --generated-lambda-layer`
+
+If you created the layer with a custom name (by running
+`rdk init --custom-lambda-layer`), add a similar `custom-lambda-layer`
+flag when running deploy.
+
+## Support & Feedback
+
+This project is maintained by AWS Solution Architects and Consultants.
+It is not part of an AWS service and support is provided best-effort by
+the maintainers. To post feedback, submit feature ideas, or report bugs,
+please use the [Issues
+section](https://github.com/awslabs/aws-config-rdk/issues) of this repo.
+
+## Contributing
+
+Email us at rdk-maintainers@amazon.com if you have any questions. We
+are happy to help and discuss.
+
+## Contacts
+
+- **Benjamin Morris** - [bmorrissirromb](https://github.com/bmorrissirromb) - _current maintainer_
+- **Julio Delgado Jr** - [tekdj7](https://github.com/tekdj7) - _current maintainer_
+
+## Past Contributors
+
+- **Michael Borchert** - _Original Python version_
+- **Jonathan Rault** - _Original Design, testing, feedback_
+- **Greg Kim and Chris Gutierrez** - _Initial work and CI definitions_
+- **Henry Huang** - _Original CFN templates and other code_
+- **Santosh Kumar** - _maintainer_
+- **Jose Obando** - _maintainer_
+- **Jarrett Andrulis** - [jarrettandrulis](https://github.com/jarrettandrulis) - _maintainer_
+- **Sandeep Batchu** - [batchus](https://github.com/batchus) - _maintainer_
+- **Mark Beacom** - [mbeacom](https://github.com/mbeacom) - _maintainer_
+- **Ricky Chau** - [rickychau2780](https://github.com/rickychau2780) - _maintainer_
+
+## License
+
+This project is licensed under the Apache 2.0 License
+
+## Acknowledgments
+
+- The boto3 team makes all of this magic possible.
+
+## Link
+
+- To view examples of rules built with the RDK: [https://github.com/awslabs/aws-config-rules/tree/master/python](https://github.com/awslabs/aws-config-rules/tree/master/python)
diff --git a/README.rst b/README.rst
deleted file mode 100644
index 5009d902..00000000
--- a/README.rst
+++ /dev/null
@@ -1,355 +0,0 @@
-rdk
-===
-Rule Development Kit
-
-We greatly appreciate feedback and bug reports at rdk-maintainers@amazon.com! You may also create an issue on this repo.
-
-The RDK is designed to support a "Compliance-as-Code" workflow that is intuitive and productive. It abstracts away much of the undifferentiated heavy lifting associated with deploying AWS Config rules backed by custom lambda functions, and provides a streamlined develop-deploy-monitor iterative process.
-
-For complete documentation, including command reference, check out the `ReadTheDocs documentation `_.
-
-Getting Started
-===============
-Uses python 3.7/3.8/3.9 and is installed via pip. Requires you to have an AWS account and sufficient permissions to manage the Config service, and to create S3 Buckets, Roles, and Lambda Functions.
An AWS IAM Policy Document that describes the minimum necessary permissions can be found at policy/rdk-minimum-permissions.json. - -Under the hood, rdk uses boto3 to make API calls to AWS, so you can set your credentials any way that boto3 recognizes (options 3 through 8 here: http://boto3.readthedocs.io/en/latest/guide/configuration.html) or pass them in with the command-line parameters --profile, --region, --access-key-id, or --secret-access-key - -If you just want to use the RDK, go ahead and install it using pip:: - -$ pip install rdk - -Alternately, if you want to see the code and/or contribute you can clone the git repo, and then from the repo directory use pip to install the package. Use the '-e' flag to generate symlinks so that any edits you make will be reflected when you run the installed package. - -If you are going to author your Lambda functions using Java you will need to have Java 8 and gradle installed. If you are going to author your Lambda functions in C# you will need to have the dotnet CLI and the .NET Core Runtime 1.08 installed. -:: - - $ pip install -e . - -To make sure the rdk is installed correctly, running the package from the command line without any arguments should display help information. - -:: - - $ rdk - usage: rdk [-h] [-p PROFILE] [-k ACCESS_KEY] [-s SECRET_ACCESS_KEY] - [-r REGION] - ... - rdk: error: the following arguments are required: , - - -Usage -===== - -Configure your env ------------------- -To use the RDK, it's recommended to create a directory that will be your working directory. This should be committed to a source code repo, and ideally created as a python virtualenv. In that directory, run the ``init`` command to set up your AWS Config environment. - -:: - - $ rdk init - Running init! - Creating Config bucket config-bucket-780784666283 - Creating IAM role config-role - Waiting for IAM role to propagate - Config Service is ON - Config setup complete. - Creating Code bucket config-rule-code-bucket-780784666283ap-southeast-1 - -Running ``init`` subsequent times will validate your AWS Config setup and re-create any S3 buckets or IAM resources that are needed. - -- If you have config delivery bucket already present in some other AWS account then use **--config-bucket-exists-in-another-account** as argument::: - - $ rdk init --config-bucket-exists-in-another-account -- If you have AWS Organizations/ControlTower Setup in your AWS environment then additionally, use **--control-tower** as argument::: - - $ rdk init --control-tower --config-bucket-exists-in-another-account -- If bucket for custom lambda code is already present in current account then use **--skip-code-bucket-creation** argument::: - - $ rdk init --skip-code-bucket-creation - -- If you want rdk to create/update and upload the rdklib-layer for you, then use **--generate-lambda-layer** argument. In supported regions, rdk will deploy the layer using the Serverless Application Repository, otherwise it will build a local lambda layer archive and upload it for use::: - - $ rdk init --generate-lambda-layer -- If you want rdk to give a custom name to the lambda layer for you, then use **--custom-layer-namer** argument. The Serverless Application Repository currently cannot be used for custom lambda layers.::: - - $ rdk init --generate-lambda-layer --custom-layer-name - -Create Rules ------------- -In your working directory, use the ``create`` command to start creating a new custom rule. 
You must specify the runtime for the lambda function that will back the Rule, and you can also specify a resource type (or comma-separated list of types) that the Rule will evaluate or a maximum frequency for a periodic rule. This will add a new directory for the rule and populate it with several files, including a skeleton of your Lambda code. - -:: - - $ rdk create MyRule --runtime python3.8 --resource-types AWS::EC2::Instance --input-parameters '{"desiredInstanceType":"t2.micro"}' - Running create! - Local Rule files created. - -On Windows it is necessary to escape the double-quotes when specifying input parameters, so the `--input-parameters` argument would instead look something like this:: - - '{\"desiredInstanceType\":\"t2.micro\"}' - -Note that you can create rules that use EITHER resource-types OR maximum-frequency, but not both. We have found that rules that try to be both event-triggered as well as periodic wind up being very complicated and so we do not recommend it as a best practice. - -Edit Rules Locally ---------------------------- -Once you have created the rule, edit the python file in your rule directory (in the above example it would be ``MyRule/MyRule.py``, but may be deeper into the rule directory tree depending on your chosen Lambda runtime) to add whatever logic your Rule requires in the ``evaluate_compliance`` function. You will have access to the CI that was sent by Config, as well as any parameters configured for the Config Rule. Your function should return either a simple compliance status (one of ``COMPLIANT``, ``NONCOMPLIANT``, or ``NOT_APPLICABLE``), or if you're using the python or node runtimes you can return a JSON object with multiple evaluation responses that the RDK will send back to AWS Config. An example would look like:: - - for sg in response['SecurityGroups']: - evaluations.append( - { - 'ComplianceResourceType': 'AWS::EC2::SecurityGroup', - 'ComplianceResourceId': sg['GroupId'], - 'ComplianceType': 'COMPLIANT', - 'Annotation': 'This is an important note.', - 'OrderingTimestamp': str(datetime.datetime.now()) - }) - - - return evaluations - -This is necessary for periodic rules that are not triggered by any CI change (which means the CI that is passed in will be null), and also for attaching annotations to your evaluation results. - -If you want to see what the JSON structure of a CI looks like for creating your logic, you can use - -:: - -$ rdk sample-ci - -to output a formatted JSON document. - -Write and Run Unit Tests ------------------------- -If you are writing Config Rules using either of the Python runtimes there will be a _test.py file deployed along with your Lambda function skeleton. This can be used to write unit tests according to the standard Python unittest framework (documented here: https://docs.python.org/3/library/unittest.html), which can be run using the `test-local` rdk command:: - - $ rdk test-local MyTestRule - Running local test! - Testing MyTestRule - Looking for tests in /Users/mborch/Code/rdk-dev/MyTestRule - - --------------------------------------------------------------------- - - Ran 0 tests in 0.000s - - OK - - -The test file includes setup for the MagicMock library that can be used to stub boto3 API calls if your rule logic will involve making API calls to gather additional information about your AWS environment. 
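As a rough, self-contained illustration of that stubbing pattern (the class and client names here are hypothetical, and the generated _test.py file wires things up differently), a unit test might substitute a canned boto3 response like this::

    # Hedged sketch: a MagicMock stands in for a boto3 EC2 client so the
    # test never calls AWS.
    import unittest
    from unittest.mock import MagicMock

    class TestSecurityGroupRule(unittest.TestCase):
        def test_stubbed_ec2_client(self):
            ec2 = MagicMock()
            ec2.describe_security_groups.return_value = {
                'SecurityGroups': [{'GroupId': 'sg-12345678'}]
            }
            # Rule code that is handed this client sees the canned response.
            response = ec2.describe_security_groups()
            self.assertEqual(response['SecurityGroups'][0]['GroupId'], 'sg-12345678')

    if __name__ == '__main__':
        unittest.main()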
For some tips on how to do this, check out this blog post: https://sgillies.net/2017/10/19/mock-is-magic.html - -Modify Rule ------------ -If you need to change the parameters of a Config rule in your working directory you can use the ``modify`` command. Any parameters you specify will overwrite existing values, any that you do not specify will not be changed. - -:: - - $ rdk modify MyRule --runtime python3.9 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' - Running modify! - Modified Rule 'MyRule'. Use the `deploy` command to push your changes to AWS. - -Again, on Windows the input parameters would look like:: - - '{\"desiredInstanceType\":\"t2.micro\"}' - -It is worth noting that until you actually call the ``deploy`` command your rule only exists in your working directory, none of the Rule commands discussed thus far actually makes changes to your account. - -Deploy Rule ------------ -Once you have completed your compliance validation code and set your Rule's configuration, you can deploy the Rule to your account using the ``deploy`` command. This will zip up your code (and the other associated code files, if any) into a deployable package (or run a gradle build if you have selected the java8 runtime or run the lambda packaging step from the dotnet CLI if you have selected the dotnetcore1.0 runtime), copy that zip file to S3, and then launch or update a CloudFormation stack that defines your Config Rule, Lambda function, and the necessary permissions and IAM Roles for it to function. Since CloudFormation does not deeply inspect Lambda code objects in S3 to construct its changeset, the ``deploy`` command will also directly update the Lambda function for any subsequent deployments to make sure code changes are propagated correctly. - -:: - - $ rdk deploy MyRule - Running deploy! - Zipping MyRule - Uploading MyRule - Creating CloudFormation Stack for MyRule - Waiting for CloudFormation stack operation to complete... - ... - Waiting for CloudFormation stack operation to complete... - Config deploy complete. - -The exact output will vary depending on Lambda runtime. You can use the --all flag to deploy all of the rules in your working directory. If you used the --generate-lambda-layer flag in rdk init, use the --generated-lambda-layer flag for rdk deploy. - -Deploy Organization Rule ------------------------- -You can also deploy the Rule to your AWS Organization using the ``deploy-organization`` command. -For successful evaluation of custom rules in child accounts, please make sure you do one of the following: - -1. Set ASSUME_ROLE_MODE in Lambda code to True, to get the lambda to assume the Role attached on the Config Service and confirm that the role trusts the master account where the Lambda function is going to be deployed. -2. Set ASSUME_ROLE_MODE in Lambda code to True, to get the lambda to assume a custom role and define an optional parameter with key as ExecutionRoleName and set the value to your custom role name; confirm that the role trusts the master account of the organization where the Lambda function will be deployed. - -:: - - $ rdk deploy-organization MyRule - Running deploy! - Zipping MyRule - Uploading MyRule - Creating CloudFormation Stack for MyRule - Waiting for CloudFormation stack operation to complete... - ... - Waiting for CloudFormation stack operation to complete... - Config deploy complete. - -The exact output will vary depending on Lambda runtime. 
You can use the --all flag to deploy all of the rules in your working directory. -This command uses 'PutOrganizationConfigRule' API for the rule deployment. If a new account joins an organization, the rule is deployed to that account. When an account leaves an organization, the rule is removed. Deployment of existing organizational AWS Config Rules will only be retried for 7 hours after an account is added to your organization if a recorder is not available. You are expected to create a recorder if one doesn't exist within 7 hours of adding an account to your organization. - -View Logs For Deployed Rule ---------------------------- -Once the Rule has been deployed to AWS you can get the CloudWatch logs associated with your lambda function using the ``logs`` command. - -:: - - $ rdk logs MyRule -n 5 - 2017-11-15 22:59:33 - START RequestId: 96e7639a-ca15-11e7-95a2-b1521890638d Version: $LATEST - 2017-11-15 23:41:13 - REPORT RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Duration: 0.50 ms Billed Duration: 100 ms Memory Size: 256 MB - Max Memory Used: 36 MB - 2017-11-15 23:41:13 - END RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda - 2017-11-15 23:41:13 - Default RDK utility class does not yet support Scheduled Notifications. - 2017-11-15 23:41:13 - START RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Version: $LATEST - -You can use the ``-n`` and ``-f`` command line flags just like the UNIX ``tail`` command to view a larger number of log events and to continuously poll for new events. The latter option can be useful in conjunction with manually initiating Config Evaluations for your deploy Config Rule to make sure it is behaving as expected. - - - -Running the tests -================= - -The `testing` directory contains scripts and buildspec files that I use to run basic functionality tests across a variety of CLI environments (currently Ubuntu linux running python 3.7/3.8/3.9, and Windows Server running python3.9). If there is interest I can release a CloudFormation template that could be used to build the test environment, let me know if this is something you want! - - -Advanced Features -================= -Cross-Account Deployments -------------------------- -Features have been added to the RDK to facilitate the cross-account deployment pattern that enterprise customers have standardized on for custom Config Rules. A cross-account architecture is one in which the Lambda functions are deployed to a single central "Compliance" account (which may be the same as a central "Security" account), and the Config Rules are deployed to any number of "Satellite" accounts that are used by other teams or departments. This gives the compliance team confidence that their Rule logic cannot be tampered with and makes it much easier for them to modify rule logic without having to go through a complex deployment process to potentially hundreds of AWS accounts. The cross-account pattern uses two advanced RDK features - functions-only deployments and the `create-rule-template` command. - -**Function-Only Deployment** - -By using the `-f` or `--functions-only` flag on the `deploy` command the RDK will deploy only the necessary Lambda Functions, Lambda Execution Role, and Lambda Permissions to the account specified by the execution credentials. It accomplishes this by batching up all of the Lambda function CloudFormation snippets for the selected Rule(s) into a single dynamically generated template and deploy that CloudFormation template. 
One consequence of this is that subsequent deployments that specify a different set of Rules for the same stack name will update that CloudFormation stack, and any Rules that were included in the first deployment but not in the second will be removed. You can use the `--stack-name` parameter to override the default CloudFormation stack name if you need to manage different subsets of your Lambda Functions independently. The intended usage is to deploy the functions for all of the Config rules in the Security/Compliance account, which can be done simply by using `rdk deploy -f --all` from your working directory. - -**`create-rule-template` command** - -This command generates a CloudFormation template that defines the AWS Config rules themselves, along with the Config Role, Config data bucket, Configuration Recorder, and Delivery channel necessary for the Config rules to work in a satellite account. You must specify the file name for the generated template using the `--output-file` or `o` command line flags. The generated template takes a single parameter of the AccountID of the central compliance account that contains the Lambda functions that will back your custom Config Rules. The generated template can be deployed in the desired satellite accounts through any of the means that you can deploy any other CloudFormation template, including the console, the CLI, as a CodePipeline task, or using StackSets. The `create-rule-template` command takes all of the standard arguments for selecting Rules to include in the generated template, including lists of individual Rule names, an `--all` flag, or using the RuleSets feature described below. - -:: - - $ rdk create-rule-template -o remote-rule-template.json --all - Generating CloudFormation template! - CloudFormation template written to remote-rule-template.json - - -Disable the supported resource types check ------------------------------------------- -It is now possible to define a resource type that is not yet supported by rdk. To disable the supported resource check use the optional flag '--skip-supported-resource-check' during the create command. - -:: - - $ rdk create MyRule --runtime python3.8 --resource-types AWS::New::ResourceType --skip-supported-resource-check - 'AWS::New::ResourceType' not found in list of accepted resource types. - Skip-Supported-Resource-Check Flag set (--skip-supported-resource-check), ignoring missing resource type error. - Running create! - Local Rule files created. - -Custom Lambda Function Name ---------------------------- -As of version 0.7.14, instead of defaulting the lambda function names to 'RDK-Rule-Function-' it is possible to customize the name for the Lambda function to any 64 characters string as per Lambda's naming standards using the optional '--custom-lambda-name' flag while performing rdk create. This opens up new features like : - -1. Longer config rule name. -2. Custom lambda function naming as per personal or enterprise standards. - -:: - - $ rdk create MyLongerRuleName --runtime python3.8 --resource-types AWS::EC2::Instance --custom-lambda-name custom-prefix-for-MyLongerRuleName - Running create! - Local Rule files created. - -The above example would create files with config rule name as 'MyLongerRuleName' and lambda function with the name 'custom-prefix-for-MyLongerRuleName' instead of 'RDK-Rule-Function-MyLongerRuleName' - -RuleSets --------- -New as of version 0.3.11, it is possible to add RuleSet tags to rules that can be used to deploy and test groups of rules together. 
Rules can belong to multiple RuleSets, and RuleSet membership is stored only in the parameters.json metadata. The `deploy`, `create-rule-template`, and `test-local` commands are RuleSet-aware such that a RuleSet can be passed in as the target instead of `--all` or a specific named Rule. - -A comma-delimited list of RuleSets can be added to a Rule when you create it (using the `--rulesets` flag), as part of a `modify` command, or using new `ruleset` subcommands to add or remove individual rules from a RuleSet. - -Running `rdk rulesets list` will display a list of the RuleSets currently defined across all of the Rules in the working directory - -:: - - rdk-dev $ rdk rulesets list - RuleSets: AnotherRuleSet MyNewSet - -Naming a specific RuleSet will list all of the Rules that are part of that RuleSet. - -:: - - rdk-dev $ rdk rulesets list AnotherRuleSet - Rules in AnotherRuleSet : RSTest - -Rules can be added to or removed from RuleSets using the `add` and `remove` subcommands: - -:: - - rdk-dev $ rdk rulesets add MyNewSet RSTest - RSTest added to RuleSet MyNewSet - - rdk-dev $ rdk rulesets remove AnotherRuleSet RSTest - RSTest removed from RuleSet AnotherRuleSet - -RuleSets are a convenient way to maintain a single repository of Config Rules that may need to have subsets of them deployed to different environments. For example your development environment may contain some of the Rules that you run in Production but not all of them; RuleSets gives you a way to identify and selectively deploy the appropriate Rules to each environment. - -Managed Rules -------------- -The RDK is able to deploy AWS Managed Rules. - -To do so, create a rule using "rdk create" and provide a valid SourceIdentifier via the --source-identifier CLI option. The list of Managed Rules can be found here: https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html, and note that the Identifier can be obtained by replacing the dashes with underscores and using all capitals (for example, the "guardduty-enabled-centralized" rule has the SourceIdentifier "GUARDDUTY_ENABLED_CENTRALIZED"). Just like custom Rules you will need to specify source events and/or a maximum evaluation frequency, and also pass in any Rule parameters. The resulting Rule directory will contain only the parameters.json file, but using `rdk deploy` or `rdk create-rule-template` can be used to deploy the Managed Rule like any other Custom Rule. - -Deploying Rules Across Multiple Regions ---------------------------------------- -The RDK is able to run init/deploy/undeploy across multiple regions with a `rdk -f -t ` - -If no region group is specified, rdk will deploy to the `default` region set - -To create a sample starter region group, run `rdk create-region-set` to specify the filename, add the `-o ` this will create a region set with the following tests and regions `"default":["us-east-1","us-west-1","eu-north-1","ap-east-1"],"aws-cn-region-set":["cn-north-1","cn-northwest-1"]` - -Using RDK to Generate a Lambda Layer in a region (Python3) ----------------------------------------------------------- -By default `rdk init --generate-lambda-layer` will generate an rdklib lambda layer while running init in whatever region it is run, to force re-generation of the layer, run `rdk init --generate-lambda-layer` again over a region - -To use this generated lambda layer, add the flag `--generated-lambda-layer` when running `rdk deploy`. 
For example: `rdk -f regions.yaml deploy LP3_TestRule_P39_lib --generated-lambda-layer` - -If you created layer with a custom name (by running `rdk init --custom-lambda-layer`, add a similar `custom-lambda-layer` flag when running deploy. - -Contributing -============ - -email us at rdk-maintainers@amazon.com if you have any questions. We are happy to help and discuss. - -Contacts -======== -* **Ricky Chau** - `rickychau2780 `_ - *current maintainer* -* **Jarrett Andrulis** - `jarrettandrulis `_ - *current maintainer* -* **Julio Delgado Jr** - `tekdj7 `_ - *current maintainer* -* **Sandeep Batchu** - `batchus `_ - *current maintainer* - -Past Contributors -================= -* **Michael Borchert** - *Orignal Python version* -* **Jonathan Rault** - *Orignal Design, testing, feedback* -* **Greg Kim and Chris Gutierrez** - *Initial work and CI definitions* -* **Henry Huang** - *Original CFN templates and other code* -* **Santosh Kumar** - *maintainer* -* **Jose Obando** - *maintainer* - -License -======= - -This project is licensed under the Apache 2.0 License - -Acknowledgments -=============== - -* the boto3 team makes all of this magic possible. - - -Link -==== - -* to view example of rules built with the RDK: https://github.com/awslabs/aws-config-rules/tree/master/python diff --git a/developer_notes.md b/developer_notes.md new file mode 100644 index 00000000..4702f5e6 --- /dev/null +++ b/developer_notes.md @@ -0,0 +1,34 @@ +# Developer Notes + +These notes are intended to help RDK developers update the repository consistently. + +## New Runtime Support Process + +These instructions document the parts of the repository that need to be updated when support for a new Lambda runtime is added. + +### Update pyproject.toml + +- Add to `classifiers` list: + +```yaml +"Programming Language :: Python :: ," +``` + +- Add to `include` list: + +```yaml +"rdk/template/runtime/python/*", +"rdk/template/runtime/python-lib/*", +``` + +### Update README.md + +- Update documentation and examples + +### Update rdk.py + +- Update references to include new version + +### Update Linux and Windows Buildspec files (`testing` folder) + +- Add new test cases for the new version diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 404b1e58..00000000 --- a/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -SPHINXPROJ = RDK -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
-%: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/docs/_static/argparse.css b/docs/_static/argparse.css
deleted file mode 100644
index 0c679a52..00000000
--- a/docs/_static/argparse.css
+++ /dev/null
@@ -1,13 +0,0 @@
-/* override table width restrictions */
-/*@media screen and (min-width: 767px) {
-  .wy-table-responsive table td {
-    white-space: normal !important;
-  }
-  .wy-table-responsive {
-    overflow: visible !important;
-  }
-}
-
-.highlight {
-    overflow: visible !important
-}*/
diff --git a/docs/commands/clean.md b/docs/commands/clean.md
new file mode 100644
index 00000000..beae816b
--- /dev/null
+++ b/docs/commands/clean.md
@@ -0,0 +1,7 @@
+# Clean
+
+The `clean` command is the inverse of the `init` command, and can be used to completely remove Config resources from an account, including the Configuration Recorder, Delivery Channel, S3 buckets, Roles, and Permissions. This is useful for testing account provisioning automation and for running automated tests in a clean environment.
diff --git a/docs/commands/create-rule-template.md b/docs/commands/create-rule-template.md
new file mode 100644
index 00000000..4a119a5c
--- /dev/null
+++ b/docs/commands/create-rule-template.md
@@ -0,0 +1,28 @@
+# Create-Rule-Template
+
+Generates and saves to a file a single CloudFormation template that can be used to deploy the specified Rule(s) into any account. This feature has two primary uses:
+
+- Multi-account Config setups in which the Lambda Functions for custom rules are deployed into a centralized "security" or "compliance" account and the Config Rules themselves are deployed into "application" or "satellite" accounts.
+- Combining many Config Rules into a single CloudFormation template for easier atomic deployment and management.
+
+The generated CloudFormation template includes a Parameter for the AccountID that contains the Lambda functions that provide the compliance logic for the Rules, and also exposes all of the Config Rule input parameters as CloudFormation stack parameters.
+
+By default the generated CloudFormation template will set up Config as per the settings used by the RDK `init` command, but those resources can be omitted using the `--rules-only` flag.
+
+The `--config-role-arn` flag can be used to assign an existing Config role to the created Configuration Recorder. The `-t | --tag-config-rules-script ` flag can be used to output a script that creates tags for each Config rule.
+
+As of version 0.6, RDK supports Config remediation. Note that in order to use SSM documents for remediation you must supply all of the necessary document parameters. These can be found in the SSM document listing on the AWS console, but RDK will *not* validate at rule creation that you have supplied all of the necessary parameters.
diff --git a/docs/commands/create.md b/docs/commands/create.md
new file mode 100644
index 00000000..a9a205fc
--- /dev/null
+++ b/docs/commands/create.md
@@ -0,0 +1,7 @@
+# Create
+
+As of version 0.6, RDK supports Config remediation. Note that in order to use SSM documents for remediation you must supply all of the necessary document parameters. These can be found in the SSM document listing on the AWS console, but RDK will *not* validate at rule creation that you have supplied all of the necessary parameters.
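+As a purely illustrative sketch (the remediation flag names and the parameter structure below are assumptions rather than verified options -- confirm the exact flags with `rdk create --help`), creating a rule that remediates with an SSM Automation document might look something like this:
+
+    rdk create MyRemediationRule --runtime python3.10 --resource-types AWS::S3::Bucket \
+        --remediation-action AWS-DisableS3BucketPublicReadWrite \
+        --remediation-parameters '{"S3BucketName":{"ResourceValue":{"Value":"RESOURCE_ID"}},"AutomationAssumeRole":{"StaticValue":{"Values":["arn:aws:iam::111122223333:role/S3RemediationRole"]}}}'
+
+Every parameter required by the chosen SSM document must be supplied, since RDK does not validate the parameter list at rule creation.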
diff --git a/docs/commands/deploy.md b/docs/commands/deploy.md
new file mode 100644
index 00000000..43c9f8f9
--- /dev/null
+++ b/docs/commands/deploy.md
@@ -0,0 +1,47 @@
+# Deploy
+
+This command will deploy the specified Rule(s) to the Account and Region determined by the credentials being used to execute the command and the value of the AWS_DEFAULT_REGION environment variable, unless those credentials or region are overridden using the common flags.
+
+Once deployed, RDK will **not** explicitly start a Rule evaluation. Depending on the changes being made to your Config Rule setup, AWS Config may re-evaluate the deployed Rules automatically, or you can run an evaluation using the AWS configservice CLI.
+
+The `--functions-only` flag can be used as part of a multi-account deployment strategy to push **only** the Lambda functions (and the necessary Roles and Permissions) to the target account. This is intended to be used in conjunction with the `create-rule-template` command in order to separate the compliance logic from the evaluated accounts. For an example of how this looks in practice, check out the [AWS Compliance-as-Code Engine](https://github.com/awslabs/aws-config-engine-for-compliance-as-code/). The `--rdklib-layer-arn` flag can be used to attach a Lambda Layer ARN that contains the desired rdklib; note that Lambda Layers are region-specific. The `--lambda-role-arn` flag can be used to assign an existing IAM role to all Lambda functions created for Custom Config Rules. The `--lambda-layers` flag can be used to attach a comma-separated list of Lambda Layer ARNs to deploy with your Lambda function(s). The `--lambda-subnets` flag can be used to attach a comma-separated list of Subnets to deploy your Lambda function(s). The `--lambda-security-groups` flag can be used to attach a comma-separated list of Security Groups to deploy with your Lambda function(s). The `--custom-code-bucket` flag can be used to provide the name of a custom code S3 bucket (one not created by `rdk init`) to use for generated CloudFormation template storage. The `--boundary-policy-arn` flag can be used to attach a boundary Policy ARN that will be added to rdkLambdaRole. The `--lambda-timeout` flag can be used to specify the timeout associated with the Lambda function.
+
+Note: Behind the scenes the `--functions-only` flag generates a CloudFormation template and runs a "create" or "update" on the targeted AWS Account and Region. If subsequent calls to `deploy` with the `--functions-only` flag are made with the same stack name (either the default or otherwise) but with *different Config rules targeted*, any Rules deployed in previous `deploy`s but not included in the latest `deploy` will be removed. After a functions-only `deploy`, **only** the Rules specifically targeted by that command (either through Rulesets or an explicit list supplied on the command line) will be deployed in the environment; all others will be removed.
diff --git a/docs/commands/export.md b/docs/commands/export.md
new file mode 100644
index 00000000..2165b48e
--- /dev/null
+++ b/docs/commands/export.md
@@ -0,0 +1,19 @@
+# Export
+
+This command will export the specified Rule(s) to a Terraform file; it supports Terraform versions 0.11 and 0.12.
+
+The `--format` flag can be used to specify the export format (currently only `terraform` is supported). The `--version` flag can be used to specify the Terraform version.
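+For example, a minimal export of a single rule to Terraform 0.12 might look like this (the rule name is a placeholder):
+
+    rdk export MyRule --format terraform --version 0.12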
+The `--rdklib-layer-arn` flag can be used to attach a Lambda Layer ARN that contains the desired rdklib; note that Lambda Layers are region-specific. The `--lambda-role-arn` flag can be used to assign an existing IAM role to all Lambda functions created for Custom Config Rules. The `--lambda-layers` flag can be used to attach a comma-separated list of Lambda Layer ARNs to deploy with your Lambda function(s). The `--lambda-subnets` flag can be used to attach a comma-separated list of Subnets to deploy your Lambda function(s). The `--lambda-security-groups` flag can be used to attach a comma-separated list of Security Groups to deploy with your Lambda function(s). The `--lambda-timeout` flag can be used to specify the timeout associated with the Lambda function.
diff --git a/docs/commands/init.md b/docs/commands/init.md
new file mode 100644
index 00000000..4292ebab
--- /dev/null
+++ b/docs/commands/init.md
@@ -0,0 +1,27 @@
+# Init
+
+Sets up the AWS Config Service in an AWS Account. This includes:
+
+- Config Configuration Recorder
+- Config Delivery Channel
+- IAM Role for Delivery Channel
+- S3 Bucket for Configuration Snapshots
+- S3 Bucket for Lambda Code
+
+Additionally, `init` will make sure that the Configuration Recorder is on and functioning, that the Delivery Channel has the appropriate Role attached, and that the Delivery Channel Role has the proper permissions.
+
+Note: Even without Config Rules running, the Configuration Recorder is still capturing Configuration Item snapshots and storing them in S3, so running `init` will incur AWS charges!
+
+Also Note: AWS Config is a regional service, so running `init` will only set up Config in the region currently specified in your AWS_DEFAULT_REGION environment variable or in the `--region` flag.
+
+Advanced Options:
+
+- `--config-bucket-exists-in-another-account`: [optional] If the bucket used by a Config Delivery Channel exists in another account, it is possible to skip the check that the bucket exists. This is useful when using `init` to initialize AWS Config in an account that already has a delivery channel set up with a central bucket. Currently, the RDK lists all of the buckets in the account you are running `init` from to check whether the provided bucket name exists, and creates it if it does not. This presents an issue when a Config Delivery Channel has been configured to push configuration recordings to a central bucket: the bucket will never be found because it does not exist in the same account, but it cannot be created because bucket names have to be globally unique.
+- `--skip-code-bucket-creation`: [optional] If you want to use a custom code bucket for RDK, enable this and pass the `--custom-code-bucket` flag to `rdk deploy`.
+- `--control-tower`: [optional] If your account is part of an AWS Control Tower setup, `--control-tower` will skip the setup of the configuration_recorder and delivery_channel.
diff --git a/docs/commands/logs.md b/docs/commands/logs.md
new file mode 100644
index 00000000..6c8e2033
--- /dev/null
+++ b/docs/commands/logs.md
@@ -0,0 +1,14 @@
+# Logs
+
+The `logs` command provides a shortcut for accessing the CloudWatch Logs output from the Lambda Functions that back your custom Config Rules. Logs are displayed in chronological order going back the number of log entries specified by the `--number` flag (default 3).
It supports a +`--follow` flag similar to the UNIX command `tail` so that you can +choose to continually poll CloudWatch to deliver new log items as they +are delivered by your Lambda function. + +In addition to any output that your function emits via `print()` or +`console.log()` commands, Lambda will also record log lines for the +start and stop of each Lambda invocation, including the runtime and +memory usage. diff --git a/docs/commands/modify.md b/docs/commands/modify.md new file mode 100644 index 00000000..98b0a528 --- /dev/null +++ b/docs/commands/modify.md @@ -0,0 +1,7 @@ +# Modify + +Used to modify the local metadata for Config Rules created by the RDK. +This command takes the same arguments as the `create` command (all of +them optional), and overwrites the Rule metadata for any flag specified. +Changes made using `modify` are not automatically pushed out to your AWS +Account, and must be deployed as usual using the `deploy` command. diff --git a/docs/commands/rulesets.md b/docs/commands/rulesets.md new file mode 100644 index 00000000..c4fc6743 --- /dev/null +++ b/docs/commands/rulesets.md @@ -0,0 +1,27 @@ +# Rulesets + +Rulesets provide a mechanism to tag individual Config Rules into groups +that can be acted on as a unit. Ruleset tags are single keywords, and +the commands `deploy`, `create-rule-template`, and `undeploy` can all +expand Ruleset parameters and operate on the resulting list of Rules. + +The most common use-case for Rulesets is to define standardized Account +metadata or data classifications, and then tag individual Rules to all +of the appropriate metadata tags or classification levels. + +Example: If you have Account classifications of \"Public\", \"Private\", +and \"Restricted\" you can tag all of your Rules as \"Restricted\", and +a subset of them that deal with private network security as \"Private\". +Then when you need to deploy controls to a new \"Private\" account you +can simply use `rdk create-rule-template --rulesets Private` to generate +a CloudFormation template that includes all of the Rules necessary for +your \"Private\" classification, but omit the Rules that are only +necessary for \"Restricted\" accounts. Additionally, as your compliance +requirements change and you add Config Rules you can tag them as +appropriate, re-generate your CloudFormation templates, and re-deploy to +make sure your Accounts are all up-to-date. + +You may also choose to classify accounts using binary attributes +(\"Prod\" vs. \"Non-Prod\" or \"PCI\" vs. \"Non-PCI\"), and then +generate account-specific CloudFormation templates using the Account +metadata to ensure that the appropriate controls are deployed. diff --git a/docs/commands/sample-ci.md b/docs/commands/sample-ci.md new file mode 100644 index 00000000..eaf93d81 --- /dev/null +++ b/docs/commands/sample-ci.md @@ -0,0 +1,17 @@ +# Sample-CI + +This utility command outputs a sample Configuration Item for the +specified resource type. This can be useful when writing new custom +Config Rules to help developers know what the CI structure and plausible +values for the resource type are. + +Note that you can construct Config Evaluations for any resource type +that is supported by CloudFormation, however you can not create +change-triggered Config Rules for resource types not explicitly +supported by Config, and some of the console functionality in AWS Config +may be limited. 
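+For example (the resource type here is chosen purely for illustration):
+
+    rdk sample-ci AWS::EC2::SecurityGroup
+
+This prints a representative Configuration Item as formatted JSON, which you can use as a reference while writing your `evaluate_compliance` logic.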
+ +[CFN-supported +resources](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-supported-resources.html) +[Config-supported +resources](https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html) diff --git a/docs/commands/test-local.md b/docs/commands/test-local.md new file mode 100644 index 00000000..6ece0657 --- /dev/null +++ b/docs/commands/test-local.md @@ -0,0 +1,9 @@ +# Test-Local + +Shorthand command for running the unit tests defined for Config Rules +that use a Python runtime. When a Python 3.7+ Rule is created using the +`create` command a unit test template is created in the Rule directory. +This test boilerplate includes minimal tests, as well as a framework for +using the `unittest.mock` library for stubbing out Boto3 calls. This +allows more sophisticated test cases to be written for Periodic rules +that need to make API calls to gather information about the environment. diff --git a/docs/commands/undeploy.md b/docs/commands/undeploy.md new file mode 100644 index 00000000..83922f4e --- /dev/null +++ b/docs/commands/undeploy.md @@ -0,0 +1,10 @@ +# Undeploy + +The inverse of `deploy`, this command is used to remove a Config Rule +and its Lambda Function from the targeted account. + +This is intended to be used primarily for clean-up for testing +deployment automation (perhaps from a CI/CD pipeline) to ensure that it +works from an empty account, or to clean up a test account during +development. See also the [clean](./clean.html) command if you want to +more thoroughly scrub Config from your account. diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index 7f9c9400..00000000 --- a/docs/conf.py +++ /dev/null @@ -1,181 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Configuration file for the Sphinx documentation builder. -# -# This file does only contain a selection of the most common options. For a -# full list see the documentation: -# http://www.sphinx-doc.org/en/master/config - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys - -sys.path.insert(0, os.path.abspath(".")) -sys.path.insert(0, os.path.abspath("../")) -sys.path.insert(0, os.path.abspath("../rdk/")) - - -# -- Project information ----------------------------------------------------- - -project = "RDK" -copyright = "2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved" -author = "RDK Maintainers" - -# The short X.Y version -version = "" -# The full version, including alpha/beta/rc tags -release = "1.0" - -on_rtd = os.environ.get("READTHEDOCS", None) == "True" - -if not on_rtd: # only import and set the theme if we're building docs locally, tested with sphinx-rtd-theme==0.4.3 - import sphinx_rtd_theme - - html_theme = "sphinx_rtd_theme" - html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - -# -- General configuration --------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. 
-extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.intersphinx", - "sphinx.ext.todo", - "sphinx.ext.viewcode", - "sphinx.ext.githubpages", - "sphinxarg.ext", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = ".rst" - -# The master toctree document. -master_doc = "index" - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path . -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -# html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -html_context = { - "css_files": [ - "_static/theme_overrides.css", # override wide tables in RTD theme - ], -} - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. -# -# The default sidebars (for documents that don't match any pattern) are -# defined by theme itself. Builtin themes are using these templates by -# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', -# 'searchbox.html']``. -# -# html_sidebars = {} - - -# -- Options for HTMLHelp output --------------------------------------------- - -# Output file base name for HTML help builder. -htmlhelp_basename = "RDKdoc" - - -# -- Options for LaTeX output ------------------------------------------------ - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, "RDK.tex", "RDK Documentation", "RDK Maintainers", "manual"), -] - - -# -- Options for manual page output ------------------------------------------ - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "rdk", "RDK Documentation", [author], 1)] - - -# -- Options for Texinfo output ---------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, "RDK", "RDK Documentation", author, "RDK", "One line description of project.", "Miscellaneous"), -] - - -# -- Extension configuration ------------------------------------------------- - -# -- Options for intersphinx extension --------------------------------------- - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {"https://docs.python.org/": None} - -# -- Options for todo extension ---------------------------------------------- - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True diff --git a/docs/getting_started.rst b/docs/getting_started.rst deleted file mode 100644 index 4ae573cd..00000000 --- a/docs/getting_started.rst +++ /dev/null @@ -1,262 +0,0 @@ -Getting Started -=============== - -Let's get started using the RDK! - -Prerequisites -------------- - -RDK uses python 3.7+. You will need to have an AWS account and sufficient permissions to manage the Config service, and to create and manage S3 Buckets, Roles, and Lambda Functions. An AWS IAM Policy Document that describes the minimum necessary permissions can be found `here `_ on github. - -Under the hood, rdk uses boto3 to make API calls to AWS, so you can set your credentials any way that boto3 recognizes (options 3 through 8 in the `boto docs here `_ ) or pass them in with the command-line parameters --profile, --region, --access-key-id, or --secret-access-key - -.. _permissions: http://www.python.org/ - -Installation ------------- - -If you just want to use the RDK, go ahead and install it using pip:: - -$ pip install rdk - -Alternately, if you want to see the code and/or contribute you can clone the `git repo `_ , and then from the repo directory use pip to install the package. Use the '-e' flag to generate symlinks so that any edits you make will be reflected when you run the installed package. - -If you are going to author your Lambda functions using Java you will need to have Java 8 and gradle installed. If you are going to author your Lambda functions in C# you will need to have the dotnet CLI and the .NET Core Runtime 1.08 installed. -:: - - $ pip install -e . - -To make sure the rdk is installed correctly, running the package from the command line without any arguments should display help information. - -:: - - $ rdk - usage: rdk [-h] [-p PROFILE] [-k ACCESS_KEY] [-s SECRET_ACCESS_KEY] - [-r REGION] - ... - rdk: error: the following arguments are required: , - - -Usage ------ - -Configure your env -~~~~~~~~~~~~~~~~~~ -To use the RDK, it's recommended to create a directory that will be your working directory. This should be committed to a source code repo, and ideally created as a python virtualenv. In that directory, run the ``init`` command to set up your AWS Config environment. - -:: - - $ rdk init - Running init! - Creating Config bucket config-bucket-780784666283 - Creating IAM role config-role - Waiting for IAM role to propagate - Config Service is ON - Config setup complete. - Creating Code bucket config-rule-code-bucket-780784666283ap-southeast-1 - -Running ``init`` subsequent times will validate your AWS Config setup and re-create any S3 buckets or IAM resources that are needed. - -Create Rules -~~~~~~~~~~~~ -In your working directory, use the ``create`` command to start creating a new custom rule. 
You must specify the runtime for the lambda function that will back the Rule, and you can also specify a resource type (or comma-separated list of types) that the Rule will evaluate or a maximum frequency for a periodic rule. This will add a new directory for the rule and populate it with several files, including a skeleton of your Lambda code. - -:: - - $ rdk create MyRule --runtime python3.8 --resource-types AWS::EC2::Instance --input-parameters '{"desiredInstanceType":"t2.micro"}' - Running create! - Local Rule files created. - -On Windows it is necessary to escape the double-quotes when specifying input parameters, so the `--input-parameters` argument would instead look something like this:: - - '{\"desiredInstanceType\":\"t2.micro\"}' - -Note that you can create rules that use EITHER resource-types OR maximum-frequency, but not both. We have found that rules that try to be both event-triggered as well as periodic wind up being very complicated and so we do not recommend it as a best practice. - -Edit Rules Locally -~~~~~~~~~~~~~~~~~~ -Once you have created the rule, edit the python file in your rule directory (in the above example it would be ``MyRule/MyRule.py``, but may be deeper into the rule directory tree depending on your chosen Lambda runtime) to add whatever logic your Rule requires in the ``evaluate_compliance`` function. You will have access to the CI that was sent by Config, as well as any parameters configured for the Config Rule. Your function should return either a simple compliance status (one of ``COMPLIANT``, ``NONCOMPLIANT``, or ``NOT_APPLICABLE``), or if you're using the python or node runtimes you can return a JSON object with multiple evaluation responses that the RDK will send back to AWS Config. An example would look like:: - - for sg in response['SecurityGroups']: - evaluations.append( - { - 'ComplianceResourceType': 'AWS::EC2::SecurityGroup', - 'ComplianceResourceId': sg['GroupId'], - 'ComplianceType': 'COMPLIANT', - 'Annotation': 'This is an important note.', - 'OrderingTimestamp': str(datetime.datetime.now()) - }) - - - return evaluations - -This is necessary for periodic rules that are not triggered by any CI change (which means the CI that is passed in will be null), and also for attaching annotations to your evaluation results. - -If you want to see what the JSON structure of a CI looks like for creating your logic, you can use - -:: - -$ rdk sample-ci - -to output a formatted JSON document. - -Write and Run Unit Tests -~~~~~~~~~~~~~~~~~~~~~~~~ -If you are writing Config Rules using either of the Python runtimes there will be a _test.py file deployed along with your Lambda function skeleton. This can be used to write unit tests according to the standard Python unittest framework (documented here: https://docs.python.org/3/library/unittest.html), which can be run using the `test-local` rdk command:: - - $ rdk test-local MyTestRule - Running local test! - Testing MyTestRule - Looking for tests in /Users/mborch/Code/rdk-dev/MyTestRule - - --------------------------------------------------------------------- - - Ran 0 tests in 0.000s - - OK - - -The test file includes setup for the MagicMock library that can be used to stub boto3 API calls if your rule logic will involve making API calls to gather additional information about your AWS environment. 
For some tips on how to do this, check out this blog post: https://sgillies.net/2017/10/19/mock-is-magic.html - -Modify Rule -~~~~~~~~~~~ -If you need to change the parameters of a Config rule in your working directory you can use the ``modify`` command. Any parameters you specify will overwrite existing values, any that you do not specify will not be changed. - -:: - - $ rdk modify MyRule --runtime python3.9 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}' - Running modify! - Modified Rule 'MyRule'. Use the `deploy` command to push your changes to AWS. - -Again, on Windows the input parameters would look like:: - - '{\"desiredInstanceType\":\"t2.micro\"}' - -It is worth noting that until you actually call the ``deploy`` command your rule only exists in your working directory, none of the Rule commands discussed thus far actually makes changes to your account. - -Deploy Rule -~~~~~~~~~~~ -Once you have completed your compliance validation code and set your Rule's configuration, you can deploy the Rule to your account using the ``deploy`` command. This will zip up your code (and the other associated code files, if any) into a deployable package (or run a gradle build if you have selected the java8 runtime or run the lambda packaging step from the dotnet CLI if you have selected the dotnetcore1.0 runtime), copy that zip file to S3, and then launch or update a CloudFormation stack that defines your Config Rule, Lambda function, and the necessary permissions and IAM Roles for it to function. Since CloudFormation does not deeply inspect Lambda code objects in S3 to construct its changeset, the ``deploy`` command will also directly update the Lambda function for any subsequent deployments to make sure code changes are propagated correctly. - -:: - - $ rdk deploy MyRule - Running deploy! - Zipping MyRule - Uploading MyRule - Creating CloudFormation Stack for MyRule - Waiting for CloudFormation stack operation to complete... - ... - Waiting for CloudFormation stack operation to complete... - Config deploy complete. - -The exact output will vary depending on Lambda runtime. You can use the --all flag to deploy all of the rules in your working directory. - -View Logs For Deployed Rule -~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Once the Rule has been deployed to AWS you can get the CloudWatch logs associated with your lambda function using the ``logs`` command. - -:: - - $ rdk logs MyRule -n 5 - 2017-11-15 22:59:33 - START RequestId: 96e7639a-ca15-11e7-95a2-b1521890638d Version: $LATEST - 2017-11-15 23:41:13 - REPORT RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Duration: 0.50 ms Billed Duration: 100 ms Memory Size: 256 MB - Max Memory Used: 36 MB - 2017-11-15 23:41:13 - END RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda - 2017-11-15 23:41:13 - Default RDK utility class does not yet support Scheduled Notifications. - 2017-11-15 23:41:13 - START RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Version: $LATEST - -You can use the ``-n`` and ``-f`` command line flags just like the UNIX ``tail`` command to view a larger number of log events and to continuously poll for new events. The latter option can be useful in conjunction with manually initiating Config Evaluations for your deploy Config Rule to make sure it is behaving as expected. 
- - -Advanced Features ------------------ -Cross-Account Deployments -~~~~~~~~~~~~~~~~~~~~~~~~~ -Features have been added to the RDK to facilitate the cross-account deployment pattern that enterprise customers have standardized on for custom Config Rules. A cross-account architecture is one in which the Lambda functions are deployed to a single central "Compliance" account (which may be the same as a central "Security" account), and the Config Rules are deployed to any number of "Satellite" accounts that are used by other teams or departments. This gives the compliance team confidence that their Rule logic cannot be tampered with and makes it much easier for them to modify rule logic without having to go through a complex deployment process to potentially hundreds of AWS accounts. The cross-account pattern uses two advanced RDK features - functions-only deployments and the `create-rule-template` command. - -**Function-Only Deployment** - -By using the `-f` or `--functions-only` flag on the `deploy` command the RDK will deploy only the necessary Lambda Functions, Lambda Execution Role, and Lambda Permissions to the account specified by the execution credentials. It accomplishes this by batching up all of the Lambda function CloudFormation snippets for the selected Rule(s) into a single dynamically generated template and deploy that CloudFormation template. One consequence of this is that subsequent deployments that specify a different set of Rules for the same stack name will update that CloudFormation stack, and any Rules that were included in the first deployment but not in the second will be removed. You can use the `--stack-name` parameter to override the default CloudFormation stack name if you need to manage different subsets of your Lambda Functions independently. The intended usage is to deploy the functions for all of the Config rules in the Security/Compliance account, which can be done simply by using `rdk deploy -f --all` from your working directory. - -**`create-rule-template` command** - -This command generates a CloudFormation template that defines the AWS Config rules themselves, along with the Config Role, Config data bucket, Configuration Recorder, and Delivery channel necessary for the Config rules to work in a satellite account. You must specify the file name for the generated template using the `--output-file` or `o` command line flags. The generated template takes a single parameter of the AccountID of the central compliance account that contains the Lambda functions that will back your custom Config Rules. The generated template can be deployed in the desired satellite accounts through any of the means that you can deploy any other CloudFormation template, including the console, the CLI, as a CodePipeline task, or using StackSets. The `create-rule-template` command takes all of the standard arguments for selecting Rules to include in the generated template, including lists of individual Rule names, an `--all` flag, or using the RuleSets feature described below. - -:: - - $ rdk create-rule-template -o remote-rule-template.json --all - Generating CloudFormation template! - CloudFormation template written to remote-rule-template.json - - -RuleSets -~~~~~~~~ -New as of version 0.3.11, it is possible to add RuleSet tags to rules that can be used to deploy and test groups of rules together. Rules can belong to multiple RuleSets, and RuleSet membership is stored only in the parameters.json metadata. 
The `deploy`, `create-rule-template`, and `test-local` commands are RuleSet-aware such that a RuleSet can be passed in as the target instead of `--all` or a specific named Rule. - -A comma-delimited list of RuleSets can be added to a Rule when you create it (using the `--rulesets` flag), as part of a `modify` command, or using new `ruleset` subcommands to add or remove individual rules from a RuleSet. - -Running `rdk rulesets list` will display a list of the RuleSets currently defined across all of the Rules in the working directory - -:: - - rdk-dev $ rdk rulesets list - RuleSets: AnotherRuleSet MyNewSet - -Naming a specific RuleSet will list all of the Rules that are part of that RuleSet. - -:: - - rdk-dev $ rdk rulesets list AnotherRuleSet - Rules in AnotherRuleSet : RSTest - -Rules can be added to or removed from RuleSets using the `add` and `remove` subcommands: - -:: - - rdk-dev $ rdk rulesets add MyNewSet RSTest - RSTest added to RuleSet MyNewSet - - rdk-dev $ rdk rulesets remove AnotherRuleSet RSTest - RSTest removed from RuleSet AnotherRuleSet - -RuleSets are a convenient way to maintain a single repository of Config Rules that may need to have subsets of them deployed to different environments. For example your development environment may contain some of the Rules that you run in Production but not all of them; RuleSets gives you a way to identify and selectively deploy the appropriate Rules to each environment. - - -Region Sets -~~~~~~~~~~~ -`rdk init`, `rdk deploy`, and `rdk undeploy` subcommands now support running across multiple regions in parallel using region sets defined in a yaml file. - -To run a subcommand with a region set, pass in the region set yaml file and the specific region set to run through. - -:: - - $ rdk -f regions.yaml --region-set region-set-1 undeploy CUSTOM_RULE - Deleting rules in the following regions: ['sa-east-1', 'us-east-1']. - Delete specified Rules and Lambda Functions from your AWS Account? (y/N): y - [sa-east-1] Running un-deploy! - [us-east-1] Running un-deploy! - [us-east-1] Rule removal initiated. Waiting for Stack Deletion to complete. - [sa-east-1] Rule removal initiated. Waiting for Stack Deletion to complete. - [us-east-1] CloudFormation stack operation complete. - [us-east-1] Rule removal complete, but local files have been preserved. - [us-east-1] To re-deploy, use the 'deploy' command. - [sa-east-1] CloudFormation stack operation complete. - [sa-east-1] Rule removal complete, but local files have been preserved. - [sa-east-1] To re-deploy, use the 'deploy' command. - -Example region set file: - -:: - - default: - - us-west-1 - - us-west-2 - region-set-1: - - sa-east-1 - - us-east-1 - region-set-2: - - ap-southeast-1 - - eu-central-1 - - sa-east-1 - - us-east-1 diff --git a/docs/index.md b/docs/index.md new file mode 120000 index 00000000..32d46ee8 --- /dev/null +++ b/docs/index.md @@ -0,0 +1 @@ +../README.md \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index ffa34f9d..00000000 --- a/docs/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. RDK documentation master file, created by - sphinx-quickstart on Mon Sep 3 12:46:08 2018. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to RDK's documentation! -=============================== - -.. 
toctree:: - :maxdepth: 3 - :caption: Contents: - - introduction - getting_started - references - - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/introduction.rst b/docs/introduction.rst deleted file mode 100644 index 601ad9a8..00000000 --- a/docs/introduction.rst +++ /dev/null @@ -1,7 +0,0 @@ -Introduction -============ - -Rule Development Kit - Version 2 -This tool should be considered in "Open Beta". We would greatly appreciate feedback and bug reports either as github issues or emails to rdk-maintainers@amazon.com! - -The RDK is designed to support a "Compliance-as-Code" workflow that is intuitive and productive. It abstracts away much of the undifferentiated heavy lifting associated with deploying AWS Config rules backed by custom lambda functions, and provides a streamlined develop-deploy-monitor iterative process. diff --git a/docs/legacy-docs.md b/docs/legacy-docs.md new file mode 100644 index 00000000..d7bba05c --- /dev/null +++ b/docs/legacy-docs.md @@ -0,0 +1,378 @@ +# Legacy RDK Documentation + +Please note, this documentation is a carry-over from the old RTD documentation pre-mkdocs. +This will likely be removed in a subsequent release. + +## Introduction + +Rule Development Kit - Version 2 This tool should be considered in +"Open Beta". We would greatly appreciate feedback and bug reports +either as github issues or emails to ! + +The RDK is designed to support a "Compliance-as-Code" workflow that is +intuitive and productive. It abstracts away much of the undifferentiated +heavy lifting associated with deploying AWS Config rules backed by +custom lambda functions, and provides a streamlined +develop-deploy-monitor iterative process. + +## Prerequisites + +RDK uses python 3.7+. You will need to have an AWS account and +sufficient permissions to manage the Config service, and to create and +manage S3 Buckets, Roles, and Lambda Functions. An AWS IAM Policy +Document that describes the minimum necessary permissions can be found +[here](https://github.com/awslabs/aws-config-rdk/blob/master/policy/rdk-minimum-permissions.json) +on github. + +Under the hood, rdk uses boto3 to make API calls to AWS, so you can set +your credentials any way that boto3 recognizes (options 3 through 8 in +the [boto docs +here](https://boto3.readthedocs.io/en/latest/guide/configuration.html) ) +or pass them in with the command-line parameters --profile, --region, +--access-key-id, or --secret-access-key + +## Installation + +If you just want to use the RDK, go ahead and install it using pip: + + pip install rdk + +Alternately, if you want to see the code and/or contribute you can clone +the [git repo](https://github.com/awslabs/aws-config-rdk/) , and then +from the repo directory use pip to install the package. Use the '-e' +flag to generate symlinks so that any edits you make will be reflected +when you run the installed package. + +If you are going to author your Lambda functions using Java you will +need to have Java 8 and gradle installed. If you are going to author +your Lambda functions in C# you will need to have the dotnet CLI and the +.NET Core Runtime 1.08 installed. : + + pip install -e . + +To make sure the rdk is installed correctly, running the package from +the command line without any arguments should display help information. + + rdk + usage: rdk [-h] [-p PROFILE] [-k ACCESS_KEY] [-s SECRET_ACCESS_KEY] + [-r REGION] + ... 
+    rdk: error: the following arguments are required: ,
+
+## Usage
+
+### Configure your env
+
+To use the RDK, it's recommended to create a directory that will be
+your working directory. This should be committed to a source code repo,
+and ideally created as a python virtualenv. In that directory, run the
+`init` command to set up your AWS Config environment.
+
+    rdk init
+    Running init!
+    Creating Config bucket config-bucket-780784666283
+    Creating IAM role config-role
+    Waiting for IAM role to propagate
+    Config Service is ON
+    Config setup complete.
+    Creating Code bucket config-rule-code-bucket-780784666283ap-southeast-1
+
+Running `init` subsequent times will validate your AWS Config setup and
+re-create any S3 buckets or IAM resources that are needed.
+
+### Create Rules
+
+In your working directory, use the `create` command to start creating a
+new custom rule. You must specify the runtime for the lambda function
+that will back the Rule, and you can also specify a resource type (or
+comma-separated list of types) that the Rule will evaluate or a maximum
+frequency for a periodic rule. This will add a new directory for the
+rule and populate it with several files, including a skeleton of your
+Lambda code.
+
+    rdk create MyRule --runtime python3.8 --resource-types AWS::EC2::Instance --input-parameters '{"desiredInstanceType":"t2.micro"}'
+    Running create!
+    Local Rule files created.
+
+On Windows it is necessary to escape the double-quotes when specifying
+input parameters, so the --input-parameters argument
+would instead look something like this:
+
+    '{\"desiredInstanceType\":\"t2.micro\"}'
+
+Note that you can create rules that use EITHER resource-types OR
+maximum-frequency, but not both. We have found that rules that try to be
+both event-triggered as well as periodic wind up being very complicated,
+and so we do not recommend it as a best practice.
+
+### Edit Rules Locally
+
+Once you have created the rule, edit the python file in your rule
+directory (in the above example it would be `MyRule/MyRule.py`, but may
+be deeper into the rule directory tree depending on your chosen Lambda
+runtime) to add whatever logic your Rule requires in the
+`evaluate_compliance` function. You will have access to the CI that was
+sent by Config, as well as any parameters configured for the Config
+Rule. Your function should return either a simple compliance status (one
+of `COMPLIANT`, `NON_COMPLIANT`, or `NOT_APPLICABLE`), or if you're
+using the python or node runtimes you can return a JSON object with
+multiple evaluation responses that the RDK will send back to AWS Config.
+An example would look like:
+
+    for sg in response['SecurityGroups']:
+        evaluations.append(
+            {
+                'ComplianceResourceType': 'AWS::EC2::SecurityGroup',
+                'ComplianceResourceId': sg['GroupId'],
+                'ComplianceType': 'COMPLIANT',
+                'Annotation': 'This is an important note.',
+                'OrderingTimestamp': str(datetime.datetime.now())
+            })
+
+    return evaluations
+
+This is necessary for periodic rules that are not triggered by any CI
+change (which means the CI that is passed in will be null), and also for
+attaching annotations to your evaluation results.
+
+If you want to see what the JSON structure of a CI looks like for
+creating your logic, you can use
+
+    rdk sample-ci
+
+to output a formatted JSON document.
+
+### Write and Run Unit Tests
+
+If you are writing Config Rules using either of the Python runtimes
+there will be a `_test.py` file deployed along with your
+Lambda function skeleton.
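+For illustration, a minimal test written against such a rule might look like
+the sketch below. The module name (`MyRule`), the `evaluate_compliance`
+signature, and the pared-down Configuration Item are assumptions made for this
+example only; the skeleton that the RDK actually generates in your rule
+directory is the authoritative starting point.
+
+    # Sketch only: assumes the rule was created as "MyRule" and that its
+    # evaluate_compliance(event, configuration_item, valid_rule_parameters)
+    # function has been filled in to compare the instance type against the
+    # desiredInstanceType rule parameter, as in the walkthrough above.
+    import unittest
+
+    import MyRule
+
+    class MyRuleTest(unittest.TestCase):
+        def test_wrong_instance_type_is_noncompliant(self):
+            # A trimmed-down Configuration Item; run `rdk sample-ci AWS::EC2::Instance`
+            # to see the full structure that Config actually sends.
+            ci = {
+                "resourceType": "AWS::EC2::Instance",
+                "resourceId": "i-00000000000000000",
+                "configuration": {"instanceType": "t2.large"},
+            }
+            result = MyRule.evaluate_compliance({}, ci, {"desiredInstanceType": "t2.micro"})
+            self.assertEqual("NON_COMPLIANT", result)
+
+    if __name__ == "__main__":
+        unittest.main()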
+The generated test file can be used to write unit tests with the
+standard Python unittest framework, and the tests can be run
+using the `test-local` rdk command:
+
+    rdk test-local MyTestRule
+    Running local test!
+    Testing MyTestRule
+    Looking for tests in /Users/mborch/Code/rdk-dev/MyTestRule
+
+    ---------------------------------------------------------------------
+
+    Ran 0 tests in 0.000s
+
+    OK
+
+The test file includes setup for the MagicMock library that can be used
+to stub boto3 API calls if your rule logic will involve making API calls
+to gather additional information about your AWS environment. For some
+tips on how to do this, check out this blog post:
+
+### Modify Rule
+
+If you need to change the parameters of a Config rule in your working
+directory you can use the `modify` command. Any parameters you specify
+will overwrite existing values; any that you do not specify will not be
+changed.
+
+    rdk modify MyRule --runtime python3.9 --maximum-frequency TwentyFour_Hours --input-parameters '{"desiredInstanceType":"t2.micro"}'
+    Running modify!
+    Modified Rule 'MyRule'. Use the `deploy` command to push your changes to AWS.
+
+Again, on Windows the input parameters would look like:
+
+    '{\"desiredInstanceType\":\"t2.micro\"}'
+
+It is worth noting that until you actually call the `deploy` command
+your rule only exists in your working directory; none of the Rule
+commands discussed thus far actually make changes to your account.
+
+### Deploy Rule
+
+Once you have completed your compliance validation code and set your
+Rule's configuration, you can deploy the Rule to your account using the
+`deploy` command. This will zip up your code (and the other associated
+code files, if any) into a deployable package (or run a gradle build if
+you have selected the java8 runtime, or run the lambda packaging step
+from the dotnet CLI if you have selected the dotnetcore1.0 runtime),
+copy that zip file to S3, and then launch or update a CloudFormation
+stack that defines your Config Rule, Lambda function, and the necessary
+permissions and IAM Roles for it to function. Since CloudFormation does
+not deeply inspect Lambda code objects in S3 to construct its changeset,
+the `deploy` command will also directly update the Lambda function for
+any subsequent deployments to make sure code changes are propagated
+correctly.
+
+    rdk deploy MyRule
+    Running deploy!
+    Zipping MyRule
+    Uploading MyRule
+    Creating CloudFormation Stack for MyRule
+    Waiting for CloudFormation stack operation to complete...
+    ...
+    Waiting for CloudFormation stack operation to complete...
+    Config deploy complete.
+
+The exact output will vary depending on Lambda runtime. You can use the
+--all flag to deploy all of the rules in your working directory.
+
+### View Logs For Deployed Rule
+
+Once the Rule has been deployed to AWS you can get the CloudWatch logs
+associated with your lambda function using the `logs` command.
+
+    rdk logs MyRule -n 5
+    2017-11-15 22:59:33 - START RequestId: 96e7639a-ca15-11e7-95a2-b1521890638d Version: $LATEST
+    2017-11-15 23:41:13 - REPORT RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Duration: 0.50 ms Billed Duration: 100 ms Memory Size: 256 MB
+    Max Memory Used: 36 MB
+    2017-11-15 23:41:13 - END RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda
+    2017-11-15 23:41:13 - Default RDK utility class does not yet support Scheduled Notifications.
+    2017-11-15 23:41:13 - START RequestId: 68e0304f-ca1b-11e7-b735-81ebae95acda Version: $LATEST
+
+You can use the `-n` and `-f` command line flags just like the UNIX
+`tail` command to view a larger number of log events and to continuously
+poll for new events. The latter option can be useful in conjunction with
+manually initiating Config Evaluations for your deployed Config Rule to
+make sure it is behaving as expected.
+
+## Advanced Features
+
+### Cross-Account Deployments
+
+Features have been added to the RDK to facilitate the cross-account
+deployment pattern that enterprise customers have standardized on for
+custom Config Rules. A cross-account architecture is one in which the
+Lambda functions are deployed to a single central "Compliance" account
+(which may be the same as a central "Security" account), and the
+Config Rules are deployed to any number of "Satellite" accounts that
+are used by other teams or departments. This gives the compliance team
+confidence that their Rule logic cannot be tampered with and makes it
+much easier for them to modify rule logic without having to go through a
+complex deployment process to potentially hundreds of AWS accounts. The
+cross-account pattern uses two advanced RDK features: functions-only
+deployments and the create-rule-template command.
+
+**Function-Only Deployment**
+
+By using the -f or --functions-only flag on
+the deploy command the RDK will deploy only the necessary
+Lambda Functions, Lambda Execution Role, and Lambda Permissions to the
+account specified by the execution credentials. It accomplishes this by
+batching up all of the Lambda function CloudFormation snippets for the
+selected Rule(s) into a single dynamically generated template and deploying
+that CloudFormation template. One consequence of this is that subsequent
+deployments that specify a different set of Rules for the same stack
+name will update that CloudFormation stack, and any Rules that were
+included in the first deployment but not in the second will be removed.
+You can use the --stack-name parameter to override the
+default CloudFormation stack name if you need to manage different
+subsets of your Lambda Functions independently. The intended usage is to
+deploy the functions for all of the Config rules in the
+Security/Compliance account, which can be done simply by running
+`rdk deploy -f --all` from your working directory.
+
+**`create-rule-template` command**
+
+This command generates a CloudFormation template that defines the AWS
+Config rules themselves, along with the Config Role, Config data bucket,
+Configuration Recorder, and Delivery channel necessary for the Config
+rules to work in a satellite account. You must specify the file name for
+the generated template using the --output-file or
+-o command line flags. The generated template takes a
+single parameter of the AccountID of the central compliance account that
+contains the Lambda functions that will back your custom Config Rules.
+The generated template can be deployed in the desired satellite accounts
+through any of the means that you would use to deploy any other CloudFormation
+template, including the console, the CLI, as a CodePipeline task, or
+using StackSets. The create-rule-template command takes
+all of the standard arguments for selecting Rules to include in the
+generated template, including lists of individual Rule names, an
+--all flag, or using the RuleSets feature described
+below.
+
+    rdk create-rule-template -o remote-rule-template.json --all
+    Generating CloudFormation template!
+ CloudFormation template written to remote-rule-template.json + +### RuleSets + +New as of version 0.3.11, it is possible to add RuleSet tags to rules +that can be used to deploy and test groups of rules together. Rules can +belong to multiple RuleSets, and RuleSet membership is stored only in +the parameters.json metadata. The deploy, +create-rule-template, and test-local +commands are RuleSet-aware such that a RuleSet can be passed in as the +target instead of --all or a specific named Rule. + +A comma-delimited list of RuleSets can be added to a Rule when you +create it (using the --rulesets flag), as part of a +modify command, or using new ruleset +subcommands to add or remove individual rules from a RuleSet. + +Running rdk rulesets list will display a list of the +RuleSets currently defined across all of the Rules in the working +directory + + rdk rulesets list + RuleSets: AnotherRuleSet MyNewSet + +Naming a specific RuleSet will list all of the Rules that are part of +that RuleSet. + + rdk rulesets list AnotherRuleSet + Rules in AnotherRuleSet : RSTest + +Rules can be added to or removed from RuleSets using the +add and remove subcommands: + + rdk rulesets add MyNewSet RSTest + RSTest added to RuleSet MyNewSet + + rdk rulesets remove AnotherRuleSet RSTest + RSTest removed from RuleSet AnotherRuleSet + +RuleSets are a convenient way to maintain a single repository of Config +Rules that may need to have subsets of them deployed to different +environments. For example your development environment may contain some +of the Rules that you run in Production but not all of them; RuleSets +gives you a way to identify and selectively deploy the appropriate Rules +to each environment. + +### Region Sets + +rdk init, rdk deploy, and rdk +undeploy subcommands now support running across multiple +regions in parallel using region sets defined in a yaml file. + +To run a subcommand with a region set, pass in the region set yaml file +and the specific region set to run through. + + rdk -f regions.yaml --region-set region-set-1 undeploy CUSTOM_RULE + Deleting rules in the following regions: ['sa-east-1', 'us-east-1']. + Delete specified Rules and Lambda Functions from your AWS Account? (y/N): y + [sa-east-1] Running un-deploy! + [us-east-1] Running un-deploy! + [us-east-1] Rule removal initiated. Waiting for Stack Deletion to complete. + [sa-east-1] Rule removal initiated. Waiting for Stack Deletion to complete. + [us-east-1] CloudFormation stack operation complete. + [us-east-1] Rule removal complete, but local files have been preserved. + [us-east-1] To re-deploy, use the 'deploy' command. + [sa-east-1] CloudFormation stack operation complete. + [sa-east-1] Rule removal complete, but local files have been preserved. + [sa-east-1] To re-deploy, use the 'deploy' command. + +Example region set file: + + default: + - us-west-1 + - us-west-2 + region-set-1: + - sa-east-1 + - us-east-1 + region-set-2: + - ap-southeast-1 + - eu-central-1 + - sa-east-1 + - us-east-1 diff --git a/docs/make.bat b/docs/make.bat deleted file mode 100644 index add241e6..00000000 --- a/docs/make.bat +++ /dev/null @@ -1,36 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=. -set BUILDDIR=_build -set SPHINXPROJ=RDK - -if "%1" == "" goto help - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% - -:end -popd diff --git a/docs/reference/clean.rst b/docs/reference/clean.rst deleted file mode 100644 index 07804bad..00000000 --- a/docs/reference/clean.rst +++ /dev/null @@ -1,10 +0,0 @@ -Clean ------ - -.. argparse:: - :module: rdk - :func: get_clean_parser - :prog: rdk clean - :nodescription: - - The ``clean`` command is the inverse of the ``init`` command, and can be used to completely remove Config resources from an account, including the Configuration Recorder, Delivery Channel, S3 buckets, Roles, and Permissions. This is useful for testing account provisioning automation and for running automated tests in a clean environment. diff --git a/docs/reference/create-rule-template.rst b/docs/reference/create-rule-template.rst deleted file mode 100644 index 1d4903d9..00000000 --- a/docs/reference/create-rule-template.rst +++ /dev/null @@ -1,22 +0,0 @@ -Create-Rule-Template --------------------- - -.. argparse:: - :module: rdk - :func: get_create_rule_template_parser - :prog: rdk create-rule-template - :nodescription: - - Generates and saves to a file a single CloudFormation template that can be used to deploy the specified Rule(s) into any account. This feature has two primary uses: - - - Multi-account Config setup in which the Lambda Functions for custom Rules are deployed into a centralized "security" or "compliance" account and the Config Rules themselves are deployed into "application" or "satellite" accounts. - - Combine many Config Rules into a single CloudFormation template for easier atomic deployment and management. - - The generated CloudFormation template includes a Parameter for the AccountID that contains the Lambda functions that provide the compliance logic for the Rules, and also exposes all of the Config Rule input parameters as CloudFormation stack parameters. - - By default the generated CloudFormation template will set up Config as per the settings used by the RDK ``init`` command, but those resources can be omitted using the ``--rules-only`` flag. - - The ``--config-role-arn`` flag can be used for assigning existing config role to the created Configuration Recorder. - The ``-t | --tag-config-rules-script `` can now be used for output the script generated for create tags for each config rule. - - As of version 0.6, RDK supports Config remediation. Note that in order to use SSM documents for remediation you must supply all of the necessary document parameters. These can be found in the SSM document listing on the AWS console, but RDK will *not* validate at rule creation that you have all of the necessary parameters supplied. \ No newline at end of file diff --git a/docs/reference/create.rst b/docs/reference/create.rst deleted file mode 100644 index 4a5d7589..00000000 --- a/docs/reference/create.rst +++ /dev/null @@ -1,10 +0,0 @@ -Create ------- - -.. argparse:: - :module: rdk - :func: get_create_parser - :prog: rdk create - - As of version 0.6, RDK supports Config remediation. Note that in order to use SSM documents for remediation you must supply all of the necessary document parameters. 
These can be found in the SSM document listing on the AWS console, but RDK will *not* validate at rule creation that you have all of the necessary parameters supplied. - diff --git a/docs/reference/deploy.rst b/docs/reference/deploy.rst deleted file mode 100644 index 512c5918..00000000 --- a/docs/reference/deploy.rst +++ /dev/null @@ -1,25 +0,0 @@ -Deploy ------- - -.. argparse:: - :module: rdk - :func: get_deploy_parser - :prog: rdk deploy - :nodescription: - - This command will deploy the specified Rule(s) to the Account and Region determined by the credentials being used to execute the command, and the value of the AWS_DEFAULT_REGION environment variable, unless those credentials or region are overridden using the common flags. - - Once deployed, RDK will _not_ explicitly start a Rule evaluation. Depending on the changes being made to your Config Rule setup AWS Config may re-evaluate the deployed Rules automatically, or you can run an evaluation using the AWS configservice CLI. - - The ``--functions-only`` flag can be used as part of a multi-account deployment strategy to push _only_ the Lambda functions (and necessary Roles and Permssions) to the target account. This is intended to be used in conjunction with the ``create-rule-template`` command in order to separate the compliance logic from the evaluated accounts. For an example of how this looks in practice, check out the `AWS Compliance-as-Code Engine `_. - The ``--rdklib-layer-arn`` flag can be used for attaching Lambda Layer ARN that contains the desired rdklib. Note that Lambda Layers are region-specific. - The ``--lambda-role-arn`` flag can be used for assigning existing iam role to all Lambda functions created for Custom Config Rules. - The ``--lambda-layers`` flag can be used for attaching a comma-separated list of Lambda Layer ARNs to deploy with your Lambda function(s). - The ``--lambda-subnets`` flag can be used for attaching a comma-separated list of Subnets to deploy your Lambda function(s). - The ``--lambda-security-groups`` flag can be used for attaching a comma-separated list of Security Groups to deploy with your Lambda function(s). - The ``--custom-code-bucket`` flag can be used for providing the custom code S3 bucket name, which is not created with rdk init, for generated cloudformation template storage. - The ``--boundary-policy-arn`` flag can be used for attaching boundary Policy ARN that will be added to rdkLambdaRole. - The ``--lambda-timeout`` flag can be used for specifying the timeout associated to the lambda function - - - Note: Behind the scenes the ``--functions-only`` flag generates a CloudFormation template and runs a "create" or "update" on the targeted AWS Account and Region. If subsequent calls to ``deploy`` with the ``--functions-only`` flag are made with the same stack name (either the default or otherwise) but with *different Config rules targeted*, any Rules deployed in previous ``deploy``s but not included in the latest ``deploy`` will be removed. After a functions-only ``deploy`` _only_ the Rules specifically targeted by that command (either through Rulesets or an explicit list supplied on the command line) will be deployed in the environment, all others will be removed.s diff --git a/docs/reference/export.rst b/docs/reference/export.rst deleted file mode 100644 index f71a3bd2..00000000 --- a/docs/reference/export.rst +++ /dev/null @@ -1,22 +0,0 @@ -Export ------- - -.. 
argparse:: - :module: rdk - :func: get_export_parser - :prog: rdk export - :nodescription: - - This command will export the specified Rule(s) to the terraform file, it supports the terraform versions 0.11 and 0.12. - - - The ``--format`` flag can be used to specify export format, currently it supports only terraform. - The ``--version`` flag can be used to specify the terraform version. - The ``--rdklib-layer-arn`` flag can be used for attaching Lambda Layer ARN that contains the desired rdklib. Note that Lambda Layers are region-specific. - The ``--lambda-role-arn`` flag can be used for assigning existing iam role to all Lambda functions created for Custom Config Rules. - The ``--lambda-layers`` flag can be used for attaching a comma-separated list of Lambda Layer ARNs to deploy with your Lambda function(s). - The ``--lambda-subnets`` flag can be used for attaching a comma-separated list of Subnets to deploy your Lambda function(s). - The ``--lambda-security-groups`` flag can be used for attaching a comma-separated list of Security Groups to deploy with your Lambda function(s). - The ``--lambda-timeout`` flag can be used for specifying the timeout associated to the lambda function - - \ No newline at end of file diff --git a/docs/reference/init.rst b/docs/reference/init.rst deleted file mode 100644 index fe2dc9af..00000000 --- a/docs/reference/init.rst +++ /dev/null @@ -1,28 +0,0 @@ -Init ----- - -.. argparse:: - :module: rdk - :func: get_init_parser - :prog: rdk init - :nodescription: - - Sets up the AWS Config Service in an AWS Account. This includes: - - - Config Configuration Recorder - - Config Delivery Channel - - IAM Role for Delivery Channel - - S3 Bucket for Configuration Snapshots - - S3 Bucket for Lambda Code - - Additionally, ``init`` will make sure that the Configuration Recorder is on and functioning, that the Delivery Channel has the appropriate Role attached, and that the Delivery Channel Role has the proper permissions. - - Note: Even without Config Rules running the Configuration Recorder is still capturing Configuration Item snapshots and storing them in S3, so running ``init`` will incur AWS charges! - - Also Note: AWS Config is a regional service, so running ``init`` will only set up Config in the region currently specified in your AWS_DEFAULT_REGION environment variable or in the ``--region`` flag. - - Advanced Options: - - - ``--config-bucket-exists-in-another-account``: [optional] If the bucket being used by a Config Delivery Channel exists in another account, it is possible to skip the check that the bucket exists. This is useful when using ``init`` to initialize AWS Config in an account which already has a delivery channel setup with a central bucket. Currently, the rdk lists out all the buckets within the account your are running ``init`` from, to check if the provided bucket name exists, if it doesn't then it will create it. This presents an issue when a Config Delivery Channel has been configured to push configuration recordings to a central bucket. The bucket will never be found as it doesn't exist in the same account, but cannot be created as bucket names have to be globally unique. 
- - ``--skip-code-bucket-creation``: [optional] If you want to use custom code bucket for rdk, enable this and use flag ``--custom-code-bucket`` to ``rdk deploy`` - - ``control-tower``: [optional] If your account is part of an AWS Control Tower setup --control-tower will skip the setup of configuration_recorder and delivery_channel \ No newline at end of file diff --git a/docs/reference/logs.rst b/docs/reference/logs.rst deleted file mode 100644 index 2b73c9af..00000000 --- a/docs/reference/logs.rst +++ /dev/null @@ -1,12 +0,0 @@ -Logs ----- - -.. argparse:: - :module: rdk - :func: get_logs_parser - :prog: rdk logs - :nodescription: - - The ``logs`` command provides a shortcut to accessing the CloudWatch Logs output from the Lambda Functions that back your custom Config Rules. Logs are displayed in chronological order going back the number of log entries specified by the ``--number`` flag (default 3). It supports a ``--follow`` flag similar to the UNIX command ``tail`` so that you can choose to continually poll CloudWatch to deliver new log items as they are delivered by your Lambda function. - - In addition to any output that your function emits via ``print()`` or ``console.log()`` commands, Lambda will also record log lines for the start and stop of each Lambda invocation, including the runtime and memory usage. diff --git a/docs/reference/modify.rst b/docs/reference/modify.rst deleted file mode 100644 index fc5f171b..00000000 --- a/docs/reference/modify.rst +++ /dev/null @@ -1,10 +0,0 @@ -Modify ------- - -.. argparse:: - :module: rdk - :func: get_modify_parser - :prog: rdk modify - :nodescription: - - Used to modify the local metadata for Config Rules created by the RDK. This command takes the same arguments as the ``create`` command (all of them optional), and overwrites the Rule metadata for any flag specified. Changes made using ``modify`` are not automatically pushed out to your AWS Account, and must be deployed as usual using the ``deploy`` command. diff --git a/docs/reference/rulesets.rst b/docs/reference/rulesets.rst deleted file mode 100644 index f09cab7b..00000000 --- a/docs/reference/rulesets.rst +++ /dev/null @@ -1,16 +0,0 @@ -Rulesets --------- - -.. argparse:: - :module: rdk - :func: get_rulesets_parser - :prog: rdk rulesets - :nodescription: - - Rulesets provide a mechanism to tag individual Config Rules into groups that can be acted on as a unit. Ruleset tags are single keywords, and the commands ``deploy``, ``create-rule-template``, and ``undeploy`` can all expand Ruleset parameters and operate on the resulting list of Rules. - - The most common use-case for Rulesets is to define standardized Account metadata or data classifications, and then tag individual Rules to all of the appropriate metadata tags or classification levels. - - Example: If you have Account classifications of "Public", "Private", and "Restricted" you can tag all of your Rules as "Restricted", and a subset of them that deal with private network security as "Private". Then when you need to deploy controls to a new "Private" account you can simply use ``rdk create-rule-template --rulesets Private`` to generate a CloudFormation template that includes all of the Rules necessary for your "Private" classification, but omit the Rules that are only necessary for "Restricted" accounts. Additionally, as your compliance requirements change and you add Config Rules you can tag them as appropriate, re-generate your CloudFormation templates, and re-deploy to make sure your Accounts are all up-to-date. 
- - You may also choose to classify accounts using binary attributes ("Prod" vs. "Non-Prod" or "PCI" vs. "Non-PCI"), and then generate account-specific CloudFormation templates using the Account metadata to ensure that the appropriate controls are deployed. diff --git a/docs/reference/sample-ci.rst b/docs/reference/sample-ci.rst deleted file mode 100644 index 23567094..00000000 --- a/docs/reference/sample-ci.rst +++ /dev/null @@ -1,18 +0,0 @@ -Sample-CI ---------- - -.. argparse:: - :module: rdk - :func: get_sample_ci_parser - :prog: rdk sample-ci - :nodescription: - - This utility command outputs a sample Configuration Item for the specified resource type. This can be useful when writing new custom Config Rules to help developers know what the CI structure and plausible values for the resource type are. - - Note that you can construct Config Evaluations for any resource type that is supported by CloudFormation, however you can not create change-triggered Config Rules for resource types not explicitly supported by Config, and some of the console functionality in AWS Config may be limited. - - `CFN-supported resources `_ - `Config-supported resources `_ - - ci_type : @replace - One of the supported Config-supported resource types. diff --git a/docs/reference/test-local.rst b/docs/reference/test-local.rst deleted file mode 100644 index 63476a9e..00000000 --- a/docs/reference/test-local.rst +++ /dev/null @@ -1,10 +0,0 @@ -Test-Local ----------- - -.. argparse:: - :module: rdk - :func: get_test_local_parser - :prog: rdk test-local - :nodescription: - - Shorthand command for running the unit tests defined for Config Rules that use a Python runtime. When a Python 3.7+ Rule is created using the ``create`` command a unit test template is created in the Rule directory. This test boilerplate includes minimal tests, as well as a framework for using the ``unittest.mock`` library for stubbing out Boto3 calls. This allows more sophisticated test cases to be written for Periodic rules that need to make API calls to gather information about the environment. diff --git a/docs/reference/undeploy.rst b/docs/reference/undeploy.rst deleted file mode 100644 index 4207d0e6..00000000 --- a/docs/reference/undeploy.rst +++ /dev/null @@ -1,12 +0,0 @@ -Undeploy --------- - -.. argparse:: - :module: rdk - :func: get_undeploy_parser - :prog: rdk undeploy - :nodescription: - - The inverse of ``deploy``, this command is used to remove a Config Rule and its Lambda Function from the targeted account. - - This is intended to be used primarily for clean-up for testing deployment automation (perhaps from a CI/CD pipeline) to ensure that it works from an empty account, or to clean up a test account during development. See also the `clean <./clean.html>`_ command if you want to more thoroughly scrub Config from your account. diff --git a/docs/references.rst b/docs/references.rst deleted file mode 100644 index 5b526844..00000000 --- a/docs/references.rst +++ /dev/null @@ -1,21 +0,0 @@ -Command Reference -================= - -.. argparse:: - :module: rdk - :func: get_command_parser - :prog: rdk - :nodescription: - - The RDK has some options that can be used to override the default behavior (mostly relating to the identity and credentials used by the tool) that are common to all of the sub-commands. - -Sub-Commands ------------- - -.. 
toctree:: - :maxdepth: 3 - :caption: Command Reference: - :titlesonly: - :glob: - - reference/* diff --git a/docs/requirements.txt b/docs/requirements.txt index 74b9a6da..c1560e74 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,6 +1,299 @@ -Sphinx==1.7.8 -sphinx-argparse==0.2.5 -sphinx-rtd-theme==0.4.3 -sphinxcontrib-websupport==1.1.0 -PyYAML==5.4.1 - +cached-property==1.5.2 ; python_full_version >= "3.7.2" and python_version < "3.8" \ + --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \ + --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 +certifi==2022.12.7 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \ + --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18 +charset-normalizer==2.1.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ + --hash=sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f +click==8.1.3 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e \ + --hash=sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48 +colorama==0.4.6 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ + --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 +ghp-import==2.1.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619 \ + --hash=sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343 +griffe==0.28.2 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:a471498b0b9505c721ea0e652fd77c97df1aeb56c4eb8c93d24bb1140da4216d \ + --hash=sha256:bde3a3dfa301a4b113c7fac3b2be45e5723bc50cda4c9cfe13f43c447c9aa5d1 +idna==3.4 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ + --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 +importlib-metadata==4.13.0 ; python_full_version >= "3.7.2" and python_version < "3.10" \ + --hash=sha256:8a8a81bcf996e74fee46f0d16bd3eaa382a7eb20fd82445c3ad11f4090334116 \ + --hash=sha256:dd0173e8f150d6815e098fd354f6414b0f079af4644ddfe90c71e2fc6174346d +jinja2==3.1.2 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ + --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 +markdown-include==0.8.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:1d0623e0fc2757c38d35df53752768356162284259d259c486b4ab6285cdbbe3 \ + --hash=sha256:32f0635b9cfef46997b307e2430022852529f7a5b87c0075c504283e7cc7db53 +markdown==3.3.7 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874 \ + --hash=sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621 +markupsafe==2.1.2 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + 
--hash=sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed \ + --hash=sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc \ + --hash=sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2 \ + --hash=sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460 \ + --hash=sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7 \ + --hash=sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0 \ + --hash=sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1 \ + --hash=sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa \ + --hash=sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03 \ + --hash=sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323 \ + --hash=sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65 \ + --hash=sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013 \ + --hash=sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036 \ + --hash=sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f \ + --hash=sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4 \ + --hash=sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419 \ + --hash=sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2 \ + --hash=sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619 \ + --hash=sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a \ + --hash=sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a \ + --hash=sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd \ + --hash=sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7 \ + --hash=sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666 \ + --hash=sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65 \ + --hash=sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859 \ + --hash=sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625 \ + --hash=sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff \ + --hash=sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156 \ + --hash=sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd \ + --hash=sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba \ + --hash=sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f \ + --hash=sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1 \ + --hash=sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094 \ + --hash=sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a \ + --hash=sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513 \ + --hash=sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed \ + --hash=sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d \ + --hash=sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3 \ + --hash=sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147 \ + --hash=sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c \ + --hash=sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603 \ + --hash=sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601 \ + 
--hash=sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a \ + --hash=sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1 \ + --hash=sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d \ + --hash=sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3 \ + --hash=sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54 \ + --hash=sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2 \ + --hash=sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6 \ + --hash=sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58 +mergedeep==1.3.4 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8 \ + --hash=sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307 +mkdocs-autorefs==0.4.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:70748a7bd025f9ecd6d6feeba8ba63f8e891a1af55f48e366d6d6e78493aba84 \ + --hash=sha256:a2248a9501b29dc0cc8ba4c09f4f47ff121945f6ce33d760f145d6f89d313f5b +mkdocs-material-extensions==1.1.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:9c003da71e2cc2493d910237448c672e00cefc800d3d6ae93d2fc69979e3bd93 \ + --hash=sha256:e41d9f38e4798b6617ad98ca8f7f1157b1e4385ac1459ca1e4ea219b556df945 +mkdocs-material==9.1.14 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:1ae74cc5464ef2f64574d4884512efed7f4db386fb9bc6af20fd427d7a702f49 \ + --hash=sha256:b56a9f955ed32d38333715cbbf68ce38f683bf38610c65094fa4ef2db9f08bcd +mkdocs==1.4.3 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:5955093bbd4dd2e9403c5afaf57324ad8b04f16886512a3ee6ef828956481c57 \ + --hash=sha256:6ee46d309bda331aac915cd24aab882c179a933bd9e77b80ce7d2eaaa3f689dd +mkdocstrings-python==1.0.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:b89d849df990204f909d5452548b6936a185f912da06208a93909bebe25d6e67 \ + --hash=sha256:c59d67009a7a85172f4da990d8523e95606b6a1ff93a22a2351ad3b5f8cafed1 +mkdocstrings==0.21.2 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:304e56a2e90595708a38a13a278e538a67ad82052dd5c8b71f77a604a4f3d911 \ + --hash=sha256:949ef8da92df9d692ca07be50616459a6b536083a25520fd54b00e8814ce019b +packaging==21.3 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ + --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 +pygments==2.15.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c \ + --hash=sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1 +pymdown-extensions==10.0.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:ae66d84013c5d027ce055693e09a4628b67e9dec5bce05727e45b0918e36f274 \ + --hash=sha256:b44e1093a43b8a975eae17b03c3a77aad4681b3b56fce60ce746dbef1944c8cb +pyparsing==3.0.9 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ + --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc +python-dateutil==2.8.2 ; python_full_version >= "3.7.2" and python_full_version < 
"4.0.0" \ + --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ + --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 +pyyaml-env-tag==0.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb \ + --hash=sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069 +pyyaml==6.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \ + --hash=sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293 \ + --hash=sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b \ + --hash=sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57 \ + --hash=sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b \ + --hash=sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4 \ + --hash=sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07 \ + --hash=sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba \ + --hash=sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9 \ + --hash=sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287 \ + --hash=sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513 \ + --hash=sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0 \ + --hash=sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782 \ + --hash=sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0 \ + --hash=sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92 \ + --hash=sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f \ + --hash=sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2 \ + --hash=sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc \ + --hash=sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1 \ + --hash=sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c \ + --hash=sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86 \ + --hash=sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4 \ + --hash=sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c \ + --hash=sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34 \ + --hash=sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b \ + --hash=sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d \ + --hash=sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c \ + --hash=sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb \ + --hash=sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7 \ + --hash=sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737 \ + --hash=sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3 \ + --hash=sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d \ + --hash=sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358 \ + --hash=sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53 \ + --hash=sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78 \ + --hash=sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803 \ + 
--hash=sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a \ + --hash=sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f \ + --hash=sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174 \ + --hash=sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5 +regex==2023.5.5 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:02f4541550459c08fdd6f97aa4e24c6f1932eec780d58a2faa2068253df7d6ff \ + --hash=sha256:0a69cf0c00c4d4a929c6c7717fd918414cab0d6132a49a6d8fc3ded1988ed2ea \ + --hash=sha256:0bbd5dcb19603ab8d2781fac60114fb89aee8494f4505ae7ad141a3314abb1f9 \ + --hash=sha256:10250a093741ec7bf74bcd2039e697f519b028518f605ff2aa7ac1e9c9f97423 \ + --hash=sha256:10374c84ee58c44575b667310d5bbfa89fb2e64e52349720a0182c0017512f6c \ + --hash=sha256:1189fbbb21e2c117fda5303653b61905aeeeea23de4a94d400b0487eb16d2d60 \ + --hash=sha256:1307aa4daa1cbb23823d8238e1f61292fd07e4e5d8d38a6efff00b67a7cdb764 \ + --hash=sha256:144b5b017646b5a9392a5554a1e5db0000ae637be4971c9747566775fc96e1b2 \ + --hash=sha256:171c52e320fe29260da550d81c6b99f6f8402450dc7777ef5ced2e848f3b6f8f \ + --hash=sha256:18196c16a584619c7c1d843497c069955d7629ad4a3fdee240eb347f4a2c9dbe \ + --hash=sha256:18f05d14f14a812fe9723f13afafefe6b74ca042d99f8884e62dbd34dcccf3e2 \ + --hash=sha256:1ecf3dcff71f0c0fe3e555201cbe749fa66aae8d18f80d2cc4de8e66df37390a \ + --hash=sha256:21e90a288e6ba4bf44c25c6a946cb9b0f00b73044d74308b5e0afd190338297c \ + --hash=sha256:23d86ad2121b3c4fc78c58f95e19173790e22ac05996df69b84e12da5816cb17 \ + --hash=sha256:256f7f4c6ba145f62f7a441a003c94b8b1af78cee2cccacfc1e835f93bc09426 \ + --hash=sha256:290fd35219486dfbc00b0de72f455ecdd63e59b528991a6aec9fdfc0ce85672e \ + --hash=sha256:2e9c4f778514a560a9c9aa8e5538bee759b55f6c1dcd35613ad72523fd9175b8 \ + --hash=sha256:338994d3d4ca4cf12f09822e025731a5bdd3a37aaa571fa52659e85ca793fb67 \ + --hash=sha256:33d430a23b661629661f1fe8395be2004006bc792bb9fc7c53911d661b69dd7e \ + --hash=sha256:385992d5ecf1a93cb85adff2f73e0402dd9ac29b71b7006d342cc920816e6f32 \ + --hash=sha256:3d45864693351c15531f7e76f545ec35000d50848daa833cead96edae1665559 \ + --hash=sha256:40005cbd383438aecf715a7b47fe1e3dcbc889a36461ed416bdec07e0ef1db66 \ + --hash=sha256:4035d6945cb961c90c3e1c1ca2feb526175bcfed44dfb1cc77db4fdced060d3e \ + --hash=sha256:445d6f4fc3bd9fc2bf0416164454f90acab8858cd5a041403d7a11e3356980e8 \ + --hash=sha256:48c9ec56579d4ba1c88f42302194b8ae2350265cb60c64b7b9a88dcb7fbde309 \ + --hash=sha256:4a5059bd585e9e9504ef9c07e4bc15b0a621ba20504388875d66b8b30a5c4d18 \ + --hash=sha256:4a6e4b0e0531223f53bad07ddf733af490ba2b8367f62342b92b39b29f72735a \ + --hash=sha256:4b870b6f632fc74941cadc2a0f3064ed8409e6f8ee226cdfd2a85ae50473aa94 \ + --hash=sha256:50fd2d9b36938d4dcecbd684777dd12a407add4f9f934f235c66372e630772b0 \ + --hash=sha256:53e22e4460f0245b468ee645156a4f84d0fc35a12d9ba79bd7d79bdcd2f9629d \ + --hash=sha256:586a011f77f8a2da4b888774174cd266e69e917a67ba072c7fc0e91878178a80 \ + --hash=sha256:59597cd6315d3439ed4b074febe84a439c33928dd34396941b4d377692eca810 \ + --hash=sha256:59e4b729eae1a0919f9e4c0fc635fbcc9db59c74ad98d684f4877be3d2607dd6 \ + --hash=sha256:5a0f874ee8c0bc820e649c900243c6d1e6dc435b81da1492046716f14f1a2a96 \ + --hash=sha256:5ac2b7d341dc1bd102be849d6dd33b09701223a851105b2754339e390be0627a \ + --hash=sha256:5e3f4468b8c6fd2fd33c218bbd0a1559e6a6fcf185af8bb0cc43f3b5bfb7d636 \ + --hash=sha256:6164d4e2a82f9ebd7752a06bd6c504791bedc6418c0196cd0a23afb7f3e12b2d \ + 
--hash=sha256:6893544e06bae009916a5658ce7207e26ed17385149f35a3125f5259951f1bbe \ + --hash=sha256:690a17db524ee6ac4a27efc5406530dd90e7a7a69d8360235323d0e5dafb8f5b \ + --hash=sha256:6b8d0c153f07a953636b9cdb3011b733cadd4178123ef728ccc4d5969e67f3c2 \ + --hash=sha256:72a28979cc667e5f82ef433db009184e7ac277844eea0f7f4d254b789517941d \ + --hash=sha256:72aa4746993a28c841e05889f3f1b1e5d14df8d3daa157d6001a34c98102b393 \ + --hash=sha256:732176f5427e72fa2325b05c58ad0b45af341c459910d766f814b0584ac1f9ac \ + --hash=sha256:7918a1b83dd70dc04ab5ed24c78ae833ae8ea228cef84e08597c408286edc926 \ + --hash=sha256:7923470d6056a9590247ff729c05e8e0f06bbd4efa6569c916943cb2d9b68b91 \ + --hash=sha256:7d76a8a1fc9da08296462a18f16620ba73bcbf5909e42383b253ef34d9d5141e \ + --hash=sha256:811040d7f3dd9c55eb0d8b00b5dcb7fd9ae1761c454f444fd9f37fe5ec57143a \ + --hash=sha256:821a88b878b6589c5068f4cc2cfeb2c64e343a196bc9d7ac68ea8c2a776acd46 \ + --hash=sha256:84397d3f750d153ebd7f958efaa92b45fea170200e2df5e0e1fd4d85b7e3f58a \ + --hash=sha256:844671c9c1150fcdac46d43198364034b961bd520f2c4fdaabfc7c7d7138a2dd \ + --hash=sha256:890a09cb0a62198bff92eda98b2b507305dd3abf974778bae3287f98b48907d3 \ + --hash=sha256:8f08276466fedb9e36e5193a96cb944928301152879ec20c2d723d1031cd4ddd \ + --hash=sha256:8f5e06df94fff8c4c85f98c6487f6636848e1dc85ce17ab7d1931df4a081f657 \ + --hash=sha256:921473a93bcea4d00295799ab929522fc650e85c6b9f27ae1e6bb32a790ea7d3 \ + --hash=sha256:941b3f1b2392f0bcd6abf1bc7a322787d6db4e7457be6d1ffd3a693426a755f2 \ + --hash=sha256:9b320677521aabf666cdd6e99baee4fb5ac3996349c3b7f8e7c4eee1c00dfe3a \ + --hash=sha256:9c3efee9bb53cbe7b285760c81f28ac80dc15fa48b5fe7e58b52752e642553f1 \ + --hash=sha256:9fda3e50abad8d0f48df621cf75adc73c63f7243cbe0e3b2171392b445401550 \ + --hash=sha256:a4c5da39bca4f7979eefcbb36efea04471cd68db2d38fcbb4ee2c6d440699833 \ + --hash=sha256:a56c18f21ac98209da9c54ae3ebb3b6f6e772038681d6cb43b8d53da3b09ee81 \ + --hash=sha256:a623564d810e7a953ff1357f7799c14bc9beeab699aacc8b7ab7822da1e952b8 \ + --hash=sha256:a8906669b03c63266b6a7693d1f487b02647beb12adea20f8840c1a087e2dfb5 \ + --hash=sha256:a99757ad7fe5c8a2bb44829fc57ced11253e10f462233c1255fe03888e06bc19 \ + --hash=sha256:aa7d032c1d84726aa9edeb6accf079b4caa87151ca9fabacef31fa028186c66d \ + --hash=sha256:aad5524c2aedaf9aa14ef1bc9327f8abd915699dea457d339bebbe2f0d218f86 \ + --hash=sha256:afb1c70ec1e594a547f38ad6bf5e3d60304ce7539e677c1429eebab115bce56e \ + --hash=sha256:b6365703e8cf1644b82104cdd05270d1a9f043119a168d66c55684b1b557d008 \ + --hash=sha256:b8b942d8b3ce765dbc3b1dad0a944712a89b5de290ce8f72681e22b3c55f3cc8 \ + --hash=sha256:ba73a14e9c8f9ac409863543cde3290dba39098fc261f717dc337ea72d3ebad2 \ + --hash=sha256:bd7b68fd2e79d59d86dcbc1ccd6e2ca09c505343445daaa4e07f43c8a9cc34da \ + --hash=sha256:bd966475e963122ee0a7118ec9024388c602d12ac72860f6eea119a3928be053 \ + --hash=sha256:c2ce65bdeaf0a386bb3b533a28de3994e8e13b464ac15e1e67e4603dd88787fa \ + --hash=sha256:c64d5abe91a3dfe5ff250c6bb267ef00dbc01501518225b45a5f9def458f31fb \ + --hash=sha256:c8c143a65ce3ca42e54d8e6fcaf465b6b672ed1c6c90022794a802fb93105d22 \ + --hash=sha256:cd46f30e758629c3ee91713529cfbe107ac50d27110fdcc326a42ce2acf4dafc \ + --hash=sha256:ced02e3bd55e16e89c08bbc8128cff0884d96e7f7a5633d3dc366b6d95fcd1d6 \ + --hash=sha256:cf123225945aa58b3057d0fba67e8061c62d14cc8a4202630f8057df70189051 \ + --hash=sha256:d19e57f888b00cd04fc38f5e18d0efbd91ccba2d45039453ab2236e6eec48d4d \ + --hash=sha256:d1cbe6b5be3b9b698d8cc4ee4dee7e017ad655e83361cd0ea8e653d65e469468 \ + 
--hash=sha256:db09e6c18977a33fea26fe67b7a842f706c67cf8bda1450974d0ae0dd63570df \ + --hash=sha256:de2f780c3242ea114dd01f84848655356af4dd561501896c751d7b885ea6d3a1 \ + --hash=sha256:e2205a81f815b5bb17e46e74cc946c575b484e5f0acfcb805fb252d67e22938d \ + --hash=sha256:e645c757183ee0e13f0bbe56508598e2d9cd42b8abc6c0599d53b0d0b8dd1479 \ + --hash=sha256:f2910502f718828cecc8beff004917dcf577fc5f8f5dd40ffb1ea7612124547b \ + --hash=sha256:f764e4dfafa288e2eba21231f455d209f4709436baeebb05bdecfb5d8ddc3d35 \ + --hash=sha256:f83fe9e10f9d0b6cf580564d4d23845b9d692e4c91bd8be57733958e4c602956 \ + --hash=sha256:fb2b495dd94b02de8215625948132cc2ea360ae84fe6634cd19b6567709c8ae2 \ + --hash=sha256:fee0016cc35a8a91e8cc9312ab26a6fe638d484131a7afa79e1ce6165328a135 +requests==2.31.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ + --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 +six==1.16.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 +typing-extensions==4.4.0 ; python_full_version >= "3.7.2" and python_version < "3.10" \ + --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ + --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e +urllib3==1.26.13 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:47cc05d99aaa09c9e72ed5809b60e7ba354e64b59c9c173ac3018642d8bb41fc \ + --hash=sha256:c083dd0dce68dbfbe1129d5271cb90f9447dea7d52097c6e0126120c521ddea8 +watchdog==3.0.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ + --hash=sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a \ + --hash=sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100 \ + --hash=sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8 \ + --hash=sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc \ + --hash=sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae \ + --hash=sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41 \ + --hash=sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0 \ + --hash=sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f \ + --hash=sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c \ + --hash=sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9 \ + --hash=sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3 \ + --hash=sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709 \ + --hash=sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83 \ + --hash=sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759 \ + --hash=sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9 \ + --hash=sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3 \ + --hash=sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7 \ + --hash=sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f \ + --hash=sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346 \ + --hash=sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674 \ + 
--hash=sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397 \ + --hash=sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96 \ + --hash=sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d \ + --hash=sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a \ + --hash=sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64 \ + --hash=sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44 \ + --hash=sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33 +zipp==3.11.0 ; python_full_version >= "3.7.2" and python_version < "3.10" \ + --hash=sha256:83a28fcb75844b5c0cdaf5aa4003c2d728c77e05f5aeabe8e95e56727005fbaa \ + --hash=sha256:a7a22e05929290a67401440b39690ae6563279bced5f314609d9d03798f56766 diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 00000000..89a345e5 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,17 @@ +site_name: AWS RDK Documentation +theme: + name: material + palette: + scheme: default + primary: orange +plugins: + - search + # TODO: Enable this if/when docstrings are expanded in the core rdk module. + # - mkdocstrings: + # handlers: + # python: + # paths: [rdk] +markdown_extensions: + - markdown_include.include: + base_path: . +docs_dir: docs diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 00000000..f24e6c0c --- /dev/null +++ b/poetry.lock @@ -0,0 +1,1903 @@ +# This file is automatically @generated by Poetry and should not be changed by hand. + +[[package]] +name = "bandit" +version = "1.7.5" +description = "Security oriented static analyser for python code." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "bandit-1.7.5-py3-none-any.whl", hash = "sha256:75665181dc1e0096369112541a056c59d1c5f66f9bb74a8d686c3c362b83f549"}, + {file = "bandit-1.7.5.tar.gz", hash = "sha256:bdfc739baa03b880c2d15d0431b31c658ffc348e907fe197e54e0389dd59e11e"}, +] + +[package.dependencies] +colorama = {version = ">=0.3.9", markers = "platform_system == \"Windows\""} +GitPython = ">=1.0.1" +PyYAML = ">=5.3.1" +rich = "*" +stevedore = ">=1.20.0" + +[package.extras] +test = ["beautifulsoup4 (>=4.8.0)", "coverage (>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)", "tomli (>=1.1.0)"] +toml = ["tomli (>=1.1.0)"] +yaml = ["PyYAML"] + +[[package]] +name = "black" +version = "22.12.0" +description = "The uncompromising code formatter." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, + {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, + {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, + {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, + {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, + {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, + {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, + {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, + {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, + {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, + {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, + {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} +typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} +typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "boto3" +version = "1.26.139" +description = "The AWS SDK for Python" +category = "main" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "boto3-1.26.139-py3-none-any.whl", hash = "sha256:5b61a82f0c1cd006bd109ddf27c93d9b010c4c188fc583ee257ff6f3bb89970d"}, + {file = "boto3-1.26.139.tar.gz", hash = "sha256:fe19d287bc8ede385e1b9136f135ee8f93eab81404ad1445b1a70cabfe3f7087"}, +] + +[package.dependencies] +botocore = ">=1.29.139,<1.30.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.6.0,<0.7.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "boto3-stubs" +version = "1.26.139" +description = "Type annotations for boto3 1.26.139 generated with mypy-boto3-builder 7.14.5" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "boto3-stubs-1.26.139.tar.gz", hash = "sha256:e1eb346e607864182b189a2a352cdd11db56b8157cd8cf445e0548dac5b10147"}, + {file = "boto3_stubs-1.26.139-py3-none-any.whl", hash = "sha256:bf1f40b9f8c490c8c89dadc47b8ebedaaa339e8c35a8e690dfb11c0f34b43836"}, +] + +[package.dependencies] +botocore-stubs = "*" 
+mypy-boto3-cloudformation = {version = ">=1.26.0,<1.27.0", optional = true, markers = "extra == \"cloudformation\""} +mypy-boto3-config = {version = ">=1.26.0,<1.27.0", optional = true, markers = "extra == \"config\""} +mypy-boto3-iam = {version = ">=1.26.0,<1.27.0", optional = true, markers = "extra == \"iam\""} +mypy-boto3-s3 = {version = ">=1.26.0,<1.27.0", optional = true, markers = "extra == \"s3\""} +mypy-boto3-sts = {version = ">=1.26.0,<1.27.0", optional = true, markers = "extra == \"sts\""} +types-s3transfer = "*" +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} + +[package.extras] +accessanalyzer = ["mypy-boto3-accessanalyzer (>=1.26.0,<1.27.0)"] +account = ["mypy-boto3-account (>=1.26.0,<1.27.0)"] +acm = ["mypy-boto3-acm (>=1.26.0,<1.27.0)"] +acm-pca = ["mypy-boto3-acm-pca (>=1.26.0,<1.27.0)"] +alexaforbusiness = ["mypy-boto3-alexaforbusiness (>=1.26.0,<1.27.0)"] +all = ["mypy-boto3-accessanalyzer (>=1.26.0,<1.27.0)", "mypy-boto3-account (>=1.26.0,<1.27.0)", "mypy-boto3-acm (>=1.26.0,<1.27.0)", "mypy-boto3-acm-pca (>=1.26.0,<1.27.0)", "mypy-boto3-alexaforbusiness (>=1.26.0,<1.27.0)", "mypy-boto3-amp (>=1.26.0,<1.27.0)", "mypy-boto3-amplify (>=1.26.0,<1.27.0)", "mypy-boto3-amplifybackend (>=1.26.0,<1.27.0)", "mypy-boto3-amplifyuibuilder (>=1.26.0,<1.27.0)", "mypy-boto3-apigateway (>=1.26.0,<1.27.0)", "mypy-boto3-apigatewaymanagementapi (>=1.26.0,<1.27.0)", "mypy-boto3-apigatewayv2 (>=1.26.0,<1.27.0)", "mypy-boto3-appconfig (>=1.26.0,<1.27.0)", "mypy-boto3-appconfigdata (>=1.26.0,<1.27.0)", "mypy-boto3-appflow (>=1.26.0,<1.27.0)", "mypy-boto3-appintegrations (>=1.26.0,<1.27.0)", "mypy-boto3-application-autoscaling (>=1.26.0,<1.27.0)", "mypy-boto3-application-insights (>=1.26.0,<1.27.0)", "mypy-boto3-applicationcostprofiler (>=1.26.0,<1.27.0)", "mypy-boto3-appmesh (>=1.26.0,<1.27.0)", "mypy-boto3-apprunner (>=1.26.0,<1.27.0)", "mypy-boto3-appstream (>=1.26.0,<1.27.0)", "mypy-boto3-appsync (>=1.26.0,<1.27.0)", "mypy-boto3-arc-zonal-shift (>=1.26.0,<1.27.0)", "mypy-boto3-athena (>=1.26.0,<1.27.0)", "mypy-boto3-auditmanager (>=1.26.0,<1.27.0)", "mypy-boto3-autoscaling (>=1.26.0,<1.27.0)", "mypy-boto3-autoscaling-plans (>=1.26.0,<1.27.0)", "mypy-boto3-backup (>=1.26.0,<1.27.0)", "mypy-boto3-backup-gateway (>=1.26.0,<1.27.0)", "mypy-boto3-backupstorage (>=1.26.0,<1.27.0)", "mypy-boto3-batch (>=1.26.0,<1.27.0)", "mypy-boto3-billingconductor (>=1.26.0,<1.27.0)", "mypy-boto3-braket (>=1.26.0,<1.27.0)", "mypy-boto3-budgets (>=1.26.0,<1.27.0)", "mypy-boto3-ce (>=1.26.0,<1.27.0)", "mypy-boto3-chime (>=1.26.0,<1.27.0)", "mypy-boto3-chime-sdk-identity (>=1.26.0,<1.27.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.26.0,<1.27.0)", "mypy-boto3-chime-sdk-meetings (>=1.26.0,<1.27.0)", "mypy-boto3-chime-sdk-messaging (>=1.26.0,<1.27.0)", "mypy-boto3-chime-sdk-voice (>=1.26.0,<1.27.0)", "mypy-boto3-cleanrooms (>=1.26.0,<1.27.0)", "mypy-boto3-cloud9 (>=1.26.0,<1.27.0)", "mypy-boto3-cloudcontrol (>=1.26.0,<1.27.0)", "mypy-boto3-clouddirectory (>=1.26.0,<1.27.0)", "mypy-boto3-cloudformation (>=1.26.0,<1.27.0)", "mypy-boto3-cloudfront (>=1.26.0,<1.27.0)", "mypy-boto3-cloudhsm (>=1.26.0,<1.27.0)", "mypy-boto3-cloudhsmv2 (>=1.26.0,<1.27.0)", "mypy-boto3-cloudsearch (>=1.26.0,<1.27.0)", "mypy-boto3-cloudsearchdomain (>=1.26.0,<1.27.0)", "mypy-boto3-cloudtrail (>=1.26.0,<1.27.0)", "mypy-boto3-cloudtrail-data (>=1.26.0,<1.27.0)", "mypy-boto3-cloudwatch (>=1.26.0,<1.27.0)", "mypy-boto3-codeartifact (>=1.26.0,<1.27.0)", "mypy-boto3-codebuild (>=1.26.0,<1.27.0)", 
"mypy-boto3-codecatalyst (>=1.26.0,<1.27.0)", "mypy-boto3-codecommit (>=1.26.0,<1.27.0)", "mypy-boto3-codedeploy (>=1.26.0,<1.27.0)", "mypy-boto3-codeguru-reviewer (>=1.26.0,<1.27.0)", "mypy-boto3-codeguruprofiler (>=1.26.0,<1.27.0)", "mypy-boto3-codepipeline (>=1.26.0,<1.27.0)", "mypy-boto3-codestar (>=1.26.0,<1.27.0)", "mypy-boto3-codestar-connections (>=1.26.0,<1.27.0)", "mypy-boto3-codestar-notifications (>=1.26.0,<1.27.0)", "mypy-boto3-cognito-identity (>=1.26.0,<1.27.0)", "mypy-boto3-cognito-idp (>=1.26.0,<1.27.0)", "mypy-boto3-cognito-sync (>=1.26.0,<1.27.0)", "mypy-boto3-comprehend (>=1.26.0,<1.27.0)", "mypy-boto3-comprehendmedical (>=1.26.0,<1.27.0)", "mypy-boto3-compute-optimizer (>=1.26.0,<1.27.0)", "mypy-boto3-config (>=1.26.0,<1.27.0)", "mypy-boto3-connect (>=1.26.0,<1.27.0)", "mypy-boto3-connect-contact-lens (>=1.26.0,<1.27.0)", "mypy-boto3-connectcampaigns (>=1.26.0,<1.27.0)", "mypy-boto3-connectcases (>=1.26.0,<1.27.0)", "mypy-boto3-connectparticipant (>=1.26.0,<1.27.0)", "mypy-boto3-controltower (>=1.26.0,<1.27.0)", "mypy-boto3-cur (>=1.26.0,<1.27.0)", "mypy-boto3-customer-profiles (>=1.26.0,<1.27.0)", "mypy-boto3-databrew (>=1.26.0,<1.27.0)", "mypy-boto3-dataexchange (>=1.26.0,<1.27.0)", "mypy-boto3-datapipeline (>=1.26.0,<1.27.0)", "mypy-boto3-datasync (>=1.26.0,<1.27.0)", "mypy-boto3-dax (>=1.26.0,<1.27.0)", "mypy-boto3-detective (>=1.26.0,<1.27.0)", "mypy-boto3-devicefarm (>=1.26.0,<1.27.0)", "mypy-boto3-devops-guru (>=1.26.0,<1.27.0)", "mypy-boto3-directconnect (>=1.26.0,<1.27.0)", "mypy-boto3-discovery (>=1.26.0,<1.27.0)", "mypy-boto3-dlm (>=1.26.0,<1.27.0)", "mypy-boto3-dms (>=1.26.0,<1.27.0)", "mypy-boto3-docdb (>=1.26.0,<1.27.0)", "mypy-boto3-docdb-elastic (>=1.26.0,<1.27.0)", "mypy-boto3-drs (>=1.26.0,<1.27.0)", "mypy-boto3-ds (>=1.26.0,<1.27.0)", "mypy-boto3-dynamodb (>=1.26.0,<1.27.0)", "mypy-boto3-dynamodbstreams (>=1.26.0,<1.27.0)", "mypy-boto3-ebs (>=1.26.0,<1.27.0)", "mypy-boto3-ec2 (>=1.26.0,<1.27.0)", "mypy-boto3-ec2-instance-connect (>=1.26.0,<1.27.0)", "mypy-boto3-ecr (>=1.26.0,<1.27.0)", "mypy-boto3-ecr-public (>=1.26.0,<1.27.0)", "mypy-boto3-ecs (>=1.26.0,<1.27.0)", "mypy-boto3-efs (>=1.26.0,<1.27.0)", "mypy-boto3-eks (>=1.26.0,<1.27.0)", "mypy-boto3-elastic-inference (>=1.26.0,<1.27.0)", "mypy-boto3-elasticache (>=1.26.0,<1.27.0)", "mypy-boto3-elasticbeanstalk (>=1.26.0,<1.27.0)", "mypy-boto3-elastictranscoder (>=1.26.0,<1.27.0)", "mypy-boto3-elb (>=1.26.0,<1.27.0)", "mypy-boto3-elbv2 (>=1.26.0,<1.27.0)", "mypy-boto3-emr (>=1.26.0,<1.27.0)", "mypy-boto3-emr-containers (>=1.26.0,<1.27.0)", "mypy-boto3-emr-serverless (>=1.26.0,<1.27.0)", "mypy-boto3-es (>=1.26.0,<1.27.0)", "mypy-boto3-events (>=1.26.0,<1.27.0)", "mypy-boto3-evidently (>=1.26.0,<1.27.0)", "mypy-boto3-finspace (>=1.26.0,<1.27.0)", "mypy-boto3-finspace-data (>=1.26.0,<1.27.0)", "mypy-boto3-firehose (>=1.26.0,<1.27.0)", "mypy-boto3-fis (>=1.26.0,<1.27.0)", "mypy-boto3-fms (>=1.26.0,<1.27.0)", "mypy-boto3-forecast (>=1.26.0,<1.27.0)", "mypy-boto3-forecastquery (>=1.26.0,<1.27.0)", "mypy-boto3-frauddetector (>=1.26.0,<1.27.0)", "mypy-boto3-fsx (>=1.26.0,<1.27.0)", "mypy-boto3-gamelift (>=1.26.0,<1.27.0)", "mypy-boto3-gamesparks (>=1.26.0,<1.27.0)", "mypy-boto3-glacier (>=1.26.0,<1.27.0)", "mypy-boto3-globalaccelerator (>=1.26.0,<1.27.0)", "mypy-boto3-glue (>=1.26.0,<1.27.0)", "mypy-boto3-grafana (>=1.26.0,<1.27.0)", "mypy-boto3-greengrass (>=1.26.0,<1.27.0)", "mypy-boto3-greengrassv2 (>=1.26.0,<1.27.0)", "mypy-boto3-groundstation (>=1.26.0,<1.27.0)", "mypy-boto3-guardduty 
(>=1.26.0,<1.27.0)", "mypy-boto3-health (>=1.26.0,<1.27.0)", "mypy-boto3-healthlake (>=1.26.0,<1.27.0)", "mypy-boto3-honeycode (>=1.26.0,<1.27.0)", "mypy-boto3-iam (>=1.26.0,<1.27.0)", "mypy-boto3-identitystore (>=1.26.0,<1.27.0)", "mypy-boto3-imagebuilder (>=1.26.0,<1.27.0)", "mypy-boto3-importexport (>=1.26.0,<1.27.0)", "mypy-boto3-inspector (>=1.26.0,<1.27.0)", "mypy-boto3-inspector2 (>=1.26.0,<1.27.0)", "mypy-boto3-internetmonitor (>=1.26.0,<1.27.0)", "mypy-boto3-iot (>=1.26.0,<1.27.0)", "mypy-boto3-iot-data (>=1.26.0,<1.27.0)", "mypy-boto3-iot-jobs-data (>=1.26.0,<1.27.0)", "mypy-boto3-iot-roborunner (>=1.26.0,<1.27.0)", "mypy-boto3-iot1click-devices (>=1.26.0,<1.27.0)", "mypy-boto3-iot1click-projects (>=1.26.0,<1.27.0)", "mypy-boto3-iotanalytics (>=1.26.0,<1.27.0)", "mypy-boto3-iotdeviceadvisor (>=1.26.0,<1.27.0)", "mypy-boto3-iotevents (>=1.26.0,<1.27.0)", "mypy-boto3-iotevents-data (>=1.26.0,<1.27.0)", "mypy-boto3-iotfleethub (>=1.26.0,<1.27.0)", "mypy-boto3-iotfleetwise (>=1.26.0,<1.27.0)", "mypy-boto3-iotsecuretunneling (>=1.26.0,<1.27.0)", "mypy-boto3-iotsitewise (>=1.26.0,<1.27.0)", "mypy-boto3-iotthingsgraph (>=1.26.0,<1.27.0)", "mypy-boto3-iottwinmaker (>=1.26.0,<1.27.0)", "mypy-boto3-iotwireless (>=1.26.0,<1.27.0)", "mypy-boto3-ivs (>=1.26.0,<1.27.0)", "mypy-boto3-ivs-realtime (>=1.26.0,<1.27.0)", "mypy-boto3-ivschat (>=1.26.0,<1.27.0)", "mypy-boto3-kafka (>=1.26.0,<1.27.0)", "mypy-boto3-kafkaconnect (>=1.26.0,<1.27.0)", "mypy-boto3-kendra (>=1.26.0,<1.27.0)", "mypy-boto3-kendra-ranking (>=1.26.0,<1.27.0)", "mypy-boto3-keyspaces (>=1.26.0,<1.27.0)", "mypy-boto3-kinesis (>=1.26.0,<1.27.0)", "mypy-boto3-kinesis-video-archived-media (>=1.26.0,<1.27.0)", "mypy-boto3-kinesis-video-media (>=1.26.0,<1.27.0)", "mypy-boto3-kinesis-video-signaling (>=1.26.0,<1.27.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.26.0,<1.27.0)", "mypy-boto3-kinesisanalytics (>=1.26.0,<1.27.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.26.0,<1.27.0)", "mypy-boto3-kinesisvideo (>=1.26.0,<1.27.0)", "mypy-boto3-kms (>=1.26.0,<1.27.0)", "mypy-boto3-lakeformation (>=1.26.0,<1.27.0)", "mypy-boto3-lambda (>=1.26.0,<1.27.0)", "mypy-boto3-lex-models (>=1.26.0,<1.27.0)", "mypy-boto3-lex-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-lexv2-models (>=1.26.0,<1.27.0)", "mypy-boto3-lexv2-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-license-manager (>=1.26.0,<1.27.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.26.0,<1.27.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.26.0,<1.27.0)", "mypy-boto3-lightsail (>=1.26.0,<1.27.0)", "mypy-boto3-location (>=1.26.0,<1.27.0)", "mypy-boto3-logs (>=1.26.0,<1.27.0)", "mypy-boto3-lookoutequipment (>=1.26.0,<1.27.0)", "mypy-boto3-lookoutmetrics (>=1.26.0,<1.27.0)", "mypy-boto3-lookoutvision (>=1.26.0,<1.27.0)", "mypy-boto3-m2 (>=1.26.0,<1.27.0)", "mypy-boto3-machinelearning (>=1.26.0,<1.27.0)", "mypy-boto3-macie (>=1.26.0,<1.27.0)", "mypy-boto3-macie2 (>=1.26.0,<1.27.0)", "mypy-boto3-managedblockchain (>=1.26.0,<1.27.0)", "mypy-boto3-marketplace-catalog (>=1.26.0,<1.27.0)", "mypy-boto3-marketplace-entitlement (>=1.26.0,<1.27.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.26.0,<1.27.0)", "mypy-boto3-mediaconnect (>=1.26.0,<1.27.0)", "mypy-boto3-mediaconvert (>=1.26.0,<1.27.0)", "mypy-boto3-medialive (>=1.26.0,<1.27.0)", "mypy-boto3-mediapackage (>=1.26.0,<1.27.0)", "mypy-boto3-mediapackage-vod (>=1.26.0,<1.27.0)", "mypy-boto3-mediapackagev2 (>=1.26.0,<1.27.0)", "mypy-boto3-mediastore (>=1.26.0,<1.27.0)", "mypy-boto3-mediastore-data (>=1.26.0,<1.27.0)", 
"mypy-boto3-mediatailor (>=1.26.0,<1.27.0)", "mypy-boto3-memorydb (>=1.26.0,<1.27.0)", "mypy-boto3-meteringmarketplace (>=1.26.0,<1.27.0)", "mypy-boto3-mgh (>=1.26.0,<1.27.0)", "mypy-boto3-mgn (>=1.26.0,<1.27.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.26.0,<1.27.0)", "mypy-boto3-migrationhub-config (>=1.26.0,<1.27.0)", "mypy-boto3-migrationhuborchestrator (>=1.26.0,<1.27.0)", "mypy-boto3-migrationhubstrategy (>=1.26.0,<1.27.0)", "mypy-boto3-mobile (>=1.26.0,<1.27.0)", "mypy-boto3-mq (>=1.26.0,<1.27.0)", "mypy-boto3-mturk (>=1.26.0,<1.27.0)", "mypy-boto3-mwaa (>=1.26.0,<1.27.0)", "mypy-boto3-neptune (>=1.26.0,<1.27.0)", "mypy-boto3-network-firewall (>=1.26.0,<1.27.0)", "mypy-boto3-networkmanager (>=1.26.0,<1.27.0)", "mypy-boto3-nimble (>=1.26.0,<1.27.0)", "mypy-boto3-oam (>=1.26.0,<1.27.0)", "mypy-boto3-omics (>=1.26.0,<1.27.0)", "mypy-boto3-opensearch (>=1.26.0,<1.27.0)", "mypy-boto3-opensearchserverless (>=1.26.0,<1.27.0)", "mypy-boto3-opsworks (>=1.26.0,<1.27.0)", "mypy-boto3-opsworkscm (>=1.26.0,<1.27.0)", "mypy-boto3-organizations (>=1.26.0,<1.27.0)", "mypy-boto3-osis (>=1.26.0,<1.27.0)", "mypy-boto3-outposts (>=1.26.0,<1.27.0)", "mypy-boto3-panorama (>=1.26.0,<1.27.0)", "mypy-boto3-personalize (>=1.26.0,<1.27.0)", "mypy-boto3-personalize-events (>=1.26.0,<1.27.0)", "mypy-boto3-personalize-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-pi (>=1.26.0,<1.27.0)", "mypy-boto3-pinpoint (>=1.26.0,<1.27.0)", "mypy-boto3-pinpoint-email (>=1.26.0,<1.27.0)", "mypy-boto3-pinpoint-sms-voice (>=1.26.0,<1.27.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.26.0,<1.27.0)", "mypy-boto3-pipes (>=1.26.0,<1.27.0)", "mypy-boto3-polly (>=1.26.0,<1.27.0)", "mypy-boto3-pricing (>=1.26.0,<1.27.0)", "mypy-boto3-privatenetworks (>=1.26.0,<1.27.0)", "mypy-boto3-proton (>=1.26.0,<1.27.0)", "mypy-boto3-qldb (>=1.26.0,<1.27.0)", "mypy-boto3-qldb-session (>=1.26.0,<1.27.0)", "mypy-boto3-quicksight (>=1.26.0,<1.27.0)", "mypy-boto3-ram (>=1.26.0,<1.27.0)", "mypy-boto3-rbin (>=1.26.0,<1.27.0)", "mypy-boto3-rds (>=1.26.0,<1.27.0)", "mypy-boto3-rds-data (>=1.26.0,<1.27.0)", "mypy-boto3-redshift (>=1.26.0,<1.27.0)", "mypy-boto3-redshift-data (>=1.26.0,<1.27.0)", "mypy-boto3-redshift-serverless (>=1.26.0,<1.27.0)", "mypy-boto3-rekognition (>=1.26.0,<1.27.0)", "mypy-boto3-resiliencehub (>=1.26.0,<1.27.0)", "mypy-boto3-resource-explorer-2 (>=1.26.0,<1.27.0)", "mypy-boto3-resource-groups (>=1.26.0,<1.27.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.26.0,<1.27.0)", "mypy-boto3-robomaker (>=1.26.0,<1.27.0)", "mypy-boto3-rolesanywhere (>=1.26.0,<1.27.0)", "mypy-boto3-route53 (>=1.26.0,<1.27.0)", "mypy-boto3-route53-recovery-cluster (>=1.26.0,<1.27.0)", "mypy-boto3-route53-recovery-control-config (>=1.26.0,<1.27.0)", "mypy-boto3-route53-recovery-readiness (>=1.26.0,<1.27.0)", "mypy-boto3-route53domains (>=1.26.0,<1.27.0)", "mypy-boto3-route53resolver (>=1.26.0,<1.27.0)", "mypy-boto3-rum (>=1.26.0,<1.27.0)", "mypy-boto3-s3 (>=1.26.0,<1.27.0)", "mypy-boto3-s3control (>=1.26.0,<1.27.0)", "mypy-boto3-s3outposts (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker-edge (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker-geospatial (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker-metrics (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-savingsplans (>=1.26.0,<1.27.0)", "mypy-boto3-scheduler (>=1.26.0,<1.27.0)", "mypy-boto3-schemas (>=1.26.0,<1.27.0)", "mypy-boto3-sdb 
(>=1.26.0,<1.27.0)", "mypy-boto3-secretsmanager (>=1.26.0,<1.27.0)", "mypy-boto3-securityhub (>=1.26.0,<1.27.0)", "mypy-boto3-securitylake (>=1.26.0,<1.27.0)", "mypy-boto3-serverlessrepo (>=1.26.0,<1.27.0)", "mypy-boto3-service-quotas (>=1.26.0,<1.27.0)", "mypy-boto3-servicecatalog (>=1.26.0,<1.27.0)", "mypy-boto3-servicecatalog-appregistry (>=1.26.0,<1.27.0)", "mypy-boto3-servicediscovery (>=1.26.0,<1.27.0)", "mypy-boto3-ses (>=1.26.0,<1.27.0)", "mypy-boto3-sesv2 (>=1.26.0,<1.27.0)", "mypy-boto3-shield (>=1.26.0,<1.27.0)", "mypy-boto3-signer (>=1.26.0,<1.27.0)", "mypy-boto3-simspaceweaver (>=1.26.0,<1.27.0)", "mypy-boto3-sms (>=1.26.0,<1.27.0)", "mypy-boto3-sms-voice (>=1.26.0,<1.27.0)", "mypy-boto3-snow-device-management (>=1.26.0,<1.27.0)", "mypy-boto3-snowball (>=1.26.0,<1.27.0)", "mypy-boto3-sns (>=1.26.0,<1.27.0)", "mypy-boto3-sqs (>=1.26.0,<1.27.0)", "mypy-boto3-ssm (>=1.26.0,<1.27.0)", "mypy-boto3-ssm-contacts (>=1.26.0,<1.27.0)", "mypy-boto3-ssm-incidents (>=1.26.0,<1.27.0)", "mypy-boto3-ssm-sap (>=1.26.0,<1.27.0)", "mypy-boto3-sso (>=1.26.0,<1.27.0)", "mypy-boto3-sso-admin (>=1.26.0,<1.27.0)", "mypy-boto3-sso-oidc (>=1.26.0,<1.27.0)", "mypy-boto3-stepfunctions (>=1.26.0,<1.27.0)", "mypy-boto3-storagegateway (>=1.26.0,<1.27.0)", "mypy-boto3-sts (>=1.26.0,<1.27.0)", "mypy-boto3-support (>=1.26.0,<1.27.0)", "mypy-boto3-support-app (>=1.26.0,<1.27.0)", "mypy-boto3-swf (>=1.26.0,<1.27.0)", "mypy-boto3-synthetics (>=1.26.0,<1.27.0)", "mypy-boto3-textract (>=1.26.0,<1.27.0)", "mypy-boto3-timestream-query (>=1.26.0,<1.27.0)", "mypy-boto3-timestream-write (>=1.26.0,<1.27.0)", "mypy-boto3-tnb (>=1.26.0,<1.27.0)", "mypy-boto3-transcribe (>=1.26.0,<1.27.0)", "mypy-boto3-transfer (>=1.26.0,<1.27.0)", "mypy-boto3-translate (>=1.26.0,<1.27.0)", "mypy-boto3-voice-id (>=1.26.0,<1.27.0)", "mypy-boto3-vpc-lattice (>=1.26.0,<1.27.0)", "mypy-boto3-waf (>=1.26.0,<1.27.0)", "mypy-boto3-waf-regional (>=1.26.0,<1.27.0)", "mypy-boto3-wafv2 (>=1.26.0,<1.27.0)", "mypy-boto3-wellarchitected (>=1.26.0,<1.27.0)", "mypy-boto3-wisdom (>=1.26.0,<1.27.0)", "mypy-boto3-workdocs (>=1.26.0,<1.27.0)", "mypy-boto3-worklink (>=1.26.0,<1.27.0)", "mypy-boto3-workmail (>=1.26.0,<1.27.0)", "mypy-boto3-workmailmessageflow (>=1.26.0,<1.27.0)", "mypy-boto3-workspaces (>=1.26.0,<1.27.0)", "mypy-boto3-workspaces-web (>=1.26.0,<1.27.0)", "mypy-boto3-xray (>=1.26.0,<1.27.0)"] +amp = ["mypy-boto3-amp (>=1.26.0,<1.27.0)"] +amplify = ["mypy-boto3-amplify (>=1.26.0,<1.27.0)"] +amplifybackend = ["mypy-boto3-amplifybackend (>=1.26.0,<1.27.0)"] +amplifyuibuilder = ["mypy-boto3-amplifyuibuilder (>=1.26.0,<1.27.0)"] +apigateway = ["mypy-boto3-apigateway (>=1.26.0,<1.27.0)"] +apigatewaymanagementapi = ["mypy-boto3-apigatewaymanagementapi (>=1.26.0,<1.27.0)"] +apigatewayv2 = ["mypy-boto3-apigatewayv2 (>=1.26.0,<1.27.0)"] +appconfig = ["mypy-boto3-appconfig (>=1.26.0,<1.27.0)"] +appconfigdata = ["mypy-boto3-appconfigdata (>=1.26.0,<1.27.0)"] +appflow = ["mypy-boto3-appflow (>=1.26.0,<1.27.0)"] +appintegrations = ["mypy-boto3-appintegrations (>=1.26.0,<1.27.0)"] +application-autoscaling = ["mypy-boto3-application-autoscaling (>=1.26.0,<1.27.0)"] +application-insights = ["mypy-boto3-application-insights (>=1.26.0,<1.27.0)"] +applicationcostprofiler = ["mypy-boto3-applicationcostprofiler (>=1.26.0,<1.27.0)"] +appmesh = ["mypy-boto3-appmesh (>=1.26.0,<1.27.0)"] +apprunner = ["mypy-boto3-apprunner (>=1.26.0,<1.27.0)"] +appstream = ["mypy-boto3-appstream (>=1.26.0,<1.27.0)"] +appsync = ["mypy-boto3-appsync (>=1.26.0,<1.27.0)"] +arc-zonal-shift = 
["mypy-boto3-arc-zonal-shift (>=1.26.0,<1.27.0)"] +athena = ["mypy-boto3-athena (>=1.26.0,<1.27.0)"] +auditmanager = ["mypy-boto3-auditmanager (>=1.26.0,<1.27.0)"] +autoscaling = ["mypy-boto3-autoscaling (>=1.26.0,<1.27.0)"] +autoscaling-plans = ["mypy-boto3-autoscaling-plans (>=1.26.0,<1.27.0)"] +backup = ["mypy-boto3-backup (>=1.26.0,<1.27.0)"] +backup-gateway = ["mypy-boto3-backup-gateway (>=1.26.0,<1.27.0)"] +backupstorage = ["mypy-boto3-backupstorage (>=1.26.0,<1.27.0)"] +batch = ["mypy-boto3-batch (>=1.26.0,<1.27.0)"] +billingconductor = ["mypy-boto3-billingconductor (>=1.26.0,<1.27.0)"] +boto3 = ["boto3 (==1.26.139)", "botocore (==1.29.139)"] +braket = ["mypy-boto3-braket (>=1.26.0,<1.27.0)"] +budgets = ["mypy-boto3-budgets (>=1.26.0,<1.27.0)"] +ce = ["mypy-boto3-ce (>=1.26.0,<1.27.0)"] +chime = ["mypy-boto3-chime (>=1.26.0,<1.27.0)"] +chime-sdk-identity = ["mypy-boto3-chime-sdk-identity (>=1.26.0,<1.27.0)"] +chime-sdk-media-pipelines = ["mypy-boto3-chime-sdk-media-pipelines (>=1.26.0,<1.27.0)"] +chime-sdk-meetings = ["mypy-boto3-chime-sdk-meetings (>=1.26.0,<1.27.0)"] +chime-sdk-messaging = ["mypy-boto3-chime-sdk-messaging (>=1.26.0,<1.27.0)"] +chime-sdk-voice = ["mypy-boto3-chime-sdk-voice (>=1.26.0,<1.27.0)"] +cleanrooms = ["mypy-boto3-cleanrooms (>=1.26.0,<1.27.0)"] +cloud9 = ["mypy-boto3-cloud9 (>=1.26.0,<1.27.0)"] +cloudcontrol = ["mypy-boto3-cloudcontrol (>=1.26.0,<1.27.0)"] +clouddirectory = ["mypy-boto3-clouddirectory (>=1.26.0,<1.27.0)"] +cloudformation = ["mypy-boto3-cloudformation (>=1.26.0,<1.27.0)"] +cloudfront = ["mypy-boto3-cloudfront (>=1.26.0,<1.27.0)"] +cloudhsm = ["mypy-boto3-cloudhsm (>=1.26.0,<1.27.0)"] +cloudhsmv2 = ["mypy-boto3-cloudhsmv2 (>=1.26.0,<1.27.0)"] +cloudsearch = ["mypy-boto3-cloudsearch (>=1.26.0,<1.27.0)"] +cloudsearchdomain = ["mypy-boto3-cloudsearchdomain (>=1.26.0,<1.27.0)"] +cloudtrail = ["mypy-boto3-cloudtrail (>=1.26.0,<1.27.0)"] +cloudtrail-data = ["mypy-boto3-cloudtrail-data (>=1.26.0,<1.27.0)"] +cloudwatch = ["mypy-boto3-cloudwatch (>=1.26.0,<1.27.0)"] +codeartifact = ["mypy-boto3-codeartifact (>=1.26.0,<1.27.0)"] +codebuild = ["mypy-boto3-codebuild (>=1.26.0,<1.27.0)"] +codecatalyst = ["mypy-boto3-codecatalyst (>=1.26.0,<1.27.0)"] +codecommit = ["mypy-boto3-codecommit (>=1.26.0,<1.27.0)"] +codedeploy = ["mypy-boto3-codedeploy (>=1.26.0,<1.27.0)"] +codeguru-reviewer = ["mypy-boto3-codeguru-reviewer (>=1.26.0,<1.27.0)"] +codeguruprofiler = ["mypy-boto3-codeguruprofiler (>=1.26.0,<1.27.0)"] +codepipeline = ["mypy-boto3-codepipeline (>=1.26.0,<1.27.0)"] +codestar = ["mypy-boto3-codestar (>=1.26.0,<1.27.0)"] +codestar-connections = ["mypy-boto3-codestar-connections (>=1.26.0,<1.27.0)"] +codestar-notifications = ["mypy-boto3-codestar-notifications (>=1.26.0,<1.27.0)"] +cognito-identity = ["mypy-boto3-cognito-identity (>=1.26.0,<1.27.0)"] +cognito-idp = ["mypy-boto3-cognito-idp (>=1.26.0,<1.27.0)"] +cognito-sync = ["mypy-boto3-cognito-sync (>=1.26.0,<1.27.0)"] +comprehend = ["mypy-boto3-comprehend (>=1.26.0,<1.27.0)"] +comprehendmedical = ["mypy-boto3-comprehendmedical (>=1.26.0,<1.27.0)"] +compute-optimizer = ["mypy-boto3-compute-optimizer (>=1.26.0,<1.27.0)"] +config = ["mypy-boto3-config (>=1.26.0,<1.27.0)"] +connect = ["mypy-boto3-connect (>=1.26.0,<1.27.0)"] +connect-contact-lens = ["mypy-boto3-connect-contact-lens (>=1.26.0,<1.27.0)"] +connectcampaigns = ["mypy-boto3-connectcampaigns (>=1.26.0,<1.27.0)"] +connectcases = ["mypy-boto3-connectcases (>=1.26.0,<1.27.0)"] +connectparticipant = ["mypy-boto3-connectparticipant 
(>=1.26.0,<1.27.0)"] +controltower = ["mypy-boto3-controltower (>=1.26.0,<1.27.0)"] +cur = ["mypy-boto3-cur (>=1.26.0,<1.27.0)"] +customer-profiles = ["mypy-boto3-customer-profiles (>=1.26.0,<1.27.0)"] +databrew = ["mypy-boto3-databrew (>=1.26.0,<1.27.0)"] +dataexchange = ["mypy-boto3-dataexchange (>=1.26.0,<1.27.0)"] +datapipeline = ["mypy-boto3-datapipeline (>=1.26.0,<1.27.0)"] +datasync = ["mypy-boto3-datasync (>=1.26.0,<1.27.0)"] +dax = ["mypy-boto3-dax (>=1.26.0,<1.27.0)"] +detective = ["mypy-boto3-detective (>=1.26.0,<1.27.0)"] +devicefarm = ["mypy-boto3-devicefarm (>=1.26.0,<1.27.0)"] +devops-guru = ["mypy-boto3-devops-guru (>=1.26.0,<1.27.0)"] +directconnect = ["mypy-boto3-directconnect (>=1.26.0,<1.27.0)"] +discovery = ["mypy-boto3-discovery (>=1.26.0,<1.27.0)"] +dlm = ["mypy-boto3-dlm (>=1.26.0,<1.27.0)"] +dms = ["mypy-boto3-dms (>=1.26.0,<1.27.0)"] +docdb = ["mypy-boto3-docdb (>=1.26.0,<1.27.0)"] +docdb-elastic = ["mypy-boto3-docdb-elastic (>=1.26.0,<1.27.0)"] +drs = ["mypy-boto3-drs (>=1.26.0,<1.27.0)"] +ds = ["mypy-boto3-ds (>=1.26.0,<1.27.0)"] +dynamodb = ["mypy-boto3-dynamodb (>=1.26.0,<1.27.0)"] +dynamodbstreams = ["mypy-boto3-dynamodbstreams (>=1.26.0,<1.27.0)"] +ebs = ["mypy-boto3-ebs (>=1.26.0,<1.27.0)"] +ec2 = ["mypy-boto3-ec2 (>=1.26.0,<1.27.0)"] +ec2-instance-connect = ["mypy-boto3-ec2-instance-connect (>=1.26.0,<1.27.0)"] +ecr = ["mypy-boto3-ecr (>=1.26.0,<1.27.0)"] +ecr-public = ["mypy-boto3-ecr-public (>=1.26.0,<1.27.0)"] +ecs = ["mypy-boto3-ecs (>=1.26.0,<1.27.0)"] +efs = ["mypy-boto3-efs (>=1.26.0,<1.27.0)"] +eks = ["mypy-boto3-eks (>=1.26.0,<1.27.0)"] +elastic-inference = ["mypy-boto3-elastic-inference (>=1.26.0,<1.27.0)"] +elasticache = ["mypy-boto3-elasticache (>=1.26.0,<1.27.0)"] +elasticbeanstalk = ["mypy-boto3-elasticbeanstalk (>=1.26.0,<1.27.0)"] +elastictranscoder = ["mypy-boto3-elastictranscoder (>=1.26.0,<1.27.0)"] +elb = ["mypy-boto3-elb (>=1.26.0,<1.27.0)"] +elbv2 = ["mypy-boto3-elbv2 (>=1.26.0,<1.27.0)"] +emr = ["mypy-boto3-emr (>=1.26.0,<1.27.0)"] +emr-containers = ["mypy-boto3-emr-containers (>=1.26.0,<1.27.0)"] +emr-serverless = ["mypy-boto3-emr-serverless (>=1.26.0,<1.27.0)"] +es = ["mypy-boto3-es (>=1.26.0,<1.27.0)"] +essential = ["mypy-boto3-cloudformation (>=1.26.0,<1.27.0)", "mypy-boto3-dynamodb (>=1.26.0,<1.27.0)", "mypy-boto3-ec2 (>=1.26.0,<1.27.0)", "mypy-boto3-lambda (>=1.26.0,<1.27.0)", "mypy-boto3-rds (>=1.26.0,<1.27.0)", "mypy-boto3-s3 (>=1.26.0,<1.27.0)", "mypy-boto3-sqs (>=1.26.0,<1.27.0)"] +events = ["mypy-boto3-events (>=1.26.0,<1.27.0)"] +evidently = ["mypy-boto3-evidently (>=1.26.0,<1.27.0)"] +finspace = ["mypy-boto3-finspace (>=1.26.0,<1.27.0)"] +finspace-data = ["mypy-boto3-finspace-data (>=1.26.0,<1.27.0)"] +firehose = ["mypy-boto3-firehose (>=1.26.0,<1.27.0)"] +fis = ["mypy-boto3-fis (>=1.26.0,<1.27.0)"] +fms = ["mypy-boto3-fms (>=1.26.0,<1.27.0)"] +forecast = ["mypy-boto3-forecast (>=1.26.0,<1.27.0)"] +forecastquery = ["mypy-boto3-forecastquery (>=1.26.0,<1.27.0)"] +frauddetector = ["mypy-boto3-frauddetector (>=1.26.0,<1.27.0)"] +fsx = ["mypy-boto3-fsx (>=1.26.0,<1.27.0)"] +gamelift = ["mypy-boto3-gamelift (>=1.26.0,<1.27.0)"] +gamesparks = ["mypy-boto3-gamesparks (>=1.26.0,<1.27.0)"] +glacier = ["mypy-boto3-glacier (>=1.26.0,<1.27.0)"] +globalaccelerator = ["mypy-boto3-globalaccelerator (>=1.26.0,<1.27.0)"] +glue = ["mypy-boto3-glue (>=1.26.0,<1.27.0)"] +grafana = ["mypy-boto3-grafana (>=1.26.0,<1.27.0)"] +greengrass = ["mypy-boto3-greengrass (>=1.26.0,<1.27.0)"] +greengrassv2 = ["mypy-boto3-greengrassv2 
(>=1.26.0,<1.27.0)"] +groundstation = ["mypy-boto3-groundstation (>=1.26.0,<1.27.0)"] +guardduty = ["mypy-boto3-guardduty (>=1.26.0,<1.27.0)"] +health = ["mypy-boto3-health (>=1.26.0,<1.27.0)"] +healthlake = ["mypy-boto3-healthlake (>=1.26.0,<1.27.0)"] +honeycode = ["mypy-boto3-honeycode (>=1.26.0,<1.27.0)"] +iam = ["mypy-boto3-iam (>=1.26.0,<1.27.0)"] +identitystore = ["mypy-boto3-identitystore (>=1.26.0,<1.27.0)"] +imagebuilder = ["mypy-boto3-imagebuilder (>=1.26.0,<1.27.0)"] +importexport = ["mypy-boto3-importexport (>=1.26.0,<1.27.0)"] +inspector = ["mypy-boto3-inspector (>=1.26.0,<1.27.0)"] +inspector2 = ["mypy-boto3-inspector2 (>=1.26.0,<1.27.0)"] +internetmonitor = ["mypy-boto3-internetmonitor (>=1.26.0,<1.27.0)"] +iot = ["mypy-boto3-iot (>=1.26.0,<1.27.0)"] +iot-data = ["mypy-boto3-iot-data (>=1.26.0,<1.27.0)"] +iot-jobs-data = ["mypy-boto3-iot-jobs-data (>=1.26.0,<1.27.0)"] +iot-roborunner = ["mypy-boto3-iot-roborunner (>=1.26.0,<1.27.0)"] +iot1click-devices = ["mypy-boto3-iot1click-devices (>=1.26.0,<1.27.0)"] +iot1click-projects = ["mypy-boto3-iot1click-projects (>=1.26.0,<1.27.0)"] +iotanalytics = ["mypy-boto3-iotanalytics (>=1.26.0,<1.27.0)"] +iotdeviceadvisor = ["mypy-boto3-iotdeviceadvisor (>=1.26.0,<1.27.0)"] +iotevents = ["mypy-boto3-iotevents (>=1.26.0,<1.27.0)"] +iotevents-data = ["mypy-boto3-iotevents-data (>=1.26.0,<1.27.0)"] +iotfleethub = ["mypy-boto3-iotfleethub (>=1.26.0,<1.27.0)"] +iotfleetwise = ["mypy-boto3-iotfleetwise (>=1.26.0,<1.27.0)"] +iotsecuretunneling = ["mypy-boto3-iotsecuretunneling (>=1.26.0,<1.27.0)"] +iotsitewise = ["mypy-boto3-iotsitewise (>=1.26.0,<1.27.0)"] +iotthingsgraph = ["mypy-boto3-iotthingsgraph (>=1.26.0,<1.27.0)"] +iottwinmaker = ["mypy-boto3-iottwinmaker (>=1.26.0,<1.27.0)"] +iotwireless = ["mypy-boto3-iotwireless (>=1.26.0,<1.27.0)"] +ivs = ["mypy-boto3-ivs (>=1.26.0,<1.27.0)"] +ivs-realtime = ["mypy-boto3-ivs-realtime (>=1.26.0,<1.27.0)"] +ivschat = ["mypy-boto3-ivschat (>=1.26.0,<1.27.0)"] +kafka = ["mypy-boto3-kafka (>=1.26.0,<1.27.0)"] +kafkaconnect = ["mypy-boto3-kafkaconnect (>=1.26.0,<1.27.0)"] +kendra = ["mypy-boto3-kendra (>=1.26.0,<1.27.0)"] +kendra-ranking = ["mypy-boto3-kendra-ranking (>=1.26.0,<1.27.0)"] +keyspaces = ["mypy-boto3-keyspaces (>=1.26.0,<1.27.0)"] +kinesis = ["mypy-boto3-kinesis (>=1.26.0,<1.27.0)"] +kinesis-video-archived-media = ["mypy-boto3-kinesis-video-archived-media (>=1.26.0,<1.27.0)"] +kinesis-video-media = ["mypy-boto3-kinesis-video-media (>=1.26.0,<1.27.0)"] +kinesis-video-signaling = ["mypy-boto3-kinesis-video-signaling (>=1.26.0,<1.27.0)"] +kinesis-video-webrtc-storage = ["mypy-boto3-kinesis-video-webrtc-storage (>=1.26.0,<1.27.0)"] +kinesisanalytics = ["mypy-boto3-kinesisanalytics (>=1.26.0,<1.27.0)"] +kinesisanalyticsv2 = ["mypy-boto3-kinesisanalyticsv2 (>=1.26.0,<1.27.0)"] +kinesisvideo = ["mypy-boto3-kinesisvideo (>=1.26.0,<1.27.0)"] +kms = ["mypy-boto3-kms (>=1.26.0,<1.27.0)"] +lakeformation = ["mypy-boto3-lakeformation (>=1.26.0,<1.27.0)"] +lambda = ["mypy-boto3-lambda (>=1.26.0,<1.27.0)"] +lex-models = ["mypy-boto3-lex-models (>=1.26.0,<1.27.0)"] +lex-runtime = ["mypy-boto3-lex-runtime (>=1.26.0,<1.27.0)"] +lexv2-models = ["mypy-boto3-lexv2-models (>=1.26.0,<1.27.0)"] +lexv2-runtime = ["mypy-boto3-lexv2-runtime (>=1.26.0,<1.27.0)"] +license-manager = ["mypy-boto3-license-manager (>=1.26.0,<1.27.0)"] +license-manager-linux-subscriptions = ["mypy-boto3-license-manager-linux-subscriptions (>=1.26.0,<1.27.0)"] +license-manager-user-subscriptions = ["mypy-boto3-license-manager-user-subscriptions 
(>=1.26.0,<1.27.0)"] +lightsail = ["mypy-boto3-lightsail (>=1.26.0,<1.27.0)"] +location = ["mypy-boto3-location (>=1.26.0,<1.27.0)"] +logs = ["mypy-boto3-logs (>=1.26.0,<1.27.0)"] +lookoutequipment = ["mypy-boto3-lookoutequipment (>=1.26.0,<1.27.0)"] +lookoutmetrics = ["mypy-boto3-lookoutmetrics (>=1.26.0,<1.27.0)"] +lookoutvision = ["mypy-boto3-lookoutvision (>=1.26.0,<1.27.0)"] +m2 = ["mypy-boto3-m2 (>=1.26.0,<1.27.0)"] +machinelearning = ["mypy-boto3-machinelearning (>=1.26.0,<1.27.0)"] +macie = ["mypy-boto3-macie (>=1.26.0,<1.27.0)"] +macie2 = ["mypy-boto3-macie2 (>=1.26.0,<1.27.0)"] +managedblockchain = ["mypy-boto3-managedblockchain (>=1.26.0,<1.27.0)"] +marketplace-catalog = ["mypy-boto3-marketplace-catalog (>=1.26.0,<1.27.0)"] +marketplace-entitlement = ["mypy-boto3-marketplace-entitlement (>=1.26.0,<1.27.0)"] +marketplacecommerceanalytics = ["mypy-boto3-marketplacecommerceanalytics (>=1.26.0,<1.27.0)"] +mediaconnect = ["mypy-boto3-mediaconnect (>=1.26.0,<1.27.0)"] +mediaconvert = ["mypy-boto3-mediaconvert (>=1.26.0,<1.27.0)"] +medialive = ["mypy-boto3-medialive (>=1.26.0,<1.27.0)"] +mediapackage = ["mypy-boto3-mediapackage (>=1.26.0,<1.27.0)"] +mediapackage-vod = ["mypy-boto3-mediapackage-vod (>=1.26.0,<1.27.0)"] +mediapackagev2 = ["mypy-boto3-mediapackagev2 (>=1.26.0,<1.27.0)"] +mediastore = ["mypy-boto3-mediastore (>=1.26.0,<1.27.0)"] +mediastore-data = ["mypy-boto3-mediastore-data (>=1.26.0,<1.27.0)"] +mediatailor = ["mypy-boto3-mediatailor (>=1.26.0,<1.27.0)"] +memorydb = ["mypy-boto3-memorydb (>=1.26.0,<1.27.0)"] +meteringmarketplace = ["mypy-boto3-meteringmarketplace (>=1.26.0,<1.27.0)"] +mgh = ["mypy-boto3-mgh (>=1.26.0,<1.27.0)"] +mgn = ["mypy-boto3-mgn (>=1.26.0,<1.27.0)"] +migration-hub-refactor-spaces = ["mypy-boto3-migration-hub-refactor-spaces (>=1.26.0,<1.27.0)"] +migrationhub-config = ["mypy-boto3-migrationhub-config (>=1.26.0,<1.27.0)"] +migrationhuborchestrator = ["mypy-boto3-migrationhuborchestrator (>=1.26.0,<1.27.0)"] +migrationhubstrategy = ["mypy-boto3-migrationhubstrategy (>=1.26.0,<1.27.0)"] +mobile = ["mypy-boto3-mobile (>=1.26.0,<1.27.0)"] +mq = ["mypy-boto3-mq (>=1.26.0,<1.27.0)"] +mturk = ["mypy-boto3-mturk (>=1.26.0,<1.27.0)"] +mwaa = ["mypy-boto3-mwaa (>=1.26.0,<1.27.0)"] +neptune = ["mypy-boto3-neptune (>=1.26.0,<1.27.0)"] +network-firewall = ["mypy-boto3-network-firewall (>=1.26.0,<1.27.0)"] +networkmanager = ["mypy-boto3-networkmanager (>=1.26.0,<1.27.0)"] +nimble = ["mypy-boto3-nimble (>=1.26.0,<1.27.0)"] +oam = ["mypy-boto3-oam (>=1.26.0,<1.27.0)"] +omics = ["mypy-boto3-omics (>=1.26.0,<1.27.0)"] +opensearch = ["mypy-boto3-opensearch (>=1.26.0,<1.27.0)"] +opensearchserverless = ["mypy-boto3-opensearchserverless (>=1.26.0,<1.27.0)"] +opsworks = ["mypy-boto3-opsworks (>=1.26.0,<1.27.0)"] +opsworkscm = ["mypy-boto3-opsworkscm (>=1.26.0,<1.27.0)"] +organizations = ["mypy-boto3-organizations (>=1.26.0,<1.27.0)"] +osis = ["mypy-boto3-osis (>=1.26.0,<1.27.0)"] +outposts = ["mypy-boto3-outposts (>=1.26.0,<1.27.0)"] +panorama = ["mypy-boto3-panorama (>=1.26.0,<1.27.0)"] +personalize = ["mypy-boto3-personalize (>=1.26.0,<1.27.0)"] +personalize-events = ["mypy-boto3-personalize-events (>=1.26.0,<1.27.0)"] +personalize-runtime = ["mypy-boto3-personalize-runtime (>=1.26.0,<1.27.0)"] +pi = ["mypy-boto3-pi (>=1.26.0,<1.27.0)"] +pinpoint = ["mypy-boto3-pinpoint (>=1.26.0,<1.27.0)"] +pinpoint-email = ["mypy-boto3-pinpoint-email (>=1.26.0,<1.27.0)"] +pinpoint-sms-voice = ["mypy-boto3-pinpoint-sms-voice (>=1.26.0,<1.27.0)"] +pinpoint-sms-voice-v2 = 
["mypy-boto3-pinpoint-sms-voice-v2 (>=1.26.0,<1.27.0)"] +pipes = ["mypy-boto3-pipes (>=1.26.0,<1.27.0)"] +polly = ["mypy-boto3-polly (>=1.26.0,<1.27.0)"] +pricing = ["mypy-boto3-pricing (>=1.26.0,<1.27.0)"] +privatenetworks = ["mypy-boto3-privatenetworks (>=1.26.0,<1.27.0)"] +proton = ["mypy-boto3-proton (>=1.26.0,<1.27.0)"] +qldb = ["mypy-boto3-qldb (>=1.26.0,<1.27.0)"] +qldb-session = ["mypy-boto3-qldb-session (>=1.26.0,<1.27.0)"] +quicksight = ["mypy-boto3-quicksight (>=1.26.0,<1.27.0)"] +ram = ["mypy-boto3-ram (>=1.26.0,<1.27.0)"] +rbin = ["mypy-boto3-rbin (>=1.26.0,<1.27.0)"] +rds = ["mypy-boto3-rds (>=1.26.0,<1.27.0)"] +rds-data = ["mypy-boto3-rds-data (>=1.26.0,<1.27.0)"] +redshift = ["mypy-boto3-redshift (>=1.26.0,<1.27.0)"] +redshift-data = ["mypy-boto3-redshift-data (>=1.26.0,<1.27.0)"] +redshift-serverless = ["mypy-boto3-redshift-serverless (>=1.26.0,<1.27.0)"] +rekognition = ["mypy-boto3-rekognition (>=1.26.0,<1.27.0)"] +resiliencehub = ["mypy-boto3-resiliencehub (>=1.26.0,<1.27.0)"] +resource-explorer-2 = ["mypy-boto3-resource-explorer-2 (>=1.26.0,<1.27.0)"] +resource-groups = ["mypy-boto3-resource-groups (>=1.26.0,<1.27.0)"] +resourcegroupstaggingapi = ["mypy-boto3-resourcegroupstaggingapi (>=1.26.0,<1.27.0)"] +robomaker = ["mypy-boto3-robomaker (>=1.26.0,<1.27.0)"] +rolesanywhere = ["mypy-boto3-rolesanywhere (>=1.26.0,<1.27.0)"] +route53 = ["mypy-boto3-route53 (>=1.26.0,<1.27.0)"] +route53-recovery-cluster = ["mypy-boto3-route53-recovery-cluster (>=1.26.0,<1.27.0)"] +route53-recovery-control-config = ["mypy-boto3-route53-recovery-control-config (>=1.26.0,<1.27.0)"] +route53-recovery-readiness = ["mypy-boto3-route53-recovery-readiness (>=1.26.0,<1.27.0)"] +route53domains = ["mypy-boto3-route53domains (>=1.26.0,<1.27.0)"] +route53resolver = ["mypy-boto3-route53resolver (>=1.26.0,<1.27.0)"] +rum = ["mypy-boto3-rum (>=1.26.0,<1.27.0)"] +s3 = ["mypy-boto3-s3 (>=1.26.0,<1.27.0)"] +s3control = ["mypy-boto3-s3control (>=1.26.0,<1.27.0)"] +s3outposts = ["mypy-boto3-s3outposts (>=1.26.0,<1.27.0)"] +sagemaker = ["mypy-boto3-sagemaker (>=1.26.0,<1.27.0)"] +sagemaker-a2i-runtime = ["mypy-boto3-sagemaker-a2i-runtime (>=1.26.0,<1.27.0)"] +sagemaker-edge = ["mypy-boto3-sagemaker-edge (>=1.26.0,<1.27.0)"] +sagemaker-featurestore-runtime = ["mypy-boto3-sagemaker-featurestore-runtime (>=1.26.0,<1.27.0)"] +sagemaker-geospatial = ["mypy-boto3-sagemaker-geospatial (>=1.26.0,<1.27.0)"] +sagemaker-metrics = ["mypy-boto3-sagemaker-metrics (>=1.26.0,<1.27.0)"] +sagemaker-runtime = ["mypy-boto3-sagemaker-runtime (>=1.26.0,<1.27.0)"] +savingsplans = ["mypy-boto3-savingsplans (>=1.26.0,<1.27.0)"] +scheduler = ["mypy-boto3-scheduler (>=1.26.0,<1.27.0)"] +schemas = ["mypy-boto3-schemas (>=1.26.0,<1.27.0)"] +sdb = ["mypy-boto3-sdb (>=1.26.0,<1.27.0)"] +secretsmanager = ["mypy-boto3-secretsmanager (>=1.26.0,<1.27.0)"] +securityhub = ["mypy-boto3-securityhub (>=1.26.0,<1.27.0)"] +securitylake = ["mypy-boto3-securitylake (>=1.26.0,<1.27.0)"] +serverlessrepo = ["mypy-boto3-serverlessrepo (>=1.26.0,<1.27.0)"] +service-quotas = ["mypy-boto3-service-quotas (>=1.26.0,<1.27.0)"] +servicecatalog = ["mypy-boto3-servicecatalog (>=1.26.0,<1.27.0)"] +servicecatalog-appregistry = ["mypy-boto3-servicecatalog-appregistry (>=1.26.0,<1.27.0)"] +servicediscovery = ["mypy-boto3-servicediscovery (>=1.26.0,<1.27.0)"] +ses = ["mypy-boto3-ses (>=1.26.0,<1.27.0)"] +sesv2 = ["mypy-boto3-sesv2 (>=1.26.0,<1.27.0)"] +shield = ["mypy-boto3-shield (>=1.26.0,<1.27.0)"] +signer = ["mypy-boto3-signer (>=1.26.0,<1.27.0)"] +simspaceweaver = 
["mypy-boto3-simspaceweaver (>=1.26.0,<1.27.0)"] +sms = ["mypy-boto3-sms (>=1.26.0,<1.27.0)"] +sms-voice = ["mypy-boto3-sms-voice (>=1.26.0,<1.27.0)"] +snow-device-management = ["mypy-boto3-snow-device-management (>=1.26.0,<1.27.0)"] +snowball = ["mypy-boto3-snowball (>=1.26.0,<1.27.0)"] +sns = ["mypy-boto3-sns (>=1.26.0,<1.27.0)"] +sqs = ["mypy-boto3-sqs (>=1.26.0,<1.27.0)"] +ssm = ["mypy-boto3-ssm (>=1.26.0,<1.27.0)"] +ssm-contacts = ["mypy-boto3-ssm-contacts (>=1.26.0,<1.27.0)"] +ssm-incidents = ["mypy-boto3-ssm-incidents (>=1.26.0,<1.27.0)"] +ssm-sap = ["mypy-boto3-ssm-sap (>=1.26.0,<1.27.0)"] +sso = ["mypy-boto3-sso (>=1.26.0,<1.27.0)"] +sso-admin = ["mypy-boto3-sso-admin (>=1.26.0,<1.27.0)"] +sso-oidc = ["mypy-boto3-sso-oidc (>=1.26.0,<1.27.0)"] +stepfunctions = ["mypy-boto3-stepfunctions (>=1.26.0,<1.27.0)"] +storagegateway = ["mypy-boto3-storagegateway (>=1.26.0,<1.27.0)"] +sts = ["mypy-boto3-sts (>=1.26.0,<1.27.0)"] +support = ["mypy-boto3-support (>=1.26.0,<1.27.0)"] +support-app = ["mypy-boto3-support-app (>=1.26.0,<1.27.0)"] +swf = ["mypy-boto3-swf (>=1.26.0,<1.27.0)"] +synthetics = ["mypy-boto3-synthetics (>=1.26.0,<1.27.0)"] +textract = ["mypy-boto3-textract (>=1.26.0,<1.27.0)"] +timestream-query = ["mypy-boto3-timestream-query (>=1.26.0,<1.27.0)"] +timestream-write = ["mypy-boto3-timestream-write (>=1.26.0,<1.27.0)"] +tnb = ["mypy-boto3-tnb (>=1.26.0,<1.27.0)"] +transcribe = ["mypy-boto3-transcribe (>=1.26.0,<1.27.0)"] +transfer = ["mypy-boto3-transfer (>=1.26.0,<1.27.0)"] +translate = ["mypy-boto3-translate (>=1.26.0,<1.27.0)"] +voice-id = ["mypy-boto3-voice-id (>=1.26.0,<1.27.0)"] +vpc-lattice = ["mypy-boto3-vpc-lattice (>=1.26.0,<1.27.0)"] +waf = ["mypy-boto3-waf (>=1.26.0,<1.27.0)"] +waf-regional = ["mypy-boto3-waf-regional (>=1.26.0,<1.27.0)"] +wafv2 = ["mypy-boto3-wafv2 (>=1.26.0,<1.27.0)"] +wellarchitected = ["mypy-boto3-wellarchitected (>=1.26.0,<1.27.0)"] +wisdom = ["mypy-boto3-wisdom (>=1.26.0,<1.27.0)"] +workdocs = ["mypy-boto3-workdocs (>=1.26.0,<1.27.0)"] +worklink = ["mypy-boto3-worklink (>=1.26.0,<1.27.0)"] +workmail = ["mypy-boto3-workmail (>=1.26.0,<1.27.0)"] +workmailmessageflow = ["mypy-boto3-workmailmessageflow (>=1.26.0,<1.27.0)"] +workspaces = ["mypy-boto3-workspaces (>=1.26.0,<1.27.0)"] +workspaces-web = ["mypy-boto3-workspaces-web (>=1.26.0,<1.27.0)"] +xray = ["mypy-boto3-xray (>=1.26.0,<1.27.0)"] + +[[package]] +name = "botocore" +version = "1.29.139" +description = "Low-level, data-driven core of boto 3." 
+category = "main" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "botocore-1.29.139-py3-none-any.whl", hash = "sha256:b164af929eb2f1507833718de9eb8811e3adc6943b464c1869e95ac87f3bab88"}, + {file = "botocore-1.29.139.tar.gz", hash = "sha256:acc62710bdf11e47f4f26fb290a9082ff00377d7e93a16e1f080f9c789898114"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = ">=1.25.4,<1.27" + +[package.extras] +crt = ["awscrt (==0.16.9)"] + +[[package]] +name = "botocore-stubs" +version = "1.29.130" +description = "Type annotations and code completion for botocore" +category = "dev" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "botocore_stubs-1.29.130-py3-none-any.whl", hash = "sha256:622c4a5cd740498439008d81c5ded612146f4f0d575341c12591f978edbbe733"}, + {file = "botocore_stubs-1.29.130.tar.gz", hash = "sha256:5f6f1967d23c45834858a055cbf65b66863f9f28d05f32f57bf52864a13512d9"}, +] + +[package.dependencies] +types-awscrt = "*" +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "cached-property" +version = "1.5.2" +description = "A decorator for caching properties in classes." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "cached-property-1.5.2.tar.gz", hash = "sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130"}, + {file = "cached_property-1.5.2-py2.py3-none-any.whl", hash = "sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0"}, +] + +[[package]] +name = "certifi" +version = "2022.12.7" +description = "Python package for providing Mozilla's CA Bundle." +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, + {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, +] + +[[package]] +name = "charset-normalizer" +version = "2.1.1" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +category = "dev" +optional = false +python-versions = ">=3.6.0" +files = [ + {file = "charset-normalizer-2.1.1.tar.gz", hash = "sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845"}, + {file = "charset_normalizer-2.1.1-py3-none-any.whl", hash = "sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"}, +] + +[package.extras] +unicode-backport = ["unicodedata2"] + +[[package]] +name = "click" +version = "8.1.3" +description = "Composable command line interface toolkit" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "debugpy" +version = "1.6.7" +description = "An implementation of the Debug Adapter Protocol for Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "debugpy-1.6.7-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b3e7ac809b991006ad7f857f016fa92014445085711ef111fdc3f74f66144096"}, + {file = "debugpy-1.6.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3876611d114a18aafef6383695dfc3f1217c98a9168c1aaf1a02b01ec7d8d1e"}, + {file = "debugpy-1.6.7-cp310-cp310-win32.whl", hash = "sha256:33edb4afa85c098c24cc361d72ba7c21bb92f501104514d4ffec1fb36e09c01a"}, + {file = "debugpy-1.6.7-cp310-cp310-win_amd64.whl", hash = "sha256:ed6d5413474e209ba50b1a75b2d9eecf64d41e6e4501977991cdc755dc83ab0f"}, + {file = "debugpy-1.6.7-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:38ed626353e7c63f4b11efad659be04c23de2b0d15efff77b60e4740ea685d07"}, + {file = "debugpy-1.6.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:279d64c408c60431c8ee832dfd9ace7c396984fd7341fa3116aee414e7dcd88d"}, + {file = "debugpy-1.6.7-cp37-cp37m-win32.whl", hash = "sha256:dbe04e7568aa69361a5b4c47b4493d5680bfa3a911d1e105fbea1b1f23f3eb45"}, + {file = "debugpy-1.6.7-cp37-cp37m-win_amd64.whl", hash = "sha256:f90a2d4ad9a035cee7331c06a4cf2245e38bd7c89554fe3b616d90ab8aab89cc"}, + {file = "debugpy-1.6.7-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:5224eabbbeddcf1943d4e2821876f3e5d7d383f27390b82da5d9558fd4eb30a9"}, + {file = "debugpy-1.6.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae1123dff5bfe548ba1683eb972329ba6d646c3a80e6b4c06cd1b1dd0205e9b"}, + {file = "debugpy-1.6.7-cp38-cp38-win32.whl", hash = "sha256:9cd10cf338e0907fdcf9eac9087faa30f150ef5445af5a545d307055141dd7a4"}, + {file = "debugpy-1.6.7-cp38-cp38-win_amd64.whl", hash = "sha256:aaf6da50377ff4056c8ed470da24632b42e4087bc826845daad7af211e00faad"}, + {file = "debugpy-1.6.7-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:0679b7e1e3523bd7d7869447ec67b59728675aadfc038550a63a362b63029d2c"}, + {file = "debugpy-1.6.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de86029696e1b3b4d0d49076b9eba606c226e33ae312a57a46dca14ff370894d"}, + {file = "debugpy-1.6.7-cp39-cp39-win32.whl", hash = "sha256:d71b31117779d9a90b745720c0eab54ae1da76d5b38c8026c654f4a066b0130a"}, + {file = "debugpy-1.6.7-cp39-cp39-win_amd64.whl", hash = "sha256:c0ff93ae90a03b06d85b2c529eca51ab15457868a377c4cc40a23ab0e4e552a3"}, + {file = "debugpy-1.6.7-py2.py3-none-any.whl", hash = "sha256:53f7a456bc50706a0eaabecf2d3ce44c4d5010e46dfc65b6b81a518b42866267"}, + {file = "debugpy-1.6.7.zip", hash = "sha256:c4c2f0810fa25323abfdfa36cbbbb24e5c3b1a42cb762782de64439c575d67f2"}, +] + +[[package]] +name = "dparse" +version = "0.6.2" +description = "A parser for Python dependency files" +category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "dparse-0.6.2-py3-none-any.whl", hash = "sha256:8097076f1dd26c377f30d4745e6ec18fef42f3bf493933b842ac5bafad8c345f"}, + {file = "dparse-0.6.2.tar.gz", hash = 
"sha256:d45255bda21f998bc7ddf2afd5e62505ba6134756ba2d42a84c56b0826614dfe"}, +] + +[package.dependencies] +packaging = "*" +toml = "*" + +[package.extras] +conda = ["pyyaml"] +pipenv = ["pipenv"] + +[[package]] +name = "ghp-import" +version = "2.1.0" +description = "Copy your docs directly to the gh-pages branch." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, + {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, +] + +[package.dependencies] +python-dateutil = ">=2.8.1" + +[package.extras] +dev = ["flake8", "markdown", "twine", "wheel"] + +[[package]] +name = "gitdb" +version = "4.0.10" +description = "Git Object Database" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"}, + {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.30" +description = "GitPython is a python library used to interact with Git repositories" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.30-py3-none-any.whl", hash = "sha256:cd455b0000615c60e286208ba540271af9fe531fa6a87cc590a7298785ab2882"}, + {file = "GitPython-3.1.30.tar.gz", hash = "sha256:769c2d83e13f5d938b7688479da374c4e3d49f71549aaf462b646db9602ea6f8"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" +typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} + +[[package]] +name = "griffe" +version = "0.28.2" +description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "griffe-0.28.2-py3-none-any.whl", hash = "sha256:bde3a3dfa301a4b113c7fac3b2be45e5723bc50cda4c9cfe13f43c447c9aa5d1"}, + {file = "griffe-0.28.2.tar.gz", hash = "sha256:a471498b0b9505c721ea0e652fd77c97df1aeb56c4eb8c93d24bb1140da4216d"}, +] + +[package.dependencies] +cached-property = {version = "*", markers = "python_version < \"3.8\""} +colorama = ">=0.4" + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "importlib-metadata" +version = "4.13.0" +description = "Read metadata from Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_metadata-4.13.0-py3-none-any.whl", hash = "sha256:8a8a81bcf996e74fee46f0d16bd3eaa382a7eb20fd82445c3ad11f4090334116"}, + {file = "importlib_metadata-4.13.0.tar.gz", hash = "sha256:dd0173e8f150d6815e098fd354f6414b0f079af4644ddfe90c71e2fc6174346d"}, +] + +[package.dependencies] +typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +perf = ["ipython"] +testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] + +[[package]] +name = "isort" +version = "5.11.4" +description = "A Python utility / library to sort Python imports." +category = "dev" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "isort-5.11.4-py3-none-any.whl", hash = "sha256:c033fd0edb91000a7f09527fe5c75321878f98322a77ddcc81adbd83724afb7b"}, + {file = "isort-5.11.4.tar.gz", hash = "sha256:6db30c5ded9815d813932c04c2f85a360bcdd35fed496f4d8f35495ef0a261b6"}, +] + +[package.extras] +colors = ["colorama (>=0.4.3,<0.5.0)"] +pipfile-deprecated-finder = ["pipreqs", "requirementslib"] +plugins = ["setuptools"] +requirements-deprecated-finder = ["pip-api", "pipreqs"] + +[[package]] +name = "jinja2" +version = "3.1.2" +description = "A very fast and expressive template engine." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "markdown" +version = "3.3.7" +description = "Python implementation of Markdown." +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "Markdown-3.3.7-py3-none-any.whl", hash = "sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621"}, + {file = "Markdown-3.3.7.tar.gz", hash = "sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} + +[package.extras] +testing = ["coverage", "pyyaml"] + +[[package]] +name = "markdown-include" +version = "0.8.1" +description = "A Python-Markdown extension which provides an 'include' function" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "markdown-include-0.8.1.tar.gz", hash = "sha256:1d0623e0fc2757c38d35df53752768356162284259d259c486b4ab6285cdbbe3"}, + {file = "markdown_include-0.8.1-py3-none-any.whl", hash = "sha256:32f0635b9cfef46997b307e2430022852529f7a5b87c0075c504283e7cc7db53"}, +] + +[package.dependencies] +markdown = ">=3.0" + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "markdown-it-py" +version = "2.2.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, + {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" +typing_extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markupsafe" +version = "2.1.2" +description = "Safely add untrusted strings to HTML/XML markup." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"}, + {file = 
"MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"}, + {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "mergedeep" +version = "1.3.4" +description = "A deep merge function for 🐍." +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, + {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, +] + +[[package]] +name = "mkdocs" +version = "1.4.3" +description = "Project documentation with Markdown." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mkdocs-1.4.3-py3-none-any.whl", hash = "sha256:6ee46d309bda331aac915cd24aab882c179a933bd9e77b80ce7d2eaaa3f689dd"}, + {file = "mkdocs-1.4.3.tar.gz", hash = "sha256:5955093bbd4dd2e9403c5afaf57324ad8b04f16886512a3ee6ef828956481c57"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} +ghp-import = ">=1.0" +importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} +jinja2 = ">=2.11.1" +markdown = ">=3.2.1,<3.4" +mergedeep = ">=1.3.4" +packaging = ">=20.5" +pyyaml = ">=5.1" +pyyaml-env-tag = ">=0.1" +typing-extensions = {version = ">=3.10", markers = "python_version < \"3.8\""} +watchdog = ">=2.0" + +[package.extras] +i18n = ["babel (>=2.9.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.3)", "jinja2 (==2.11.1)", "markdown (==3.2.1)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "packaging (==20.5)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "typing-extensions (==3.10)", "watchdog (==2.0)"] + +[[package]] +name = "mkdocs-autorefs" +version = "0.4.1" +description = "Automatically link across pages in MkDocs." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mkdocs-autorefs-0.4.1.tar.gz", hash = "sha256:70748a7bd025f9ecd6d6feeba8ba63f8e891a1af55f48e366d6d6e78493aba84"}, + {file = "mkdocs_autorefs-0.4.1-py3-none-any.whl", hash = "sha256:a2248a9501b29dc0cc8ba4c09f4f47ff121945f6ce33d760f145d6f89d313f5b"}, +] + +[package.dependencies] +Markdown = ">=3.3" +mkdocs = ">=1.1" + +[[package]] +name = "mkdocs-material" +version = "9.1.14" +description = "Documentation that simply works" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mkdocs_material-9.1.14-py3-none-any.whl", hash = "sha256:b56a9f955ed32d38333715cbbf68ce38f683bf38610c65094fa4ef2db9f08bcd"}, + {file = "mkdocs_material-9.1.14.tar.gz", hash = "sha256:1ae74cc5464ef2f64574d4884512efed7f4db386fb9bc6af20fd427d7a702f49"}, +] + +[package.dependencies] +colorama = ">=0.4" +jinja2 = ">=3.0" +markdown = ">=3.2" +mkdocs = ">=1.4.2" +mkdocs-material-extensions = ">=1.1" +pygments = ">=2.14" +pymdown-extensions = ">=9.9.1" +regex = ">=2022.4.24" +requests = ">=2.26" + +[[package]] +name = "mkdocs-material-extensions" +version = "1.1.1" +description = "Extension pack for Python Markdown and MkDocs Material." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mkdocs_material_extensions-1.1.1-py3-none-any.whl", hash = "sha256:e41d9f38e4798b6617ad98ca8f7f1157b1e4385ac1459ca1e4ea219b556df945"}, + {file = "mkdocs_material_extensions-1.1.1.tar.gz", hash = "sha256:9c003da71e2cc2493d910237448c672e00cefc800d3d6ae93d2fc69979e3bd93"}, +] + +[[package]] +name = "mkdocstrings" +version = "0.21.2" +description = "Automatic documentation from sources, for MkDocs." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mkdocstrings-0.21.2-py3-none-any.whl", hash = "sha256:949ef8da92df9d692ca07be50616459a6b536083a25520fd54b00e8814ce019b"}, + {file = "mkdocstrings-0.21.2.tar.gz", hash = "sha256:304e56a2e90595708a38a13a278e538a67ad82052dd5c8b71f77a604a4f3d911"}, +] + +[package.dependencies] +Jinja2 = ">=2.11.1" +Markdown = ">=3.3" +MarkupSafe = ">=1.1" +mkdocs = ">=1.2" +mkdocs-autorefs = ">=0.3.1" +pymdown-extensions = ">=6.3" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.10\""} + +[package.extras] +crystal = ["mkdocstrings-crystal (>=0.3.4)"] +python = ["mkdocstrings-python (>=0.5.2)"] +python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] + +[[package]] +name = "mkdocstrings-python" +version = "1.0.0" +description = "A Python handler for mkdocstrings." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mkdocstrings_python-1.0.0-py3-none-any.whl", hash = "sha256:c59d67009a7a85172f4da990d8523e95606b6a1ff93a22a2351ad3b5f8cafed1"}, + {file = "mkdocstrings_python-1.0.0.tar.gz", hash = "sha256:b89d849df990204f909d5452548b6936a185f912da06208a93909bebe25d6e67"}, +] + +[package.dependencies] +griffe = ">=0.24" +mkdocstrings = ">=0.20" + +[[package]] +name = "mypy" +version = "1.3.0" +description = "Optional static typing for Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mypy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1eb485cea53f4f5284e5baf92902cd0088b24984f4209e25981cc359d64448d"}, + {file = "mypy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4c99c3ecf223cf2952638da9cd82793d8f3c0c5fa8b6ae2b2d9ed1e1ff51ba85"}, + {file = "mypy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:550a8b3a19bb6589679a7c3c31f64312e7ff482a816c96e0cecec9ad3a7564dd"}, + {file = "mypy-1.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cbc07246253b9e3d7d74c9ff948cd0fd7a71afcc2b77c7f0a59c26e9395cb152"}, + {file = "mypy-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:a22435632710a4fcf8acf86cbd0d69f68ac389a3892cb23fbad176d1cddaf228"}, + {file = "mypy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6e33bb8b2613614a33dff70565f4c803f889ebd2f859466e42b46e1df76018dd"}, + {file = "mypy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7d23370d2a6b7a71dc65d1266f9a34e4cde9e8e21511322415db4b26f46f6b8c"}, + {file = "mypy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:658fe7b674769a0770d4b26cb4d6f005e88a442fe82446f020be8e5f5efb2fae"}, + {file = "mypy-1.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6e42d29e324cdda61daaec2336c42512e59c7c375340bd202efa1fe0f7b8f8ca"}, + {file = "mypy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:d0b6c62206e04061e27009481cb0ec966f7d6172b5b936f3ead3d74f29fe3dcf"}, + {file = "mypy-1.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:76ec771e2342f1b558c36d49900dfe81d140361dd0d2df6cd71b3db1be155409"}, + {file = "mypy-1.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc95f8386314272bbc817026f8ce8f4f0d2ef7ae44f947c4664efac9adec929"}, + {file = "mypy-1.3.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:faff86aa10c1aa4a10e1a301de160f3d8fc8703b88c7e98de46b531ff1276a9a"}, + {file = "mypy-1.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:8c5979d0deb27e0f4479bee18ea0f83732a893e81b78e62e2dda3e7e518c92ee"}, + {file = "mypy-1.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c5d2cc54175bab47011b09688b418db71403aefad07cbcd62d44010543fc143f"}, + {file = "mypy-1.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:87df44954c31d86df96c8bd6e80dfcd773473e877ac6176a8e29898bfb3501cb"}, + {file = "mypy-1.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:473117e310febe632ddf10e745a355714e771ffe534f06db40702775056614c4"}, + {file = "mypy-1.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:74bc9b6e0e79808bf8678d7678b2ae3736ea72d56eede3820bd3849823e7f305"}, + {file = "mypy-1.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:44797d031a41516fcf5cbfa652265bb994e53e51994c1bd649ffcd0c3a7eccbf"}, + {file = "mypy-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ddae0f39ca146972ff6bb4399f3b2943884a774b8771ea0a8f50e971f5ea5ba8"}, + {file = "mypy-1.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:1c4c42c60a8103ead4c1c060ac3cdd3ff01e18fddce6f1016e08939647a0e703"}, + {file = "mypy-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e86c2c6852f62f8f2b24cb7a613ebe8e0c7dc1402c61d36a609174f63e0ff017"}, + {file = "mypy-1.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f9dca1e257d4cc129517779226753dbefb4f2266c4eaad610fc15c6a7e14283e"}, + {file = "mypy-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:95d8d31a7713510685b05fbb18d6ac287a56c8f6554d88c19e73f724a445448a"}, + {file = "mypy-1.3.0-py3-none-any.whl", hash = "sha256:a8763e72d5d9574d45ce5881962bc8e9046bf7b375b0abf031f3e6811732a897"}, + {file = "mypy-1.3.0.tar.gz", hash = "sha256:e1f4d16e296f5135624b34e8fb741eb0eadedca90862405b1f1fde2040b9bd11"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} +typing-extensions = ">=3.10" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +python2 = ["typed-ast (>=1.4.0,<2)"] +reports = ["lxml"] + +[[package]] +name = "mypy-boto3-cloudformation" +version = "1.26.108" +description = "Type annotations for boto3.CloudFormation 1.26.108 service generated with mypy-boto3-builder 7.14.5" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mypy-boto3-cloudformation-1.26.108.tar.gz", hash = "sha256:feeb7cae3d2b850c8fc848585e1c4410bddc9860198edd14401a1c030789994d"}, + {file = "mypy_boto3_cloudformation-1.26.108-py3-none-any.whl", hash = "sha256:566487e5037170f58229af7f9fb2c62b0e98e5a1aa5bcd5eef54afb38a214561"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "mypy-boto3-config" +version = "1.26.127" +description = "Type annotations for boto3.ConfigService 1.26.127 service generated with mypy-boto3-builder 7.14.5" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mypy-boto3-config-1.26.127.tar.gz", hash = "sha256:e1058ca19796f671abbcd6dac8d73dbcd0cbc3d926399e7ea30fd87bc557c269"}, + {file = "mypy_boto3_config-1.26.127-py3-none-any.whl", hash = "sha256:bbfcff5fe778984bd1f540034415baca263d8cbbaaa2d6b6d727cdfe8a2db76e"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "mypy-boto3-iam" +version = "1.26.97" +description = "Type annotations for boto3.IAM 1.26.97 service generated with mypy-boto3-builder 7.13.0" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mypy-boto3-iam-1.26.97.tar.gz", hash = "sha256:081482f33d768cefedc26988b0b1a9e62b8ce822745b1b25ac7c7469948eac07"}, + {file = "mypy_boto3_iam-1.26.97-py3-none-any.whl", hash = "sha256:11bf9494f9fda041a0d3d5f15b8803e94f537a17a7917600ad3785a51c091461"}, +] + +[package.dependencies] +typing-extensions = ">=4.1.0" + +[[package]] +name = "mypy-boto3-s3" +version = "1.26.127" +description = "Type annotations for boto3.S3 1.26.127 service generated with mypy-boto3-builder 7.14.5" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mypy-boto3-s3-1.26.127.tar.gz", hash = "sha256:0e548b97c6a2589f7bff5d26a1ca101622749771379226e3ad0822629d0613c5"}, + {file = "mypy_boto3_s3-1.26.127-py3-none-any.whl", hash = "sha256:21e647caa18d98dbbc706597c9b27d41674f18850f42b2cfdb9a39b39820e470"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers 
= "python_version < \"3.9\""} + +[[package]] +name = "mypy-boto3-sts" +version = "1.26.136" +description = "Type annotations for boto3.STS 1.26.136 service generated with mypy-boto3-builder 7.14.5" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mypy-boto3-sts-1.26.136.tar.gz", hash = "sha256:d326916d6f21252f7a7c0086a7da3f5e4a6eb6c7470e7588a4c9b825cb0a01fe"}, + {file = "mypy_boto3_sts-1.26.136-py3-none-any.whl", hash = "sha256:447766738b971a0d74c291062fe022f7d75e2ccda7cb1bd0cad5a5612e51c779"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "packaging" +version = "21.3" +description = "Core utilities for Python packages" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, + {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, +] + +[package.dependencies] +pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" + +[[package]] +name = "pathspec" +version = "0.10.3" +description = "Utility library for gitignore style pattern matching of file paths." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pathspec-0.10.3-py3-none-any.whl", hash = "sha256:3c95343af8b756205e2aba76e843ba9520a24dd84f68c22b9f93251507509dd6"}, + {file = "pathspec-0.10.3.tar.gz", hash = "sha256:56200de4077d9d0791465aa9095a01d421861e405b5096955051deefd697d6f6"}, +] + +[[package]] +name = "pbr" +version = "5.11.0" +description = "Python Build Reasonableness" +category = "dev" +optional = false +python-versions = ">=2.6" +files = [ + {file = "pbr-5.11.0-py2.py3-none-any.whl", hash = "sha256:db2317ff07c84c4c63648c9064a79fe9d9f5c7ce85a9099d4b6258b3db83225a"}, + {file = "pbr-5.11.0.tar.gz", hash = "sha256:b97bc6695b2aff02144133c2e7399d5885223d42b7912ffaec2ca3898e673bfe"}, +] + +[[package]] +name = "platformdirs" +version = "2.6.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "platformdirs-2.6.2-py3-none-any.whl", hash = "sha256:83c8f6d04389165de7c9b6f0c682439697887bca0aa2f1c87ef1826be3584490"}, + {file = "platformdirs-2.6.2.tar.gz", hash = "sha256:e1fea1fe471b9ff8332e229df3cb7de4f53eeea4998d3b6bfff542115e998bd2"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.4", markers = "python_version < \"3.8\""} + +[package.extras] +docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=5.3)", "sphinx-autodoc-typehints (>=1.19.5)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] + +[[package]] +name = "pydocstyle" +version = "6.3.0" +description = "Python docstring style checker" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pydocstyle-6.3.0-py3-none-any.whl", hash = "sha256:118762d452a49d6b05e194ef344a55822987a462831ade91ec5c06fd2169d019"}, + {file = "pydocstyle-6.3.0.tar.gz", hash = "sha256:7ce43f0c0ac87b07494eb9c0b462c0b73e6ff276807f204d6b53edc72b7e44e1"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=2.0.0,<5.0.0", markers = "python_version < \"3.8\""} +snowballstemmer = ">=2.2.0" + +[package.extras] +toml = ["tomli (>=1.2.3)"] + +[[package]] +name = "pygments" +version = "2.15.1" +description = "Pygments is a syntax highlighting package written in Python." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"}, + {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"}, +] + +[package.extras] +plugins = ["importlib-metadata"] + +[[package]] +name = "pymdown-extensions" +version = "10.0.1" +description = "Extension pack for Python Markdown." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pymdown_extensions-10.0.1-py3-none-any.whl", hash = "sha256:ae66d84013c5d027ce055693e09a4628b67e9dec5bce05727e45b0918e36f274"}, + {file = "pymdown_extensions-10.0.1.tar.gz", hash = "sha256:b44e1093a43b8a975eae17b03c3a77aad4681b3b56fce60ce746dbef1944c8cb"}, +] + +[package.dependencies] +markdown = ">=3.2" +pyyaml = "*" + +[[package]] +name = "pyparsing" +version = "3.0.9" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +category = "dev" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, + {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pyyaml" +version = "6.0" +description = "YAML parser and emitter for Python" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, + {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, + {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, + {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, + {file = 
"PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, + {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, + {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, + {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, + {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, + {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, + {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, + {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, + {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, + {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, + {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, + {file = 
"PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, + {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, + {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, + {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, +] + +[[package]] +name = "pyyaml-env-tag" +version = "0.1" +description = "A custom YAML tag for referencing environment variables in YAML files. " +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, + {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, +] + +[package.dependencies] +pyyaml = "*" + +[[package]] +name = "rdklib" +version = "0.3.0" +description = "Rule Development Kit Library for AWS Config" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "rdklib-0.3.0-py3-none-any.whl", hash = "sha256:6529bca814c0beb4651a88c996c249b337ee9dfa4fea122da39b79d17b37e4b4"}, + {file = "rdklib-0.3.0.tar.gz", hash = "sha256:bda412ae57c97c0f6bf0d315ee5d07552c38e43555aaad561c35dd1e470c8093"}, +] + +[package.dependencies] +boto3 = "*" +botocore = "*" +rdk = "*" + +[[package]] +name = "regex" +version = "2023.5.5" +description = "Alternative regular expression module, to replace re." 
+category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "regex-2023.5.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48c9ec56579d4ba1c88f42302194b8ae2350265cb60c64b7b9a88dcb7fbde309"}, + {file = "regex-2023.5.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02f4541550459c08fdd6f97aa4e24c6f1932eec780d58a2faa2068253df7d6ff"}, + {file = "regex-2023.5.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53e22e4460f0245b468ee645156a4f84d0fc35a12d9ba79bd7d79bdcd2f9629d"}, + {file = "regex-2023.5.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b870b6f632fc74941cadc2a0f3064ed8409e6f8ee226cdfd2a85ae50473aa94"}, + {file = "regex-2023.5.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:171c52e320fe29260da550d81c6b99f6f8402450dc7777ef5ced2e848f3b6f8f"}, + {file = "regex-2023.5.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aad5524c2aedaf9aa14ef1bc9327f8abd915699dea457d339bebbe2f0d218f86"}, + {file = "regex-2023.5.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a0f874ee8c0bc820e649c900243c6d1e6dc435b81da1492046716f14f1a2a96"}, + {file = "regex-2023.5.5-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e645c757183ee0e13f0bbe56508598e2d9cd42b8abc6c0599d53b0d0b8dd1479"}, + {file = "regex-2023.5.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a4c5da39bca4f7979eefcbb36efea04471cd68db2d38fcbb4ee2c6d440699833"}, + {file = "regex-2023.5.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5e3f4468b8c6fd2fd33c218bbd0a1559e6a6fcf185af8bb0cc43f3b5bfb7d636"}, + {file = "regex-2023.5.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:59e4b729eae1a0919f9e4c0fc635fbcc9db59c74ad98d684f4877be3d2607dd6"}, + {file = "regex-2023.5.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ba73a14e9c8f9ac409863543cde3290dba39098fc261f717dc337ea72d3ebad2"}, + {file = "regex-2023.5.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0bbd5dcb19603ab8d2781fac60114fb89aee8494f4505ae7ad141a3314abb1f9"}, + {file = "regex-2023.5.5-cp310-cp310-win32.whl", hash = "sha256:40005cbd383438aecf715a7b47fe1e3dcbc889a36461ed416bdec07e0ef1db66"}, + {file = "regex-2023.5.5-cp310-cp310-win_amd64.whl", hash = "sha256:59597cd6315d3439ed4b074febe84a439c33928dd34396941b4d377692eca810"}, + {file = "regex-2023.5.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8f08276466fedb9e36e5193a96cb944928301152879ec20c2d723d1031cd4ddd"}, + {file = "regex-2023.5.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cd46f30e758629c3ee91713529cfbe107ac50d27110fdcc326a42ce2acf4dafc"}, + {file = "regex-2023.5.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2910502f718828cecc8beff004917dcf577fc5f8f5dd40ffb1ea7612124547b"}, + {file = "regex-2023.5.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:445d6f4fc3bd9fc2bf0416164454f90acab8858cd5a041403d7a11e3356980e8"}, + {file = "regex-2023.5.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18196c16a584619c7c1d843497c069955d7629ad4a3fdee240eb347f4a2c9dbe"}, + {file = "regex-2023.5.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33d430a23b661629661f1fe8395be2004006bc792bb9fc7c53911d661b69dd7e"}, + {file = 
"regex-2023.5.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72a28979cc667e5f82ef433db009184e7ac277844eea0f7f4d254b789517941d"}, + {file = "regex-2023.5.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f764e4dfafa288e2eba21231f455d209f4709436baeebb05bdecfb5d8ddc3d35"}, + {file = "regex-2023.5.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:23d86ad2121b3c4fc78c58f95e19173790e22ac05996df69b84e12da5816cb17"}, + {file = "regex-2023.5.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:690a17db524ee6ac4a27efc5406530dd90e7a7a69d8360235323d0e5dafb8f5b"}, + {file = "regex-2023.5.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:1ecf3dcff71f0c0fe3e555201cbe749fa66aae8d18f80d2cc4de8e66df37390a"}, + {file = "regex-2023.5.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:811040d7f3dd9c55eb0d8b00b5dcb7fd9ae1761c454f444fd9f37fe5ec57143a"}, + {file = "regex-2023.5.5-cp311-cp311-win32.whl", hash = "sha256:c8c143a65ce3ca42e54d8e6fcaf465b6b672ed1c6c90022794a802fb93105d22"}, + {file = "regex-2023.5.5-cp311-cp311-win_amd64.whl", hash = "sha256:586a011f77f8a2da4b888774174cd266e69e917a67ba072c7fc0e91878178a80"}, + {file = "regex-2023.5.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b6365703e8cf1644b82104cdd05270d1a9f043119a168d66c55684b1b557d008"}, + {file = "regex-2023.5.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a56c18f21ac98209da9c54ae3ebb3b6f6e772038681d6cb43b8d53da3b09ee81"}, + {file = "regex-2023.5.5-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8b942d8b3ce765dbc3b1dad0a944712a89b5de290ce8f72681e22b3c55f3cc8"}, + {file = "regex-2023.5.5-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:844671c9c1150fcdac46d43198364034b961bd520f2c4fdaabfc7c7d7138a2dd"}, + {file = "regex-2023.5.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2ce65bdeaf0a386bb3b533a28de3994e8e13b464ac15e1e67e4603dd88787fa"}, + {file = "regex-2023.5.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fee0016cc35a8a91e8cc9312ab26a6fe638d484131a7afa79e1ce6165328a135"}, + {file = "regex-2023.5.5-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:18f05d14f14a812fe9723f13afafefe6b74ca042d99f8884e62dbd34dcccf3e2"}, + {file = "regex-2023.5.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:941b3f1b2392f0bcd6abf1bc7a322787d6db4e7457be6d1ffd3a693426a755f2"}, + {file = "regex-2023.5.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:921473a93bcea4d00295799ab929522fc650e85c6b9f27ae1e6bb32a790ea7d3"}, + {file = "regex-2023.5.5-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:e2205a81f815b5bb17e46e74cc946c575b484e5f0acfcb805fb252d67e22938d"}, + {file = "regex-2023.5.5-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:385992d5ecf1a93cb85adff2f73e0402dd9ac29b71b7006d342cc920816e6f32"}, + {file = "regex-2023.5.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:890a09cb0a62198bff92eda98b2b507305dd3abf974778bae3287f98b48907d3"}, + {file = "regex-2023.5.5-cp36-cp36m-win32.whl", hash = "sha256:821a88b878b6589c5068f4cc2cfeb2c64e343a196bc9d7ac68ea8c2a776acd46"}, + {file = "regex-2023.5.5-cp36-cp36m-win_amd64.whl", hash = "sha256:7918a1b83dd70dc04ab5ed24c78ae833ae8ea228cef84e08597c408286edc926"}, + {file = "regex-2023.5.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:338994d3d4ca4cf12f09822e025731a5bdd3a37aaa571fa52659e85ca793fb67"}, + {file = "regex-2023.5.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a69cf0c00c4d4a929c6c7717fd918414cab0d6132a49a6d8fc3ded1988ed2ea"}, + {file = "regex-2023.5.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f5e06df94fff8c4c85f98c6487f6636848e1dc85ce17ab7d1931df4a081f657"}, + {file = "regex-2023.5.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8906669b03c63266b6a7693d1f487b02647beb12adea20f8840c1a087e2dfb5"}, + {file = "regex-2023.5.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fda3e50abad8d0f48df621cf75adc73c63f7243cbe0e3b2171392b445401550"}, + {file = "regex-2023.5.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ac2b7d341dc1bd102be849d6dd33b09701223a851105b2754339e390be0627a"}, + {file = "regex-2023.5.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fb2b495dd94b02de8215625948132cc2ea360ae84fe6634cd19b6567709c8ae2"}, + {file = "regex-2023.5.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:aa7d032c1d84726aa9edeb6accf079b4caa87151ca9fabacef31fa028186c66d"}, + {file = "regex-2023.5.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3d45864693351c15531f7e76f545ec35000d50848daa833cead96edae1665559"}, + {file = "regex-2023.5.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21e90a288e6ba4bf44c25c6a946cb9b0f00b73044d74308b5e0afd190338297c"}, + {file = "regex-2023.5.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:10250a093741ec7bf74bcd2039e697f519b028518f605ff2aa7ac1e9c9f97423"}, + {file = "regex-2023.5.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6b8d0c153f07a953636b9cdb3011b733cadd4178123ef728ccc4d5969e67f3c2"}, + {file = "regex-2023.5.5-cp37-cp37m-win32.whl", hash = "sha256:10374c84ee58c44575b667310d5bbfa89fb2e64e52349720a0182c0017512f6c"}, + {file = "regex-2023.5.5-cp37-cp37m-win_amd64.whl", hash = "sha256:9b320677521aabf666cdd6e99baee4fb5ac3996349c3b7f8e7c4eee1c00dfe3a"}, + {file = "regex-2023.5.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:afb1c70ec1e594a547f38ad6bf5e3d60304ce7539e677c1429eebab115bce56e"}, + {file = "regex-2023.5.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cf123225945aa58b3057d0fba67e8061c62d14cc8a4202630f8057df70189051"}, + {file = "regex-2023.5.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a99757ad7fe5c8a2bb44829fc57ced11253e10f462233c1255fe03888e06bc19"}, + {file = "regex-2023.5.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a623564d810e7a953ff1357f7799c14bc9beeab699aacc8b7ab7822da1e952b8"}, + {file = "regex-2023.5.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ced02e3bd55e16e89c08bbc8128cff0884d96e7f7a5633d3dc366b6d95fcd1d6"}, + {file = "regex-2023.5.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cbe6b5be3b9b698d8cc4ee4dee7e017ad655e83361cd0ea8e653d65e469468"}, + {file = "regex-2023.5.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a6e4b0e0531223f53bad07ddf733af490ba2b8367f62342b92b39b29f72735a"}, + {file = "regex-2023.5.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2e9c4f778514a560a9c9aa8e5538bee759b55f6c1dcd35613ad72523fd9175b8"}, + {file = 
"regex-2023.5.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:256f7f4c6ba145f62f7a441a003c94b8b1af78cee2cccacfc1e835f93bc09426"}, + {file = "regex-2023.5.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:bd7b68fd2e79d59d86dcbc1ccd6e2ca09c505343445daaa4e07f43c8a9cc34da"}, + {file = "regex-2023.5.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4a5059bd585e9e9504ef9c07e4bc15b0a621ba20504388875d66b8b30a5c4d18"}, + {file = "regex-2023.5.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:6893544e06bae009916a5658ce7207e26ed17385149f35a3125f5259951f1bbe"}, + {file = "regex-2023.5.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c64d5abe91a3dfe5ff250c6bb267ef00dbc01501518225b45a5f9def458f31fb"}, + {file = "regex-2023.5.5-cp38-cp38-win32.whl", hash = "sha256:7923470d6056a9590247ff729c05e8e0f06bbd4efa6569c916943cb2d9b68b91"}, + {file = "regex-2023.5.5-cp38-cp38-win_amd64.whl", hash = "sha256:4035d6945cb961c90c3e1c1ca2feb526175bcfed44dfb1cc77db4fdced060d3e"}, + {file = "regex-2023.5.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:50fd2d9b36938d4dcecbd684777dd12a407add4f9f934f235c66372e630772b0"}, + {file = "regex-2023.5.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d19e57f888b00cd04fc38f5e18d0efbd91ccba2d45039453ab2236e6eec48d4d"}, + {file = "regex-2023.5.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd966475e963122ee0a7118ec9024388c602d12ac72860f6eea119a3928be053"}, + {file = "regex-2023.5.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db09e6c18977a33fea26fe67b7a842f706c67cf8bda1450974d0ae0dd63570df"}, + {file = "regex-2023.5.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6164d4e2a82f9ebd7752a06bd6c504791bedc6418c0196cd0a23afb7f3e12b2d"}, + {file = "regex-2023.5.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84397d3f750d153ebd7f958efaa92b45fea170200e2df5e0e1fd4d85b7e3f58a"}, + {file = "regex-2023.5.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c3efee9bb53cbe7b285760c81f28ac80dc15fa48b5fe7e58b52752e642553f1"}, + {file = "regex-2023.5.5-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:144b5b017646b5a9392a5554a1e5db0000ae637be4971c9747566775fc96e1b2"}, + {file = "regex-2023.5.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1189fbbb21e2c117fda5303653b61905aeeeea23de4a94d400b0487eb16d2d60"}, + {file = "regex-2023.5.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f83fe9e10f9d0b6cf580564d4d23845b9d692e4c91bd8be57733958e4c602956"}, + {file = "regex-2023.5.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:72aa4746993a28c841e05889f3f1b1e5d14df8d3daa157d6001a34c98102b393"}, + {file = "regex-2023.5.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:de2f780c3242ea114dd01f84848655356af4dd561501896c751d7b885ea6d3a1"}, + {file = "regex-2023.5.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:290fd35219486dfbc00b0de72f455ecdd63e59b528991a6aec9fdfc0ce85672e"}, + {file = "regex-2023.5.5-cp39-cp39-win32.whl", hash = "sha256:732176f5427e72fa2325b05c58ad0b45af341c459910d766f814b0584ac1f9ac"}, + {file = "regex-2023.5.5-cp39-cp39-win_amd64.whl", hash = "sha256:1307aa4daa1cbb23823d8238e1f61292fd07e4e5d8d38a6efff00b67a7cdb764"}, + {file = "regex-2023.5.5.tar.gz", hash = "sha256:7d76a8a1fc9da08296462a18f16620ba73bcbf5909e42383b253ef34d9d5141e"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for 
Humans." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rich" +version = "13.3.5" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +category = "dev" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.3.5-py3-none-any.whl", hash = "sha256:69cdf53799e63f38b95b9bf9c875f8c90e78dd62b2f00c13a911c7a3b9fa4704"}, + {file = "rich-13.3.5.tar.gz", hash = "sha256:2d11b9b8dd03868f09b4fffadc84a6a8cda574e40dc90821bd845720ebb8e89c"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0,<3.0.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "ruamel-yaml" +version = "0.17.21" +description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" +category = "dev" +optional = false +python-versions = ">=3" +files = [ + {file = "ruamel.yaml-0.17.21-py3-none-any.whl", hash = "sha256:742b35d3d665023981bd6d16b3d24248ce5df75fdb4e2924e93a05c1f8b61ca7"}, + {file = "ruamel.yaml-0.17.21.tar.gz", hash = "sha256:8b7ce697a2f212752a35c1ac414471dc16c424c9573be4926b56ff3f5d23b7af"}, +] + +[package.dependencies] +"ruamel.yaml.clib" = {version = ">=0.2.6", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.11\""} + +[package.extras] +docs = ["ryd"] +jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] + +[[package]] +name = "ruamel-yaml-clib" +version = "0.2.7" +description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" +category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d5859983f26d8cd7bb5c287ef452e8aacc86501487634573d260968f753e1d71"}, + {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:debc87a9516b237d0466a711b18b6ebeb17ba9f391eb7f91c649c5c4ec5006c7"}, + {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:df5828871e6648db72d1c19b4bd24819b80a755c4541d3409f0f7acd0f335c80"}, + {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:efa08d63ef03d079dcae1dfe334f6c8847ba8b645d08df286358b1f5293d24ab"}, + {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-win32.whl", hash = "sha256:763d65baa3b952479c4e972669f679fe490eee058d5aa85da483ebae2009d231"}, + {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-win_amd64.whl", hash = "sha256:d000f258cf42fec2b1bbf2863c61d7b8918d31ffee905da62dede869254d3b8a"}, + {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:045e0626baf1c52e5527bd5db361bc83180faaba2ff586e763d3d5982a876a9e"}, + {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-macosx_12_6_arm64.whl", hash = "sha256:721bc4ba4525f53f6a611ec0967bdcee61b31df5a56801281027a3a6d1c2daf5"}, + {file = 
"ruamel.yaml.clib-0.2.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:41d0f1fa4c6830176eef5b276af04c89320ea616655d01327d5ce65e50575c94"}, + {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:4b3a93bb9bc662fc1f99c5c3ea8e623d8b23ad22f861eb6fce9377ac07ad6072"}, + {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-macosx_12_0_arm64.whl", hash = "sha256:a234a20ae07e8469da311e182e70ef6b199d0fbeb6c6cc2901204dd87fb867e8"}, + {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:15910ef4f3e537eea7fe45f8a5d19997479940d9196f357152a09031c5be59f3"}, + {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:370445fd795706fd291ab00c9df38a0caed0f17a6fb46b0f607668ecb16ce763"}, + {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-win32.whl", hash = "sha256:ecdf1a604009bd35c674b9225a8fa609e0282d9b896c03dd441a91e5f53b534e"}, + {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-win_amd64.whl", hash = "sha256:f34019dced51047d6f70cb9383b2ae2853b7fc4dce65129a5acd49f4f9256646"}, + {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2aa261c29a5545adfef9296b7e33941f46aa5bbd21164228e833412af4c9c75f"}, + {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f01da5790e95815eb5a8a138508c01c758e5f5bc0ce4286c4f7028b8dd7ac3d0"}, + {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:40d030e2329ce5286d6b231b8726959ebbe0404c92f0a578c0e2482182e38282"}, + {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c3ca1fbba4ae962521e5eb66d72998b51f0f4d0f608d3c0347a48e1af262efa7"}, + {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-win32.whl", hash = "sha256:7bdb4c06b063f6fd55e472e201317a3bb6cdeeee5d5a38512ea5c01e1acbdd93"}, + {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-win_amd64.whl", hash = "sha256:be2a7ad8fd8f7442b24323d24ba0b56c51219513cfa45b9ada3b87b76c374d4b"}, + {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:91a789b4aa0097b78c93e3dc4b40040ba55bef518f84a40d4442f713b4094acb"}, + {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:99e77daab5d13a48a4054803d052ff40780278240a902b880dd37a51ba01a307"}, + {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:3243f48ecd450eddadc2d11b5feb08aca941b5cd98c9b1db14b2fd128be8c697"}, + {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8831a2cedcd0f0927f788c5bdf6567d9dc9cc235646a434986a852af1cb54b4b"}, + {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-win32.whl", hash = "sha256:3110a99e0f94a4a3470ff67fc20d3f96c25b13d24c6980ff841e82bafe827cac"}, + {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-win_amd64.whl", hash = "sha256:92460ce908546ab69770b2e576e4f99fbb4ce6ab4b245345a3869a0a0410488f"}, + {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5bc0667c1eb8f83a3752b71b9c4ba55ef7c7058ae57022dd9b29065186a113d9"}, + {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:4a4d8d417868d68b979076a9be6a38c676eca060785abaa6709c7b31593c35d1"}, + {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf9a6bc4a0221538b1a7de3ed7bca4c93c02346853f44e1cd764be0023cd3640"}, + {file = 
"ruamel.yaml.clib-0.2.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a7b301ff08055d73223058b5c46c55638917f04d21577c95e00e0c4d79201a6b"}, + {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-win32.whl", hash = "sha256:d5e51e2901ec2366b79f16c2299a03e74ba4531ddcfacc1416639c557aef0ad8"}, + {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-win_amd64.whl", hash = "sha256:184faeaec61dbaa3cace407cffc5819f7b977e75360e8d5ca19461cd851a5fc5"}, + {file = "ruamel.yaml.clib-0.2.7.tar.gz", hash = "sha256:1f08fd5a2bea9c4180db71678e850b995d2a5f4537be0e94557668cf0f5f9497"}, +] + +[[package]] +name = "ruff" +version = "0.0.269" +description = "An extremely fast Python linter, written in Rust." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.0.269-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:3569bcdee679045c09c0161fabc057599759c49219a08d9a4aad2cc3982ccba3"}, + {file = "ruff-0.0.269-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:56347da63757a56cbce7d4b3d6044ca4f1941cd1bbff3714f7554360c3361f83"}, + {file = "ruff-0.0.269-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6da8ee25ef2f0cc6cc8e6e20942c1d44d25a36dce35070d7184655bc14f63f63"}, + {file = "ruff-0.0.269-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd81b8e681b9eaa6cf15484f3985bd8bd97c3d114e95bff3e8ea283bf8865062"}, + {file = "ruff-0.0.269-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f19f59ca3c28742955241fb452f3346241ddbd34e72ac5cb3d84fadebcf6bc8"}, + {file = "ruff-0.0.269-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:f062059b8289a4fab7f6064601b811d447c2f9d3d432a17f689efe4d68988450"}, + {file = "ruff-0.0.269-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f5dc7aac52c58e82510217e3c7efd80765c134c097c2815d59e40face0d1fe6"}, + {file = "ruff-0.0.269-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e131b4dbe798c391090c6407641d6ab12c0fa1bb952379dde45e5000e208dabb"}, + {file = "ruff-0.0.269-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a374434e588e06550df0f8dcb74777290f285678de991fda4e1063c367ab2eb2"}, + {file = "ruff-0.0.269-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:cec2f4b84a14b87f1b121488649eb5b4eaa06467a2387373f750da74bdcb5679"}, + {file = "ruff-0.0.269-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:374b161753a247904aec7a32d45e165302b76b6e83d22d099bf3ff7c232c888f"}, + {file = "ruff-0.0.269-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9ca0a1ddb1d835b5f742db9711c6cf59f213a1ad0088cb1e924a005fd399e7d8"}, + {file = "ruff-0.0.269-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5a20658f0b97d207c7841c13d528f36d666bf445b00b01139f28a8ccb80093bb"}, + {file = "ruff-0.0.269-py3-none-win32.whl", hash = "sha256:03ff42bc91ceca58e0f0f072cb3f9286a9208f609812753474e799a997cdad1a"}, + {file = "ruff-0.0.269-py3-none-win_amd64.whl", hash = "sha256:f3b59ccff57b21ef0967ea8021fd187ec14c528ec65507d8bcbe035912050776"}, + {file = "ruff-0.0.269-py3-none-win_arm64.whl", hash = "sha256:bbeb857b1e508a4487bdb02ca1e6d41dd8d5ac5335a5246e25de8a3dff38c1ff"}, + {file = "ruff-0.0.269.tar.gz", hash = "sha256:11ddcfbab32cf5c420ea9dd5531170ace5a3e59c16d9251c7bd2581f7b16f602"}, +] + +[[package]] +name = "s3transfer" +version = "0.6.0" +description = "An Amazon S3 Transfer Manager" +category = "main" +optional = false +python-versions = ">= 3.7" +files = [ + {file = 
"s3transfer-0.6.0-py3-none-any.whl", hash = "sha256:06176b74f3a15f61f1b4f25a1fc29a4429040b7647133a463da8fa5bd28d5ecd"}, + {file = "s3transfer-0.6.0.tar.gz", hash = "sha256:2ed07d3866f523cc561bf4a00fc5535827981b117dd7876f036b0c1aca42c947"}, +] + +[package.dependencies] +botocore = ">=1.12.36,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] + +[[package]] +name = "safety" +version = "2.3.5" +description = "Checks installed dependencies for known vulnerabilities and licenses." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "safety-2.3.5-py3-none-any.whl", hash = "sha256:2227fcac1b22b53c1615af78872b48348661691450aa25d6704a5504dbd1f7e2"}, + {file = "safety-2.3.5.tar.gz", hash = "sha256:a60c11f8952f412cbb165d70cb1f673a3b43a2ba9a93ce11f97e6a4de834aa3a"}, +] + +[package.dependencies] +Click = ">=8.0.2" +dparse = ">=0.6.2" +packaging = ">=21.0,<22.0" +requests = "*" +"ruamel.yaml" = ">=0.17.21" +setuptools = ">=19.3" + +[package.extras] +github = ["jinja2 (>=3.1.0)", "pygithub (>=1.43.3)"] +gitlab = ["python-gitlab (>=1.3.0)"] + +[[package]] +name = "setuptools" +version = "65.6.3" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "setuptools-65.6.3-py3-none-any.whl", hash = "sha256:57f6f22bde4e042978bcd50176fdb381d7c21a9efa4041202288d3737a0c6a54"}, + {file = "setuptools-65.6.3.tar.gz", hash = "sha256:a7620757bf984b58deaf32fc8a4577a9bbc0850cf92c20e1ce41c38c19e5fb75"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smmap" +version = "5.0.0" +description = "A pure Python implementation of a sliding window memory map manager" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"}, + {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"}, +] + +[[package]] +name = "snowballstemmer" +version = "2.2.0" +description = "This package provides 29 stemmers for 28 
languages generated from Snowball algorithms." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, + {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, +] + +[[package]] +name = "stevedore" +version = "3.5.2" +description = "Manage dynamic plugins for Python applications" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "stevedore-3.5.2-py3-none-any.whl", hash = "sha256:fa2630e3d0ad3e22d4914aff2501445815b9a4467a6edc49387c667a38faf5bf"}, + {file = "stevedore-3.5.2.tar.gz", hash = "sha256:cf99f41fc0d5a4f185ca4d3d42b03be9011b0a1ec1a4ea1a282be1b4b306dcc2"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""} +pbr = ">=2.0.0,<2.1.0 || >2.1.0" + +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +category = "dev" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "typed-ast" +version = "1.5.4" +description = "a fork of Python 2 and 3 ast modules with type comment support" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, + {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, + {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, + {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, + {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, + {file = 
"typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, + {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, + {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, + {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, + {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, + {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, + {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, + {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, + {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, + {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, + {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, +] + +[[package]] +name = "types-awscrt" +version = "0.16.17" +description = "Type annotations and code completion for awscrt" +category = "dev" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "types_awscrt-0.16.17-py3-none-any.whl", hash = "sha256:e28fb3f20568ce9e96e33e01e0b87b891822f36b8f368adb582553b016d4aa08"}, + {file = "types_awscrt-0.16.17.tar.gz", hash = "sha256:9e447df3ad46767887d14fa9c856df94f80e8a0a7f0169577ab23b52ee37bcdf"}, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.10" +description = "Typing stubs for PyYAML" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "types-PyYAML-6.0.12.10.tar.gz", hash = "sha256:ebab3d0700b946553724ae6ca636ea932c1b0868701d4af121630e78d695fc97"}, + {file = "types_PyYAML-6.0.12.10-py3-none-any.whl", hash = "sha256:662fa444963eff9b68120d70cda1af5a5f2aa57900003c2006d7626450eaae5f"}, +] + +[[package]] +name = "types-s3transfer" +version = "0.6.1" +description = "Type annotations and code completion for s3transfer" +category = "dev" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "types_s3transfer-0.6.1-py3-none-any.whl", hash = 
"sha256:6d1ac1dedac750d570428362acdf60fdd4f277b0788855c3894d3226756b2bfb"}, + {file = "types_s3transfer-0.6.1.tar.gz", hash = "sha256:75ac1d7143d58c1e6af467cfd4a96c67ee058a3adf7c249d9309999e1f5f41e4"}, +] + +[package.dependencies] +types-awscrt = "*" + +[[package]] +name = "typing-extensions" +version = "4.4.0" +description = "Backported and Experimental Type Hints for Python 3.7+" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"}, + {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"}, +] + +[[package]] +name = "urllib3" +version = "1.26.13" +description = "HTTP library with thread-safe connection pooling, file post, and more." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.13-py2.py3-none-any.whl", hash = "sha256:47cc05d99aaa09c9e72ed5809b60e7ba354e64b59c9c173ac3018642d8bb41fc"}, + {file = "urllib3-1.26.13.tar.gz", hash = "sha256:c083dd0dce68dbfbe1129d5271cb90f9447dea7d52097c6e0126120c521ddea8"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "watchdog" +version = "3.0.0" +description = "Filesystem events monitoring" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41"}, + {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397"}, + {file = "watchdog-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96"}, + {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae"}, + {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9"}, + {file = "watchdog-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7"}, + {file = "watchdog-3.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674"}, + {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f"}, + {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc"}, + {file = "watchdog-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3"}, + {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3"}, + {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0"}, + {file = "watchdog-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8"}, + {file = "watchdog-3.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100"}, + {file = "watchdog-3.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346"}, + {file = "watchdog-3.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33"}, + {file = "watchdog-3.0.0-py3-none-win32.whl", hash = "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f"}, + {file = "watchdog-3.0.0-py3-none-win_amd64.whl", hash = "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c"}, + {file = "watchdog-3.0.0-py3-none-win_ia64.whl", hash = "sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759"}, + {file = "watchdog-3.0.0.tar.gz", hash = "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + +[[package]] +name = "zipp" +version = "3.11.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "zipp-3.11.0-py3-none-any.whl", hash = "sha256:83a28fcb75844b5c0cdaf5aa4003c2d728c77e05f5aeabe8e95e56727005fbaa"}, + {file = "zipp-3.11.0.tar.gz", hash = "sha256:a7a22e05929290a67401440b39690ae6563279bced5f314609d9d03798f56766"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.7.2" +content-hash = "ad2a2d0ac50f61faa70f6b3dea42914278a5524cfaf822aa6aa265881915135e" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..6a15747f --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,138 @@ +# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. 
A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +[tool.poetry] +name = "rdk" +version = "0.15.0" +description = "Rule Development Kit CLI for AWS Config" +authors = [ + "AWS RDK Maintainers ", +] +repository = "https://github.com/awslabs/aws-config-rdk" +homepage = "https://github.com/awslabs/aws-config-rdk" +readme = "README.md" +packages = [{include = "rdk"}] +keywords = ["amazon", "aws", "awslabs", "rdk", "config", "rules", "compliance"] +documentation = "https://aws-config-rdk.readthedocs.io" +classifiers = [ + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", +] +include = [ + "README.md", + "NOTICE.txt", + "LICENSE", + "rdk/template/*", + "rdk/template/terraform/*", + "rdk/template/terraform/0.11/*", + "rdk/template/terraform/0.12/*", + "rdk/template/example_ci/*", + "rdk/template/runtime/*", + "rdk/template/runtime/java8/*", + "rdk/template/runtime/java8/jars/*", + "rdk/template/runtime/java8/src/main/java/com/rdk/*", + "rdk/template/runtime/nodejs4.3/*", + "rdk/template/runtime/python3.7/*", + "rdk/template/runtime/python3.7-lib/*", + "rdk/template/runtime/python3.8/*", + "rdk/template/runtime/python3.8-lib/*", + "rdk/template/runtime/python3.9/*", + "rdk/template/runtime/python3.9-lib/*", + "rdk/template/runtime/python3.10/*", + "rdk/template/runtime/python3.10-lib/*", + "rdk/template/runtime/dotnetcore1.0/*", + "rdk/template/runtime/dotnetcore1.0/bin/*", + "rdk/template/runtime/dotnetcore1.0/obj/*", + "rdk/template/runtime/dotnetcore1.0/obj/Debug/*", + "rdk/template/runtime/dotnetcore1.0/obj/Release/netcoreapp1.0/*", + "rdk/template/runtime/dotnetcore1.0/obj/Release/netcoreapp2.0/*", + "rdk/template/runtime/dotnetcore2.0/*", + "rdk/template/runtime/dotnetcore2.0/bin/*", + "rdk/template/runtime/dotnetcore2.0/obj/*", + "rdk/template/runtime/dotnetcore2.0/obj/Debug/*", + "rdk/template/runtime/dotnetcore2.0/obj/Release/netcoreapp1.0/*", + "rdk/template/runtime/dotnetcore2.0/obj/Release/netcoreapp2.0/*", +] +license = "Apache-2.0" + +[tool.poetry.scripts] +rdk = "rdk.cli:main" + +[tool.bandit] +exclude_dirs = ["tests"] + +# Styling and linting Configurations +[tool.isort] +profile = "black" +line_length = 120 + +[tool.black] +line-length = 120 +target-version = ["py310"] + +[tool.ruff] +line-length = 120 +target-version = "py310" + +[tool.poe.tasks] +isort = "isort --profile=black ." +black = "black ." +check-black = {cmd = "black . --check --diff", help = "Check code for black styling"} +check-isort = {cmd = "isort --check --profile=black .", help = "Check code for import styling"} +check-docstrings = "pydocstyle -e ." 
+check-ruff = "ruff check rdk" +check = ["check-isort", "check-black"] +lint = ["check-docstrings", "check-ruff"] +fix = ["isort", "black"] +# test = "pytest --cov=rdk --cov-report=xml --cov-report=term" +ruff = "ruff check --fix rdk" +safety = "safety check" +bandit = "bandit -r rdk" +security = ["safety", "bandit"] +update-doc-deps = {cmd = "poetry export --only=docs -f requirements.txt > docs/requirements.txt", help = "Generate an updated requirements.txt for docs" } +serve-docs = {cmd = "mkdocs serve"} +# requires poethepoet outside of poetry. +install = "poetry install" +build = "poetry build" + +[tool.poetry.dependencies] +python = "^3.7.2" +boto3 = "^1.26.139" +pyyaml = "^6.0" + +[tool.poetry.group.dev.dependencies] +rdklib = "^0.3.0" +black = "^22.12.0" +pydocstyle = "^6.3.0" +isort = {extras = ["toml"], version = "^5.11.4"} +mypy = "^1.3.0" +debugpy = "^1.6.7" +ruff = "^0.0.269" + +[tool.poetry.group.security.dependencies] +bandit = "^1.7.5" +safety = "^2.3.5" + +[tool.poetry.group.types.dependencies] +types-pyyaml = "^6.0.12.10" +boto3-stubs = {extras = ["cloudformation", "config", "iam", "s3", "sts"], version = "^1.26.139"} + + +[tool.poetry.group.docs.dependencies] +mkdocs = "^1.4.3" +mkdocs-material = "^9.1.14" +mkdocstrings-python = "^1.0.0" +markdown-include = "^0.8.1" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/rdk-workshop/WorkshopSetup.yaml b/rdk-workshop/WorkshopSetup.yaml index 1c181b89..6785c9c7 100644 --- a/rdk-workshop/WorkshopSetup.yaml +++ b/rdk-workshop/WorkshopSetup.yaml @@ -292,6 +292,7 @@ Resources: Principal: Service: - "lambda.amazonaws.com" + - "ssm.amazonaws.com" Action: "sts:AssumeRole" Policies: - @@ -312,6 +313,9 @@ Resources: Action: - "iam:AddUserToGroup" - "iam:ListUsers" + - "s3:GetBucketVersioning" + - "s3:ListBucketVersions" + - "s3:PutBucketVersioning" Resource: "*" RoleName: WorkshopRemediationRole @@ -390,4 +394,4 @@ Resources: StringEquals: s3:x-amz-acl: bucket-owner-full-control -#-----------------------------------------------------------------------------# \ No newline at end of file +#-----------------------------------------------------------------------------# diff --git a/rdk-workshop/instructions.md b/rdk-workshop/instructions.md index 7f50805b..210f13c5 100644 --- a/rdk-workshop/instructions.md +++ b/rdk-workshop/instructions.md @@ -77,21 +77,27 @@ Tips: To enable AWS Config at scale, it is recommended to use AWS CloudFormation Note: It might take up to 2 hours to get the information about the CIS benchmark into Security Hub, we will see the result later. ## Task 3: Launch a Managed Config Rules -3. Deploy the Managed Config Rule "s3-bucket-versioning-enabled" with remediation +3. Deploy the Managed Config Rule "s3-bucket-versioning-enabled" * Navigate to the Config Service. * On the left panel, click on "Rules" * Click on "Add rule" * Type "versioning" into the "Filter by rule.." box. * Select "s3-bucket-versioning-enabled" - * In the configuration of the rule, scroll down to "Choose remediation action" - * Select "AWS-ConfigureS3BucketVersioning" in the remediation action drop-down - * Select "No" for Auto remediation * Choose "BucketName" in the Resource ID parameter drop-down + * For "AutomationAssumeRole" input the ARN of the WorkshopRemediationRole created from the RDKWorkshopSetup Cloudformation stack * Leave the other options at the defaults. * Click "Save" -## Task 4: Remediate a Noncompliant resource -4. 
Visualize the results for the rule "s3-bucket-versioning-enabled" +## Task 4: Add Remediation Action +4. Edit the rule for remediation + * Select the previously created "s3-bucket-versioning-enabled" rule + * Edit the remediation action + * Select "AWS-ConfigureS3BucketVersioning" in the remediation action drop-down + * Select "No" for Auto remediation + * Click "Save changes" + +## Task 5: Remediate a Noncompliant resource +5. Visualize the results for the rule "s3-bucket-versioning-enabled" * Navigate to the Config Service. * On the left panel, click on "Rules" * Search for "s3-bucket-versioning-enabled" in the list of rule (scroll down if necessary). @@ -99,15 +105,15 @@ Note: It might take up to 2 hours to get the information about the CIS benchmark * Refresh the page until there is no banner ["No results available" or "Evaluating"] on the top (meaning that the rule has been executed) * Search for the evaluation result on a bucket named "my-bucket-to-remediate-*accountid*-*regionname*" -5. Remediate the non-compliant bucket "my-bucket-to-remediate-*accountid*-*regionname*" +6. Remediate the non-compliant bucket "my-bucket-to-remediate-*accountid*-*regionname*" * Check the box next to the line showing **Noncompliant** * Click on "Remediate" * Refresh (with double arrow button) until completion. Note 1: the "Action executed successfully" and showing **Compliant** is not at the same time (it takes up to ~5 min), keep refreshing. Note 2: on the console, the filter of the result show the "noncompliant" by default, you will need to switch the compliance status filter to see the "compliant". ## (Optional) Going further -6. Discover all the available [Managed Config Rules](https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html). +7. Discover all the available [Managed Config Rules](https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html). -7. Navigate to [AWS System Manager Automation Documents](https://eu-west-1.console.aws.amazon.com/systems-manager/documents?region=eu-west-1) to discover all existing remediations actions. +8. Navigate to [AWS System Manager Automation Documents](https://us-east-1.console.aws.amazon.com/systems-manager/documents?region=us-east-1) to discover all existing remediation actions. # Lab 2: Writing Your First Config Rule @@ -252,7 +258,7 @@ rdk modify MFA_ENABLED_RULE --maximum-frequency One_Hour 2. In your text editor, open up the MFA_ENABLED_RULE.py file. If you do not yet have a working version of this Rule from the last lab, go ahead and copy it from the solutions section at the end of this lab guide. -3. Remember that in a Periodic invocation of the Rule the configuration_item passed in to your evaluate_compliance function will be empty so we'll need to make an API call to retrieve all of the IAM users in the account and evaluate them all using the same logic as we used in the previous exercise. +3. Remember that in a Periodic invocation of the Rule the configuration_item passed in to your evaluate_compliance function will be empty, so we'll need to make an API call to retrieve all the IAM users in the account and evaluate them all using the same logic as we used in the previous exercise. 4. To make that a little easier, let's refactor what we've got to make a trigger-independent `evaluate_user(username)` function out of your existing compliance evaluation logic, which we will conditionally call if the configuration_item is present. 
This will preserve our existing functionality, and should look something like the following pseudo-code: @@ -290,14 +296,14 @@ rdk modify MFA_ENABLED_RULE --maximum-frequency One_Hour # Lab 4 (on your own time): Remediation -In this lab, you will use CloudWatch Events and a Lambda Function to remediate non-compliant resources. +In this lab, you will use EventBridge and a Lambda Function to remediate non-compliant resources. ## Lab Overview ### Objectives After completing this lab, you will be able to: -- Configure CloudWatch Events to trigger a Lambda Function when the compliance state of a Config Rule changes +- Configure Amazon EventBridge to trigger a Lambda Function when the compliance state of a Config Rule changes - Create a Lambda Function to remediate compliance issues @@ -312,7 +318,7 @@ After completing this lab, you will be able to: 3. Navigate to the Lambda service. Click on "Create Function" and choose "Author From Scratch." For the Name enter "MFA_ENABLED_REMEDIATION", and for Runtime choose python3.7. For the Role drop-down select "Choose an existing role", and in the "Existing role" drop-down select the "WorkshopRemediationRole" that was created by the lab setup CloudFormation template. Click on "Create Function" to complete the Function creation. -4. Navigate to the CloudWatch service. Click on "Rules" in the left-hand navigation. +4. Navigate to the EventBridge service. Click on "Rules" in the left-hand navigation. 5. Click on "Create rule". For Event Source choose "Event Pattern". For Service Name choose "Config", and for Event Type choose "Config Rules Compliance Change". Leave most of the filters to the "Any ..." settings, but change "Any rule name" to "Specific rule name(s)", and type "MFA_ENABLED_RULE" in the text box. @@ -322,7 +328,7 @@ After completing this lab, you will be able to: 8. Go back to your MFA_ENABLED_REMEDIATION Lambda Function. It's time to update it to secure your environment! -9. The CloudWatch Event Rule will send an Event to your Lambda function every time the MFA_ENABLED_RULE changes compliance status for any IAM user. The event that Lambda receives will look something like this: +9. The EventBridge Rule will send an Event to your Lambda function every time the MFA_ENABLED_RULE changes compliance status for any IAM user. The event that Lambda receives will look something like this: ~~~~~ { @@ -382,7 +388,7 @@ After completing this lab, you will be able to: 12. Some other hints: * Make sure you add `import boto3` to the top of your python code. - * The CloudWatch Event will contain the unique ID of the IAM User, not the Username. You will need to call IAM ListUsers and loop through the returned list. + * The EventBridge will contain the unique ID of the IAM User, not the Username. You will need to call IAM ListUsers and loop through the returned list. * If you are using an active AWS account, consider simply logging messages to show that the function is working using the print() statement, rather than making changes to your account. 13. For a basic solution to the exercise, scroll to the end of this lab guide. diff --git a/rdk/__init__.py b/rdk/__init__.py index b573c0b6..d03245c4 100644 --- a/rdk/__init__.py +++ b/rdk/__init__.py @@ -6,4 +6,4 @@ # # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
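Tasks 3-5 of the workshop above wire the "AWS-ConfigureS3BucketVersioning" remediation onto the "s3-bucket-versioning-enabled" managed rule through the console. The same wiring can also be scripted with a single AWS Config API call. The sketch below is illustrative only and is not part of this patch; the role ARN uses a placeholder account ID, and the parameter names (BucketName, AutomationAssumeRole) are assumed to match the SSM document's inputs, so verify them in your own account before use.

~~~~~
# Illustrative sketch only (not part of this patch): attach the remediation
# action that Tasks 3-5 configure by hand in the console.
# Assumes the managed rule "s3-bucket-versioning-enabled" is already deployed
# and that the WorkshopRemediationRole from the RDKWorkshopSetup stack exists.
import boto3

ASSUME_ROLE_ARN = "arn:aws:iam::123456789012:role/WorkshopRemediationRole"  # placeholder account ID

config = boto3.client("config")
config.put_remediation_configurations(
    RemediationConfigurations=[
        {
            "ConfigRuleName": "s3-bucket-versioning-enabled",
            "TargetType": "SSM_DOCUMENT",
            "TargetId": "AWS-ConfigureS3BucketVersioning",
            "Parameters": {
                # The non-compliant bucket's name is forwarded from the evaluation result.
                "BucketName": {"ResourceValue": {"Value": "RESOURCE_ID"}},
                "AutomationAssumeRole": {"StaticValue": {"Values": [ASSUME_ROLE_ARN]}},
            },
            "Automatic": False,  # the lab selects "No" for auto remediation
        }
    ]
)
~~~~~

Setting Automatic to False mirrors the lab's manual-remediation choice, so a non-compliant bucket still needs the explicit "Remediate" click in the console (or an equivalent start-remediation-execution call) before versioning is enabled.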
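For Lab 4 above, the hints note that the EventBridge event carries the IAM user's unique ID rather than the user name, and suggest logging instead of changing the account. A minimal handler sketch along those lines follows; it is not the lab's provided solution, and the detail field names assume the standard "Config Rules Compliance Change" event shape, so check them against the sample event printed in the lab guide.

~~~~~
# Illustrative sketch only (not the lab's provided solution).
# Resolves the IAM user's unique ID from the compliance-change event to a user
# name via ListUsers, and only logs what it would remediate.
import boto3


def lambda_handler(event, context):
    detail = event.get("detail", {})
    resource_id = detail.get("resourceId")  # IAM user's unique ID (e.g. "AIDA...")
    new_status = detail.get("newEvaluationResult", {}).get("complianceType")

    if new_status != "NON_COMPLIANT":
        print(f"Nothing to remediate for {resource_id}: status is {new_status}")
        return

    iam = boto3.client("iam")
    # ListUsers returns UserId alongside UserName, so page through until we find a match.
    for page in iam.get_paginator("list_users").paginate():
        for user in page["Users"]:
            if user["UserId"] == resource_id:
                # In a sandbox account you might enforce MFA here; for the lab, just log.
                print(f"Would remediate IAM user {user['UserName']} (MFA not enabled)")
                return

    print(f"No IAM user found with unique ID {resource_id}")
~~~~~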
-MY_VERSION = "0.9.0" +MY_VERSION = "0.15.0" diff --git a/rdk/rdk.py b/rdk/rdk.py index 35c06f36..4cd52bbc 100644 --- a/rdk/rdk.py +++ b/rdk/rdk.py @@ -1,10 +1,16 @@ # Copyright 2017-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +# Licensed under the Apache License, Version 2.0 (the "License"). +# +# You may not use this file except in compliance with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # -# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +# or in the "license" file accompanying this file. +# +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# +# See the License for the specific language governing permissions and limitations under the License. import argparse import base64 import fileinput @@ -69,55 +75,112 @@ "sa-east-1": "5", } -RDKLIB_LAYER_SAR_ID = "arn:aws:serverlessrepo:ap-southeast-1:711761543063:applications/rdklib" +RDKLIB_LAYER_SAR_ID = ( + "arn:aws:serverlessrepo:ap-southeast-1:711761543063:applications/rdklib" +) RDKLIB_ARN_STRING = "arn:aws:lambda:{region}:711761543063:layer:rdklib-layer:{version}" -PARALLEL_COMMAND_THROTTLE_PERIOD = 2 # 2 seconds, used in running commands in parallel over multiple regions +PARALLEL_COMMAND_THROTTLE_PERIOD = ( + 2 # 2 seconds, used in running commands in parallel over multiple regions +) -# this need to be update whenever config service supports more resource types : https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html +# This need to be update whenever config service supports more resource types +# See: https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html accepted_resource_types = [ "AWS::ACM::Certificate", + "AWS::AccessAnalyzer::Analyzer", + "AWS::AmazonMQ::Broker", "AWS::ApiGateway::RestApi", "AWS::ApiGateway::Stage", "AWS::ApiGatewayV2::Api", "AWS::ApiGatewayV2::Stage", + "AWS::AppConfig::Application", + "AWS::AppConfig::ConfigurationProfile", + "AWS::AppConfig::DeploymentStrategy", + "AWS::AppConfig::Environment", + "AWS::AppFlow::Flow", + "AWS::AppStream::DirectoryConfig", + "AWS::AppSync::GraphQLApi", + "AWS::Athena::DataCatalog", + "AWS::Athena::WorkGroup", + "AWS::AuditManager::Assessment", "AWS::AutoScaling::AutoScalingGroup", "AWS::AutoScaling::LaunchConfiguration", "AWS::AutoScaling::ScalingPolicy", "AWS::AutoScaling::ScheduledAction", + "AWS::AutoScaling::WarmPool", "AWS::Backup::BackupPlan", "AWS::Backup::BackupSelection", "AWS::Backup::BackupVault", "AWS::Backup::RecoveryPoint", + "AWS::Backup::ReportPlan", + "AWS::Batch::ComputeEnvironment", + "AWS::Batch::JobQueue", + "AWS::Budgets::BudgetsAction", + "AWS::Cloud9::EnvironmentEC2", "AWS::CloudFormation::Stack", "AWS::CloudFront::Distribution", "AWS::CloudFront::StreamingDistribution", "AWS::CloudTrail::Trail", "AWS::CloudWatch::Alarm", + "AWS::CloudWatch::MetricStream", "AWS::CodeBuild::Project", "AWS::CodeDeploy::Application", "AWS::CodeDeploy::DeploymentConfig", "AWS::CodeDeploy::DeploymentGroup", + "AWS::CodeGuruReviewer::RepositoryAssociation", "AWS::CodePipeline::Pipeline", + 
"AWS::Config::ConfigurationRecorder", "AWS::Config::ConformancePackCompliance", "AWS::Config::ResourceCompliance", + "AWS::Connect::PhoneNumber", + "AWS::CustomerProfiles::Domain", + "AWS::DMS::Certificate", + "AWS::DMS::EventSubscription", + "AWS::DMS::ReplicationInstance", + "AWS::DMS::ReplicationSubnetGroup", + "AWS::DMS::ReplicationTask", + "AWS::DataSync::LocationEFS", + "AWS::DataSync::LocationFSxLustre", + "AWS::DataSync::LocationFSxWindows", + "AWS::DataSync::LocationHDFS", + "AWS::DataSync::LocationNFS", + "AWS::DataSync::LocationObjectStorage", + "AWS::DataSync::LocationS3", + "AWS::DataSync::LocationSMB", + "AWS::DataSync::Task", + "AWS::Detective::Graph", + "AWS::DeviceFarm::InstanceProfile", + "AWS::DeviceFarm::Project", + "AWS::DeviceFarm::TestGridProject", "AWS::DynamoDB::Table", "AWS::EC2::CustomerGateway", + "AWS::EC2::DHCPOptions", + "AWS::EC2::EC2Fleet", "AWS::EC2::EIP", "AWS::EC2::EgressOnlyInternetGateway", "AWS::EC2::FlowLog", "AWS::EC2::Host", + "AWS::EC2::IPAM", "AWS::EC2::Instance", "AWS::EC2::InternetGateway", "AWS::EC2::LaunchTemplate", "AWS::EC2::NatGateway", "AWS::EC2::NetworkAcl", + "AWS::EC2::NetworkInsightsAccessScopeAnalysis", + "AWS::EC2::NetworkInsightsPath", "AWS::EC2::NetworkInterface", "AWS::EC2::RegisteredHAInstance", "AWS::EC2::RouteTable", "AWS::EC2::SecurityGroup", "AWS::EC2::Subnet", + "AWS::EC2::SubnetRouteTableAssociation", + "AWS::EC2::TrafficMirrorFilter", + "AWS::EC2::TrafficMirrorSession", + "AWS::EC2::TrafficMirrorTarget", "AWS::EC2::TransitGateway", + "AWS::EC2::TransitGatewayAttachment", + "AWS::EC2::TransitGatewayRouteTable", "AWS::EC2::VPC", "AWS::EC2::VPCEndpoint", "AWS::EC2::VPCEndpointService", @@ -126,33 +189,122 @@ "AWS::EC2::VPNGateway", "AWS::EC2::Volume", "AWS::ECR::PublicRepository", + "AWS::ECR::PullThroughCacheRule", + "AWS::ECR::RegistryPolicy", "AWS::ECR::Repository", "AWS::ECS::Cluster", "AWS::ECS::Service", "AWS::ECS::TaskDefinition", "AWS::EFS::AccessPoint", "AWS::EFS::FileSystem", + "AWS::EKS::Addon", "AWS::EKS::Cluster", + "AWS::EKS::FargateProfile", + "AWS::EKS::IdentityProviderConfig", "AWS::EMR::SecurityConfiguration", "AWS::ElasticBeanstalk::Application", "AWS::ElasticBeanstalk::ApplicationVersion", "AWS::ElasticBeanstalk::Environment", "AWS::ElasticLoadBalancing::LoadBalancer", + "AWS::ElasticLoadBalancingV2::Listener", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::ElasticSearch::Domain", + "AWS::EventSchemas::Discoverer", + "AWS::EventSchemas::Registry", + "AWS::EventSchemas::RegistryPolicy", + "AWS::EventSchemas::Schema", + "AWS::Events::ApiDestination", + "AWS::Events::Archive", + "AWS::Events::Connection", + "AWS::Events::Endpoint", + "AWS::Events::EventBus", + "AWS::Events::Rule", + "AWS::FIS::ExperimentTemplate", + "AWS::FraudDetector::EntityType", + "AWS::FraudDetector::Label", + "AWS::FraudDetector::Outcome", + "AWS::FraudDetector::Variable", + "AWS::GlobalAccelerator::Accelerator", + "AWS::GlobalAccelerator::EndpointGroup", + "AWS::GlobalAccelerator::Listener", + "AWS::Glue::Classifier", + "AWS::Glue::Job", + "AWS::Glue::MLTransform", + "AWS::GroundStation::Config", "AWS::GuardDuty::Detector", + "AWS::GuardDuty::Filter", + "AWS::GuardDuty::IPSet", + "AWS::GuardDuty::ThreatIntelSet", + "AWS::HealthLake::FHIRDatastore", "AWS::IAM::Group", "AWS::IAM::Policy", "AWS::IAM::Role", "AWS::IAM::User", + "AWS::IVS::Channel", + "AWS::IVS::PlaybackKeyPair", + "AWS::IVS::RecordingConfiguration", + "AWS::ImageBuilder::ContainerRecipe", + "AWS::ImageBuilder::DistributionConfiguration", + 
"AWS::ImageBuilder::ImagePipeline", + "AWS::ImageBuilder::InfrastructureConfiguration", + "AWS::IoT::AccountAuditConfiguration", + "AWS::IoT::Authorizer", + "AWS::IoT::CustomMetric", + "AWS::IoT::Dimension", + "AWS::IoT::FleetMetric", + "AWS::IoT::MitigationAction", + "AWS::IoT::Policy", + "AWS::IoT::RoleAlias", + "AWS::IoT::ScheduledAudit", + "AWS::IoT::SecurityProfile", + "AWS::IoTAnalytics::Channel", + "AWS::IoTAnalytics::Dataset", + "AWS::IoTAnalytics::Datastore", + "AWS::IoTAnalytics::Pipeline", + "AWS::IoTEvents::AlarmModel", + "AWS::IoTEvents::DetectorModel", + "AWS::IoTEvents::Input", + "AWS::IoTSiteWise::AssetModel", + "AWS::IoTSiteWise::Dashboard", + "AWS::IoTSiteWise::Gateway", + "AWS::IoTSiteWise::Portal", + "AWS::IoTSiteWise::Project", + "AWS::IoTTwinMaker::Entity", + "AWS::IoTTwinMaker::Scene", + "AWS::IoTTwinMaker::Workspace", + "AWS::IoTWireless::ServiceProfile", + "AWS::KMS::Alias", "AWS::KMS::Key", "AWS::Kinesis::Stream", "AWS::Kinesis::StreamConsumer", + "AWS::KinesisAnalyticsV2::Application", + "AWS::KinesisVideo::SignalingChannel", "AWS::Lambda::Function", + "AWS::Lex::Bot", + "AWS::Lex::BotAlias", + "AWS::Lightsail::Bucket", + "AWS::Lightsail::Certificate", + "AWS::Lightsail::Disk", + "AWS::Lightsail::StaticIp", + "AWS::LookoutMetrics::Alert", + "AWS::LookoutVision::Project", + "AWS::MSK::Cluster", + "AWS::MediaPackage::PackagingConfiguration", + "AWS::MediaPackage::PackagingGroup", "AWS::NetworkFirewall::Firewall", "AWS::NetworkFirewall::FirewallPolicy", "AWS::NetworkFirewall::RuleGroup", + "AWS::NetworkFirewall::TLSInspectionConfiguration", + "AWS::NetworkManager::Device", + "AWS::NetworkManager::GlobalNetwork", + "AWS::NetworkManager::Link", + "AWS::NetworkManager::Site", + "AWS::NetworkManager::TransitGatewayRegistration", "AWS::OpenSearch::Domain", + "AWS::Panorama::Package", + "AWS::Pinpoint::App", + "AWS::Pinpoint::ApplicationSettings", + "AWS::Pinpoint::Segment", "AWS::QLDB::Ledger", "AWS::RDS::DBCluster", "AWS::RDS::DBClusterSnapshot", @@ -161,27 +313,69 @@ "AWS::RDS::DBSnapshot", "AWS::RDS::DBSubnetGroup", "AWS::RDS::EventSubscription", + "AWS::RDS::GlobalCluster", + "AWS::RUM::AppMonitor", "AWS::Redshift::Cluster", "AWS::Redshift::ClusterParameterGroup", "AWS::Redshift::ClusterSecurityGroup", "AWS::Redshift::ClusterSnapshot", "AWS::Redshift::ClusterSubnetGroup", "AWS::Redshift::EventSubscription", + "AWS::Redshift::ScheduledAction", + "AWS::ResilienceHub::ResiliencyPolicy", + "AWS::RoboMaker::RobotApplication", + "AWS::RoboMaker::RobotApplicationVersion", + "AWS::RoboMaker::SimulationApplication", + "AWS::Route53::HealthCheck", + "AWS::Route53::HostedZone", + "AWS::Route53RecoveryControl::Cluster", + "AWS::Route53RecoveryControl::ControlPanel", + "AWS::Route53RecoveryControl::RoutingControl", + "AWS::Route53RecoveryControl::SafetyRule", + "AWS::Route53RecoveryReadiness::Cell", + "AWS::Route53RecoveryReadiness::ReadinessCheck", + "AWS::Route53RecoveryReadiness::RecoveryGroup", + "AWS::Route53RecoveryReadiness::ResourceSet", + "AWS::Route53Resolver::FirewallDomainList", + "AWS::Route53Resolver::FirewallRuleGroupAssociation", + "AWS::Route53Resolver::ResolverEndpoint", + "AWS::Route53Resolver::ResolverRule", + "AWS::Route53Resolver::ResolverRuleAssociation", "AWS::S3::AccountPublicAccessBlock", "AWS::S3::Bucket", + "AWS::S3::MultiRegionAccessPoint", + "AWS::S3::StorageLens", + "AWS::SES::ConfigurationSet", + "AWS::SES::ContactList", + "AWS::SES::ReceiptFilter", + "AWS::SES::ReceiptRuleSet", + "AWS::SES::Template", "AWS::SNS::Topic", "AWS::SQS::Queue", 
"AWS::SSM::AssociationCompliance", "AWS::SSM::FileData", "AWS::SSM::ManagedInstanceInventory", "AWS::SSM::PatchCompliance", + "AWS::SageMaker::AppImageConfig", "AWS::SageMaker::CodeRepository", + "AWS::SageMaker::EndpointConfig", + "AWS::SageMaker::Image", + "AWS::SageMaker::Model", + "AWS::SageMaker::NotebookInstance", + "AWS::SageMaker::NotebookInstanceLifecycleConfig", + "AWS::SageMaker::Workteam", "AWS::SecretsManager::Secret", "AWS::ServiceCatalog::CloudFormationProduct", "AWS::ServiceCatalog::CloudFormationProvisionedProduct", "AWS::ServiceCatalog::Portfolio", + "AWS::ServiceDiscovery::HttpNamespace", + "AWS::ServiceDiscovery::PublicDnsNamespace", + "AWS::ServiceDiscovery::Service", "AWS::Shield::Protection", "AWS::ShieldRegional::Protection", + "AWS::StepFunctions::Activity", + "AWS::StepFunctions::StateMachine", + "AWS::Transfer::Workflow", "AWS::WAF::RateBasedRule", "AWS::WAF::Rule", "AWS::WAF::RuleGroup", @@ -195,6 +389,8 @@ "AWS::WAFv2::RegexPatternSet", "AWS::WAFv2::RuleGroup", "AWS::WAFv2::WebACL", + "AWS::WorkSpaces::ConnectionAlias", + "AWS::WorkSpaces::Workspace", "AWS::XRay::EncryptionConfig", ] @@ -211,7 +407,9 @@ { "Sid": "REMOTE", "Effect": "Allow", - "Principal": {"AWS": {"Fn::Sub": "arn:${AWS::Partition}:iam::${LambdaAccountId}:root"}}, + "Principal": { + "AWS": {"Fn::Sub": "arn:${AWS::Partition}:iam::${LambdaAccountId}:root"} + }, "Action": "sts:AssumeRole", }, ], @@ -222,7 +420,9 @@ { "Effect": "Allow", "Action": "s3:PutObject*", - "Resource": {"Fn::Sub": "arn:${AWS::Partition}:s3:::${ConfigBucket}/AWSLogs/${AWS::AccountId}/*"}, + "Resource": { + "Fn::Sub": "arn:${AWS::Partition}:s3:::${ConfigBucket}/AWSLogs/${AWS::AccountId}/*" + }, "Condition": {"StringLike": {"s3:x-amz-acl": "bucket-owner-full-control"}}, }, { @@ -243,10 +443,18 @@ def get_command_parser(): # formatter_class=argparse.RawDescriptionHelpFormatter, description="The RDK is a command-line utility for authoring, deploying, and testing custom AWS Config rules." ) - parser.add_argument("-p", "--profile", help="[optional] indicate which Profile to use.") - parser.add_argument("-k", "--access-key-id", help="[optional] Access Key ID to use.") - parser.add_argument("-s", "--secret-access-key", help="[optional] Secret Access Key to use.") - parser.add_argument("-r", "--region", help="Select the region to run the command in.") + parser.add_argument( + "-p", "--profile", help="[optional] indicate which Profile to use." + ) + parser.add_argument( + "-k", "--access-key-id", help="[optional] Access Key ID to use." + ) + parser.add_argument( + "-s", "--secret-access-key", help="[optional] Secret Access Key to use." + ) + parser.add_argument( + "-r", "--region", help="Select the region to run the command in." + ) parser.add_argument( "-f", "--region-file", @@ -281,7 +489,7 @@ def get_command_parser(): parser.add_argument( "command", metavar="", - help=f"Command to run. Refer to the usage instructions for each command for more details. Commands are: {rdk_commands})", + help=f"Command to run. Refer to the usage instructions for each command for more details. 
Commands are: {rdk_commands}", choices=rdk_commands, ) parser.add_argument( @@ -291,7 +499,11 @@ def get_command_parser(): help="Run `rdk --help` to see command-specific arguments.", ) parser.add_argument( - "-v", "--version", help="Display the version of this tool", action="version", version="%(prog)s " + MY_VERSION + "-v", + "--version", + help="Display the version of this tool", + action="version", + version="%(prog)s " + MY_VERSION, ) return parser @@ -373,7 +585,9 @@ def get_rule_parser(is_required, command): + command + " the Rule and metadata.", ) - parser.add_argument("rulename", metavar="", help="Rule name to create/modify") + parser.add_argument( + "rulename", metavar="", help="Rule name to create/modify" + ) runtime_group = parser.add_mutually_exclusive_group() runtime_group.add_argument( "-R", @@ -381,7 +595,6 @@ def get_rule_parser(is_required, command): required=False, help="Runtime for lambda function", choices=[ - "nodejs6.10", "java8", "python3.7", "python3.7-lib", @@ -389,16 +602,23 @@ def get_rule_parser(is_required, command): "python3.8-lib", "python3.9", "python3.9-lib", - "dotnetcore1.0", - "dotnetcore2.0", + "python3.10", + "python3.10-lib", ], metavar="", ) runtime_group.add_argument( - "--source-identifier", required=False, help="[optional] Used only for creating Managed Rules." + "--source-identifier", + required=False, + help="[optional] Used only for creating Managed Rules.", ) - parser.add_argument("-l", "--custom-lambda-name", required=False, help="[optional] Provide custom lambda name") - parser.set_defaults(runtime="python3.9-lib") + parser.add_argument( + "-l", + "--custom-lambda-name", + required=False, + help="[optional] Provide custom lambda name", + ) + parser.set_defaults(runtime="python3.10-lib") parser.add_argument( "-r", "--resource-types", @@ -410,17 +630,41 @@ def get_rule_parser(is_required, command): "--maximum-frequency", required=False, help="[optional] Maximum execution frequency for scheduled Rules", - choices=["One_Hour", "Three_Hours", "Six_Hours", "Twelve_Hours", "TwentyFour_Hours"], + choices=[ + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours", + ], + ) + parser.add_argument( + "-i", + "--input-parameters", + help="[optional] JSON for required Config parameters.", + ) + parser.add_argument( + "--optional-parameters", help="[optional] JSON for optional Config parameters." + ) + parser.add_argument( + "--tags", + help="[optional] JSON for tags to be applied to all CFN created resources.", + ) + parser.add_argument( + "-s", + "--rulesets", + required=False, + help="[optional] comma-delimited list of RuleSet names to add this Rule to.", ) - parser.add_argument("-i", "--input-parameters", help="[optional] JSON for required Config parameters.") - parser.add_argument("--optional-parameters", help="[optional] JSON for optional Config parameters.") - parser.add_argument("--tags", help="[optional] JSON for tags to be applied to all CFN created resources.") parser.add_argument( - "-s", "--rulesets", required=False, help="[optional] comma-delimited list of RuleSet names to add this Rule to." + "--remediation-action", + required=False, + help="[optional] SSM document for remediation.", ) - parser.add_argument("--remediation-action", required=False, help="[optional] SSM document for remediation.") parser.add_argument( - "--remediation-action-version", required=False, help="[optional] SSM document version for remediation action." 
+ "--remediation-action-version", + required=False, + help="[optional] SSM document version for remediation action.", ) parser.add_argument( "--auto-remediate", @@ -434,7 +678,9 @@ def get_rule_parser(is_required, command): help="[optional] Number of times to retry automated remediation.", ) parser.add_argument( - "--auto-remediation-retry-time", required=False, help="[optional] Duration of automated remediation retries." + "--auto-remediation-retry-time", + required=False, + help="[optional] Duration of automated remediation retries.", ) parser.add_argument( "--remediation-concurrent-execution-percent", @@ -485,13 +731,27 @@ def get_deployment_parser(ForceArgument=False, Command="deploy"): parser = argparse.ArgumentParser( prog="rdk " + Command, - description="Used to " + Command + " the Config Rule " + direction + " the target account.", + description="Used to " + + Command + + " the Config Rule " + + direction + + " the target account.", + ) + parser.add_argument( + "rulename", + metavar="", + nargs="*", + help="Rule name(s) to deploy. Rule(s) will be pushed to AWS.", + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="All rules in the working directory will be deployed.", ) parser.add_argument( - "rulename", metavar="", nargs="*", help="Rule name(s) to deploy. Rule(s) will be pushed to AWS." + "-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names" ) - parser.add_argument("--all", "-a", action="store_true", help="All rules in the working directory will be deployed.") - parser.add_argument("-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names") parser.add_argument( "-f", "--functions-only", @@ -575,20 +835,36 @@ def get_deployment_parser(ForceArgument=False, Command="deploy"): return parser -def get_deployment_organization_parser(ForceArgument=False, Command="deploy-organization"): +def get_deployment_organization_parser( + ForceArgument=False, Command="deploy-organization" +): direction = "to" if Command == "undeploy": direction = "from" parser = argparse.ArgumentParser( prog="rdk " + Command, - description="Used to " + Command + " the Config Rule " + direction + " the target Organization.", + description="Used to " + + Command + + " the Config Rule " + + direction + + " the target Organization.", ) parser.add_argument( - "rulename", metavar="", nargs="*", help="Rule name(s) to deploy. Rule(s) will be pushed to AWS." + "rulename", + metavar="", + nargs="*", + help="Rule name(s) to deploy. Rule(s) will be pushed to AWS.", + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="All rules in the working directory will be deployed.", + ) + parser.add_argument( + "-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names" ) - parser.add_argument("--all", "-a", action="store_true", help="All rules in the working directory will be deployed.") - parser.add_argument("-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names") parser.add_argument( "-f", "--functions-only", @@ -673,13 +949,25 @@ def get_deployment_organization_parser(ForceArgument=False, Command="deploy-orga def get_export_parser(ForceArgument=False, Command="export"): - parser = argparse.ArgumentParser( - prog="rdk " + Command, description="Used to " + Command + " the Config Rule to terraform file." 
+ prog="rdk " + Command, + description="Used to " + Command + " the Config Rule to terraform file.", + ) + parser.add_argument( + "rulename", + metavar="", + nargs="*", + help="Rule name(s) to export to a file.", + ) + parser.add_argument( + "-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names" + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="All rules in the working directory will be deployed.", ) - parser.add_argument("rulename", metavar="", nargs="*", help="Rule name(s) to export to a file.") - parser.add_argument("-s", "--rulesets", required=False, help="comma-delimited list of RuleSet names") - parser.add_argument("--all", "-a", action="store_true", help="All rules in the working directory will be deployed.") parser.add_argument( "--lambda-layers", required=False, @@ -717,8 +1005,16 @@ def get_export_parser(ForceArgument=False, Command="export"): required=False, help="[optional] Lambda Layer ARN that contains the desired rdklib. Note that Lambda Layers are region-specific.", ) - parser.add_argument("-v", "--version", required=True, help="Terraform version", choices=["0.11", "0.12"]) - parser.add_argument("-f", "--format", required=True, help="Export Format", choices=["terraform"]) + parser.add_argument( + "-v", + "--version", + required=True, + help="Terraform version", + choices=["0.11", "0.12"], + ) + parser.add_argument( + "-f", "--format", required=True, help="Export Format", choices=["terraform"] + ) parser.add_argument( "-g", "--generated-lambda-layer", @@ -736,15 +1032,36 @@ def get_export_parser(ForceArgument=False, Command="export"): def get_test_parser(command): - parser = argparse.ArgumentParser(prog="rdk " + command, description="Used to run tests on your Config Rule code.") - parser.add_argument("rulename", metavar="[,,...]", nargs="*", help="Rule name(s) to test") + parser = argparse.ArgumentParser( + prog="rdk " + command, description="Used to run tests on your Config Rule code." + ) parser.add_argument( - "--all", "-a", action="store_true", help="Test will be run against all rules in the working directory." + "rulename", + metavar="[,,...]", + nargs="*", + help="Rule name(s) to test", + ) + parser.add_argument( + "--all", + "-a", + action="store_true", + help="Test will be run against all rules in the working directory.", + ) + parser.add_argument( + "--test-ci-json", "-j", help="[optional] JSON for test CI for testing." + ) + parser.add_argument( + "--test-ci-types", "-t", help="[optional] CI type to use for testing." 
+ ) + parser.add_argument( + "--verbose", "-v", action="store_true", help="[optional] Enable full log output" + ) + parser.add_argument( + "-s", + "--rulesets", + required=False, + help="[optional] comma-delimited list of RuleSet names", ) - parser.add_argument("--test-ci-json", "-j", help="[optional] JSON for test CI for testing.") - parser.add_argument("--test-ci-types", "-t", help="[optional] CI type to use for testing.") - parser.add_argument("--verbose", "-v", action="store_true", help="[optional] Enable full log output") - parser.add_argument("-s", "--rulesets", required=False, help="[optional] comma-delimited list of RuleSet names") return parser @@ -772,11 +1089,21 @@ def get_logs_parser(): usage="rdk logs [-n/--number NUMBER] [-f/--follow]", description="Displays CloudWatch logs for the Lambda Function for the specified Rule.", ) - parser.add_argument("rulename", metavar="", help="Rule whose logs will be displayed") parser.add_argument( - "-f", "--follow", action="store_true", help="[optional] Continuously poll Lambda logs and write to stdout." + "rulename", metavar="", help="Rule whose logs will be displayed" + ) + parser.add_argument( + "-f", + "--follow", + action="store_true", + help="[optional] Continuously poll Lambda logs and write to stdout.", + ) + parser.add_argument( + "-n", + "--number", + default=3, + help="[optional] Number of previous logged events to display.", ) - parser.add_argument("-n", "--number", default=3, help="[optional] Number of previous logged events to display.") return parser @@ -788,7 +1115,9 @@ def get_rulesets_parser(): ) parser.add_argument("subcommand", help="One of list, add, or remove") parser.add_argument("ruleset", nargs="?", help="Name of RuleSet") - parser.add_argument("rulename", nargs="?", help="Name of Rule to be added or removed") + parser.add_argument( + "rulename", nargs="?", help="Name of Rule to be added or removed" + ) return parser @@ -843,10 +1172,15 @@ def get_create_rule_template_parser(): def get_create_region_set_parser(): parser = argparse.ArgumentParser( - prog="rdk create-region-set", description="Outputs a YAML region set file for multi-region deployment." 
+ prog="rdk create-region-set", + description="Outputs a YAML region set file for multi-region deployment.", ) parser.add_argument( - "-o", "--output-file", required=False, default="regions", help="Filename of the generated region set file" + "-o", + "--output-file", + required=False, + default="regions", + help="Filename of the generated region set file", ) return parser @@ -859,7 +1193,9 @@ def parse_region_file(args): region_text = yaml.safe_load(open(args.region_file, "r")) return region_text[region_set] except Exception: - raise SyntaxError(f"Error reading regions: {region_set} in file: {args.region_file}") + raise SyntaxError( + f"Error reading regions: {region_set} in file: {args.region_file}" + ) def run_multi_region(args): @@ -908,7 +1244,9 @@ def init(self): config_bucket_exists = False if self.args.config_bucket_exists_in_another_account: - print(f"[{my_session.region_name}]: Skipping Config Bucket check due to command line args") + print( + f"[{my_session.region_name}]: Skipping Config Bucket check due to command line args" + ) config_bucket_exists = True config_bucket_name = config_bucket_prefix + "-" + account_id @@ -921,9 +1259,14 @@ def init(self): control_tower = True if self.args.generate_lambda_layer: - lambda_layer_version = self.__get_existing_lambda_layer(my_session, layer_name=self.args.custom_layer_name) + lambda_layer_version = self.__get_existing_lambda_layer( + my_session, layer_name=self.args.custom_layer_name + ) if lambda_layer_version: - print(f"[{my_session.region_name}]: Found Version: " + lambda_layer_version) + print( + f"[{my_session.region_name}]: Found Version: " + + lambda_layer_version + ) if self.args.generate_lambda_layer: print( f"[{my_session.region_name}]: --generate-lambda-layer Flag received, forcing update of the Lambda Layer in {my_session.region_name}" @@ -933,8 +1276,12 @@ def init(self): f"[{my_session.region_name}]: Lambda Layer not found in {my_session.region_name}. Creating one now" ) # Try to generate lambda layer with ServerlessAppRepo, manually generate if impossible - self.__create_new_lambda_layer(my_session, layer_name=self.args.custom_layer_name) - lambda_layer_version = self.__get_existing_lambda_layer(my_session, layer_name=self.args.custom_layer_name) + self.__create_new_lambda_layer( + my_session, layer_name=self.args.custom_layer_name + ) + lambda_layer_version = self.__get_existing_lambda_layer( + my_session, layer_name=self.args.custom_layer_name + ) # Check to see if the ConfigRecorder has been created. 
recorders = my_config.describe_configuration_recorders() @@ -942,13 +1289,18 @@ def init(self): config_recorder_exists = True config_recorder_name = recorders["ConfigurationRecorders"][0]["name"] config_role_arn = recorders["ConfigurationRecorders"][0]["roleARN"] - print(f"[{my_session.region_name}]: Found Config Recorder: " + config_recorder_name) + print( + f"[{my_session.region_name}]: Found Config Recorder: " + + config_recorder_name + ) print(f"[{my_session.region_name}]: Found Config Role: " + config_role_arn) delivery_channels = my_config.describe_delivery_channels() if len(delivery_channels["DeliveryChannels"]) > 0: delivery_channel_exists = True - config_bucket_name = delivery_channels["DeliveryChannels"][0]["s3BucketName"] + config_bucket_name = delivery_channels["DeliveryChannels"][0][ + "s3BucketName" + ] my_s3 = my_session.client("s3") @@ -958,18 +1310,26 @@ def init(self): bucket_exists = False for bucket in response["Buckets"]: if bucket["Name"] == config_bucket_name: - print(f"[{my_session.region_name}]: Found Bucket: " + config_bucket_name) + print( + f"[{my_session.region_name}]: Found Bucket: " + + config_bucket_name + ) config_bucket_exists = True bucket_exists = True if not bucket_exists: - print(f"[{my_session.region_name}]: Creating Config bucket " + config_bucket_name) + print( + f"[{my_session.region_name}]: Creating Config bucket " + + config_bucket_name + ) if my_session.region_name == "us-east-1": my_s3.create_bucket(Bucket=config_bucket_name) else: my_s3.create_bucket( Bucket=config_bucket_name, - CreateBucketConfiguration={"LocationConstraint": my_session.region_name}, + CreateBucketConfiguration={ + "LocationConstraint": my_session.region_name + }, ) if not config_role_arn: @@ -988,29 +1348,49 @@ def init(self): elif partition == "aws-cn": partition_url = ".com.cn" assume_role_policy_template = open( - os.path.join(path.dirname(__file__), "template", assume_role_policy_file), "r" + os.path.join( + path.dirname(__file__), "template", assume_role_policy_file + ), + "r", ).read() - assume_role_policy = json.loads(assume_role_policy_template.replace("${PARTITIONURL}", partition_url)) + assume_role_policy = json.loads( + assume_role_policy_template.replace( + "${PARTITIONURL}", partition_url + ) + ) assume_role_policy["Statement"].append( - {"Effect": "Allow", "Principal": {"AWS": str(account_id)}, "Action": "sts:AssumeRole"} + { + "Effect": "Allow", + "Principal": {"AWS": str(account_id)}, + "Action": "sts:AssumeRole", + } ) my_iam.create_role( - RoleName=config_role_name, AssumeRolePolicyDocument=json.dumps(assume_role_policy), Path="/rdk/" + RoleName=config_role_name, + AssumeRolePolicyDocument=json.dumps(assume_role_policy), + Path="/rdk/", ) # attach role policy my_iam.attach_role_policy( - RoleName=config_role_name, PolicyArn="arn:" + partition + ":iam::aws:policy/service-role/AWS_ConfigRole" + RoleName=config_role_name, + PolicyArn="arn:" + + partition + + ":iam::aws:policy/service-role/AWS_ConfigRole", ) my_iam.attach_role_policy( - RoleName=config_role_name, PolicyArn="arn:" + partition + ":iam::aws:policy/ReadOnlyAccess" + RoleName=config_role_name, + PolicyArn="arn:" + partition + ":iam::aws:policy/ReadOnlyAccess", ) policy_template = open( - os.path.join(path.dirname(__file__), "template", delivery_permission_policy_file), "r" + os.path.join( + path.dirname(__file__), "template", delivery_permission_policy_file + ), + "r", ).read() - delivery_permissions_policy = policy_template.replace("${ACCOUNTID}", account_id).replace( - "${PARTITION}", partition - 
) + delivery_permissions_policy = policy_template.replace( + "${ACCOUNTID}", account_id + ).replace("${PARTITION}", partition) my_iam.put_role_policy( RoleName=config_role_name, PolicyName="ConfigDeliveryPermissions", @@ -1023,30 +1403,42 @@ def init(self): # create or update config recorder if not config_role_arn: - config_role_arn = "arn:" + partition + ":iam::" + account_id + ":role/rdk/config-role" + config_role_arn = ( + "arn:" + partition + ":iam::" + account_id + ":role/rdk/config-role" + ) if not control_tower: my_config.put_configuration_recorder( ConfigurationRecorder={ "name": config_recorder_name, "roleARN": config_role_arn, - "recordingGroup": {"allSupported": True, "includeGlobalResourceTypes": True}, + "recordingGroup": { + "allSupported": True, + "includeGlobalResourceTypes": True, + }, } ) if not delivery_channel_exists: # create delivery channel - print(f"[{my_session.region_name}]: Creating delivery channel to bucket " + config_bucket_name) + print( + f"[{my_session.region_name}]: Creating delivery channel to bucket " + + config_bucket_name + ) my_config.put_delivery_channel( DeliveryChannel={ "name": "default", "s3BucketName": config_bucket_name, - "configSnapshotDeliveryProperties": {"deliveryFrequency": "Six_Hours"}, + "configSnapshotDeliveryProperties": { + "deliveryFrequency": "Six_Hours" + }, } ) # start config recorder - my_config.start_configuration_recorder(ConfigurationRecorderName=config_recorder_name) + my_config.start_configuration_recorder( + ConfigurationRecorderName=config_recorder_name + ) print(f"[{my_session.region_name}]: Config Service is ON") else: print( @@ -1056,26 +1448,39 @@ def init(self): print(f"[{my_session.region_name}]: Config setup complete.") # create code bucket - code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) response = my_s3.list_buckets() bucket_exists = False for bucket in response["Buckets"]: if bucket["Name"] == code_bucket_name: bucket_exists = True - print(f"[{my_session.region_name}]: Found code bucket: " + code_bucket_name) + print( + f"[{my_session.region_name}]: Found code bucket: " + + code_bucket_name + ) if not bucket_exists: if self.args.skip_code_bucket_creation: - print(f"[{my_session.region_name}]: Skipping Code Bucket creation due to command line args") + print( + f"[{my_session.region_name}]: Skipping Code Bucket creation due to command line args" + ) else: - print(f"[{my_session.region_name}]: Creating Code bucket " + code_bucket_name) + print( + f"[{my_session.region_name}]: Creating Code bucket " + + code_bucket_name + ) # Consideration for us-east-1 S3 API if my_session.region_name == "us-east-1": my_s3.create_bucket(Bucket=code_bucket_name) else: my_s3.create_bucket( - Bucket=code_bucket_name, CreateBucketConfiguration={"LocationConstraint": my_session.region_name} + Bucket=code_bucket_name, + CreateBucketConfiguration={ + "LocationConstraint": my_session.region_name + }, ) return 0 @@ -1122,10 +1527,14 @@ def clean(self): try: # First delete the Config Recorder itself. Do we need to stop it first? Let's stop it just to be safe. 
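Note on the init() hunks above: they are black re-wrapping of existing logic, and read together the flow is to look for an existing Configuration Recorder and delivery channel, then create the Config delivery bucket only if it is missing, special-casing us-east-1 (which rejects an explicit LocationConstraint). A minimal standalone sketch of that flow, with hypothetical function names and a caller-supplied boto3 session:

import boto3

def config_recorder_status(session):
    # Return (recorder_name, role_arn) for an existing recorder, or (None, None).
    config = session.client("config")
    recorders = config.describe_configuration_recorders()["ConfigurationRecorders"]
    if recorders:
        return recorders[0]["name"], recorders[0]["roleARN"]
    return None, None

def ensure_config_bucket(session, bucket_name):
    # Create the Config delivery bucket only if it does not already exist.
    s3 = session.client("s3")
    if bucket_name in [b["Name"] for b in s3.list_buckets()["Buckets"]]:
        return
    if session.region_name == "us-east-1":
        s3.create_bucket(Bucket=bucket_name)  # us-east-1 rejects a LocationConstraint
    else:
        s3.create_bucket(
            Bucket=bucket_name,
            CreateBucketConfiguration={"LocationConstraint": session.region_name},
        )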
my_config.stop_configuration_recorder( - ConfigurationRecorderName=recorders["ConfigurationRecorders"][0]["name"] + ConfigurationRecorderName=recorders["ConfigurationRecorders"][0][ + "name" + ] ) my_config.delete_configuration_recorder( - ConfigurationRecorderName=recorders["ConfigurationRecorders"][0]["name"] + ConfigurationRecorderName=recorders["ConfigurationRecorders"][0][ + "name" + ] ) except Exception as e: print("Error encountered removing Configuration Recorder: " + str(e)) @@ -1135,13 +1544,21 @@ def clean(self): try: response = iam_client.get_role(RoleName=config_role_name) try: - role_policy_results = iam_client.list_role_policies(RoleName=config_role_name) + role_policy_results = iam_client.list_role_policies( + RoleName=config_role_name + ) for policy_name in role_policy_results["PolicyNames"]: - iam_client.delete_role_policy(RoleName=config_role_name, PolicyName=policy_name) + iam_client.delete_role_policy( + RoleName=config_role_name, PolicyName=policy_name + ) - role_policy_results = iam_client.list_attached_role_policies(RoleName=config_role_name) + role_policy_results = iam_client.list_attached_role_policies( + RoleName=config_role_name + ) for policy in role_policy_results["AttachedPolicies"]: - iam_client.detach_role_policy(RoleName=config_role_name, PolicyArn=policy["PolicyArn"]) + iam_client.detach_role_policy( + RoleName=config_role_name, PolicyArn=policy["PolicyArn"] + ) # Once all policies are detached we should be able to delete the Role. iam_client.delete_role(RoleName=config_role_name) @@ -1154,11 +1571,17 @@ def clean(self): delivery_channels = my_config.describe_delivery_channels() if len(delivery_channels["DeliveryChannels"]) > 0: for delivery_channel in delivery_channels["DeliveryChannels"]: - config_bucket_names.append(delivery_channels["DeliveryChannels"][0]["s3BucketName"]) + config_bucket_names.append( + delivery_channels["DeliveryChannels"][0]["s3BucketName"] + ) try: - my_config.delete_delivery_channel(DeliveryChannelName=delivery_channel["name"]) + my_config.delete_delivery_channel( + DeliveryChannelName=delivery_channel["name"] + ) except Exception as e: - print("Error encountered trying to delete Delivery Channel: " + str(e)) + print( + "Error encountered trying to delete Delivery Channel: " + str(e) + ) if config_bucket_names: # empty and then delete the config bucket. @@ -1192,7 +1615,9 @@ def clean(self): print("Error encountered deleting Functions stack: " + str(e)) # Delete the code bucket, if one exists. 
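Note on the clean() hunks above: they keep the standard IAM teardown order of deleting inline role policies, detaching managed policies, and only then deleting the role, before the delivery channels and their buckets are removed. A hedged sketch of just the role teardown (the helper name is illustrative and pagination is omitted for brevity):

import boto3

def delete_role_completely(iam_client, role_name):
    # Inline policies must be deleted before the role can be removed.
    for policy_name in iam_client.list_role_policies(RoleName=role_name)["PolicyNames"]:
        iam_client.delete_role_policy(RoleName=role_name, PolicyName=policy_name)
    # Managed policies are detached, not deleted.
    for policy in iam_client.list_attached_role_policies(RoleName=role_name)["AttachedPolicies"]:
        iam_client.detach_role_policy(RoleName=role_name, PolicyArn=policy["PolicyArn"])
    iam_client.delete_role(RoleName=role_name)

Usage would be delete_role_completely(boto3.client("iam"), some_role_name), mirroring the config_role_name handling in the hunk above.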
- code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) try: code_bucket = my_session.resource("s3").Bucket(code_bucket_name) code_bucket.objects.all().delete() @@ -1219,16 +1644,14 @@ def create(self): extension_mapping = { "java8": ".java", - "python3.6-managed": ".py", "python3.7": ".py", "python3.7-lib": ".py", "python3.8": ".py", "python3.8-lib": ".py", "python3.9": ".py", "python3.9-lib": ".py", - "nodejs6.10": ".js", - "dotnetcore1.0": "cs", - "dotnetcore2.0": "cs", + "python3.10": ".py", + "python3.10-lib": ".py", } if self.args.runtime not in extension_mapping: print("rdk does not support that runtime yet.") @@ -1250,8 +1673,6 @@ def create(self): # copy rule template into rule directory if self.args.runtime == "java8": self.__create_java_rule() - elif self.args.runtime in ["dotnetcore1.0", "dotnetcore2.0"]: - self.__create_dotnet_rule() else: src = os.path.join( path.dirname(__file__), @@ -1269,18 +1690,32 @@ def create(self): shutil.copyfile(src, dst) f = fileinput.input(files=dst, inplace=True) for line in f: - if self.args.runtime in ["python3.7-lib", "python3.8-lib", "python3.9-lib"]: + if self.args.runtime in [ + "python3.7-lib", + "python3.8-lib", + "python3.9-lib", + "python3.10-lib", + ]: if self.args.resource_types: applicable_resource_list = "" - for resource_type in self.args.resource_types.split(","): - applicable_resource_list += "'" + resource_type + "', " + for resource_type in self.args.resource_types.split( + "," + ): + applicable_resource_list += ( + "'" + resource_type + "', " + ) print( line.replace("<%RuleName%>", self.args.rulename) .replace( "<%ApplicableResources1%>", - "\nAPPLICABLE_RESOURCES = [" + applicable_resource_list[:-2] + "]\n", + "\nAPPLICABLE_RESOURCES = [" + + applicable_resource_list[:-2] + + "]\n", ) - .replace("<%ApplicableResources2%>", ", APPLICABLE_RESOURCES"), + .replace( + "<%ApplicableResources2%>", + ", APPLICABLE_RESOURCES", + ), end="", ) else: @@ -1291,7 +1726,9 @@ def create(self): end="", ) else: - print(line.replace("<%RuleName%>", self.args.rulename), end="") + print( + line.replace("<%RuleName%>", self.args.rulename), end="" + ) f.close() src = os.path.join( @@ -1306,12 +1743,16 @@ def create(self): os.getcwd(), rules_dir, self.args.rulename, - self.args.rulename + "_test" + extension_mapping[self.args.runtime], + self.args.rulename + + "_test" + + extension_mapping[self.args.runtime], ) shutil.copyfile(src, dst) f = fileinput.input(files=dst, inplace=True) for line in f: - print(line.replace("<%RuleName%>", self.args.rulename), end="") + print( + line.replace("<%RuleName%>", self.args.rulename), end="" + ) f.close() src = os.path.join( @@ -1387,10 +1828,18 @@ def modify(self): self.args.remediation_concurrent_execution_percent = ssm_controls.get( "ConcurrentExecutionRatePercentage", "" ) - self.args.remediation_error_rate_percent = ssm_controls.get("ErrorPercentage", "") - self.args.remediation_parameters = json.dumps(params["Parameters"]) if params.get("Parameters") else None - self.args.auto_remediation_retry_attempts = params.get("MaximumAutomaticAttempts", "") - self.args.auto_remediation_retry_time = params.get("RetryAttemptSeconds", "") + self.args.remediation_error_rate_percent = ssm_controls.get( + "ErrorPercentage", "" + ) + self.args.remediation_parameters = ( + json.dumps(params["Parameters"]) if params.get("Parameters") else None + ) + self.args.auto_remediation_retry_attempts = params.get( + 
"MaximumAutomaticAttempts", "" + ) + self.args.auto_remediation_retry_time = params.get( + "RetryAttemptSeconds", "" + ) self.args.remediation_action = params.get("TargetId", "") self.args.remediation_action_version = params.get("TargetVersion", "") @@ -1401,7 +1850,11 @@ def modify(self): # Write the parameters to a file in the rule directory. self.__populate_params() - print("Modified Rule '" + self.args.rulename + "'. Use the `deploy` command to push your changes to AWS.") + print( + "Modified Rule '" + + self.args.rulename + + "'. Use the `deploy` command to push your changes to AWS." + ) def undeploy(self): self.__parse_deploy_args(ForceArgument=True) @@ -1409,7 +1862,9 @@ def undeploy(self): if not self.args.force: confirmation = False while not confirmation: - my_input = input("Delete specified Rules and Lambda Functions from your AWS Account? (y/N): ") + my_input = input( + "Delete specified Rules and Lambda Functions from your AWS Account? (y/N): " + ) if my_input.lower() == "y": confirmation = True if my_input.lower() == "n" or my_input == "": @@ -1447,7 +1902,9 @@ def undeploy(self): for rule_name in rule_names: try: - cfn_client.delete_stack(StackName=self.__get_stack_name_from_rule_name(rule_name)) + cfn_client.delete_stack( + StackName=self.__get_stack_name_from_rule_name(rule_name) + ) deleted_stacks.append(self.__get_stack_name_from_rule_name(rule_name)) except ClientError as ce: print( @@ -1460,12 +1917,16 @@ def undeploy(self): + str(e) ) - print(f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete.") + print( + f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete." + ) for stack_name in deleted_stacks: self.__wait_for_cfn_stack(cfn_client, stack_name) - print(f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved.") + print( + f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved." + ) print(f"[{my_session.region_name}]: To re-deploy, use the 'deploy' command.") def undeploy_organization(self): @@ -1474,7 +1935,9 @@ def undeploy_organization(self): if not self.args.force: confirmation = False while not confirmation: - my_input = input("Delete specified Rules and Lambda Functions from your Organization? (y/N): ") + my_input = input( + "Delete specified Rules and Lambda Functions from your Organization? (y/N): " + ) if my_input.lower() == "y": confirmation = True if my_input.lower() == "n" or my_input == "": @@ -1512,7 +1975,9 @@ def undeploy_organization(self): for rule_name in rule_names: try: - cfn_client.delete_stack(StackName=self.__get_stack_name_from_rule_name(rule_name)) + cfn_client.delete_stack( + StackName=self.__get_stack_name_from_rule_name(rule_name) + ) deleted_stacks.append(self.__get_stack_name_from_rule_name(rule_name)) except ClientError as ce: print( @@ -1525,13 +1990,19 @@ def undeploy_organization(self): + str(e) ) - print(f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete.") + print( + f"[{my_session.region_name}]: Rule removal initiated. Waiting for Stack Deletion to complete." + ) for stack_name in deleted_stacks: self.__wait_for_cfn_stack(cfn_client, stack_name) - print(f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved.") - print(f"[{my_session.region_name}]: To re-deploy, use the 'deploy-organization' command.") + print( + f"[{my_session.region_name}]: Rule removal complete, but local files have been preserved." 
+ ) + print( + f"[{my_session.region_name}]: To re-deploy, use the 'deploy-organization' command." + ) def deploy(self): self.__parse_deploy_args() @@ -1552,7 +2023,9 @@ def deploy(self): if self.args.custom_code_bucket: code_bucket_name = self.args.custom_code_bucket else: - code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) # If we're only deploying the Lambda functions (and role + permissions), branch here. Someday the "main" execution path should use the same generated CFN templates for single-account deployment. if self.args.functions_only: @@ -1580,7 +2053,9 @@ def deploy(self): for rule_name in rule_names: rule_params, cfn_tags = self.__get_rule_parameters(rule_name) if "SourceIdentifier" in rule_params: - print(f"[{my_session.region_name}]: Skipping code packaging for Managed Rule.") + print( + f"[{my_session.region_name}]: Skipping code packaging for Managed Rule." + ) else: s3_dst = self.__upload_function_code( rule_name, rule_params, account_id, my_session, code_bucket_name @@ -1593,7 +2068,12 @@ def deploy(self): config = my_s3_client._client_config config.signature_version = botocore.UNSIGNED template_url = boto3.client("s3", config=config).generate_presigned_url( - "get_object", ExpiresIn=0, Params={"Bucket": code_bucket_name, "Key": self.args.stack_name + ".json"} + "get_object", + ExpiresIn=0, + Params={ + "Bucket": code_bucket_name, + "Key": self.args.stack_name + ".json", + }, ) # Check if stack exists. If it does, update it. If it doesn't, create it. @@ -1602,9 +2082,10 @@ def deploy(self): my_stack = my_cfn.describe_stacks(StackName=self.args.stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for Lambda functions.") + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for Lambda functions." + ) try: - cfn_args = { "StackName": self.args.stack_name, "TemplateURL": template_url, @@ -1624,7 +2105,9 @@ def deploy(self): if e.response["Error"]["Code"] == "ValidationError": if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. - print(f"[{my_session.region_name}]: No changes to Config Rule configurations.") + print( + f"[{my_session.region_name}]: No changes to Config Rule configurations." + ) else: # Something unexpected has gone wrong. Emit an error and bail. print(f"[{my_session.region_name}]: {e}") @@ -1636,10 +2119,16 @@ def deploy(self): for rule_name in rule_names: rule_params, cfn_tags = self.__get_rule_parameters(rule_name) my_lambda_arn = self.__get_lambda_arn_for_rule( - rule_name, partition, my_session.region_name, account_id, rule_params + rule_name, + partition, + my_session.region_name, + account_id, + rule_params, ) if "SourceIdentifier" in rule_params: - print(f"[{my_session.region_name}]: Skipping Lambda upload for Managed Rule.") + print( + f"[{my_session.region_name}]: Skipping Lambda upload for Managed Rule." + ) continue print(f"[{my_session.region_name}]: Publishing Lambda code...") @@ -1651,9 +2140,11 @@ def deploy(self): Publish=True, ) print(f"[{my_session.region_name}]: Lambda code updated.") - except ClientError as e: + except ClientError: # If we're in the exception, the stack does not exist and we should create it. 
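Most of the deploy() churn in these hunks is the same update-or-create CloudFormation pattern re-wrapped: describe the stack, update it if it exists, treat CloudFormation's "No updates are to be performed." ValidationError as a no-op, and fall back to create_stack when the stack is absent. Distilled into a helper (the function and its signature are illustrative, not an rdk API):

import boto3
from botocore.exceptions import ClientError

def create_or_update_stack(cfn_client, stack_name, **cfn_args):
    try:
        cfn_client.describe_stacks(StackName=stack_name)
        stack_exists = True
    except ClientError:
        stack_exists = False  # describe_stacks raises when the stack is absent

    if not stack_exists:
        cfn_client.create_stack(StackName=stack_name, **cfn_args)
        return

    try:
        cfn_client.update_stack(StackName=stack_name, **cfn_args)
    except ClientError as e:
        # CloudFormation reports a no-op update as a ValidationError rather than a success.
        if e.response["Error"]["Code"] == "ValidationError" and "No updates are to be performed." in str(e):
            print("No changes to deploy.")
        else:
            raise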
- print(f"[{my_session.region_name}]: Creating CloudFormation Stack for Lambda Functions.") + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for Lambda Functions." + ) cfn_args = { "StackName": self.args.stack_name, @@ -1689,7 +2180,9 @@ def deploy(self): combined_input_parameters = {} if "InputParameters" in rule_params: - combined_input_parameters.update(json.loads(rule_params["InputParameters"])) + combined_input_parameters.update( + json.loads(rule_params["InputParameters"]) + ) if "OptionalParameters" in rule_params: # Remove empty parameters @@ -1731,65 +2224,112 @@ def deploy(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceIdentifier", "ParameterValue": rule_params["SourceIdentifier"]}, + { + "ParameterKey": "SourceIdentifier", + "ParameterValue": rule_params["SourceIdentifier"], + }, ] my_cfn = my_session.client("cloudformation") if "Remediation" in rule_params: - print(f"[{my_session.region_name}]: Build The CFN Template with Remediation Settings") - cfn_body = os.path.join(path.dirname(__file__), "template", "configManagedRuleWithRemediation.json") + print( + f"[{my_session.region_name}]: Build The CFN Template with Remediation Settings" + ) + cfn_body = os.path.join( + path.dirname(__file__), + "template", + "configManagedRuleWithRemediation.json", + ) template_body = open(cfn_body, "r").read() json_body = json.loads(template_body) - remediation = self.__create_remediation_cloudformation_block(rule_params["Remediation"]) + remediation = self.__create_remediation_cloudformation_block( + rule_params["Remediation"] + ) json_body["Resources"]["Remediation"] = remediation if "SSMAutomation" in rule_params: # Reference the SSM Automation Role Created, if IAM is created - print(f"[{my_session.region_name}]: Building SSM Automation Section") + print( + f"[{my_session.region_name}]: Building SSM Automation Section" + ) ssm_automation = self.__create_automation_cloudformation_block( - rule_params["SSMAutomation"], self.__get_alphanumeric_rule_name(rule_name) + rule_params["SSMAutomation"], + self.__get_alphanumeric_rule_name(rule_name), ) json_body["Resources"][ - self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) ] = ssm_automation if "IAM" in rule_params["SSMAutomation"]: - print(f"[{my_session.region_name}]: Lets Build IAM Role and Policy") + print( + f"[{my_session.region_name}]: Lets Build IAM Role and Policy" + ) # TODO Check For IAM Settings - json_body["Resources"]["Remediation"]["Properties"]["Parameters"]["AutomationAssumeRole"][ - "StaticValue" - ]["Values"] = [ - {"Fn::GetAtt": [self.__get_alphanumeric_rule_name(rule_name + "Role"), "Arn"]} + json_body["Resources"]["Remediation"]["Properties"][ + "Parameters" + ]["AutomationAssumeRole"]["StaticValue"]["Values"] = [ + { + "Fn::GetAtt": [ + self.__get_alphanumeric_rule_name( + rule_name + "Role" + ), + "Arn", + ] + } ] - ssm_iam_role, ssm_iam_policy = self.__create_automation_iam_cloudformation_block( - rule_params["SSMAutomation"], self.__get_alphanumeric_rule_name(rule_name) + ( + ssm_iam_role, + ssm_iam_policy, + ) = self.__create_automation_iam_cloudformation_block( + rule_params["SSMAutomation"], + self.__get_alphanumeric_rule_name(rule_name), ) - json_body["Resources"][self.__get_alphanumeric_rule_name(rule_name + "Role")] = ssm_iam_role + json_body["Resources"][ + self.__get_alphanumeric_rule_name(rule_name + "Role") + ] = 
ssm_iam_role json_body["Resources"][ self.__get_alphanumeric_rule_name(rule_name + "Policy") ] = ssm_iam_policy - print(f"[{my_session.region_name}]: Build Supporting SSM Resources") + print( + f"[{my_session.region_name}]: Build Supporting SSM Resources" + ) resource_depends_on = [ "rdkConfigRule", - self.__get_alphanumeric_rule_name(rule_name + "RemediationAction"), + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ), ] # Builds SSM Document Before Config RUle - json_body["Resources"]["Remediation"]["DependsOn"] = resource_depends_on - json_body["Resources"]["Remediation"]["Properties"]["TargetId"] = { - "Ref": self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") + json_body["Resources"]["Remediation"][ + "DependsOn" + ] = resource_depends_on + json_body["Resources"]["Remediation"]["Properties"][ + "TargetId" + ] = { + "Ref": self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) } try: my_stack_name = self.__get_stack_name_from_rule_name(rule_name) my_stack = my_cfn.describe_stacks(StackName=my_stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for " + + rule_name + ) try: cfn_args = { "StackName": my_stack_name, "TemplateBody": json.dumps(json_body, indent=2), "Parameters": my_params, - "Capabilities": ["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], + "Capabilities": [ + "CAPABILITY_IAM", + "CAPABILITY_NAMED_IAM", + ], } # If no tags key is specified, or if the tags dict is empty @@ -1801,23 +2341,31 @@ def deploy(self): if e.response["Error"]["Code"] == "ValidationError": if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. - print(f"[{my_session.region_name}]: No changes to Config Rule.") + print( + f"[{my_session.region_name}]: No changes to Config Rule." + ) else: # Something unexpected has gone wrong. Emit an error and bail. print(f"[{my_session.region_name}]: {e}") return 1 else: raise - except ClientError as e: + except ClientError: # If we're in the exception, the stack does not exist and we should create it. - print(f"[{my_session.region_name}]: Creating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for " + + rule_name + ) if "Remediation" in rule_params: cfn_args = { "StackName": my_stack_name, "TemplateBody": json.dumps(json_body, indent=2), "Parameters": my_params, - "Capabilities": ["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], + "Capabilities": [ + "CAPABILITY_IAM", + "CAPABILITY_NAMED_IAM", + ], } else: @@ -1838,13 +2386,18 @@ def deploy(self): else: # deploy config rule - cfn_body = os.path.join(path.dirname(__file__), "template", "configManagedRule.json") + cfn_body = os.path.join( + path.dirname(__file__), "template", "configManagedRule.json" + ) try: my_stack_name = self.__get_stack_name_from_rule_name(rule_name) my_stack = my_cfn.describe_stacks(StackName=my_stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for " + + rule_name + ) try: cfn_args = { "StackName": my_stack_name, @@ -1861,7 +2414,9 @@ def deploy(self): if e.response["Error"]["Code"] == "ValidationError": if "No updates are to be performed." 
in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. - print(f"[{my_session.region_name}]: No changes to Config Rule.") + print( + f"[{my_session.region_name}]: No changes to Config Rule." + ) else: # Something unexpected has gone wrong. Emit an error and bail. print(f"[{my_session.region_name}]: {e}") @@ -1870,7 +2425,10 @@ def deploy(self): raise except ClientError as e: # If we're in the exception, the stack does not exist and we should create it. - print(f"[{my_session.region_name}]: Creating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for " + + rule_name + ) cfn_args = { "StackName": my_stack_name, "TemplateBody": open(cfn_body, "r").read(), @@ -1894,20 +2452,31 @@ def deploy(self): print(f"[{my_session.region_name}]: Found Custom Rule.") s3_src = "" - s3_dst = self.__upload_function_code(rule_name, rule_params, account_id, my_session, code_bucket_name) + s3_dst = self.__upload_function_code( + rule_name, rule_params, account_id, my_session, code_bucket_name + ) # create CFN Parameters for Custom Rules lambdaRoleArn = "" if self.args.lambda_role_arn: - print(f"[{my_session.region_name}]: Existing IAM Role provided: " + self.args.lambda_role_arn) + print( + f"[{my_session.region_name}]: Existing IAM Role provided: " + + self.args.lambda_role_arn + ) lambdaRoleArn = self.args.lambda_role_arn elif self.args.lambda_role_name: - print(f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + self.args.lambda_role_name) + print( + f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + + self.args.lambda_role_name + ) arn = f"arn:{partition}:iam::{account_id}:role/{self.args.lambda_role_name}" lambdaRoleArn = arn if self.args.boundary_policy_arn: - print(f"[{my_session.region_name}]: Boundary Policy provided: " + self.args.boundary_policy_arn) + print( + f"[{my_session.region_name}]: Boundary Policy provided: " + + self.args.boundary_policy_arn + ) boundaryPolicyArn = self.args.boundary_policy_arn else: boundaryPolicyArn = "" @@ -1962,8 +2531,14 @@ def deploy(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceHandler", "ParameterValue": self.__get_handler(rule_name, rule_params)}, - {"ParameterKey": "Timeout", "ParameterValue": str(self.args.lambda_timeout)}, + { + "ParameterKey": "SourceHandler", + "ParameterValue": self.__get_handler(rule_name, rule_params), + }, + { + "ParameterKey": "Timeout", + "ParameterValue": str(self.args.lambda_timeout), + }, ] layers = self.__get_lambda_layers(my_session, self.args, rule_params) @@ -1972,55 +2547,89 @@ def deploy(self): layers.extend(additional_layers) if layers: - my_params.append({"ParameterKey": "Layers", "ParameterValue": ",".join(layers)}) + my_params.append( + {"ParameterKey": "Layers", "ParameterValue": ",".join(layers)} + ) if self.args.lambda_security_groups and self.args.lambda_subnets: my_params.append( - {"ParameterKey": "SecurityGroupIds", "ParameterValue": self.args.lambda_security_groups} + { + "ParameterKey": "SecurityGroupIds", + "ParameterValue": self.args.lambda_security_groups, + } + ) + my_params.append( + { + "ParameterKey": "SubnetIds", + "ParameterValue": self.args.lambda_subnets, + } ) - my_params.append({"ParameterKey": "SubnetIds", "ParameterValue": self.args.lambda_subnets}) # create json of CFN template - cfn_body = os.path.join(path.dirname(__file__), "template", "configRule.json") + cfn_body = os.path.join( + 
path.dirname(__file__), "template", "configRule.json" + ) template_body = open(cfn_body, "r").read() json_body = json.loads(template_body) remediation = "" if "Remediation" in rule_params: - remediation = self.__create_remediation_cloudformation_block(rule_params["Remediation"]) + remediation = self.__create_remediation_cloudformation_block( + rule_params["Remediation"] + ) json_body["Resources"]["Remediation"] = remediation if "SSMAutomation" in rule_params: ##AWS needs to build the SSM before the Config Rule resource_depends_on = [ "rdkConfigRule", - self.__get_alphanumeric_rule_name(rule_name + "RemediationAction"), + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ), ] remediation["DependsOn"] = resource_depends_on # Add JSON Reference to SSM Document { "Ref" : "MyEC2Instance" } remediation["Properties"]["TargetId"] = { - "Ref": self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") + "Ref": self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) } if "SSMAutomation" in rule_params: print(f"[{my_session.region_name}]: Building SSM Automation Section") - ssm_automation = self.__create_automation_cloudformation_block(rule_params["SSMAutomation"], rule_name) + ssm_automation = self.__create_automation_cloudformation_block( + rule_params["SSMAutomation"], rule_name + ) json_body["Resources"][ self.__get_alphanumeric_rule_name(rule_name + "RemediationAction") ] = ssm_automation if "IAM" in rule_params["SSMAutomation"]: print("Lets Build IAM Role and Policy") # TODO Check For IAM Settings - json_body["Resources"]["Remediation"]["Properties"]["Parameters"]["AutomationAssumeRole"][ - "StaticValue" - ]["Values"] = [{"Fn::GetAtt": [self.__get_alphanumeric_rule_name(rule_name + "Role"), "Arn"]}] + json_body["Resources"]["Remediation"]["Properties"]["Parameters"][ + "AutomationAssumeRole" + ]["StaticValue"]["Values"] = [ + { + "Fn::GetAtt": [ + self.__get_alphanumeric_rule_name(rule_name + "Role"), + "Arn", + ] + } + ] - ssm_iam_role, ssm_iam_policy = self.__create_automation_iam_cloudformation_block( + ( + ssm_iam_role, + ssm_iam_policy, + ) = self.__create_automation_iam_cloudformation_block( rule_params["SSMAutomation"], rule_name ) - json_body["Resources"][self.__get_alphanumeric_rule_name(rule_name + "Role")] = ssm_iam_role - json_body["Resources"][self.__get_alphanumeric_rule_name(rule_name + "Policy")] = ssm_iam_policy + json_body["Resources"][ + self.__get_alphanumeric_rule_name(rule_name + "Role") + ] = ssm_iam_role + json_body["Resources"][ + self.__get_alphanumeric_rule_name(rule_name + "Policy") + ] = ssm_iam_policy # debugging # print(json.dumps(json_body, indent=2)) @@ -2031,7 +2640,10 @@ def deploy(self): my_stack_name = self.__get_stack_name_from_rule_name(rule_name) my_stack = my_cfn.describe_stacks(StackName=my_stack_name) # If we've gotten here, stack exists and we should update it. - print(f"[{my_session.region_name}]: Updating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Updating CloudFormation Stack for " + + rule_name + ) try: cfn_args = { "StackName": my_stack_name, @@ -2047,14 +2659,21 @@ def deploy(self): response = my_cfn.update_stack(**cfn_args) except ClientError as e: if e.response["Error"]["Code"] == "ValidationError": - if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. 
- print(f"[{my_session.region_name}]: No changes to Config Rule.") + print( + f"[{my_session.region_name}]: No changes to Config Rule." + ) else: # Something unexpected has gone wrong. Emit an error and bail. - print(f"[{my_session.region_name}]: Validation Error on CFN\n") - print(f"[{my_session.region_name}]: " + json.dumps(cfn_args) + "\n") + print( + f"[{my_session.region_name}]: Validation Error on CFN\n" + ) + print( + f"[{my_session.region_name}]: " + + json.dumps(cfn_args) + + "\n" + ) print(f"[{my_session.region_name}]: {e}\n") return 1 else: @@ -2065,12 +2684,18 @@ def deploy(self): print(f"[{my_session.region_name}]: Publishing Lambda code...") my_lambda_client = my_session.client("lambda") my_lambda_client.update_function_code( - FunctionName=my_lambda_arn, S3Bucket=code_bucket_name, S3Key=s3_dst, Publish=True + FunctionName=my_lambda_arn, + S3Bucket=code_bucket_name, + S3Key=s3_dst, + Publish=True, ) print(f"[{my_session.region_name}]: Lambda code updated.") except ClientError as e: # If we're in the exception, the stack does not exist and we should create it. - print(f"[{my_session.region_name}]: Creating CloudFormation Stack for " + rule_name) + print( + f"[{my_session.region_name}]: Creating CloudFormation Stack for " + + rule_name + ) cfn_args = { "StackName": my_stack_name, "TemplateBody": json.dumps(json_body, indent=2), @@ -2114,7 +2739,9 @@ def deploy_organization(self): if self.args.custom_code_bucket: code_bucket_name = self.args.custom_code_bucket else: - code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name + code_bucket_name = ( + code_bucket_prefix + account_id + "-" + my_session.region_name + ) # If we're only deploying the Lambda functions (and role + permissions), branch here. Someday the "main" execution path should use the same generated CFN templates for single-account deployment. 
if self.args.functions_only: @@ -2141,7 +2768,9 @@ def deploy_organization(self): combined_input_parameters = {} if "InputParameters" in rule_params: - combined_input_parameters.update(json.loads(rule_params["InputParameters"])) + combined_input_parameters.update( + json.loads(rule_params["InputParameters"]) + ) if "OptionalParameters" in rule_params: # Remove empty parameters @@ -2183,12 +2812,19 @@ def deploy_organization(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceIdentifier", "ParameterValue": rule_params["SourceIdentifier"]}, + { + "ParameterKey": "SourceIdentifier", + "ParameterValue": rule_params["SourceIdentifier"], + }, ] my_cfn = my_session.client("cloudformation") # deploy config rule - cfn_body = os.path.join(path.dirname(__file__), "template", "configManagedRuleOrganization.json") + cfn_body = os.path.join( + path.dirname(__file__), + "template", + "configManagedRuleOrganization.json", + ) try: my_stack_name = self.__get_stack_name_from_rule_name(rule_name) @@ -2246,7 +2882,9 @@ def deploy_organization(self): print("Found Custom Rule.") s3_src = "" - s3_dst = self.__upload_function_code(rule_name, rule_params, account_id, my_session, code_bucket_name) + s3_dst = self.__upload_function_code( + rule_name, rule_params, account_id, my_session, code_bucket_name + ) # create CFN Parameters for Custom Rules lambdaRoleArn = "" @@ -2254,7 +2892,10 @@ def deploy_organization(self): print("Existing IAM Role provided: " + self.args.lambda_role_arn) lambdaRoleArn = self.args.lambda_role_arn elif self.args.lambda_role_name: - print(f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + self.args.lambda_role_name) + print( + f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + + self.args.lambda_role_name + ) arn = f"arn:{partition}:iam::{account_id}:role/{self.args.lambda_role_name}" lambdaRoleArn = arn @@ -2314,8 +2955,14 @@ def deploy_organization(self): "ParameterKey": "SourceInputParameters", "ParameterValue": json.dumps(combined_input_parameters), }, - {"ParameterKey": "SourceHandler", "ParameterValue": self.__get_handler(rule_name, rule_params)}, - {"ParameterKey": "Timeout", "ParameterValue": str(self.args.lambda_timeout)}, + { + "ParameterKey": "SourceHandler", + "ParameterValue": self.__get_handler(rule_name, rule_params), + }, + { + "ParameterKey": "Timeout", + "ParameterValue": str(self.args.lambda_timeout), + }, ] layers = self.__get_lambda_layers(my_session, self.args, rule_params) @@ -2324,16 +2971,28 @@ def deploy_organization(self): layers.extend(additional_layers) if layers: - my_params.append({"ParameterKey": "Layers", "ParameterValue": ",".join(layers)}) + my_params.append( + {"ParameterKey": "Layers", "ParameterValue": ",".join(layers)} + ) if self.args.lambda_security_groups and self.args.lambda_subnets: my_params.append( - {"ParameterKey": "SecurityGroupIds", "ParameterValue": self.args.lambda_security_groups} + { + "ParameterKey": "SecurityGroupIds", + "ParameterValue": self.args.lambda_security_groups, + } + ) + my_params.append( + { + "ParameterKey": "SubnetIds", + "ParameterValue": self.args.lambda_subnets, + } ) - my_params.append({"ParameterKey": "SubnetIds", "ParameterValue": self.args.lambda_subnets}) # create json of CFN template - cfn_body = os.path.join(path.dirname(__file__), "template", "configRuleOrganization.json") + cfn_body = os.path.join( + path.dirname(__file__), "template", "configRuleOrganization.json" + ) template_body = 
open(cfn_body, "r").read() json_body = json.loads(template_body) @@ -2362,7 +3021,6 @@ def deploy_organization(self): response = my_cfn.update_stack(**cfn_args) except ClientError as e: if e.response["Error"]["Code"] == "ValidationError": - if "No updates are to be performed." in str(e): # No changes made to Config rule definition, so CloudFormation won't do anything. print("No changes to Config Rule.") @@ -2380,7 +3038,10 @@ def deploy_organization(self): print("Publishing Lambda code...") my_lambda_client = my_session.client("lambda") my_lambda_client.update_function_code( - FunctionName=my_lambda_arn, S3Bucket=code_bucket_name, S3Key=s3_dst, Publish=True + FunctionName=my_lambda_arn, + S3Bucket=code_bucket_name, + S3Key=s3_dst, + Publish=True, ) print("Lambda code updated.") except ClientError as e: @@ -2412,7 +3073,6 @@ def deploy_organization(self): return 0 def export(self): - self.__parse_export_args() # get the rule names @@ -2439,7 +3099,9 @@ def export(self): combined_input_parameters = {} if "InputParameters" in rule_params: - combined_input_parameters.update(json.loads(rule_params["InputParameters"])) + combined_input_parameters.update( + json.loads(rule_params["InputParameters"]) + ) if "OptionalParameters" in rule_params: # Remove empty parameters @@ -2493,22 +3155,36 @@ def export(self): "lambda_timeout": str(self.args.lambda_timeout), } - params_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + ".tfvars.json") + params_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, rule_name.lower() + ".tfvars.json" + ) parameters_file = open(params_file_path, "w") json.dump(my_params, parameters_file, indent=4) parameters_file.close() # create json of CFN template print(self.args.format + " version: " + self.args.version) tf_file_body = os.path.join( - path.dirname(__file__), "template", self.args.format, self.args.version, "config_rule.tf" + path.dirname(__file__), + "template", + self.args.format, + self.args.version, + "config_rule.tf", + ) + tf_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_rule.tf" ) - tf_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_rule.tf") shutil.copy(tf_file_body, tf_file_path) variables_file_body = os.path.join( - path.dirname(__file__), "template", self.args.format, self.args.version, "variables.tf" + path.dirname(__file__), + "template", + self.args.format, + self.args.version, + "variables.tf", + ) + variables_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_variables.tf" ) - variables_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_variables.tf") shutil.copy(variables_file_body, variables_file_path) print("Export completed.This will generate three .tf files.") @@ -2530,8 +3206,14 @@ def test_local(self): "python3.8-lib", "python3.9", "python3.9-lib", + "python3.10", + "python3.10-lib", ): - print("Skipping " + rule_name + " - Runtime not supported for local testing.") + print( + "Skipping " + + rule_name + + " - Runtime not supported for local testing." 
+ ) continue print("Testing " + rule_name) @@ -2539,9 +3221,13 @@ def test_local(self): print("Looking for tests in " + test_dir) if args.verbose == True: - results = unittest.TextTestRunner(buffer=False, verbosity=2).run(self.__create_test_suite(test_dir)) + results = unittest.TextTestRunner(buffer=False, verbosity=2).run( + self.__create_test_suite(test_dir) + ) else: - results = unittest.TextTestRunner(buffer=True, verbosity=2).run(self.__create_test_suite(test_dir)) + results = unittest.TextTestRunner(buffer=True, verbosity=2).run( + self.__create_test_suite(test_dir) + ) print(results) @@ -2574,11 +3260,19 @@ def test_remote(self): # Generate test event from templates test_event = json.load( - open(os.path.join(path.dirname(__file__), "template", event_template_filename), "r"), strict=False + open( + os.path.join( + path.dirname(__file__), "template", event_template_filename + ), + "r", + ), + strict=False, ) my_invoking_event = json.loads(test_event["invokingEvent"]) my_invoking_event["configurationItem"] = my_ci - my_invoking_event["notificationCreationTime"] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.000Z") + my_invoking_event[ + "notificationCreationTime" + ] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.000Z") test_event["invokingEvent"] = json.dumps(my_invoking_event) test_event["ruleParameters"] = json.dumps(my_parameters) @@ -2631,11 +3325,15 @@ def logs(self): logGroupName=log_group_name, orderBy="LastEventTime", descending=True, - limit=int(self.args.number), # This is the worst-case scenario if there is only one event per stream + limit=int( + self.args.number + ), # This is the worst-case scenario if there is only one event per stream ) # Sadly we can't just use filter_log_events, since we don't know the timestamps yet and filter_log_events doesn't appear to support ordering. - my_events = self.__get_log_events(cw_logs, log_streams, int(self.args.number)) + my_events = self.__get_log_events( + cw_logs, log_streams, int(self.args.number) + ) latest_timestamp = 0 @@ -2680,8 +3378,12 @@ def logs(self): def rulesets(self): self.args = get_rulesets_parser().parse_args(self.args.command_args, self.args) - if self.args.subcommand in ["add", "remove"] and (not self.args.ruleset or not self.args.rulename): - print("You must specify a ruleset name and a rule for the `add` and `remove` commands.") + if self.args.subcommand in ["add", "remove"] and ( + not self.args.ruleset or not self.args.rulename + ): + print( + "You must specify a ruleset name and a rule for the `add` and `remove` commands." 
+ ) return 1 if self.args.subcommand == "list": @@ -2694,7 +3396,9 @@ def rulesets(self): print("Unknown subcommand.") def create_terraform_template(self): - self.args = get_create_rule_template_parser().parse_args(self.args.command_args, self.args) + self.args = get_create_rule_template_parser().parse_args( + self.args.command_args, self.args + ) if self.args.rulesets: self.args.rulesets = self.args.rulesets.split(",") @@ -2716,7 +3420,9 @@ def create_terraform_template(self): print("CloudFormation template written to " + self.args.output_file) def create_rule_template(self): - self.args = get_create_rule_template_parser().parse_args(self.args.command_args, self.args) + self.args = get_create_rule_template_parser().parse_args( + self.args.command_args, self.args + ) if self.args.rulesets: self.args.rulesets = self.args.rulesets.split(",") @@ -2738,7 +3444,9 @@ def create_rule_template(self): parameters = {} parameters["LambdaAccountId"] = {} - parameters["LambdaAccountId"]["Description"] = "Account ID that contains Lambda functions for Config Rules." + parameters["LambdaAccountId"][ + "Description" + ] = "Account ID that contains Lambda functions for Config Rules." parameters["LambdaAccountId"]["Type"] = "String" parameters["LambdaAccountId"]["MinLength"] = "12" parameters["LambdaAccountId"]["MaxLength"] = "12" @@ -2755,17 +3463,29 @@ def create_rule_template(self): "RoleName": config_role_name, "Path": "/rdk/", "ManagedPolicyArns": [ - {"Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWS_ConfigRole"}, + { + "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWS_ConfigRole" + }, {"Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/ReadOnlyAccess"}, ], "AssumeRolePolicyDocument": CONFIG_ROLE_ASSUME_ROLE_POLICY_DOCUMENT, - "Policies": [{"PolicyName": "DeliveryPermission", "PolicyDocument": CONFIG_ROLE_POLICY_DOCUMENT}], + "Policies": [ + { + "PolicyName": "DeliveryPermission", + "PolicyDocument": CONFIG_ROLE_POLICY_DOCUMENT, + } + ], } # Create Bucket for Config Data resources["ConfigBucket"] = { "Type": "AWS::S3::Bucket", - "Properties": {"BucketName": {"Fn::Sub": config_bucket_prefix + "-${AWS::AccountId}-${AWS::Region}"}}, + "Properties": { + "BucketName": { + "Fn::Sub": config_bucket_prefix + + "-${AWS::AccountId}-${AWS::Region}" + } + }, } # Create ConfigurationRecorder and DeliveryChannel @@ -2774,18 +3494,25 @@ def create_rule_template(self): "Properties": { "Name": "default", "RoleARN": {"Fn::GetAtt": ["ConfigRole", "Arn"]}, - "RecordingGroup": {"AllSupported": True, "IncludeGlobalResourceTypes": True}, + "RecordingGroup": { + "AllSupported": True, + "IncludeGlobalResourceTypes": True, + }, }, } if self.args.config_role_arn: - resources["ConfigurationRecorder"]["Properties"]["RoleARN"] = self.args.config_role_arn + resources["ConfigurationRecorder"]["Properties"][ + "RoleARN" + ] = self.args.config_role_arn resources["DeliveryChannel"] = { "Type": "AWS::Config::DeliveryChannel", "Properties": { "Name": "default", "S3BucketName": {"Ref": "ConfigBucket"}, - "ConfigSnapshotDeliveryProperties": {"DeliveryFrequency": "One_Hour"}, + "ConfigSnapshotDeliveryProperties": { + "DeliveryFrequency": "One_Hour" + }, }, } @@ -2797,7 +3524,10 @@ def create_rule_template(self): for input_param in input_params: cfn_param = {} cfn_param["Description"] = ( - "Pass-through to required Input Parameter " + input_param + " for Config Rule " + rule_name + "Pass-through to required Input Parameter " + + input_param + + " for Config Rule " + + rule_name ) if 
len(str(input_params[input_param]).strip()) == 0: default = "" @@ -2817,17 +3547,24 @@ def create_rule_template(self): for optional_param in optional_params: cfn_param = {} cfn_param["Description"] = ( - "Pass-through to optional Input Parameter " + optional_param + " for Config Rule " + rule_name + "Pass-through to optional Input Parameter " + + optional_param + + " for Config Rule " + + rule_name ) cfn_param["Default"] = optional_params[optional_param] cfn_param["Type"] = "String" - param_name = self.__get_alphanumeric_rule_name(rule_name) + optional_param + param_name = ( + self.__get_alphanumeric_rule_name(rule_name) + optional_param + ) parameters[param_name] = cfn_param optional_parameter_group["Parameters"].append(param_name) - conditions[param_name] = {"Fn::Not": [{"Fn::Equals": ["", {"Ref": param_name}]}]} + conditions[param_name] = { + "Fn::Not": [{"Fn::Equals": ["", {"Ref": param_name}]}] + } config_rule = {} config_rule["Type"] = "AWS::Config::ConfigRule" @@ -2852,7 +3589,10 @@ def create_rule_template(self): # Also add the appropriate event source. source["SourceDetails"].append( - {"EventSource": "aws.config", "MessageType": "ConfigurationItemChangeNotification"} + { + "EventSource": "aws.config", + "MessageType": "ConfigurationItemChangeNotification", + } ) if "SourcePeriodic" in params: source["SourceDetails"].append( @@ -2884,56 +3624,102 @@ def create_rule_template(self): if "InputParameters" in params: for required_param in json.loads(params["InputParameters"]): - cfn_param_name = self.__get_alphanumeric_rule_name(rule_name) + required_param - properties["InputParameters"][required_param] = {"Ref": cfn_param_name} + cfn_param_name = ( + self.__get_alphanumeric_rule_name(rule_name) + required_param + ) + properties["InputParameters"][required_param] = { + "Ref": cfn_param_name + } if "OptionalParameters" in params: for optional_param in json.loads(params["OptionalParameters"]): - cfn_param_name = self.__get_alphanumeric_rule_name(rule_name) + optional_param + cfn_param_name = ( + self.__get_alphanumeric_rule_name(rule_name) + optional_param + ) properties["InputParameters"][optional_param] = { - "Fn::If": [cfn_param_name, {"Ref": cfn_param_name}, {"Ref": "AWS::NoValue"}] + "Fn::If": [ + cfn_param_name, + {"Ref": cfn_param_name}, + {"Ref": "AWS::NoValue"}, + ] } config_rule["Properties"] = properties - config_rule_resource_name = self.__get_alphanumeric_rule_name(rule_name) + "ConfigRule" + config_rule_resource_name = ( + self.__get_alphanumeric_rule_name(rule_name) + "ConfigRule" + ) resources[config_rule_resource_name] = config_rule # If Remediation create the remediation section with potential links to the SSM Details if "Remediation" in params: - remediation = self.__create_remediation_cloudformation_block(params["Remediation"]) + remediation = self.__create_remediation_cloudformation_block( + params["Remediation"] + ) remediation["DependsOn"] = [config_rule_resource_name] if not self.args.rules_only: remediation["DependsOn"].append("ConfigRole") if "SSMAutomation" in params: - ssm_automation = self.__create_automation_cloudformation_block(params["SSMAutomation"], rule_name) + ssm_automation = self.__create_automation_cloudformation_block( + params["SSMAutomation"], rule_name + ) # AWS needs to build the SSM before the Config Rule - remediation["DependsOn"].append(self.__get_alphanumeric_rule_name(rule_name + "RemediationAction")) + remediation["DependsOn"].append( + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) + ) # Add JSON Reference to 
SSM Document { "Ref" : "MyEC2Instance" } remediation["Properties"]["TargetId"] = { - "Ref": self.__get_alphanumeric_rule_name(rule_name) + "RemediationAction" + "Ref": self.__get_alphanumeric_rule_name(rule_name) + + "RemediationAction" } if "IAM" in params["SSMAutomation"]: print("Lets Build IAM Role and Policy For the SSM Document") - ssm_iam_role, ssm_iam_policy = self.__create_automation_iam_cloudformation_block( + ( + ssm_iam_role, + ssm_iam_policy, + ) = self.__create_automation_iam_cloudformation_block( params["SSMAutomation"], rule_name ) - resources[self.__get_alphanumeric_rule_name(rule_name + "Role")] = ssm_iam_role - resources[self.__get_alphanumeric_rule_name(rule_name + "Policy")] = ssm_iam_policy - remediation["Properties"]["Parameters"]["AutomationAssumeRole"]["StaticValue"]["Values"] = [ - {"Fn::GetAtt": [self.__get_alphanumeric_rule_name(rule_name + "Role"), "Arn"]} + resources[ + self.__get_alphanumeric_rule_name(rule_name + "Role") + ] = ssm_iam_role + resources[ + self.__get_alphanumeric_rule_name(rule_name + "Policy") + ] = ssm_iam_policy + remediation["Properties"]["Parameters"]["AutomationAssumeRole"][ + "StaticValue" + ]["Values"] = [ + { + "Fn::GetAtt": [ + self.__get_alphanumeric_rule_name( + rule_name + "Role" + ), + "Arn", + ] + } ] # Override the placeholder to associate the SSM Document Role with newly crafted role - resources[self.__get_alphanumeric_rule_name(rule_name + "RemediationAction")] = ssm_automation - resources[self.__get_alphanumeric_rule_name(rule_name) + "Remediation"] = remediation + resources[ + self.__get_alphanumeric_rule_name( + rule_name + "RemediationAction" + ) + ] = ssm_automation + resources[ + self.__get_alphanumeric_rule_name(rule_name) + "Remediation" + ] = remediation if tags: tags_str = "" for tag in tags: - tags_str += "Key={},Value={} ".format(tag["Key"], tag["Value"]) - script_for_tag += "aws configservice tag-resource --resources-arn $(aws configservice describe-config-rules --config-rule-names {} --query 'ConfigRules[0].ConfigRuleArn' | tr -d '\"') --tags {} \n".format( - rule_name, tags_str + key = tag["Key"] + val = tag["Value"] + tags_str += f"Key={key},Value={val} " + script_for_tag += ( + "aws configservice tag-resource --resources-arn $(aws configservice describe-config-rules " + + f"--config-rule-names {rule_name} --query 'ConfigRules[0].ConfigRuleArn' | tr -d '\"') --tags {tags_str} \n" ) template["Resources"] = resources @@ -2942,7 +3728,10 @@ def create_rule_template(self): template["Metadata"] = { "AWS::CloudFormation::Interface": { "ParameterGroups": [ - {"Label": {"default": "Lambda Account ID"}, "Parameters": ["LambdaAccountId"]}, + { + "Label": {"default": "Lambda Account ID"}, + "Parameters": ["LambdaAccountId"], + }, required_parameter_group, optional_parameter_group, ], @@ -2959,7 +3748,9 @@ def create_rule_template(self): print("CloudFormation template written to " + self.args.output_file) if script_for_tag: - print("Found tags on config rules. Cloudformation do not support tagging config rule at the moment") + print( + "Found tags on config rules. Cloudformation do not support tagging config rule at the moment" + ) print("Generating script for config rules tags") script_for_tag = "#! 
/bin/bash \n" + script_for_tag if self.args.tag_config_rules_script: @@ -2968,10 +3759,14 @@ def create_rule_template(self): else: print("=========SCRIPT=========") print(script_for_tag) - print("you can use flag [--tag-config-rules-script ] to output the script") + print( + "you can use flag [--tag-config-rules-script ] to output the script" + ) def create_region_set(self): - self.args = get_create_region_set_parser().parse_args(self.args.command_args, self.args) + self.args = get_create_region_set_parser().parse_args( + self.args.command_args, self.args + ) output_file = self.args.output_file output_dict = { "default": ["us-east-1", "us-west-1", "eu-north-1", "ap-southeast-1"], @@ -3023,7 +3818,8 @@ def __list_rulesets(self): rules = [] for obj_name in os.listdir("."): - # print(obj_name) + if obj_name.startswith("."): + continue # Skip hidden items params_file_path = os.path.join(".", obj_name, parameter_file_name) if os.path.isfile(params_file_path): parameters_file = open(params_file_path, "r") @@ -3050,7 +3846,7 @@ def __get_template_dir(self): def __create_test_suite(self, test_dir): tests = [] - for (top, dirs, filenames) in os.walk(test_dir): + for top, dirs, filenames in os.walk(test_dir): for filename in fnmatch.filter(filenames, "*_test.py"): print(filename) sys.path.append(top) @@ -3072,39 +3868,39 @@ def __clean_rule_name(self, rule_name): return output def __create_java_rule(self): - src = os.path.join(path.dirname(__file__), "template", "runtime", "java8", "src") + src = os.path.join( + path.dirname(__file__), "template", "runtime", "java8", "src" + ) dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, "src") shutil.copytree(src, dst) - src = os.path.join(path.dirname(__file__), "template", "runtime", "java8", "jars") + src = os.path.join( + path.dirname(__file__), "template", "runtime", "java8", "jars" + ) dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, "jars") shutil.copytree(src, dst) - src = os.path.join(path.dirname(__file__), "template", "runtime", "java8", "build.gradle") + src = os.path.join( + path.dirname(__file__), "template", "runtime", "java8", "build.gradle" + ) dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, "build.gradle") shutil.copyfile(src, dst) - def __create_dotnet_rule(self): - runtime_path = os.path.join(path.dirname(__file__), "template", "runtime", self.args.runtime) - dst_path = os.path.join(os.getcwd(), rules_dir, self.args.rulename) - for obj in os.listdir(runtime_path): - src = os.path.join(runtime_path, obj) - dst = os.path.join(dst_path, obj) - if os.path.isfile(src): - shutil.copyfile(src, dst) - else: - shutil.copytree(src, dst) - def __print_log_event(self, event): - time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(event["timestamp"] / 1000)) + time_string = time.strftime( + "%Y-%m-%d %H:%M:%S", time.localtime(event["timestamp"] / 1000) + ) rows = 24 columns = 80 - try: - rows, columns = os.popen("stty size", "r").read().split() - except ValueError as e: - # This was probably being run in a headless test environment which had no stty. - print("Using default terminal rows and columns.") + if shutil.which("stty") is not None: + try: + rows, columns = os.popen("stty size", "r").read().split() + except Exception as e: + # This was probably being run in a headless test environment which had no stty. 
+ print("Using default terminal rows and columns.") + else: + print("stty not present -- using default terminal rows and columns.") line_wrap = int(columns) - 22 message_lines = str(event["message"]).splitlines() @@ -3112,7 +3908,11 @@ def __print_log_event(self, event): for line in message_lines: line = line.replace("\t", " ") - formatted_lines.append("\n".join(line[i : i + line_wrap] for i in range(0, len(line), line_wrap))) + formatted_lines.append( + "\n".join( + line[i : i + line_wrap] for i in range(0, len(line), line_wrap) + ) + ) message_string = "\n".join(formatted_lines) message_string = message_string.replace("\n", "\n ") @@ -3165,7 +3965,11 @@ def __get_caller_identity_details(self, my_session): response = my_sts.get_caller_identity() arn_split = response["Arn"].split(":") - return {"account_id": response["Account"], "partition": arn_split[1], "region": arn_split[3]} + return { + "account_id": response["Account"], + "partition": arn_split[1], + "region": arn_split[3], + } def __get_stack_name_from_rule_name(self, rule_name): output = rule_name.replace("_", "") @@ -3180,22 +3984,33 @@ def __get_alphanumeric_rule_name(self, rule_name): def __get_rule_list_for_command(self, Command="deploy"): rule_names = [] if self.args.all: - d = "." for obj_name in os.listdir("."): obj_path = os.path.join(".", obj_name) if os.path.isdir(obj_path) and not obj_name == "rdk": for file_name in os.listdir(obj_path): if obj_name not in rule_names: - if os.path.exists(os.path.join(obj_path, "parameters.json")): + if os.path.exists( + os.path.join(obj_path, "parameters.json") + ): rule_names.append(obj_name) else: if file_name.split(".")[0] == obj_name: rule_names.append(obj_name) if os.path.exists( - os.path.join(obj_path, "src", "main", "java", "com", "rdk", "RuleCode.java") + os.path.join( + obj_path, + "src", + "main", + "java", + "com", + "rdk", + "RuleCode.java", + ) ): rule_names.append(obj_name) - if os.path.exists(os.path.join(obj_path, "RuleCode.cs")): + if os.path.exists( + os.path.join(obj_path, "RuleCode.cs") + ): rule_names.append(obj_name) elif self.args.rulesets: for obj_name in os.listdir("."): @@ -3215,7 +4030,10 @@ def __get_rule_list_for_command(self, Command="deploy"): if os.path.isdir(cleaned_rule_name): rule_names.append(cleaned_rule_name) else: - print('Invalid Option: Specify Rule Name or RuleSet. Run "rdk %s -h" for more info.' % (Command)) + print( + 'Invalid Option: Specify Rule Name or RuleSet. Run "rdk %s -h" for more info.' + % (Command) + ) sys.exit(1) if len(rule_names) == 0: @@ -3226,21 +4044,21 @@ def __get_rule_list_for_command(self, Command="deploy"): for name in rule_names: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." 
) sys.exit(1) return rule_names def __get_rule_parameters(self, rule_name): - params_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, parameter_file_name) + params_file_path = os.path.join( + os.getcwd(), rules_dir, rule_name, parameter_file_name + ) try: parameters_file = open(params_file_path, "r") except IOError as e: - print("Failed to open parameters file for rule '{}'".format(rule_name)) + print(f"Failed to open parameters file for rule '{rule_name}'") print(e.message) sys.exit(1) @@ -3249,12 +4067,12 @@ def __get_rule_parameters(self, rule_name): try: my_json = json.load(parameters_file) except ValueError as ve: # includes simplejson.decoder.JSONDecodeError - print("Failed to decode JSON in parameters file for Rule {}".format(rule_name)) + print(f"Failed to decode JSON in parameters file for Rule {rule_name}") print(ve.message) parameters_file.close() sys.exit(1) except Exception as e: - print("Error loading parameters file for Rule {}".format(rule_name)) + print(f"Error loading parameters file for Rule {rule_name}") print(e.message) parameters_file.close() sys.exit(1) @@ -3275,7 +4093,9 @@ def __get_rule_parameters(self, rule_name): return my_json["Parameters"], my_tags def __parse_rule_args(self, is_required): - self.args = get_rule_parser(is_required, self.args.command).parse_args(self.args.command_args, self.args) + self.args = get_rule_parser(is_required, self.args.command).parse_args( + self.args.command_args, self.args + ) if self.args.rulename: if len(self.args.rulename) > 128: @@ -3287,7 +4107,10 @@ def __parse_rule_args(self, is_required): for resource_type in self.args.resource_types.split(","): if resource_type not in accepted_resource_types: resource_type_error = ( - resource_type_error + ' "' + resource_type + '" not found in list of accepted resource types.' + resource_type_error + + ' "' + + resource_type + + '" not found in list of accepted resource types.' ) if resource_type_error: print(resource_type_error) @@ -3298,8 +4121,14 @@ def __parse_rule_args(self, is_required): "Skip-Supported-Resource-Check Flag set (--skip-supported-resource-check), ignoring missing resource type error." ) - if is_required and not self.args.resource_types and not self.args.maximum_frequency: - print("You must specify either a resource type trigger or a maximum frequency.") + if ( + is_required + and not self.args.resource_types + and not self.args.maximum_frequency + ): + print( + "You must specify either a resource type trigger or a maximum frequency." + ) sys.exit(1) if self.args.input_parameters: @@ -3312,16 +4141,20 @@ def __parse_rule_args(self, is_required): if self.args.optional_parameters: try: - optional_params_dict = json.loads(self.args.optional_parameters, strict=False) + optional_params_dict = json.loads( + self.args.optional_parameters, strict=False + ) except Exception as e: - print("Failed to parse optional parameters.") + print(f"Failed to parse optional parameters. 
{repr(e)}") sys.exit(1) if self.args.rulesets: self.args.rulesets = self.args.rulesets.split(",") def __parse_test_args(self): - self.args = get_test_parser(self.args.command).parse_args(self.args.command_args, self.args) + self.args = get_test_parser(self.args.command).parse_args( + self.args.command_args, self.args + ) if self.args.all and self.args.rulename: print("You may specify either specific rules or --all, but not both.") @@ -3333,12 +4166,15 @@ def __parse_test_args(self): return self.args def __parse_deploy_args(self, ForceArgument=False): + self.args = get_deployment_parser(ForceArgument).parse_args( + self.args.command_args, self.args + ) - self.args = get_deployment_parser(ForceArgument).parse_args(self.args.command_args, self.args) - - ### Validate inputs ### + # Validate inputs # if self.args.stack_name and not self.args.functions_only: - print("--stack-name can only be specified when using the --functions-only feature.") + print( + "--stack-name can only be specified when using the --functions-only feature." + ) sys.exit(1) # Make sure we're not exceeding Layer limits @@ -3347,12 +4183,20 @@ def __parse_deploy_args(self, ForceArgument=False): if layer_count > 5: print("You may only specify 5 Lambda Layers.") sys.exit(1) - if self.args.rdklib_layer_arn or self.args.generated_lambda_layer and layer_count > 4: - print("Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers.") + if ( + self.args.rdklib_layer_arn + or self.args.generated_lambda_layer + and layer_count > 4 + ): + print( + "Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers." + ) sys.exit(1) # RDKLib version and RDKLib Layer ARN/Generated RDKLib Layer are mutually exclusive. - if "rdk_lib_version" in self.args and (self.args.rdklib_layer_arn or self.args.generated_lambda_layer): + if "rdk_lib_version" in self.args and ( + self.args.rdklib_layer_arn or self.args.generated_lambda_layer + ): print( "Specify EITHER an RDK Lib version to use the official release OR a specific Layer ARN to use a custom implementation." ) @@ -3360,7 +4204,9 @@ def __parse_deploy_args(self, ForceArgument=False): # RDKLib version and RDKLib Layer ARN/Generated RDKLib Layer are mutually exclusive. if self.args.rdklib_layer_arn and self.args.generated_lambda_layer: - print("Specify EITHER an RDK Lib Layer ARN OR the generated lambda layer flag.") + print( + "Specify EITHER an RDK Lib Layer ARN OR the generated lambda layer flag." + ) sys.exit(1) # Check rule names to make sure none are too long. This is needed to catch Rules created before length constraint was added. @@ -3368,9 +4214,7 @@ def __parse_deploy_args(self, ForceArgument=False): for name in self.args.rulename: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." 
) sys.exit(1) @@ -3381,12 +4225,15 @@ def __parse_deploy_args(self, ForceArgument=False): self.args.rulesets = self.args.rulesets.split(",") def __parse_deploy_organization_args(self, ForceArgument=False): + self.args = get_deployment_organization_parser(ForceArgument).parse_args( + self.args.command_args, self.args + ) - self.args = get_deployment_organization_parser(ForceArgument).parse_args(self.args.command_args, self.args) - - ### Validate inputs ### + # Validate inputs # if self.args.stack_name and not self.args.functions_only: - print("--stack-name can only be specified when using the --functions-only feature.") + print( + "--stack-name can only be specified when using the --functions-only feature." + ) sys.exit(1) # Make sure we're not exceeding Layer limits @@ -3396,7 +4243,9 @@ def __parse_deploy_organization_args(self, ForceArgument=False): print("You may only specify 5 Lambda Layers.") sys.exit(1) if self.args.rdklib_layer_arn and layer_count > 4: - print("Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers.") + print( + "Because you have selected a 'lib' runtime You may only specify 4 additional Lambda Layers." + ) sys.exit(1) # RDKLib version and RDKLib Layer ARN are mutually exclusive. @@ -3411,9 +4260,7 @@ def __parse_deploy_organization_args(self, ForceArgument=False): for name in self.args.rulename: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." ) sys.exit(1) @@ -3424,17 +4271,16 @@ def __parse_deploy_organization_args(self, ForceArgument=False): self.args.rulesets = self.args.rulesets.split(",") def __parse_export_args(self, ForceArgument=False): - - self.args = get_export_parser(ForceArgument).parse_args(self.args.command_args, self.args) + self.args = get_export_parser(ForceArgument).parse_args( + self.args.command_args, self.args + ) # Check rule names to make sure none are too long. This is needed to catch Rules created before length constraint was added. if self.args.rulename: for name in self.args.rulename: if len(name) > 128: print( - "Error: Found Rule with name over 128 characters: {} \n Recreate the Rule with a shorter name.".format( - name - ) + f"Error: Found Rule with name over 128 characters: {name} \n Recreate the Rule with a shorter name." 
) sys.exit(1) @@ -3448,35 +4294,14 @@ def __package_function_code(self, rule_name, params): subprocess.call(command, cwd=working_dir) # set source as distribution zip - s3_src = os.path.join(os.getcwd(), rules_dir, rule_name, "build", "distributions", rule_name + ".zip") - elif params["SourceRuntime"] in ["dotnetcore1.0", "dotnetcore2.0"]: - print("Packaging " + rule_name) - working_dir = os.path.join(os.getcwd(), rules_dir, rule_name) - commands = [["dotnet", "restore"]] - - app_runtime = "netcoreapp1.0" - if params["SourceRuntime"] == "dotnetcore2.0": - app_runtime = "netcoreapp2.0" - - commands.append(["dotnet", "lambda", "package", "-c", "Release", "-f", app_runtime]) - - for command in commands: - subprocess.call(command, cwd=working_dir) - - # Remove old zip file if it already exists - package_file_dst = os.path.join(rule_name, rule_name + ".zip") - self.__delete_package_file(package_file_dst) - - # Create new package in temp directory, copy to rule directory - # This copy avoids the archiver trying to include the output zip in itself - s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name, "bin", "Release", app_runtime, "publish") - tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir + s3_src = os.path.join( + os.getcwd(), + rules_dir, + rule_name, + "build", + "distributions", + rule_name + ".zip", ) - if not (os.path.exists(package_file_dst)): - shutil.copy(tmp_src, package_file_dst) - s3_src = os.path.abspath(package_file_dst) - self.__delete_package_file(tmp_src) else: print("Zipping " + rule_name) @@ -3487,7 +4312,9 @@ def __package_function_code(self, rule_name, params): # zip rule code files and upload to s3 bucket s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name) tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir + os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), + "zip", + s3_src_dir, ) if not (os.path.exists(package_file_dst)): shutil.copy(tmp_src, package_file_dst) @@ -3526,7 +4353,9 @@ def __populate_params(self): if self.args.optional_parameters: # As above, but with the optional input parameters. try: - my_optional_params = json.loads(self.args.optional_parameters, strict=False) + my_optional_params = json.loads( + self.args.optional_parameters, strict=False + ) except Exception as e: print( "Error parsing optional input parameter JSON. Make sure your JSON keys and values are enclosed in properly escaped double quotes and your optional-parameters string is enclosed in single quotes." @@ -3538,7 +4367,7 @@ def __populate_params(self): # As above, but with the optional tag key value pairs. try: my_tags = json.loads(self.args.tags, strict=False) - except Exception as e: + except Exception: print( "Error parsing optional tags JSON. Make sure your JSON keys and values are enclosed in properly escaped double quotes and tags string is enclosed in single quotes." 
) @@ -3558,12 +4387,14 @@ def __populate_params(self): ) and not self.args.remediation_action ): - print("Remediation Flags detected but no remediation action (--remediation-action) set") + print( + "Remediation Flags detected but no remediation action (--remediation-action) set" + ) if self.args.remediation_action: try: my_remediation = self.__generate_remediation_params() - except Exception as e: + except Exception: print("Error parsing remediation configuration.") # create config file and place in rule directory @@ -3571,7 +4402,7 @@ def __populate_params(self): "RuleName": self.args.rulename, "Description": self.args.rulename, "SourceRuntime": self.args.runtime, - #'CodeBucket': code_bucket_prefix + account_id, + # 'CodeBucket': code_bucket_prefix + account_id, "CodeKey": self.args.rulename + my_session.region_name + ".zip", "InputParameters": json.dumps(my_input_params), "OptionalParameters": json.dumps(my_optional_params), @@ -3610,7 +4441,9 @@ def __generate_remediation_params(self): ssm_controls = {} if self.args.remediation_concurrent_execution_percent: - ssm_controls["ConcurrentExecutionRatePercentage"] = self.args.remediation_concurrent_execution_percent + ssm_controls[ + "ConcurrentExecutionRatePercentage" + ] = self.args.remediation_concurrent_execution_percent if self.args.remediation_error_rate_percent: ssm_controls["ErrorPercentage"] = self.args.remediation_error_rate_percent @@ -3619,7 +4452,9 @@ def __generate_remediation_params(self): params["ExecutionControls"] = {"SsmControls": ssm_controls} if self.args.auto_remediation_retry_attempts: - params["MaximumAutomaticAttempts"] = self.args.auto_remediation_retry_attempts + params[ + "MaximumAutomaticAttempts" + ] = self.args.auto_remediation_retry_attempts if self.args.remediation_parameters: params["Parameters"] = json.loads(self.args.remediation_parameters) @@ -3640,7 +4475,9 @@ def __generate_remediation_params(self): def __write_params_file(self, rulename, parameters, tags): my_params = {"Version": "1.0", "Parameters": parameters, "Tags": tags} - params_file_path = os.path.join(os.getcwd(), rules_dir, rulename, parameter_file_name) + params_file_path = os.path.join( + os.getcwd(), rules_dir, rulename, parameter_file_name + ) parameters_file = open(params_file_path, "w") json.dump(my_params, parameters_file, indent=2) parameters_file.close() @@ -3653,8 +4490,8 @@ def __wait_for_cfn_stack(self, cfn_client, stackname): response = cfn_client.list_stacks() all_stacks = response["StackSummaries"] - while 'NextToken' in response: - response = cfn_client.list_stacks(NextToken=response['NextToken']) + while "NextToken" in response: + response = cfn_client.list_stacks(NextToken=response["NextToken"]) all_stacks += response["StackSummaries"] for stack in all_stacks: @@ -3672,26 +4509,44 @@ def __wait_for_cfn_stack(self, cfn_client, stackname): # If all stacks have been deleted, clearly we're done! if all_deleted: in_progress = False - print(f"[{my_session.region_name}]: CloudFormation stack operation complete.") + print( + f"[{my_session.region_name}]: CloudFormation stack operation complete." + ) continue else: if "FAILED" in active_stack["StackStatus"]: in_progress = False - print(f"[{my_session.region_name}]: CloudFormation stack operation Failed for " + stackname + ".") + print( + f"[{my_session.region_name}]: CloudFormation stack operation Failed for " + + stackname + + "." 
+ ) if "StackStatusReason" in active_stack: - print(f"[{my_session.region_name}]: Reason: " + active_stack["StackStatusReason"]) + print( + f"[{my_session.region_name}]: Reason: " + + active_stack["StackStatusReason"] + ) elif active_stack["StackStatus"] == "ROLLBACK_COMPLETE": in_progress = False print( - f"[{my_session.region_name}]: CloudFormation stack operation Rolled Back for " + stackname + "." + f"[{my_session.region_name}]: CloudFormation stack operation Rolled Back for " + + stackname + + "." ) if "StackStatusReason" in active_stack: - print(f"[{my_session.region_name}]: Reason: " + active_stack["StackStatusReason"]) + print( + f"[{my_session.region_name}]: Reason: " + + active_stack["StackStatusReason"] + ) elif "COMPLETE" in active_stack["StackStatus"]: in_progress = False - print(f"[{my_session.region_name}]: CloudFormation stack operation complete.") + print( + f"[{my_session.region_name}]: CloudFormation stack operation complete." + ) else: - print(f"[{my_session.region_name}]: Waiting for CloudFormation stack operation to complete...") + print( + f"[{my_session.region_name}]: Waiting for CloudFormation stack operation to complete..." + ) time.sleep(5) def __get_handler(self, rule_name, params): @@ -3704,21 +4559,19 @@ def __get_handler(self, rule_name, params): "python3.8-lib", "python3.9", "python3.9-lib", - "nodejs6.10", - "nodejs8.10", + "python3.10", + "python3.10-lib", ]: return rule_name + ".lambda_handler" elif params["SourceRuntime"] in ["java8"]: return "com.rdk.RuleUtil::handler" - elif params["SourceRuntime"] in ["dotnetcore1.0", "dotnetcore2.0"]: - return "csharp7.0::Rdk.CustomConfigHandler::FunctionHandler" def __get_runtime_string(self, params): if params["SourceRuntime"] in [ - "python3.6-managed", "python3.7-lib", "python3.8-lib", "python3.9-lib", + "python3.10-lib", ]: runtime = params["SourceRuntime"].split("-") return runtime[0] @@ -3735,9 +4588,13 @@ def __get_test_CIs(self, rulename): test_ci_list.append(my_test_ci.get_json()) else: # Check to see if there is a test_ci.json file in the Rule directory - tests_path = os.path.join(os.getcwd(), rules_dir, rulename, test_ci_filename) + tests_path = os.path.join( + os.getcwd(), rules_dir, rulename, test_ci_filename + ) if os.path.exists(tests_path): - print("\tTesting with CI's provided in test_ci.json file. NOT YET IMPLEMENTED") # TODO + print( + "\tTesting with CI's provided in test_ci.json file. NOT YET IMPLEMENTED" + ) # TODO # test_ci_list self._load_cis_from_file(tests_path) else: print("\tTesting with generic CI for configured Resource Type(s)") @@ -3755,7 +4612,8 @@ def __get_lambda_arn_for_stack(self, stack_name): my_cfn = my_session.client("cloudformation") - # Since CFN won't detect changes to the lambda code stored in S3 as a reason to update the stack, we need to manually update the code reference in Lambda once the CFN has run. + # Since CFN won't detect changes to the lambda code stored in S3 as a reason to update the stack, + # we need to manually update the code reference in Lambda once the CFN has run. self.__wait_for_cfn_stack(my_cfn, stack_name) # Lambda function is an output of the stack. @@ -3767,7 +4625,9 @@ def __get_lambda_arn_for_stack(self, stack_name): my_lambda_arn = output["OutputValue"] if my_lambda_arn == "NOTFOUND": - print(f"[{my_session.region_name}]: Could not read CloudFormation stack output to find Lambda function.") + print( + f"[{my_session.region_name}]: Could not read CloudFormation stack output to find Lambda function." 
+ ) sys.exit(1) return my_lambda_arn @@ -3777,27 +4637,27 @@ def __get_lambda_name(self, rule_name, params): lambda_name = params["CustomLambdaName"] if len(lambda_name) > 64: print( - "Error: Found Rule's Lambda function with name over 64 characters: {} \n Recreate the lambda name with a shorter name.".format( - lambda_name - ) + f"Error: Found Rule's Lambda function with name over 64 characters: {lambda_name}." + + "\nRecreate the lambda name with a shorter name." ) sys.exit(1) return lambda_name else: - lambda_name = "RDK-Rule-Function-" + self.__get_stack_name_from_rule_name(rule_name) + lambda_name = "RDK-Rule-Function-" + self.__get_stack_name_from_rule_name( + rule_name + ) if len(lambda_name) > 64: print( - "Error: Found Rule's Lambda function with name over 64 characters: {} \n Recreate the rule with a shorter name or with CustomLambdaName attribute in parameter.json. If you are using 'rdk create', you can add '--custom-lambda-name ' to create your RDK rules".format( - lambda_name - ) + f"Error: Found Rule's Lambda function with name over 64 characters: {lambda_name}." + + "\nRecreate the rule with a shorter name or with CustomLambdaName attribute in parameter.json." + + "\nIf you are using 'rdk create', you can add '--custom-lambda-name ' to create your RDK rules" ) sys.exit(1) return lambda_name def __get_lambda_arn_for_rule(self, rule_name, partition, region, account, params): - return "arn:{}:lambda:{}:{}:function:{}".format( - partition, region, account, self.__get_lambda_name(rule_name, params) - ) + lambda_name = self.__get_lambda_name(rule_name, params) + return f"arn:{partition}:lambda:{region}:{account}:function:{lambda_name}" def __delete_package_file(self, file): try: @@ -3805,7 +4665,9 @@ def __delete_package_file(self, file): except OSError: pass - def __upload_function_code(self, rule_name, params, account_id, my_session, code_bucket_name): + def __upload_function_code( + self, rule_name, params, account_id, my_session, code_bucket_name + ): if params["SourceRuntime"] == "java8": # Do java build and package. 
print(f"[{my_session.region_name}]: Running Gradle Build for " + rule_name) @@ -3815,7 +4677,12 @@ def __upload_function_code(self, rule_name, params, account_id, my_session, code # set source as distribution zip s3_src = os.path.join( - os.getcwd(), rules_dir, rule_name, "build", "distributions", rule_name + my_session.region_name + ".zip" + os.getcwd(), + rules_dir, + rule_name, + "build", + "distributions", + rule_name + my_session.region_name + ".zip", ) s3_dst = "/".join((rule_name, rule_name + ".zip")) @@ -3825,41 +4692,6 @@ def __upload_function_code(self, rule_name, params, account_id, my_session, code my_s3.meta.client.upload_file(s3_src, code_bucket_name, s3_dst) print(f"[{my_session.region_name}]: Upload complete.") - elif params["SourceRuntime"] in ["dotnetcore1.0", "dotnetcore2.0"]: - print("Packaging " + rule_name) - working_dir = os.path.join(os.getcwd(), rules_dir, rule_name) - commands = [["dotnet", "restore"]] - - app_runtime = "netcoreapp1.0" - if params["SourceRuntime"] == "dotnetcore2.0": - app_runtime = "netcoreapp2.0" - - commands.append(["dotnet", "lambda", "package", "-c", "Release", "-f", app_runtime]) - - for command in commands: - subprocess.call(command, cwd=working_dir) - - # Remove old zip file if it already exists - package_file_dst = os.path.join(rule_name, rule_name + ".zip") - self.__delete_package_file(package_file_dst) - - # Create new package in temp directory, copy to rule directory - # This copy avoids the archiver trying to include the output zip in itself - s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name, "bin", "Release", app_runtime, "publish") - tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir - ) - s3_dst = "/".join((rule_name, rule_name + ".zip")) - - my_s3 = my_session.resource("s3") - - print(f"[{my_session.region_name}]: Uploading " + rule_name) - my_s3.meta.client.upload_file(tmp_src, code_bucket_name, s3_dst) - print(f"[{my_session.region_name}]: Upload complete.") - if not (os.path.exists(package_file_dst)): - shutil.copy(tmp_src, package_file_dst) - self.__delete_package_file(tmp_src) - else: print(f"[{my_session.region_name}]: Zipping " + rule_name) # Remove old zip file if it already exists @@ -3870,7 +4702,9 @@ def __upload_function_code(self, rule_name, params, account_id, my_session, code s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name) tmp_src = shutil.make_archive( - os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), "zip", s3_src_dir + os.path.join(tempfile.gettempdir(), rule_name + my_session.region_name), + "zip", + s3_src_dir, ) s3_dst = "/".join((rule_name, rule_name + ".zip")) @@ -3897,7 +4731,6 @@ def __create_remediation_cloudformation_block(self, remediation_config): def __create_automation_cloudformation_block(self, ssm_automation, rule_name): print("Generate SSM Resources") - current_working_direcoty = os.getcwd() ssm_json_dir = os.path.join(os.getcwd(), ssm_automation["Document"]) print("Reading SSM JSON From -> " + ssm_json_dir) # params_file_path = os.path.join(os.getcwd(), rules_dir, rulename, parameter_file_name) @@ -3905,19 +4738,28 @@ def __create_automation_cloudformation_block(self, ssm_automation, rule_name): ssm_automation_json = json.loads(ssm_automation_content) ssm_automation_config = { "Type": "AWS::SSM::Document", - "Properties": {"DocumentType": "Automation", "Content": ssm_automation_json}, + "Properties": { + "DocumentType": "Automation", + "Content": ssm_automation_json, + }, } 
return ssm_automation_config def __create_automation_iam_cloudformation_block(self, ssm_automation, rule_name): - - print("Generate IAM Role for SSM Document with these actions", str(ssm_automation["IAM"])) + print( + "Generate IAM Role for SSM Document with these actions", + str(ssm_automation["IAM"]), + ) assume_role_template = { "Version": "2012-10-17", "Statement": [ - {"Effect": "Allow", "Principal": {"Service": "ssm.amazonaws.com"}, "Action": "sts:AssumeRole"} + { + "Effect": "Allow", + "Principal": {"Service": "ssm.amazonaws.com"}, + "Action": "sts:AssumeRole", + } ], } @@ -3925,7 +4767,8 @@ def __create_automation_iam_cloudformation_block(self, ssm_automation, rule_name ssm_automation_iam_role = { "Type": "AWS::IAM::Role", "Properties": { - "Description": "IAM Role to Support Config Remediation for " + rule_name, + "Description": "IAM Role to Support Config Remediation for " + + rule_name, "Path": "/rdk-remediation-role/", # "RoleName": {"Fn::Sub": "" + rule_name + "-Remediation-Role-${AWS::Region}"}, "AssumeRolePolicyDocument": assume_role_template, @@ -3936,11 +4779,21 @@ def __create_automation_iam_cloudformation_block(self, ssm_automation, rule_name "Type": "AWS::IAM::Policy", "Properties": { "PolicyDocument": { - "Statement": [{"Action": ssm_automation["IAM"], "Effect": "Allow", "Resource": "*"}], + "Statement": [ + { + "Action": ssm_automation["IAM"], + "Effect": "Allow", + "Resource": "*", + } + ], "Version": "2012-10-17", }, - "PolicyName": {"Fn::Sub": "" + rule_name + "-Remediation-Policy-${AWS::Region}"}, - "Roles": [{"Ref": self.__get_alphanumeric_rule_name(rule_name + "Role")}], + "PolicyName": { + "Fn::Sub": "" + rule_name + "-Remediation-Policy-${AWS::Region}" + }, + "Roles": [ + {"Ref": self.__get_alphanumeric_rule_name(rule_name + "Role")} + ], }, } @@ -3958,7 +4811,9 @@ def __create_function_cloudformation_template(self): parameters = {} parameters["SourceBucket"] = {} - parameters["SourceBucket"]["Description"] = "Name of the S3 bucket that you have stored the rule zip files in." + parameters["SourceBucket"][ + "Description" + ] = "Name of the S3 bucket that you have stored the rule zip files in." 
parameters["SourceBucket"]["Type"] = "String" parameters["SourceBucket"]["MinLength"] = "1" parameters["SourceBucket"]["MaxLength"] = "255" @@ -3973,10 +4828,16 @@ def __create_function_cloudformation_template(self): partition = identity_details["partition"] lambdaRoleArn = "" if self.args.lambda_role_arn: - print(f"[{my_session.region_name}]: Existing IAM Role provided: " + self.args.lambda_role_arn) + print( + f"[{my_session.region_name}]: Existing IAM Role provided: " + + self.args.lambda_role_arn + ) lambdaRoleArn = self.args.lambda_role_arn elif self.args.lambda_role_name: - print(f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + self.args.lambda_role_name) + print( + f"[{my_session.region_name}]: Building IAM Role ARN from Name: " + + self.args.lambda_role_name + ) arn = f"arn:{partition}:iam::{account_id}:role/{self.args.lambda_role_name}" lambdaRoleArn = arn else: @@ -3997,12 +4858,6 @@ def __create_function_cloudformation_template(self): ], } lambda_policy_statements = [ - { - "Sid": "1", - "Action": ["s3:GetObject"], - "Effect": "Allow", - "Resource": {"Fn::Sub": "arn:${AWS::Partition}:s3:::${SourceBucket}/*"}, - }, { "Sid": "2", "Action": [ @@ -4014,9 +4869,24 @@ def __create_function_cloudformation_template(self): "Effect": "Allow", "Resource": "*", }, - {"Sid": "3", "Action": ["config:PutEvaluations"], "Effect": "Allow", "Resource": "*"}, - {"Sid": "4", "Action": ["iam:List*", "iam:Describe*", "iam:Get*"], "Effect": "Allow", "Resource": "*"}, - {"Sid": "5", "Action": ["sts:AssumeRole"], "Effect": "Allow", "Resource": "*"}, + { + "Sid": "3", + "Action": ["config:PutEvaluations"], + "Effect": "Allow", + "Resource": "*", + }, + { + "Sid": "4", + "Action": ["iam:List*", "iam:Get*"], + "Effect": "Allow", + "Resource": "*", + }, + { + "Sid": "5", + "Action": ["sts:AssumeRole"], + "Effect": "Allow", + "Resource": "*", + }, ] if self.args.lambda_subnets and self.args.lambda_security_groups: vpc_policy = { @@ -4033,7 +4903,10 @@ def __create_function_cloudformation_template(self): lambda_role["Properties"]["Policies"] = [ { "PolicyName": "ConfigRulePolicy", - "PolicyDocument": {"Version": "2012-10-17", "Statement": lambda_policy_statements}, + "PolicyDocument": { + "Version": "2012-10-17", + "Statement": lambda_policy_statements, + }, } ] lambda_role["Properties"]["ManagedPolicyArns"] = [ @@ -4054,7 +4927,10 @@ def __create_function_cloudformation_template(self): lambda_function["Type"] = "AWS::Lambda::Function" properties = {} properties["FunctionName"] = self.__get_lambda_name(rule_name, params) - properties["Code"] = {"S3Bucket": {"Ref": "SourceBucket"}, "S3Key": rule_name + "/" + rule_name + ".zip"} + properties["Code"] = { + "S3Bucket": {"Ref": "SourceBucket"}, + "S3Key": rule_name + "/" + rule_name + ".zip", + } properties["Description"] = "Function for AWS Config Rule " + rule_name properties["Handler"] = self.__get_handler(rule_name, params) properties["MemorySize"] = "256" @@ -4087,7 +4963,9 @@ def __create_function_cloudformation_template(self): lambda_permissions["Type"] = "AWS::Lambda::Permission" lambda_permissions["DependsOn"] = alphanum_rule_name + "LambdaFunction" lambda_permissions["Properties"] = { - "FunctionName": {"Fn::GetAtt": [alphanum_rule_name + "LambdaFunction", "Arn"]}, + "FunctionName": { + "Fn::GetAtt": [alphanum_rule_name + "LambdaFunction", "Arn"] + }, "Action": "lambda:InvokeFunction", "Principal": "config.amazonaws.com", } @@ -4099,15 +4977,25 @@ def __create_function_cloudformation_template(self): def __tag_config_rule(self, rule_name, 
cfn_tags, my_session): config_client = my_session.client("config") - config_arn = config_client.describe_config_rules(ConfigRuleNames=[rule_name])["ConfigRules"][0]["ConfigRuleArn"] + config_arn = config_client.describe_config_rules(ConfigRuleNames=[rule_name])[ + "ConfigRules" + ][0]["ConfigRuleArn"] response = config_client.tag_resource(ResourceArn=config_arn, Tags=cfn_tags) return response def __get_lambda_layers(self, my_session, args, params): layers = [] if "SourceRuntime" in params: - if params["SourceRuntime"] in ["python3.7-lib", "python3.8-lib", "python3.9-lib"]: - if hasattr(args, "generated_lambda_layer") and args.generated_lambda_layer: + if params["SourceRuntime"] in [ + "python3.7-lib", + "python3.8-lib", + "python3.9-lib", + "python3.10-lib", + ]: + if ( + hasattr(args, "generated_lambda_layer") + and args.generated_lambda_layer + ): lambda_layer_version = self.__get_existing_lambda_layer( my_session, layer_name=args.custom_layer_name ) @@ -4115,7 +5003,9 @@ def __get_lambda_layers(self, my_session, args, params): print( f"{my_session.region_name} generated-lambda-layer flag received, but layer [{args.custom_layer_name}] not found in {my_session.region_name}. Creating one now" ) - self.__create_new_lambda_layer(my_session, layer_name=args.custom_layer_name) + self.__create_new_lambda_layer( + my_session, layer_name=args.custom_layer_name + ) lambda_layer_version = self.__get_existing_lambda_layer( my_session, layer_name=args.custom_layer_name ) @@ -4124,7 +5014,9 @@ def __get_lambda_layers(self, my_session, args, params): layers.append(args.rdklib_layer_arn) else: rdk_lib_version = RDKLIB_LAYER_VERSION[my_session.region_name] - rdklib_arn = RDKLIB_ARN_STRING.format(region=my_session.region_name, version=rdk_lib_version) + rdklib_arn = RDKLIB_ARN_STRING.format( + region=my_session.region_name, version=rdk_lib_version + ) layers.append(rdklib_arn) return layers @@ -4139,10 +5031,11 @@ def __get_existing_lambda_layer(self, my_session, layer_name="rdklib-layer"): return None def __create_new_lambda_layer(self, my_session, layer_name="rdklib-layer"): - successful_return = None if layer_name == "rdklib-layer": - successful_return = self.__create_new_lambda_layer_serverless_repo(my_session) + successful_return = self.__create_new_lambda_layer_serverless_repo( + my_session + ) # If that doesn't work, create it locally and upload - SAR doesn't support the custom layer name if layer_name != "rdklib-layer" or not successful_return: @@ -4173,7 +5066,9 @@ def __create_new_lambda_layer_serverless_repo(self, my_session): change_set_arn = sar_client.create_cloud_formation_change_set( ApplicationId=RDKLIB_LAYER_SAR_ID, StackName="rdklib" )["ChangeSetId"] - print(f"[{my_session.region_name}]: Creating change set to deploy rdklib-layer") + print( + f"[{my_session.region_name}]: Creating change set to deploy rdklib-layer" + ) code = self.__check_on_change_set(cfn_client, change_set_arn) if code == 1: print( @@ -4181,9 +5076,13 @@ def __create_new_lambda_layer_serverless_repo(self, my_session): ) return 1 if code == -1: - print(f"[{my_session.region_name}]: Error creating change set, attempting to use manual deployment") + print( + f"[{my_session.region_name}]: Error creating change set, attempting to use manual deployment" + ) raise ClientError() - print(f"[{my_session.region_name}]: Executing change set to deploy rdklib-layer") + print( + f"[{my_session.region_name}]: Executing change set to deploy rdklib-layer" + ) cfn_client.execute_change_set(ChangeSetName=change_set_arn) waiter = 
cfn_client.get_waiter(f"stack_{create_type}_complete") waiter.wait(StackName="serverlessrepo-rdklib") @@ -4197,7 +5096,9 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer region = my_session.region_name print(f"[{region}]: Creating new {layer_name}") folder_name = "lib" + str(uuid.uuid4()) - shell_command = f"pip3 install --target python boto3 botocore rdk rdklib future mock" + shell_command = ( + "pip3 install --target python boto3 botocore rdk rdklib future mock" + ) print(f"[{region}]: Installing Packages to {folder_name}/python") try: @@ -4206,7 +5107,7 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer print(e) sys.exit(1) os.chdir(folder_name) - ret = subprocess.run(shell_command, capture_output=True, shell=True) + _ = subprocess.run(shell_command, capture_output=True, shell=True) print(f"[{region}]: Creating rdk_lib_layer.zip") shutil.make_archive(f"rdk_lib_layer", "zip", ".", "python") @@ -4217,12 +5118,17 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer print(f"[{region}]: Creating temporary S3 Bucket") bucket_name = "rdkliblayertemp" + str(uuid.uuid4()) if region != "us-east-1": - s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={"LocationConstraint": region}) + s3_client.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": region}, + ) if region == "us-east-1": s3_client.create_bucket(Bucket=bucket_name) print(f"[{region}]: Uploading rdk_lib_layer.zip to S3") - s3_resource.Bucket(bucket_name).upload_file(f"{folder_name}/rdk_lib_layer.zip", layer_name) + s3_resource.Bucket(bucket_name).upload_file( + f"{folder_name}/rdk_lib_layer.zip", layer_name + ) lambda_client = my_session.client("lambda") @@ -4230,7 +5136,7 @@ def __create_new_lambda_layer_locally(self, my_session, layer_name="rdklib-layer lambda_client.publish_layer_version( LayerName=layer_name, Content={"S3Bucket": bucket_name, "S3Key": layer_name}, - CompatibleRuntimes=["python3.7", "python3.8", "python3.9"], + CompatibleRuntimes=["python3.7", "python3.8", "python3.9", "python3.10"], ) print(f"[{region}]: Deleting temporary S3 Bucket") @@ -4263,14 +5169,20 @@ def __init__(self, ci_type): ci_file = ci_type.replace("::", "_") + ".json" try: self.ci_json = json.load( - open(os.path.join(path.dirname(__file__), "template", example_ci_dir, ci_file), "r") + open( + os.path.join( + path.dirname(__file__), "template", example_ci_dir, ci_file + ), + "r", + ) ) except FileNotFoundError: + resource_url = "https://github.com/awslabs/aws-config-resource-schema/blob/master/config/properties/resource-types/" print( "No sample CI found for " + ci_type + ", even though it appears to be a supported CI. Please log an issue at https://github.com/awslabs/aws-config-rdk." 
- + "\nLook here: https://github.com/awslabs/aws-config-resource-schema/blob/master/config/properties/resource-types/ for additional info" + + f"\nLook here: {resource_url} for additional info" ) exit(1) diff --git a/rdk/template/configRule.json b/rdk/template/configRule.json index e8fe4fa9..43ab74a5 100644 --- a/rdk/template/configRule.json +++ b/rdk/template/configRule.json @@ -221,14 +221,6 @@ "PolicyDocument": { "Version": "2012-10-17", "Statement": [ - { - "Sid": "1", - "Action": [ - "s3:GetObject" - ], - "Effect": "Allow", - "Resource": { "Fn::Sub": "arn:${AWS::Partition}:s3:::${SourceBucket}/${SourcePath}" } - }, { "Sid": "2", "Action": [ @@ -252,7 +244,6 @@ "Sid": "4", "Action": [ "iam:List*", - "iam:Describe*", "iam:Get*" ], "Effect": "Allow", diff --git a/rdk/template/configRuleOrganization.json b/rdk/template/configRuleOrganization.json index 59d1c58f..52dd506e 100644 --- a/rdk/template/configRuleOrganization.json +++ b/rdk/template/configRuleOrganization.json @@ -199,14 +199,6 @@ "PolicyDocument": { "Version": "2012-10-17", "Statement": [ - { - "Sid": "1", - "Action": [ - "s3:GetObject" - ], - "Effect": "Allow", - "Resource": { "Fn::Sub": "arn:${AWS::Partition}:s3:::${SourceBucket}/${SourcePath}" } - }, { "Sid": "2", "Action": [ @@ -230,7 +222,6 @@ "Sid": "4", "Action": [ "iam:List*", - "iam:Describe*", "iam:Get*" ], "Effect": "Allow", diff --git a/rdk/template/example_ci/AWS_R53_HostedZone.json b/rdk/template/example_ci/AWS_R53_HostedZone.json new file mode 100644 index 00000000..e93a324a --- /dev/null +++ b/rdk/template/example_ci/AWS_R53_HostedZone.json @@ -0,0 +1,39 @@ +{ + "version": "1.3", + "accountId": "123456789012", + "configurationItemCaptureTime": "2023-05-01T18:00:07.672Z", + "configurationItemStatus": "ResourceDiscovered", + "configurationStateId": "1682964007672", + "configurationItemMD5Hash": "", + "arn": "arn:aws:route53:::hostedzone/Z017455410COBZEF0ABCD", + "resourceType": "AWS::Route53::HostedZone", + "resourceId": "Z017455410COBZEF0ABCD", + "resourceName": "testdomain.lab.", + "awsRegion": "us-east-1", + "availabilityZone": "Regional", + "tags": {}, + "relatedEvents": [], + "relationships": [], + "configuration": { + "Id": "Z017455410COBZEF0ABCD", + "HostedZoneConfig": { + "Comment": "This is a test domain" + }, + "Name": "testdomain.lab.", + "NameServers": [ + "ns-1965.awsdns-53.co.uk", + "ns-944.awsdns-54.net", + "ns-1144.awsdns-15.org", + "ns-430.awsdns-53.com" + ], + "VPCs": [], + "HostedZoneTags": [ + { + "Key": "cost_center", + "Value": "payroll" + } + ] + }, + "supplementaryConfiguration": {}, + "resourceTransitionStatus": "None" + } \ No newline at end of file diff --git a/rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json b/rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json new file mode 100644 index 00000000..23b6d759 --- /dev/null +++ b/rdk/template/example_ci/AWS_S3_AccountPublicAccessBlock.json @@ -0,0 +1,23 @@ +{ + "version": "1.3", + "accountId": "123456789012", + "configurationItemCaptureTime": "2022-05-20T15:53:57.732Z", + "configurationItemStatus": "ResourceDiscovered", + "configurationStateId": "1653062037732", + "configurationItemMD5Hash": "", + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": "123456789012", + "awsRegion": "us-east-1", + "availabilityZone": "Not Applicable", + "tags": {}, + "relatedEvents": [], + "relationships": [], + "configuration": { + "blockPublicAcls": true, + "ignorePublicAcls": true, + "blockPublicPolicy": true, + "restrictPublicBuckets": true + }, + 
"supplementaryConfiguration": {}, + "resourceTransitionStatus": "None" + } \ No newline at end of file diff --git a/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json b/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json index 2846342f..109534b5 100644 --- a/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json +++ b/rdk/template/example_ci/AWS_SSM_ManagedInstanceInventory.json @@ -48,1772 +48,1519 @@ "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2894856", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2896496", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2919355", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2919442", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2920189", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB2934520", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2938066", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2938772", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Tuesday, March 18, 2014 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB2949621", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2954879", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2955164", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB2959626", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2962409", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB2962806", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, May 17, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2965500", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": 
"KB2967917", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2969339", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2971203", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2973351", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Friday, June 20, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2973448", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, July 10, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2975061", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2975719", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2976627", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2977765", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2978041", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2978126", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2984006", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB2987107", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2989647", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2989930", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2993100", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2995004", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2995388", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB2996799", - 
"InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, October 15, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB2998174", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB2999226", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3000483", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3000850", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3003057", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, February 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3004361", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3004365", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3004545", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3008923", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3012199", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3012702", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013172", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013769", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013791", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, December 9, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3013816", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, November 18, 2014 12:00:00 AM", "Description": "Update", "HotFixID": "KB3014442", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3019978", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, February 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3020338", - "InstalledBy": "NT 
AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3021910", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, February 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3021952", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3022345", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3022777", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3023222", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, January 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3023266", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3024751", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3024755", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3029603", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3030377", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3030947", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3032359", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3032663", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3033446", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3035126", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 10, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3036612", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3037579", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3037924", - "InstalledBy": "NT AUTHORITY -SYSTEM" + 
"InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3038002", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3038314", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3038701", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3041857", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3042085", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3042553", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3044374", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3044673", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045634", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3045685", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045717", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045719", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3045755", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3045992", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, April 15, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3045999", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3046017", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3046737", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3048043", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3049563", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054169", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054203", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054256", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3054464", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3055323", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3055343", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, May 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3055642", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3059316", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3059317", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3060681", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3060793", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3061512", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3063843", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3064209", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3068708", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3071756", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3074228", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, 
September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3074548", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3075220", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3075853", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3077715", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, August 13, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3078071", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3078405", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3078676", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3080042", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3080149", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3082089", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3083325", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3083711", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3083992", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3084135", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3084905", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3086255", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3087038", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3087041", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3087137", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3091297", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3092601", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3092627", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3093983", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3094486", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3095701", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, October 22, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3096433", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3097997", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3098779", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3098785", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3099834", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3100473", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3100773", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3100919", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3100956", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3102429", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3102467", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Thursday, November 12, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3102812", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3103616", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3103696", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3103709", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3104002", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3109094", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3109103", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3109976", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3110329", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3112148", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, December 9, 2015 12:00:00 AM", "Description": "Update", "HotFixID": "KB3112336", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3115224", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3118401", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3121255", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3121261", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3121461", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3121918", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3122654", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3122660", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3123242", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3123245", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3123479", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, January 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3124275", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3125424", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3126033", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3126434", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3126587", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3126593", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3127226", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3127231", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3128650", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3133043", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3133681", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3133690", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3133924", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3134179", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { 
"InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3134242", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3134814", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3134815", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3135449", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3135456", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3135998", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3137061", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3137725", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3137728", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3138602", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3138615", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3139164", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3139398", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3139914", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, March 8, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3139929", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3140219", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3140234", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, February 11, 2016 12:00:00 AM", "Description": "Hotfix", "HotFixID": "KB3141092", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", 
"Description": "Security Update", "HotFixID": "KB3142036", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3145384", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3145432", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3146604", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3146723", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3146751", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3146963", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3147071", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3148198", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3148851", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, April 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3149090", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3149157", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3153704", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3154070", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3155784", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3156016", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3156017", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3156019", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Thursday, May 12, 2016 12:00:00 AM", "Description": "Security 
Update", "HotFixID": "KB3156059", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3156418", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3159398", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3160005", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3161561", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3161949", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3161958", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3162343", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3162835", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164024", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164033", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164035", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, June 15, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3164294", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3167679", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3169704", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3170377", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3170455", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3172614", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 
2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3172727", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3172729", - "InstalledBy": "WIN-61TNU83K1V4 -Administrator" + "InstalledBy": "WIN-61TNU83K1V4\\Administrator" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3173424", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3174644", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3175024", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3175443", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3175887", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3177108", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3177186", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3177723", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3177725", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Saturday, August 13, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3178034", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3178539", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3179574", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, October 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3179948", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, October 11, 2016 12:00:00 AM", "Description": "Update", "HotFixID": "KB3182203", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3184122", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3184943", - "InstalledBy": "NT AUTHORITY 
-SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3185319", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Wednesday, September 14, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3185911", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" }, { "InstalledTime": "Tuesday, October 11, 2016 12:00:00 AM", "Description": "Security Update", "HotFixID": "KB3185331", - "InstalledBy": "NT AUTHORITY -SYSTEM" + "InstalledBy": "NT AUTHORITY\\SYSTEM" } ] }, @@ -1922,13 +1669,13 @@ SYSTEM" "tags": {}, "configurationItemVersion": "1.2", "configurationItemCaptureTime": "2016-10-26T19:11:44.151Z", - "configurationStateId": 1477509104151, - "awsAccountId": "123456789012", + "configurationStateId": "1477509104151", + "awsAccountId": "138920347130", "configurationItemStatus": "ResourceDiscovered", "resourceType": "AWS::SSM::ManagedInstanceInventory", "resourceId": "i-07f6b44c44bab9e8e", "resourceName": "", - "ARN": "arn:aws:ssm:us-east-1:123456789012:managed-instance-inventory/i-07f6b44c44bab9e8e", + "ARN": "arn:aws:ssm:us-east-1:138920347130:managed-instance-inventory/i-07f6b44c44bab9e8e", "awsRegion": "us-east-1", "availabilityZone": null, "configurationStateMd5Hash": "f5edb28b271ef50dddb2c5b08a535f14", diff --git a/rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs b/rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs deleted file mode 100644 index d5fefc36..00000000 --- a/rdk/template/runtime/dotnetcore1.0/CustomConfigHandler.cs +++ /dev/null @@ -1,189 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.IO; -using System.Text; - -using System.Threading.Tasks; - -using Amazon.Lambda.Serialization.Json; -using Amazon.Lambda.Core; - -using Amazon.Lambda.ConfigEvents; -using Amazon.CloudWatchEvents; -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Runtime; -using Amazon.Lambda.Model; -using Newtonsoft.Json.Linq; - -// Assembly attribute to enable the Lambda function's JSON input to be converted into a .NET class. -[assembly: LambdaSerializer(typeof(Amazon.Lambda.Serialization.Json.JsonSerializer))] - -namespace Rdk -{ - public class CustomConfigHandler - { - public const String AWS_REGION_PROPERTY = "AWS_DEFAULT_REGION"; - public const String MESSAGE_TYPE_PROPERTY = "messageType"; - public const String HOST_ID = "hostId"; - public const String PLACEMENT = "placement"; - public const String CONFIGURATION = "configuration"; - public const String IMAGE_ID = "imageId"; - public const String STATUS_PATH = "configurationItemStatus"; - public const String TENANCY = "tenancy"; - public const String RESOURCE_DELETED = "ResourceDeleted"; - public const String RESOURCE_DELETED_NOT_RECORDED = "ResourceDeletedNotRecorded"; - public const String CAPTURE_TIME_PATH = "configurationItemCaptureTime"; - public const String CONFIGURATION_ITEM = "configurationItem"; - public const String RESOURCE_ID = "resourceId"; - public const String RESOURCE_NOT_RECORDED = "ResourceNotRecorded"; - public const String RESOURCE_TYPE = "resourceType"; - - - IAmazonConfigService ConfigService { get; set; } - - /// - /// Default constructor. This constructor is used by Lambda to construct the instance. 
When invoked in a Lambda environment - /// the AWS credentials will come from the IAM role associated with the function and the AWS region will be set to the - /// region the Lambda function is executed in. - /// - public CustomConfigHandler() - { - Console.WriteLine("inside constructor..."); - } - - /// - /// Constructs an instance with a preconfigured S3 client. This can be used for testing the outside of the Lambda environment. - /// - /// - public CustomConfigHandler(IAmazonConfigService configService) - { - this.ConfigService = configService; - } - - /// - /// This method is called for every Lambda invocation. This method takes in an Config event object and can be used - /// to respond to Config notifications. - /// - /// - /// - /// Nothing - public async Task FunctionHandler(ConfigEvent evnt, ILambdaContext context) - { - Console.WriteLine("inside function handler..."); - Amazon.RegionEndpoint region = Amazon.RegionEndpoint.GetBySystemName(System.Environment.GetEnvironmentVariable(AWS_REGION_PROPERTY)); - AmazonConfigServiceClient configServiceClient = new AmazonConfigServiceClient(region); - await DoHandle(evnt, context, configServiceClient); - } - - private async Task DoHandle(ConfigEvent configEvent, ILambdaContext context, AmazonConfigServiceClient configServiceClient) - { - JObject ruleParamsObj; - JObject configItem; - - if (configEvent.RuleParameters != null){ - ruleParamsObj = JObject.Parse(configEvent.RuleParameters.ToString()); - } else { - ruleParamsObj = new JObject(); - } - - JObject invokingEventObj = JObject.Parse(configEvent.InvokingEvent.ToString()); - if(invokingEventObj["configurationItem"] != null){ - configItem = JObject.Parse(invokingEventObj[CONFIGURATION_ITEM].ToString()); - } else { - configItem = new JObject(); - } - - FailForIncompatibleEventTypes(invokingEventObj); - ComplianceType myCompliance = ComplianceType.NOT_APPLICABLE; - - if (!IsEventNotApplicable(configItem, configEvent.EventLeftScope)) - { - myCompliance = RuleCode.EvaluateCompliance(invokingEventObj, ruleParamsObj, context); - } - - // Associates the evaluation result with the AWS account published in the event. 
- Evaluation evaluation = new Evaluation { - ComplianceResourceId = GetResourceId(configItem), - ComplianceResourceType = GetResourceType(configItem), - OrderingTimestamp = GetCiCapturedTime(configItem), - ComplianceType = myCompliance - }; - - await DoPutEvaluations(configServiceClient, configEvent, evaluation); - } - - private String GetResourceType(JObject configItem) - { - return (String) configItem[RESOURCE_TYPE]; - } - - private void FailForIncompatibleEventTypes(JObject invokingEventObj) - { - String messageType = (String) invokingEventObj[MESSAGE_TYPE_PROPERTY]; - if (!IsCompatibleMessageType(messageType)) - { - throw new Exception(String.Format("Events with the message type '{0}' are not evaluated for this Config rule.", messageType)); - } - } - - private String GetResourceId(JObject configItem) - { - return (String) configItem[RESOURCE_ID]; - } - - private DateTime GetCiCapturedTime(JObject configItem) - { - return DateTime.Parse((String) configItem[CAPTURE_TIME_PATH]); - } - - private bool IsCompatibleMessageType(String messageType) - { - return String.Equals(MessageType.ConfigurationItemChangeNotification.ToString(), messageType); - } - - private bool IsEventNotApplicable(JObject configItem, bool eventLeftScope) - { - String status = configItem[STATUS_PATH].ToString(); - return (IsStatusNotApplicable(status) || eventLeftScope); - } - - private bool IsStatusNotApplicable(String status) - { - return String.Equals(RESOURCE_DELETED, status) - || String.Equals(RESOURCE_DELETED_NOT_RECORDED, status) - || String.Equals(RESOURCE_NOT_RECORDED, status); - } - - // Sends the evaluation results to AWS Config. - private async Task DoPutEvaluations(AmazonConfigServiceClient configClient, ConfigEvent configEvent, Evaluation evaluation) - { - Console.WriteLine("inside DoPutEvaluations..."); - PutEvaluationsRequest req = new PutEvaluationsRequest(); - req.Evaluations.Add(evaluation); - req.ResultToken = configEvent.ResultToken; - - - Task taskResp = configClient.PutEvaluationsAsync(req); - PutEvaluationsResponse response = await taskResp; - - // Ends the function execution if any evaluation results are not successfully reported. - if (response.FailedEvaluations.Count > 0) { - throw new Exception(String.Format( - "The following evaluations were not successfully reported to AWS Config: %s", - response.FailedEvaluations)); - } - } - - private DateTime GetDate(String dateString) - { - return DateTime.Parse(dateString, null, System.Globalization.DateTimeStyles.RoundtripKind); - } - - static void Main(string[] args) - { - Console.WriteLine("Hello World!"); - } - } -} diff --git a/rdk/template/runtime/dotnetcore1.0/RuleCode.cs b/rdk/template/runtime/dotnetcore1.0/RuleCode.cs deleted file mode 100755 index 4d376498..00000000 --- a/rdk/template/runtime/dotnetcore1.0/RuleCode.cs +++ /dev/null @@ -1,27 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; - -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Lambda.Core; -using Amazon.Lambda.Model; -using Amazon.Lambda.ConfigEvents; -using Newtonsoft.Json.Linq; - -namespace Rdk -{ - class RuleCode - { - public static ComplianceType EvaluateCompliance(JObject invokingEvent, JObject ruleParameters, ILambdaContext context) - { - context.Logger.LogLine("Beginning Custom Config Rule Evaluation"); - - /* - YOUR CODE GOES HERE! 
- */ - - return ComplianceType.NON_COMPLIANT; - } - } -} diff --git a/rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json b/rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json deleted file mode 100755 index 7cf6db07..00000000 --- a/rdk/template/runtime/dotnetcore1.0/aws-lambda-tools-defaults.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "Information": [ - "This file provides default values for the deployment wizard inside Visual Studio and the AWS Lambda commands added to the .NET Core CLI.", - "To learn more about the Lambda commands with the .NET Core CLI execute the following command at the command line in the project root directory.", - - "dotnet lambda help", - - "All the command line options for the Lambda command can be specified in this file." - ], - - "profile":"default", - "region" : "us-west-2", - "configuration": "Release", - "framework": "netcoreapp1.0", - "function-runtime": "dotnetcore1.0", - "function-memory-size": 256, - "function-timeout": 30, - "function-handler": "csharp7.0::Rdk.CustomConfigHandler::FunctionHandler" -} diff --git a/rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj b/rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj deleted file mode 100644 index ecbeb144..00000000 --- a/rdk/template/runtime/dotnetcore1.0/csharp7.0.csproj +++ /dev/null @@ -1,28 +0,0 @@ - - - - netcoreapp1.0 - - - Exe - - - - - - - - - - - - - - - - - - - - - diff --git a/rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs b/rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs deleted file mode 100644 index d5fefc36..00000000 --- a/rdk/template/runtime/dotnetcore2.0/CustomConfigHandler.cs +++ /dev/null @@ -1,189 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.IO; -using System.Text; - -using System.Threading.Tasks; - -using Amazon.Lambda.Serialization.Json; -using Amazon.Lambda.Core; - -using Amazon.Lambda.ConfigEvents; -using Amazon.CloudWatchEvents; -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Runtime; -using Amazon.Lambda.Model; -using Newtonsoft.Json.Linq; - -// Assembly attribute to enable the Lambda function's JSON input to be converted into a .NET class. -[assembly: LambdaSerializer(typeof(Amazon.Lambda.Serialization.Json.JsonSerializer))] - -namespace Rdk -{ - public class CustomConfigHandler - { - public const String AWS_REGION_PROPERTY = "AWS_DEFAULT_REGION"; - public const String MESSAGE_TYPE_PROPERTY = "messageType"; - public const String HOST_ID = "hostId"; - public const String PLACEMENT = "placement"; - public const String CONFIGURATION = "configuration"; - public const String IMAGE_ID = "imageId"; - public const String STATUS_PATH = "configurationItemStatus"; - public const String TENANCY = "tenancy"; - public const String RESOURCE_DELETED = "ResourceDeleted"; - public const String RESOURCE_DELETED_NOT_RECORDED = "ResourceDeletedNotRecorded"; - public const String CAPTURE_TIME_PATH = "configurationItemCaptureTime"; - public const String CONFIGURATION_ITEM = "configurationItem"; - public const String RESOURCE_ID = "resourceId"; - public const String RESOURCE_NOT_RECORDED = "ResourceNotRecorded"; - public const String RESOURCE_TYPE = "resourceType"; - - - IAmazonConfigService ConfigService { get; set; } - - /// - /// Default constructor. This constructor is used by Lambda to construct the instance. 
When invoked in a Lambda environment - /// the AWS credentials will come from the IAM role associated with the function and the AWS region will be set to the - /// region the Lambda function is executed in. - /// - public CustomConfigHandler() - { - Console.WriteLine("inside constructor..."); - } - - /// - /// Constructs an instance with a preconfigured S3 client. This can be used for testing the outside of the Lambda environment. - /// - /// - public CustomConfigHandler(IAmazonConfigService configService) - { - this.ConfigService = configService; - } - - /// - /// This method is called for every Lambda invocation. This method takes in an Config event object and can be used - /// to respond to Config notifications. - /// - /// - /// - /// Nothing - public async Task FunctionHandler(ConfigEvent evnt, ILambdaContext context) - { - Console.WriteLine("inside function handler..."); - Amazon.RegionEndpoint region = Amazon.RegionEndpoint.GetBySystemName(System.Environment.GetEnvironmentVariable(AWS_REGION_PROPERTY)); - AmazonConfigServiceClient configServiceClient = new AmazonConfigServiceClient(region); - await DoHandle(evnt, context, configServiceClient); - } - - private async Task DoHandle(ConfigEvent configEvent, ILambdaContext context, AmazonConfigServiceClient configServiceClient) - { - JObject ruleParamsObj; - JObject configItem; - - if (configEvent.RuleParameters != null){ - ruleParamsObj = JObject.Parse(configEvent.RuleParameters.ToString()); - } else { - ruleParamsObj = new JObject(); - } - - JObject invokingEventObj = JObject.Parse(configEvent.InvokingEvent.ToString()); - if(invokingEventObj["configurationItem"] != null){ - configItem = JObject.Parse(invokingEventObj[CONFIGURATION_ITEM].ToString()); - } else { - configItem = new JObject(); - } - - FailForIncompatibleEventTypes(invokingEventObj); - ComplianceType myCompliance = ComplianceType.NOT_APPLICABLE; - - if (!IsEventNotApplicable(configItem, configEvent.EventLeftScope)) - { - myCompliance = RuleCode.EvaluateCompliance(invokingEventObj, ruleParamsObj, context); - } - - // Associates the evaluation result with the AWS account published in the event. 
- Evaluation evaluation = new Evaluation { - ComplianceResourceId = GetResourceId(configItem), - ComplianceResourceType = GetResourceType(configItem), - OrderingTimestamp = GetCiCapturedTime(configItem), - ComplianceType = myCompliance - }; - - await DoPutEvaluations(configServiceClient, configEvent, evaluation); - } - - private String GetResourceType(JObject configItem) - { - return (String) configItem[RESOURCE_TYPE]; - } - - private void FailForIncompatibleEventTypes(JObject invokingEventObj) - { - String messageType = (String) invokingEventObj[MESSAGE_TYPE_PROPERTY]; - if (!IsCompatibleMessageType(messageType)) - { - throw new Exception(String.Format("Events with the message type '{0}' are not evaluated for this Config rule.", messageType)); - } - } - - private String GetResourceId(JObject configItem) - { - return (String) configItem[RESOURCE_ID]; - } - - private DateTime GetCiCapturedTime(JObject configItem) - { - return DateTime.Parse((String) configItem[CAPTURE_TIME_PATH]); - } - - private bool IsCompatibleMessageType(String messageType) - { - return String.Equals(MessageType.ConfigurationItemChangeNotification.ToString(), messageType); - } - - private bool IsEventNotApplicable(JObject configItem, bool eventLeftScope) - { - String status = configItem[STATUS_PATH].ToString(); - return (IsStatusNotApplicable(status) || eventLeftScope); - } - - private bool IsStatusNotApplicable(String status) - { - return String.Equals(RESOURCE_DELETED, status) - || String.Equals(RESOURCE_DELETED_NOT_RECORDED, status) - || String.Equals(RESOURCE_NOT_RECORDED, status); - } - - // Sends the evaluation results to AWS Config. - private async Task DoPutEvaluations(AmazonConfigServiceClient configClient, ConfigEvent configEvent, Evaluation evaluation) - { - Console.WriteLine("inside DoPutEvaluations..."); - PutEvaluationsRequest req = new PutEvaluationsRequest(); - req.Evaluations.Add(evaluation); - req.ResultToken = configEvent.ResultToken; - - - Task taskResp = configClient.PutEvaluationsAsync(req); - PutEvaluationsResponse response = await taskResp; - - // Ends the function execution if any evaluation results are not successfully reported. - if (response.FailedEvaluations.Count > 0) { - throw new Exception(String.Format( - "The following evaluations were not successfully reported to AWS Config: %s", - response.FailedEvaluations)); - } - } - - private DateTime GetDate(String dateString) - { - return DateTime.Parse(dateString, null, System.Globalization.DateTimeStyles.RoundtripKind); - } - - static void Main(string[] args) - { - Console.WriteLine("Hello World!"); - } - } -} diff --git a/rdk/template/runtime/dotnetcore2.0/RuleCode.cs b/rdk/template/runtime/dotnetcore2.0/RuleCode.cs deleted file mode 100644 index 4d376498..00000000 --- a/rdk/template/runtime/dotnetcore2.0/RuleCode.cs +++ /dev/null @@ -1,27 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; - -using Amazon.ConfigService.Model; -using Amazon.ConfigService; -using Amazon.Lambda.Core; -using Amazon.Lambda.Model; -using Amazon.Lambda.ConfigEvents; -using Newtonsoft.Json.Linq; - -namespace Rdk -{ - class RuleCode - { - public static ComplianceType EvaluateCompliance(JObject invokingEvent, JObject ruleParameters, ILambdaContext context) - { - context.Logger.LogLine("Beginning Custom Config Rule Evaluation"); - - /* - YOUR CODE GOES HERE! 
- */ - - return ComplianceType.NON_COMPLIANT; - } - } -} diff --git a/rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json b/rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json deleted file mode 100644 index 7cf6db07..00000000 --- a/rdk/template/runtime/dotnetcore2.0/aws-lambda-tools-defaults.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "Information": [ - "This file provides default values for the deployment wizard inside Visual Studio and the AWS Lambda commands added to the .NET Core CLI.", - "To learn more about the Lambda commands with the .NET Core CLI execute the following command at the command line in the project root directory.", - - "dotnet lambda help", - - "All the command line options for the Lambda command can be specified in this file." - ], - - "profile":"default", - "region" : "us-west-2", - "configuration": "Release", - "framework": "netcoreapp1.0", - "function-runtime": "dotnetcore1.0", - "function-memory-size": 256, - "function-timeout": 30, - "function-handler": "csharp7.0::Rdk.CustomConfigHandler::FunctionHandler" -} diff --git a/rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj b/rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj deleted file mode 100644 index 6cb53d99..00000000 --- a/rdk/template/runtime/dotnetcore2.0/csharp7.0.csproj +++ /dev/null @@ -1,28 +0,0 @@ - - - - netcoreapp2.0 - - - Exe - - - - - - - - - - - - - - - - - - - - - diff --git a/rdk/template/runtime/nodejs4.3/rule_code.js b/rdk/template/runtime/nodejs4.3/rule_code.js deleted file mode 100644 index 5f9c9bd8..00000000 --- a/rdk/template/runtime/nodejs4.3/rule_code.js +++ /dev/null @@ -1,183 +0,0 @@ -'use strict'; - -const aws = require('aws-sdk'); - -const config = new aws.ConfigService(); - -function evaluateCompliance(configurationItem, ruleParameters, callback) { - - /* - ############################### - # Add your custom logic here. # - ############################### - */ - - callback('NOT_APPLICABLE'); -} - -//Boilerplate Code - You should not need to change anything below this comment. -function rule_handler(event, context, callback) { - //console.info(event); - const invokingEvent = JSON.parse(event.invokingEvent); - const configItem = invokingEvent.configurationItem; - const ruleParameters = JSON.parse(event.ruleParameters); - evaluateCompliance(configItem, ruleParameters, function(results){ - console.log(results); - callback(null, results); - }); -} - -// Helper function used to validate input -function checkDefined(reference, referenceName) { - if (!reference) { - throw new Error(`Error: ${referenceName} is not defined`); - } - return reference; -} - -// Check whether the message is OversizedConfigurationItemChangeNotification or not -function isOverSizedChangeNotification(messageType) { - checkDefined(messageType, 'messageType'); - return messageType === 'OversizedConfigurationItemChangeNotification'; -} - -// Check whether the message is a ScheduledNotification or not -function isScheduledNotification(messageType) { - checkDefined(messageType, 'messageType'); - return messageType === 'ScheduledNotification' -} - -// Get configurationItem using getResourceConfigHistory API. 
-function getConfiguration(resourceType, resourceId, configurationCaptureTime, callback) { - config.getResourceConfigHistory({ resourceType, resourceId, laterTime: new Date(configurationCaptureTime), limit: 1 }, (err, data) => { - if (err) { - callback(err, null); - } - const configurationItem = data.configurationItems[0]; - callback(null, configurationItem); - }); -} - -// Convert from the API model to the original invocation model -/*eslint no-param-reassign: ["error", { "props": false }]*/ -function convertApiConfiguration(apiConfiguration) { - apiConfiguration.awsAccountId = apiConfiguration.accountId; - apiConfiguration.ARN = apiConfiguration.arn; - apiConfiguration.configurationStateMd5Hash = apiConfiguration.configurationItemMD5Hash; - apiConfiguration.configurationItemVersion = apiConfiguration.version; - apiConfiguration.configuration = JSON.parse(apiConfiguration.configuration); - if ({}.hasOwnProperty.call(apiConfiguration, 'relationships')) { - for (let i = 0; i < apiConfiguration.relationships.length; i++) { - apiConfiguration.relationships[i].name = apiConfiguration.relationships[i].relationshipName; - } - } - return apiConfiguration; -} - -// Based on the type of message get the configuration item either from configurationItem in the invoking event or using the getResourceConfigHistiry API in getConfiguration function. -function getConfigurationItem(invokingEvent, callback) { - checkDefined(invokingEvent, 'invokingEvent'); - if (isOverSizedChangeNotification(invokingEvent.messageType)) { - const configurationItemSummary = checkDefined(invokingEvent.configurationItemSummary, 'configurationItemSummary'); - getConfiguration(configurationItemSummary.resourceType, configurationItemSummary.resourceId, configurationItemSummary.configurationItemCaptureTime, (err, apiConfigurationItem) => { - if (err) { - callback(err); - } - const configurationItem = convertApiConfiguration(apiConfigurationItem); - callback(null, configurationItem); - }); - } else if (isScheduledNotification(invokingEvent.messageType)) { - callback(null, null) - } else { - checkDefined(invokingEvent.configurationItem, 'configurationItem'); - callback(null, invokingEvent.configurationItem); - } -} - -// Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. 
-function isApplicable(configurationItem, event) { - //checkDefined(configurationItem, 'configurationItem'); - checkDefined(event, 'event'); - //const status = configurationItem.configurationItemStatus; - const eventLeftScope = event.eventLeftScope; - //return (status === 'OK' || status === 'ResourceDiscovered') && eventLeftScope === false; - return (eventLeftScope === false); -} - -// This is the handler that's invoked by Lambda -// Most of this code is boilerplate; use as is -exports.lambda_handler = function(event, context, callback) { - checkDefined(event, 'event'); - const invokingEvent = JSON.parse(event.invokingEvent); - const ruleParameters = JSON.parse(event.ruleParameters); - getConfigurationItem(invokingEvent, (err, configurationItem) => { - if (err) { - callback(err); - } - //let compliance = 'NOT_APPLICABLE'; - if (isApplicable(configurationItem, event)) { - invokingEvent.configurationItem = configurationItem; - event.invokingEvent = JSON.stringify(invokingEvent); - rule_handler(event, context, (err, compliance_results) => { - if (err) { - callback(err); - } - //compliance = computedCompliance; - var putEvaluationsRequest = {}; - - // Put together the request that reports the evaluation status - if (typeof compliance_results === 'string' || compliance_results instanceof String){ - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: compliance_results, - OrderingTimestamp: configurationItem.configurationItemCaptureTime - } - ]; - } else if (compliance_results instanceof Array) { - putEvaluationsRequest.Evaluations = []; - - var fields = ['ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp']; - - for (var i = 0; i < compliance_results.length; i++) { - var missing_fields = false; - for (var j = 0; j < fields.length; j++) { - if (!compliance_results[i].hasOwnProperty(fields[j])) { - console.info("Missing " + fields[j] + " from custom evaluation."); - missing_fields = true; - } - } - - if (!missing_fields){ - putEvaluationsRequest.Evaluations.push(compliance_results[i]); - } - } - } else { - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: 'INSUFFICIENT_DATA', - OrderingTimestamp: configurationItem.configurationItemCaptureTime - } - ]; - } - - putEvaluationsRequest.ResultToken = event.resultToken; - - // Invoke the Config API to report the result of the evaluation - config.putEvaluations(putEvaluationsRequest, (error, data) => { - if (error) { - callback(error, null); - } else if (data.FailedEvaluations.length > 0) { - // Ends the function execution if any evaluation results are not successfully reported. - callback(JSON.stringify(data), null); - } else { - callback(null, data); - } - }); - }); - } - }); -}; diff --git a/rdk/template/runtime/nodejs6.10/rule_code.js b/rdk/template/runtime/nodejs6.10/rule_code.js deleted file mode 100644 index 9d45e5a2..00000000 --- a/rdk/template/runtime/nodejs6.10/rule_code.js +++ /dev/null @@ -1,215 +0,0 @@ -"use strict"; - -const aws = require("aws-sdk"); - -const config = new aws.ConfigService(); - -function evaluateCompliance(configurationItem, ruleParameters, callback) { - /* - ############################### - # Add your custom logic here. 
# - ############################### - */ - - callback("NOT_APPLICABLE"); -} - -//Boilerplate Code - You should not need to change anything below this comment. -function rule_handler(event, context, callback) { - //console.info(event); - const invokingEvent = JSON.parse(event.invokingEvent); - const configItem = invokingEvent.configurationItem; - const ruleParameters = JSON.parse(event.ruleParameters); - evaluateCompliance(configItem, ruleParameters, function (results) { - console.log(results); - callback(null, results); - }); -} - -// Helper function used to validate input -function checkDefined(reference, referenceName) { - if (!reference) { - throw new Error(`Error: ${referenceName} is not defined`); - } - return reference; -} - -// Check whether the message is OversizedConfigurationItemChangeNotification or not -function isOverSizedChangeNotification(messageType) { - checkDefined(messageType, "messageType"); - return messageType === "OversizedConfigurationItemChangeNotification"; -} - -// Check whether the message is a ScheduledNotification or not -function isScheduledNotification(messageType) { - checkDefined(messageType, "messageType"); - return messageType === "ScheduledNotification"; -} - -// Get configurationItem using getResourceConfigHistory API. -function getConfiguration( - resourceType, - resourceId, - configurationCaptureTime, - callback -) { - config.getResourceConfigHistory( - { - resourceType, - resourceId, - laterTime: new Date(configurationCaptureTime), - limit: 1, - }, - (err, data) => { - if (err) { - callback(err, null); - } - const configurationItem = data.configurationItems[0]; - callback(null, configurationItem); - } - ); -} - -// Convert from the API model to the original invocation model -/*eslint no-param-reassign: ["error", { "props": false }]*/ -function convertApiConfiguration(apiConfiguration) { - apiConfiguration.awsAccountId = apiConfiguration.accountId; - apiConfiguration.ARN = apiConfiguration.arn; - apiConfiguration.configurationStateMd5Hash = - apiConfiguration.configurationItemMD5Hash; - apiConfiguration.configurationItemVersion = apiConfiguration.version; - apiConfiguration.configuration = JSON.parse(apiConfiguration.configuration); - if ({}.hasOwnProperty.call(apiConfiguration, "relationships")) { - for (let i = 0; i < apiConfiguration.relationships.length; i++) { - apiConfiguration.relationships[i].name = - apiConfiguration.relationships[i].relationshipName; - } - } - return apiConfiguration; -} - -// Based on the type of message get the configuration item either from configurationItem in the invoking event or using the getResourceConfigHistiry API in getConfiguration function. 
-function getConfigurationItem(invokingEvent, callback) { - checkDefined(invokingEvent, "invokingEvent"); - if (isOverSizedChangeNotification(invokingEvent.messageType)) { - const configurationItemSummary = checkDefined( - invokingEvent.configurationItemSummary, - "configurationItemSummary" - ); - getConfiguration( - configurationItemSummary.resourceType, - configurationItemSummary.resourceId, - configurationItemSummary.configurationItemCaptureTime, - (err, apiConfigurationItem) => { - if (err) { - callback(err); - } - const configurationItem = convertApiConfiguration(apiConfigurationItem); - callback(null, configurationItem); - } - ); - } else if (isScheduledNotification(invokingEvent.messageType)) { - callback(null, null); - } else { - checkDefined(invokingEvent.configurationItem, "configurationItem"); - callback(null, invokingEvent.configurationItem); - } -} - -// Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. -function isApplicable(configurationItem, event) { - //checkDefined(configurationItem, 'configurationItem'); - checkDefined(event, "event"); - //const status = configurationItem.configurationItemStatus; - const eventLeftScope = event.eventLeftScope; - //return (status === 'OK' || status === 'ResourceDiscovered') && eventLeftScope === false; - return eventLeftScope === false; -} - -// This is the handler that's invoked by Lambda -// Most of this code is boilerplate; use as is -exports.lambda_handler = function (event, context, callback) { - checkDefined(event, "event"); - const invokingEvent = JSON.parse(event.invokingEvent); - const ruleParameters = JSON.parse(event.ruleParameters); - getConfigurationItem(invokingEvent, (err, configurationItem) => { - if (err) { - callback(err); - } - //let compliance = 'NOT_APPLICABLE'; - if (isApplicable(configurationItem, event)) { - invokingEvent.configurationItem = configurationItem; - event.invokingEvent = JSON.stringify(invokingEvent); - rule_handler(event, context, (err, compliance_results) => { - if (err) { - callback(err); - } - //compliance = computedCompliance; - var putEvaluationsRequest = {}; - - // Put together the request that reports the evaluation status - if ( - typeof compliance_results === "string" || - compliance_results instanceof String - ) { - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: compliance_results, - OrderingTimestamp: configurationItem.configurationItemCaptureTime, - }, - ]; - } else if (compliance_results instanceof Array) { - putEvaluationsRequest.Evaluations = []; - - var fields = [ - "ComplianceResourceType", - "ComplianceResourceId", - "ComplianceType", - "OrderingTimestamp", - ]; - - for (var i = 0; i < compliance_results.length; i++) { - var missing_fields = false; - for (var j = 0; j < fields.length; j++) { - if (!compliance_results[i].hasOwnProperty(fields[j])) { - console.info( - "Missing " + fields[j] + " from custom evaluation." 
- ); - missing_fields = true; - } - } - - if (!missing_fields) { - putEvaluationsRequest.Evaluations.push(compliance_results[i]); - } - } - } else { - putEvaluationsRequest.Evaluations = [ - { - ComplianceResourceType: configurationItem.resourceType, - ComplianceResourceId: configurationItem.resourceId, - ComplianceType: "INSUFFICIENT_DATA", - OrderingTimestamp: configurationItem.configurationItemCaptureTime, - }, - ]; - } - - putEvaluationsRequest.ResultToken = event.resultToken; - - // Invoke the Config API to report the result of the evaluation - config.putEvaluations(putEvaluationsRequest, (error, data) => { - if (error) { - callback(error, null); - } else if (data.FailedEvaluations.length > 0) { - // Ends the function execution if any evaluation results are not successfully reported. - callback(JSON.stringify(data), null); - } else { - callback(null, data); - } - }); - }); - } - }); -}; diff --git a/rdk/template/runtime/python3.10-lib/rule_code.py b/rdk/template/runtime/python3.10-lib/rule_code.py new file mode 100644 index 00000000..90fdebe2 --- /dev/null +++ b/rdk/template/runtime/python3.10-lib/rule_code.py @@ -0,0 +1,25 @@ +from rdklib import Evaluator, Evaluation, ConfigRule, ComplianceType +<%ApplicableResources1%> +class <%RuleName%>(ConfigRule): + def evaluate_change(self, event, client_factory, configuration_item, valid_rule_parameters): + ############################### + # Add your custom logic here. # + ############################### + + return [Evaluation(ComplianceType.NOT_APPLICABLE)] + + #def evaluate_periodic(self, event, client_factory, valid_rule_parameters): + # pass + + def evaluate_parameters(self, rule_parameters): + valid_rule_parameters = rule_parameters + return valid_rule_parameters + + +################################ +# DO NOT MODIFY ANYTHING BELOW # +################################ +def lambda_handler(event, context): + my_rule = <%RuleName%>() + evaluator = Evaluator(my_rule<%ApplicableResources2%>) + return evaluator.handle(event, context) diff --git a/rdk/template/runtime/python3.10-lib/rule_test.py b/rdk/template/runtime/python3.10-lib/rule_test.py new file mode 100644 index 00000000..db0cf30c --- /dev/null +++ b/rdk/template/runtime/python3.10-lib/rule_test.py @@ -0,0 +1,157 @@ +import datetime +import json +import logging +import unittest +from unittest.mock import patch, MagicMock +from botocore.exceptions import ClientError +from rdklib import Evaluation, ComplianceType +import rdklibtest + +############## +# Parameters # +############## + +# Define the default resource to report to Config Rules +# TODO - Replace with your resource type +RESOURCE_TYPE = "AWS::IAM::Role" + +############# +# Main Code # +############# + +MODULE = __import__("check_security_hub_aggregator") +RULE = MODULE.check_security_hub_aggregator() + +CLIENT_FACTORY = MagicMock() + +# example for mocking IAM API calls +IAM_CLIENT_MOCK = MagicMock() +# STS client for getting account ID +STS_CLIENT_MOCK = MagicMock() + + +def mock_get_client(client_name, *args, **kwargs): + if client_name == "iam": + return IAM_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK + raise Exception("Attempting to create an unknown client") + + +@patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) +class ComplianceTest(unittest.TestCase): + rule_parameters = { + "SomeParameterKey": "SomeParameterValue", + "SomeParameterKey2": "SomeParameterValue2", + } + + role_sample_configuration_abridged = {"arn": "some-arn", "roleName": "testrole"} + + 
invoking_event_iam_role_sample = { + "configurationItem": { + "relatedEvents": [], + "relationships": [], + "configuration": role_sample_configuration_abridged, + "tags": {}, + "configurationItemCaptureTime": "2018-07-02T03:37:52.418Z", + "awsAccountId": "123456789012", + "configurationItemStatus": "ResourceDiscovered", + "resourceType": "AWS::IAM::Role", + "resourceId": "some-resource-id", + "resourceName": "some-resource-name", + "ARN": "some-arn", + }, + "notificationCreationTime": "2018-07-02T23:05:34.445Z", + "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", + } + + list_roles_response = { + "Roles": [ + { + "Path": "/", + "RoleName": "testrole", + "RoleId": "some-role-id", + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), + "Description": "this is a test role", + "MaxSessionDuration": 123, + "Tags": [ + {"Key": "one_tag", "Value": "its_value"}, + ], + "RoleLastUsed": { + "LastUsedDate": datetime.datetime(2015, 1, 1), + "Region": "us-east-1", + }, + }, + ] + } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} + + def setUp(self): + STS_CLIENT_MOCK.reset_mock() + + def test_sample(self): + self.assertTrue(True) + + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/rdk/template/runtime/python3.10/rule_code.py b/rdk/template/runtime/python3.10/rule_code.py new file mode 100644 index 00000000..682297b0 --- /dev/null +++ b/rdk/template/runtime/python3.10/rule_code.py @@ -0,0 +1,437 @@ +import json +import sys +import datetime +import boto3 +import botocore + +try: + import liblogging +except ImportError: + pass + +############## +# Parameters # +############## + +# Define the default resource to report to Config Rules +DEFAULT_RESOURCE_TYPE = "AWS::::Account" + +# Set to True to get the lambda to assume the Role attached on the Config Service (useful for cross-account). +ASSUME_ROLE_MODE = False + +# Other parameters (no change needed) +CONFIG_ROLE_TIMEOUT_SECONDS = 900 + +############# +# Main Code # +############# + + +def evaluate_compliance(event, configuration_item, valid_rule_parameters): + """Form the evaluation(s) to be return to Config Rules + + Return either: + None -- when no result needs to be displayed + a string -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE + a dictionary -- the evaluation dictionary, usually built by build_evaluation_from_config_item() + a list of dictionary -- a list of evaluation dictionary , usually built by build_evaluation() + + Keyword arguments: + event -- the event variable given in the lambda handler + configuration_item -- the configurationItem dictionary in the invokingEvent + valid_rule_parameters -- the output of the evaluate_parameters() representing validated parameters of the Config Rule + + Advanced Notes: + 1 -- if a resource is deleted and generate a configuration change with ResourceDeleted status, the Boilerplate code will put a NOT_APPLICABLE on this resource automatically. + 2 -- if a None or a list of dictionary is returned, the old evaluation(s) which are not returned in the new evaluation list are returned as NOT_APPLICABLE by the Boilerplate code + 3 -- if None or an empty string, list or dict is returned, the Boilerplate code will put a "shadow" evaluation to feedback that the evaluation took place properly + """ + + ############################### + # Add your custom logic here. # + ############################### + + return "NOT_APPLICABLE" + + +def evaluate_parameters(rule_parameters): + """Evaluate the rule parameters dictionary validity. Raise a ValueError for invalid parameters. + + Return: + anything suitable for the evaluate_compliance() + + Keyword arguments: + rule_parameters -- the Key/Value dictionary of the Config Rules parameters + """ + valid_rule_parameters = rule_parameters + return valid_rule_parameters + + +#################### +# Helper Functions # +#################### + +# Build an error to be displayed in the logs when the parameter is invalid. +def build_parameters_value_error_response(ex): + """Return an error dictionary when the evaluate_parameters() raises a ValueError. + + Keyword arguments: + ex -- Exception text + """ + return build_error_response( + internal_error_message="Parameter value is invalid", + internal_error_details="An ValueError was raised during the validation of the Parameter value", + customer_error_code="InvalidParameterValueException", + customer_error_message=str(ex), + ) + + +# This gets the client after assuming the Config service role +# either in the same AWS account or cross-account. +def get_client(service, event, region=None): + """Return the service boto client. It should be used instead of directly calling the client. 
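+    When ASSUME_ROLE_MODE is True, the client is built with temporary credentials obtained by assuming the Config execution role; otherwise a plain boto3 client for the given region is returned.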
+ + Keyword arguments: + service -- the service name used for calling the boto.client() + event -- the event variable given in the lambda handler + region -- the region where the client is called (default: None) + """ + if not ASSUME_ROLE_MODE: + return boto3.client(service, region) + credentials = get_assume_role_credentials(get_execution_role_arn(event), region) + return boto3.client( + service, + aws_access_key_id=credentials["AccessKeyId"], + aws_secret_access_key=credentials["SecretAccessKey"], + aws_session_token=credentials["SessionToken"], + region_name=region, + ) + + +# This generates an evaluation for config +def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None): + """Form an evaluation as a dictionary. Usually suited to report on scheduled rules. + + Keyword arguments: + resource_id -- the unique id of the resource to report + compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE + event -- the event variable given in the lambda handler + resource_type -- the CloudFormation resource type (or AWS::::Account) to report on the rule (default DEFAULT_RESOURCE_TYPE) + annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer. + """ + eval_cc = {} + if annotation: + eval_cc["Annotation"] = build_annotation(annotation) + eval_cc["ComplianceResourceType"] = resource_type + eval_cc["ComplianceResourceId"] = resource_id + eval_cc["ComplianceType"] = compliance_type + eval_cc["OrderingTimestamp"] = str(json.loads(event["invokingEvent"])["notificationCreationTime"]) + return eval_cc + + +def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None): + """Form an evaluation as a dictionary. Usually suited to report on configuration change rules. + + Keyword arguments: + configuration_item -- the configurationItem dictionary in the invokingEvent + compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE + annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer. 
+ """ + eval_ci = {} + if annotation: + eval_ci["Annotation"] = build_annotation(annotation) + eval_ci["ComplianceResourceType"] = configuration_item["resourceType"] + eval_ci["ComplianceResourceId"] = configuration_item["resourceId"] + eval_ci["ComplianceType"] = compliance_type + eval_ci["OrderingTimestamp"] = configuration_item["configurationItemCaptureTime"] + return eval_ci + + +#################### +# Boilerplate Code # +#################### + +# Get execution role for Lambda function +def get_execution_role_arn(event): + role_arn = None + if "ruleParameters" in event: + rule_params = json.loads(event["ruleParameters"]) + role_name = rule_params.get("ExecutionRoleName") + if role_name: + execution_role_prefix = event["executionRoleArn"].split("/")[0] + role_arn = "{}/{}".format(execution_role_prefix, role_name) + + if not role_arn: + role_arn = event["executionRoleArn"] + + return role_arn + + +# Build annotation within Service constraints +def build_annotation(annotation_string): + if len(annotation_string) > 256: + return annotation_string[:244] + " [truncated]" + return annotation_string + + +# Helper function used to validate input +def check_defined(reference, reference_name): + if not reference: + raise Exception("Error: ", reference_name, "is not defined") + return reference + + +# Check whether the message is OversizedConfigurationItemChangeNotification or not +def is_oversized_changed_notification(message_type): + check_defined(message_type, "messageType") + return message_type == "OversizedConfigurationItemChangeNotification" + + +# Check whether the message is a ScheduledNotification or not. +def is_scheduled_notification(message_type): + check_defined(message_type, "messageType") + return message_type == "ScheduledNotification" + + +# Get configurationItem using getResourceConfigHistory API +# in case of OversizedConfigurationItemChangeNotification +def get_configuration(resource_type, resource_id, configuration_capture_time): + result = AWS_CONFIG_CLIENT.get_resource_config_history( + resourceType=resource_type, resourceId=resource_id, laterTime=configuration_capture_time, limit=1 + ) + configuration_item = result["configurationItems"][0] + return convert_api_configuration(configuration_item) + + +# Convert from the API model to the original invocation model +def convert_api_configuration(configuration_item): + for k, v in configuration_item.items(): + if isinstance(v, datetime.datetime): + configuration_item[k] = str(v) + configuration_item["awsAccountId"] = configuration_item["accountId"] + configuration_item["ARN"] = configuration_item["arn"] + configuration_item["configurationStateMd5Hash"] = configuration_item["configurationItemMD5Hash"] + configuration_item["configurationItemVersion"] = configuration_item["version"] + configuration_item["configuration"] = json.loads(configuration_item["configuration"]) + if "relationships" in configuration_item: + for i in range(len(configuration_item["relationships"])): + configuration_item["relationships"][i]["name"] = configuration_item["relationships"][i]["relationshipName"] + return configuration_item + + +# Based on the type of message get the configuration item +# either from configurationItem in the invoking event +# or using the getResourceConfigHistory API in getConfiguration function. 
+def get_configuration_item(invoking_event): + check_defined(invoking_event, "invokingEvent") + if is_oversized_changed_notification(invoking_event["messageType"]): + configuration_item_summary = check_defined( + invoking_event["configurationItemSummary"], "configurationItemSummary" + ) + return get_configuration( + configuration_item_summary["resourceType"], + configuration_item_summary["resourceId"], + configuration_item_summary["configurationItemCaptureTime"], + ) + if is_scheduled_notification(invoking_event["messageType"]): + return None + return check_defined(invoking_event["configurationItem"], "configurationItem") + + +# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. +def is_applicable(configuration_item, event): + try: + check_defined(configuration_item, "configurationItem") + check_defined(event, "event") + except: + return True + status = configuration_item["configurationItemStatus"] + event_left_scope = event["eventLeftScope"] + if status == "ResourceDeleted": + print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.") + + return status in ("OK", "ResourceDiscovered") and not event_left_scope + + +def get_assume_role_credentials(role_arn, region=None): + sts_client = boto3.client("sts", region) + try: + assume_role_response = sts_client.assume_role( + RoleArn=role_arn, RoleSessionName="configLambdaExecution", DurationSeconds=CONFIG_ROLE_TIMEOUT_SECONDS + ) + if "liblogging" in sys.modules: + liblogging.logSession(role_arn, assume_role_response) + return assume_role_response["Credentials"] + except botocore.exceptions.ClientError as ex: + # Scrub error message for any internal account info leaks + print(str(ex)) + if "AccessDenied" in ex.response["Error"]["Code"]: + ex.response["Error"]["Message"] = "AWS Config does not have permission to assume the IAM role." + else: + ex.response["Error"]["Message"] = "InternalError" + ex.response["Error"]["Code"] = "InternalError" + raise ex + + +# This removes older evaluation (usually useful for periodic rule not reporting on AWS::::Account). 
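+# Any resource reported in a previous run but absent from latest_evaluations is re-reported as NOT_APPLICABLE.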
+def clean_up_old_evaluations(latest_evaluations, event): + + cleaned_evaluations = [] + + old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule( + ConfigRuleName=event["configRuleName"], ComplianceTypes=["COMPLIANT", "NON_COMPLIANT"], Limit=100 + ) + + old_eval_list = [] + + while True: + for old_result in old_eval["EvaluationResults"]: + old_eval_list.append(old_result) + if "NextToken" in old_eval: + next_token = old_eval["NextToken"] + old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule( + ConfigRuleName=event["configRuleName"], + ComplianceTypes=["COMPLIANT", "NON_COMPLIANT"], + Limit=100, + NextToken=next_token, + ) + else: + break + + for old_eval in old_eval_list: + old_resource_id = old_eval["EvaluationResultIdentifier"]["EvaluationResultQualifier"]["ResourceId"] + newer_founded = False + for latest_eval in latest_evaluations: + if old_resource_id == latest_eval["ComplianceResourceId"]: + newer_founded = True + if not newer_founded: + cleaned_evaluations.append(build_evaluation(old_resource_id, "NOT_APPLICABLE", event)) + + return cleaned_evaluations + latest_evaluations + + +def lambda_handler(event, context): + if "liblogging" in sys.modules: + liblogging.logEvent(event) + + global AWS_CONFIG_CLIENT + + # print(event) + check_defined(event, "event") + invoking_event = json.loads(event["invokingEvent"]) + rule_parameters = {} + if "ruleParameters" in event: + rule_parameters = json.loads(event["ruleParameters"]) + + try: + valid_rule_parameters = evaluate_parameters(rule_parameters) + except ValueError as ex: + return build_parameters_value_error_response(ex) + + try: + AWS_CONFIG_CLIENT = get_client("config", event) + if invoking_event["messageType"] in [ + "ConfigurationItemChangeNotification", + "ScheduledNotification", + "OversizedConfigurationItemChangeNotification", + ]: + configuration_item = get_configuration_item(invoking_event) + if is_applicable(configuration_item, event): + compliance_result = evaluate_compliance(event, configuration_item, valid_rule_parameters) + else: + compliance_result = "NOT_APPLICABLE" + else: + return build_internal_error_response("Unexpected message type", str(invoking_event)) + except botocore.exceptions.ClientError as ex: + if is_internal_error(ex): + return build_internal_error_response("Unexpected error while completing API request", str(ex)) + return build_error_response( + "Customer error while making API request", + str(ex), + ex.response["Error"]["Code"], + ex.response["Error"]["Message"], + ) + except ValueError as ex: + return build_internal_error_response(str(ex), str(ex)) + + evaluations = [] + latest_evaluations = [] + + if not compliance_result: + latest_evaluations.append( + build_evaluation(event["accountId"], "NOT_APPLICABLE", event, resource_type="AWS::::Account") + ) + evaluations = clean_up_old_evaluations(latest_evaluations, event) + elif isinstance(compliance_result, str): + if configuration_item: + evaluations.append(build_evaluation_from_config_item(configuration_item, compliance_result)) + else: + evaluations.append( + build_evaluation(event["accountId"], compliance_result, event, resource_type=DEFAULT_RESOURCE_TYPE) + ) + elif isinstance(compliance_result, list): + for evaluation in compliance_result: + missing_fields = False + for field in ("ComplianceResourceType", "ComplianceResourceId", "ComplianceType", "OrderingTimestamp"): + if field not in evaluation: + print("Missing " + field + " from custom evaluation.") + missing_fields = True + + if not missing_fields: + 
latest_evaluations.append(evaluation) + evaluations = clean_up_old_evaluations(latest_evaluations, event) + elif isinstance(compliance_result, dict): + missing_fields = False + for field in ("ComplianceResourceType", "ComplianceResourceId", "ComplianceType", "OrderingTimestamp"): + if field not in compliance_result: + print("Missing " + field + " from custom evaluation.") + missing_fields = True + if not missing_fields: + evaluations.append(compliance_result) + else: + evaluations.append(build_evaluation_from_config_item(configuration_item, "NOT_APPLICABLE")) + + # Put together the request that reports the evaluation status + result_token = event["resultToken"] + test_mode = False + if result_token == "TESTMODE": + # Used solely for RDK test to skip actual put_evaluation API call + test_mode = True + + # Invoke the Config API to report the result of the evaluation + evaluation_copy = [] + evaluation_copy = evaluations[:] + while evaluation_copy: + AWS_CONFIG_CLIENT.put_evaluations( + Evaluations=evaluation_copy[:100], ResultToken=result_token, TestMode=test_mode + ) + del evaluation_copy[:100] + + # Used solely for RDK test to be able to test Lambda function + return evaluations + + +def is_internal_error(exception): + return ( + (not isinstance(exception, botocore.exceptions.ClientError)) + or exception.response["Error"]["Code"].startswith("5") + or "InternalError" in exception.response["Error"]["Code"] + or "ServiceError" in exception.response["Error"]["Code"] + ) + + +def build_internal_error_response(internal_error_message, internal_error_details=None): + return build_error_response(internal_error_message, internal_error_details, "InternalError", "InternalError") + + +def build_error_response( + internal_error_message, internal_error_details=None, customer_error_code=None, customer_error_message=None +): + error_response = { + "internalErrorMessage": internal_error_message, + "internalErrorDetails": internal_error_details, + "customerErrorMessage": customer_error_message, + "customerErrorCode": customer_error_code, + } + print(error_response) + return error_response diff --git a/rdk/template/runtime/python3.10/rule_test.py b/rdk/template/runtime/python3.10/rule_test.py new file mode 100644 index 00000000..e0f8c974 --- /dev/null +++ b/rdk/template/runtime/python3.10/rule_test.py @@ -0,0 +1,177 @@ +import sys +import unittest +from unittest.mock import MagicMock +import botocore + +############## +# Parameters # +############## + +# Define the default resource to report to Config Rules +DEFAULT_RESOURCE_TYPE = "AWS::::Account" + +############# +# Main Code # +############# + +CONFIG_CLIENT_MOCK = MagicMock() +STS_CLIENT_MOCK = MagicMock() + + +class Boto3Mock: + @staticmethod + def client(client_name, *args, **kwargs): + if client_name == "config": + return CONFIG_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK + raise Exception("Attempting to create an unknown client") + + +sys.modules["boto3"] = Boto3Mock() + +RULE = __import__("<%RuleName%>") + + +class ComplianceTest(unittest.TestCase): + + rule_parameters = '{"SomeParameterKey":"SomeParameterValue","SomeParameterKey2":"SomeParameterValue2"}' + + invoking_event_iam_role_sample = 
'{"configurationItem":{"relatedEvents":[],"relationships":[],"configuration":{},"tags":{},"configurationItemCaptureTime":"2018-07-02T03:37:52.418Z","awsAccountId":"123456789012","configurationItemStatus":"ResourceDiscovered","resourceType":"AWS::IAM::Role","resourceId":"some-resource-id","resourceName":"some-resource-name","ARN":"some-arn"},"notificationCreationTime":"2018-07-02T23:05:34.445Z","messageType":"ConfigurationItemChangeNotification"}' + + def setUp(self): + pass + + def test_sample(self): + self.assertTrue(True) + + # def test_sample_2(self): + # RULE.ASSUME_ROLE_MODE = False + # response = RULE.lambda_handler(build_lambda_configurationchange_event(self.invoking_event_iam_role_sample, self.rule_parameters), {}) + # resp_expected = [] + # resp_expected.append(build_expected_response('NOT_APPLICABLE', 'some-resource-id', 'AWS::IAM::Role')) + # assert_successful_evaluation(self, response, resp_expected) + + +#################### +# Helper Functions # +#################### + + +def build_lambda_configurationchange_event(invoking_event, rule_parameters=None): + event_to_return = { + "configRuleName": "myrule", + "executionRoleArn": "roleArn", + "eventLeftScope": False, + "invokingEvent": invoking_event, + "accountId": "123456789012", + "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", + "resultToken": "token", + } + if rule_parameters: + event_to_return["ruleParameters"] = rule_parameters + return event_to_return + + +def build_lambda_scheduled_event(rule_parameters=None): + invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}' + event_to_return = { + "configRuleName": "myrule", + "executionRoleArn": "roleArn", + "eventLeftScope": False, + "invokingEvent": invoking_event, + "accountId": "123456789012", + "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", + "resultToken": "token", + } + if rule_parameters: + event_to_return["ruleParameters"] = rule_parameters + return event_to_return + + +def build_expected_response( + compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None +): + if not annotation: + return { + "ComplianceType": compliance_type, + "ComplianceResourceId": compliance_resource_id, + "ComplianceResourceType": compliance_resource_type, + } + return { + "ComplianceType": compliance_type, + "ComplianceResourceId": compliance_resource_id, + "ComplianceResourceType": compliance_resource_type, + "Annotation": annotation, + } + + +def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1): + if isinstance(response, dict): + test_class.assertEquals(resp_expected["ComplianceResourceType"], response["ComplianceResourceType"]) + test_class.assertEquals(resp_expected["ComplianceResourceId"], response["ComplianceResourceId"]) + test_class.assertEquals(resp_expected["ComplianceType"], response["ComplianceType"]) + test_class.assertTrue(response["OrderingTimestamp"]) + if "Annotation" in resp_expected or "Annotation" in response: + test_class.assertEquals(resp_expected["Annotation"], response["Annotation"]) + elif isinstance(response, list): + test_class.assertEquals(evaluations_count, len(response)) + for i, response_expected in enumerate(resp_expected): + test_class.assertEquals(response_expected["ComplianceResourceType"], response[i]["ComplianceResourceType"]) + test_class.assertEquals(response_expected["ComplianceResourceId"], response[i]["ComplianceResourceId"]) + 
test_class.assertEquals(response_expected["ComplianceType"], response[i]["ComplianceType"]) + test_class.assertTrue(response[i]["OrderingTimestamp"]) + if "Annotation" in response_expected or "Annotation" in response[i]: + test_class.assertEquals(response_expected["Annotation"], response[i]["Annotation"]) + + +def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None): + if customer_error_code: + test_class.assertEqual(customer_error_code, response["customerErrorCode"]) + if customer_error_message: + test_class.assertEqual(customer_error_message, response["customerErrorMessage"]) + test_class.assertTrue(response["customerErrorCode"]) + test_class.assertTrue(response["customerErrorMessage"]) + if "internalErrorMessage" in response: + test_class.assertTrue(response["internalErrorMessage"]) + if "internalErrorDetails" in response: + test_class.assertTrue(response["internalErrorDetails"]) + + +def sts_mock(): + assume_role_response = { + "Credentials": {"AccessKeyId": "string", "SecretAccessKey": "string", "SessionToken": "string"} + } + STS_CLIENT_MOCK.reset_mock(return_value=True) + STS_CLIENT_MOCK.assume_role = MagicMock(return_value=assume_role_response) + + +################## +# Common Testing # +################## + + +class TestStsErrors(unittest.TestCase): + def test_sts_unknown_error(self): + RULE.ASSUME_ROLE_MODE = True + RULE.evaluate_parameters = MagicMock(return_value=True) + STS_CLIENT_MOCK.assume_role = MagicMock( + side_effect=botocore.exceptions.ClientError( + {"Error": {"Code": "unknown-code", "Message": "unknown-message"}}, "operation" + ) + ) + response = RULE.lambda_handler(build_lambda_configurationchange_event("{}"), {}) + assert_customer_error_response(self, response, "InternalError", "InternalError") + + def test_sts_access_denied(self): + RULE.ASSUME_ROLE_MODE = True + RULE.evaluate_parameters = MagicMock(return_value=True) + STS_CLIENT_MOCK.assume_role = MagicMock( + side_effect=botocore.exceptions.ClientError( + {"Error": {"Code": "AccessDenied", "Message": "access-denied"}}, "operation" + ) + ) + response = RULE.lambda_handler(build_lambda_configurationchange_event("{}"), {}) + assert_customer_error_response( + self, response, "AccessDenied", "AWS Config does not have permission to assume the IAM role." + ) diff --git a/rdk/template/runtime/python3.6-managed/managed-rule-code/rule_code.py b/rdk/template/runtime/python3.6-managed/managed-rule-code/rule_code.py deleted file mode 100644 index 85b67ae1..00000000 --- a/rdk/template/runtime/python3.6-managed/managed-rule-code/rule_code.py +++ /dev/null @@ -1,168 +0,0 @@ -import json -import datetime - - -def evaluate_compliance(configuration_item, rule_parameters): - - ############################### - # Add your custom logic here. # - ############################### - - return "NOT_APPLICABLE" - - -# USE AS IS -# Helper function to check if rule parameters exist -def parameters_exist(parameters): - return len(parameters) != 0 - - -# Helper function used to validate input -def check_defined(reference, referenceName): - if not reference: - raise Exception("Error: ", referenceName, "is not defined") - return reference - - -# Check whether the message is OversizedConfigurationItemChangeNotification or not -def is_oversized_changed_notification(messageType): - check_defined(messageType, "messageType") - return messageType == "OversizedConfigurationItemChangeNotification" - - -# Check whether the message is a ScheduledNotification or not. 
-def is_scheduled_notification(messageType): - check_defined(messageType, "messageType") - return messageType == "ScheduledNotification" - - -# Get configurationItem using getResourceConfigHistory API. in case of OversizedConfigurationItemChangeNotification -def get_configuration(resourceType, resourceId, configurationCaptureTime): - result = aws_config.get_resource_config_history( - resourceType=resourceType, resourceId=resourceId, laterTime=configurationCaptureTime, limit=1 - ) - configurationItem = result["configurationItems"][0] - return convert_api_configuration(configurationItem) - - -# Convert from the API model to the original invocation model -def convert_api_configuration(configurationItem): - for k, v in configurationItem.items(): - if isinstance(v, datetime.datetime): - configurationItem[k] = str(v) - configurationItem["awsAccountId"] = configurationItem["accountId"] - configurationItem["ARN"] = configurationItem["arn"] - configurationItem["configurationStateMd5Hash"] = configurationItem["configurationItemMD5Hash"] - configurationItem["configurationItemVersion"] = configurationItem["version"] - configurationItem["configuration"] = json.loads(configurationItem["configuration"]) - if "relationships" in configurationItem: - for i in range(len(configurationItem["relationships"])): - configurationItem["relationships"][i]["name"] = configurationItem["relationships"][i]["relationshipName"] - return configurationItem - - -# Based on the type of message get the configuration item either from configurationItem in the invoking event or using the getResourceConfigHistiry API in getConfiguration function. -def get_configuration_item(invokingEvent): - check_defined(invokingEvent, "invokingEvent") - if is_oversized_changed_notification(invokingEvent["messageType"]): - configurationItemSummary = check_defined(invokingEvent["configurationItemSummary"], "configurationItemSummary") - return get_configuration( - configurationItemSummary["resourceType"], - configurationItemSummary["resourceId"], - configurationItemSummary["configurationItemCaptureTime"], - ) - elif is_scheduled_notification(invokingEvent["messageType"]): - return None - else: - return check_defined(invokingEvent["configurationItem"], "configurationItem") - - -# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. 
-def is_applicable(configurationItem, event): - check_defined(configurationItem, "configurationItem") - check_defined(event, "event") - status = configurationItem["configurationItemStatus"] - eventLeftScope = event["eventLeftScope"] - return (status == "OK" or status == "ResourceDiscovered") and eventLeftScope == False - - -# This decorates the lambda_handler in rule_code with the actual PutEvaluation call -def rule_handler(lambda_handler): - def handler_wrapper(event, context): - evaluations = [] - - print(event) - check_defined(event, "event") - invokingEvent = json.loads(event["invokingEvent"]) - ruleParameters = {} - if "ruleParameters" in event: - ruleParameters = json.loads(event["ruleParameters"]) - - configurationItem = get_configuration_item(invokingEvent) - - if configurationItem is None: - compliance = lambda_handler(event, context) - - if isinstance(compliance, list): - for evaluation in compliance: - missing_fields = False - for field in ( - "ComplianceResourceType", - "ComplianceResourceId", - "ComplianceType", - "OrderingTimestamp", - ): - if field not in evaluation: - print("Missing " + field + " from custom evaluation.") - missing_fields = True - - if not missing_fields: - evaluations.append(evaluation) - else: - return "NOT_APPLICABLE" - else: - invokingEvent["configurationItem"] = configurationItem - event["invokingEvent"] = json.dumps(invokingEvent) - compliance = "NOT_APPLICABLE" - - if is_applicable(configurationItem, event): - # Invoke the compliance checking function. - compliance = lambda_handler(event, context) - - evaluations = [ - { - "ComplianceResourceType": configurationItem["resourceType"], - "ComplianceResourceId": configurationItem["resourceId"], - "ComplianceType": compliance, - "OrderingTimestamp": configurationItem["configurationItemCaptureTime"], - } - ] - - # Put together the request that reports the evaluation status - - resultToken = event["resultToken"] - testMode = False - if resultToken == "TESTMODE": - # Used solely for RDK test to skip actual put_evaluation API call - testMode = True - # Invoke the Config API to report the result of the evaluation - aws_config.put_evaluations(Evaluations=evaluations, ResultToken=resultToken, TestMode=testMode) - # Used solely for RDK test to be able to test Lambda function - return evaluations - - return handler_wrapper - - -# This is the handler that's invoked by Lambda -@rule_handler -def lambda_handler(event, context): - invoking_event = json.loads(event["invokingEvent"]) - - configuration_item = None - if "configurationItem" in invoking_event: - configuration_item = invoking_event["configurationItem"] - - rule_parameters = {} - if "ruleParameters" in event: - rule_parameters = json.loads(event["ruleParameters"]) - return evaluate_compliance(configuration_item, rule_parameters) diff --git a/rdk/template/runtime/python3.6-managed/managed-rule-code/rule_util.py b/rdk/template/runtime/python3.6-managed/managed-rule-code/rule_util.py deleted file mode 100644 index 90298768..00000000 --- a/rdk/template/runtime/python3.6-managed/managed-rule-code/rule_util.py +++ /dev/null @@ -1,147 +0,0 @@ -import json -import boto3 -import datetime - -# USE ENTIRE FILE AS IS - -aws_config = boto3.client("config") - -# Helper function to check if rule parameters exist -def parameters_exist(parameters): - return len(parameters) != 0 - - -# Helper function used to validate input -def check_defined(reference, referenceName): - if not reference: - raise Exception("Error: ", referenceName, "is not defined") - return reference - - -# 
Check whether the message is OversizedConfigurationItemChangeNotification or not -def is_oversized_changed_notification(messageType): - check_defined(messageType, "messageType") - return messageType == "OversizedConfigurationItemChangeNotification" - - -# Check whether the message is a ScheduledNotification or not. -def is_scheduled_notification(messageType): - check_defined(messageType, "messageType") - return messageType == "ScheduledNotification" - - -# Get configurationItem using getResourceConfigHistory API. in case of OversizedConfigurationItemChangeNotification -def get_configuration(resourceType, resourceId, configurationCaptureTime): - result = aws_config.get_resource_config_history( - resourceType=resourceType, resourceId=resourceId, laterTime=configurationCaptureTime, limit=1 - ) - configurationItem = result["configurationItems"][0] - return convert_api_configuration(configurationItem) - - -# Convert from the API model to the original invocation model -def convert_api_configuration(configurationItem): - for k, v in configurationItem.items(): - if isinstance(v, datetime.datetime): - configurationItem[k] = str(v) - configurationItem["awsAccountId"] = configurationItem["accountId"] - configurationItem["ARN"] = configurationItem["arn"] - configurationItem["configurationStateMd5Hash"] = configurationItem["configurationItemMD5Hash"] - configurationItem["configurationItemVersion"] = configurationItem["version"] - configurationItem["configuration"] = json.loads(configurationItem["configuration"]) - if "relationships" in configurationItem: - for i in range(len(configurationItem["relationships"])): - configurationItem["relationships"][i]["name"] = configurationItem["relationships"][i]["relationshipName"] - return configurationItem - - -# Based on the type of message get the configuration item either from configurationItem in the invoking event or using the getResourceConfigHistiry API in getConfiguration function. -def get_configuration_item(invokingEvent): - check_defined(invokingEvent, "invokingEvent") - if is_oversized_changed_notification(invokingEvent["messageType"]): - configurationItemSummary = check_defined(invokingEvent["configurationItemSummary"], "configurationItemSummary") - return get_configuration( - configurationItemSummary["resourceType"], - configurationItemSummary["resourceId"], - configurationItemSummary["configurationItemCaptureTime"], - ) - elif is_scheduled_notification(invokingEvent["messageType"]): - return None - else: - return check_defined(invokingEvent["configurationItem"], "configurationItem") - - -# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. 
-def is_applicable(configurationItem, event): - check_defined(configurationItem, "configurationItem") - check_defined(event, "event") - status = configurationItem["configurationItemStatus"] - eventLeftScope = event["eventLeftScope"] - return (status == "OK" or status == "ResourceDiscovered") and eventLeftScope == False - - -# This decorates the lambda_handler in rule_code with the actual PutEvaluation call -def rule_handler(lambda_handler): - def handler_wrapper(event, context): - evaluations = [] - - print(event) - check_defined(event, "event") - invokingEvent = json.loads(event["invokingEvent"]) - ruleParameters = {} - if "ruleParameters" in event: - ruleParameters = json.loads(event["ruleParameters"]) - - configurationItem = get_configuration_item(invokingEvent) - - if configurationItem is None: - compliance = lambda_handler(event, context) - - if isinstance(compliance, list): - for evaluation in compliance: - missing_fields = False - for field in ( - "ComplianceResourceType", - "ComplianceResourceId", - "ComplianceType", - "OrderingTimestamp", - ): - if field not in evaluation: - print("Missing " + field + " from custom evaluation.") - missing_fields = True - - if not missing_fields: - evaluations.append(evaluation) - else: - return "NOT_APPLICABLE" - else: - invokingEvent["configurationItem"] = configurationItem - event["invokingEvent"] = json.dumps(invokingEvent) - compliance = "NOT_APPLICABLE" - - if is_applicable(configurationItem, event): - # Invoke the compliance checking function. - compliance = lambda_handler(event, context) - - evaluations = [ - { - "ComplianceResourceType": configurationItem["resourceType"], - "ComplianceResourceId": configurationItem["resourceId"], - "ComplianceType": compliance, - "OrderingTimestamp": configurationItem["configurationItemCaptureTime"], - } - ] - - # Put together the request that reports the evaluation status - - resultToken = event["resultToken"] - testMode = False - if resultToken == "TESTMODE": - # Used solely for RDK test to skip actual put_evaluation API call - testMode = True - # Invoke the Config API to report the result of the evaluation - aws_config.put_evaluations(Evaluations=evaluations, ResultToken=resultToken, TestMode=testMode) - # Used solely for RDK test to be able to test Lambda function - return compliance - - return handler_wrapper diff --git a/rdk/template/runtime/python3.6-managed/managed-rule-definitions/SIMPLE_DELEGATE_PREPROD_TESTING.json b/rdk/template/runtime/python3.6-managed/managed-rule-definitions/SIMPLE_DELEGATE_PREPROD_TESTING.json deleted file mode 100644 index 6e89ff79..00000000 --- a/rdk/template/runtime/python3.6-managed/managed-rule-definitions/SIMPLE_DELEGATE_PREPROD_TESTING.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "identifier": "<%Identifier%>", - "lambdaFunctionName": "<%LambdaFunctionName%>", - "defaultName": "<%defaultName%>", - "createDate": "<%CreateDate%>", - "sourceDetails": [ - { - "eventSource": "AWS_CONFIG", - "messageType": "ConfigurationItemChangeNotification" - }, - { - "eventSource": "AWS_CONFIG", - "messageType": "OversizedConfigurationItemChangeNotification" - } - ], - "compulsoryInputParameterDetails": { - "delegateFunctionArn": { - "type": "String" - } - }, - "optionalInputParameterDetails": { - "delegateFunctionParameters": { - "type": "String" - } - }, - "isReadyToUse": "false", - "labels": [] -} diff --git a/rdk/template/runtime/python3.6-managed/tst/managed-rule-code/sample_test.py b/rdk/template/runtime/python3.6-managed/tst/managed-rule-code/sample_test.py deleted file mode 100644 
index 8e7cc23a..00000000 --- a/rdk/template/runtime/python3.6-managed/tst/managed-rule-code/sample_test.py +++ /dev/null @@ -1,9 +0,0 @@ -import unittest - - -class SampleTest(unittest.TestCase): - def setUp(self): - pass - - def test_sample(self): - self.assertTrue(True) diff --git a/rdk/template/runtime/python3.7-lib/rule_test.py b/rdk/template/runtime/python3.7-lib/rule_test.py index 8315d034..db0cf30c 100644 --- a/rdk/template/runtime/python3.7-lib/rule_test.py +++ b/rdk/template/runtime/python3.7-lib/rule_test.py @@ -1,7 +1,9 @@ +import datetime +import json +import logging import unittest from unittest.mock import patch, MagicMock from botocore.exceptions import ClientError -import rdklib from rdklib import Evaluation, ComplianceType import rdklibtest @@ -10,40 +12,146 @@ ############## # Define the default resource to report to Config Rules -RESOURCE_TYPE = 'AWS::::Account' +# TODO - Replace with your resource type +RESOURCE_TYPE = "AWS::IAM::Role" ############# # Main Code # ############# -MODULE = __import__('<%RuleName%>') -RULE = MODULE.<%RuleName%>() +MODULE = __import__("check_security_hub_aggregator") +RULE = MODULE.check_security_hub_aggregator() CLIENT_FACTORY = MagicMock() -#example for mocking S3 API calls -S3_CLIENT_MOCK = MagicMock() +# example for mocking IAM API calls +IAM_CLIENT_MOCK = MagicMock() +# STS client for getting account ID +STS_CLIENT_MOCK = MagicMock() + def mock_get_client(client_name, *args, **kwargs): - if client_name == 's3': - return S3_CLIENT_MOCK + if client_name == "iam": + return IAM_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK raise Exception("Attempting to create an unknown client") -@patch.object(CLIENT_FACTORY, 'build_client', MagicMock(side_effect=mock_get_client)) + +@patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) class ComplianceTest(unittest.TestCase): + rule_parameters = { + "SomeParameterKey": "SomeParameterValue", + "SomeParameterKey2": "SomeParameterValue2", + } + + role_sample_configuration_abridged = {"arn": "some-arn", "roleName": "testrole"} - rule_parameters = '{"SomeParameterKey":"SomeParameterValue","SomeParameterKey2":"SomeParameterValue2"}' + invoking_event_iam_role_sample = { + "configurationItem": { + "relatedEvents": [], + "relationships": [], + "configuration": role_sample_configuration_abridged, + "tags": {}, + "configurationItemCaptureTime": "2018-07-02T03:37:52.418Z", + "awsAccountId": "123456789012", + "configurationItemStatus": "ResourceDiscovered", + "resourceType": "AWS::IAM::Role", + "resourceId": "some-resource-id", + "resourceName": "some-resource-name", + "ARN": "some-arn", + }, + "notificationCreationTime": "2018-07-02T23:05:34.445Z", + "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", + } - invoking_event_iam_role_sample = '{"configurationItem":{"relatedEvents":[],"relationships":[],"configuration":{},"tags":{},"configurationItemCaptureTime":"2018-07-02T03:37:52.418Z","awsAccountId":"123456789012","configurationItemStatus":"ResourceDiscovered","resourceType":"AWS::IAM::Role","resourceId":"some-resource-id","resourceName":"some-resource-name","ARN":"some-arn"},"notificationCreationTime":"2018-07-02T23:05:34.445Z","messageType":"ConfigurationItemChangeNotification"}' + list_roles_response = { + "Roles": [ + { + "Path": "/", + "RoleName": "testrole", + "RoleId": "some-role-id", + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), + "Description": "this is a test role", 
+ "MaxSessionDuration": 123, + "Tags": [ + {"Key": "one_tag", "Value": "its_value"}, + ], + "RoleLastUsed": { + "LastUsedDate": datetime.datetime(2015, 1, 1), + "Region": "us-east-1", + }, + }, + ] + } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} def setUp(self): - pass + STS_CLIENT_MOCK.reset_mock() def test_sample(self): self.assertTrue(True) - #def test_sample_2(self): - # response = MODULE.lambda_handler(rdklib.build_lambda_configurationchange_event(self.invoking_event_iam_role_sample, self.rule_parameters), {}) - # resp_expected = [] - # resp_expected.append(rdklib.build_expected_response('NOT_APPLICABLE', 'some-resource-id', 'AWS::IAM::Role')) - # rdklib.assert_successful_evaluation(self, response, resp_expected) + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/rdk/template/runtime/python3.7/rule_code.py b/rdk/template/runtime/python3.7/rule_code.py index 8063192e..682297b0 100644 --- a/rdk/template/runtime/python3.7/rule_code.py +++ b/rdk/template/runtime/python3.7/rule_code.py @@ -108,7 +108,7 @@ def get_client(service, event, region=None): ) -# This generate an evaluation for config +# This generates an evaluation for config def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None): """Form an 
evaluation as a dictionary. Usually suited to report on scheduled rules. @@ -221,7 +221,7 @@ def convert_api_configuration(configuration_item): # Based on the type of message get the configuration item # either from configurationItem in the invoking event -# or using the getResourceConfigHistiry API in getConfiguration function. +# or using the getResourceConfigHistory API in getConfiguration function. def get_configuration_item(invoking_event): check_defined(invoking_event, "invokingEvent") if is_oversized_changed_notification(invoking_event["messageType"]): diff --git a/rdk/template/runtime/python3.8-lib/rule_test.py b/rdk/template/runtime/python3.8-lib/rule_test.py index 8315d034..db0cf30c 100644 --- a/rdk/template/runtime/python3.8-lib/rule_test.py +++ b/rdk/template/runtime/python3.8-lib/rule_test.py @@ -1,7 +1,9 @@ +import datetime +import json +import logging import unittest from unittest.mock import patch, MagicMock from botocore.exceptions import ClientError -import rdklib from rdklib import Evaluation, ComplianceType import rdklibtest @@ -10,40 +12,146 @@ ############## # Define the default resource to report to Config Rules -RESOURCE_TYPE = 'AWS::::Account' +# TODO - Replace with your resource type +RESOURCE_TYPE = "AWS::IAM::Role" ############# # Main Code # ############# -MODULE = __import__('<%RuleName%>') -RULE = MODULE.<%RuleName%>() +MODULE = __import__("check_security_hub_aggregator") +RULE = MODULE.check_security_hub_aggregator() CLIENT_FACTORY = MagicMock() -#example for mocking S3 API calls -S3_CLIENT_MOCK = MagicMock() +# example for mocking IAM API calls +IAM_CLIENT_MOCK = MagicMock() +# STS client for getting account ID +STS_CLIENT_MOCK = MagicMock() + def mock_get_client(client_name, *args, **kwargs): - if client_name == 's3': - return S3_CLIENT_MOCK + if client_name == "iam": + return IAM_CLIENT_MOCK + if client_name == "sts": + return STS_CLIENT_MOCK raise Exception("Attempting to create an unknown client") -@patch.object(CLIENT_FACTORY, 'build_client', MagicMock(side_effect=mock_get_client)) + +@patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) class ComplianceTest(unittest.TestCase): + rule_parameters = { + "SomeParameterKey": "SomeParameterValue", + "SomeParameterKey2": "SomeParameterValue2", + } + + role_sample_configuration_abridged = {"arn": "some-arn", "roleName": "testrole"} - rule_parameters = '{"SomeParameterKey":"SomeParameterValue","SomeParameterKey2":"SomeParameterValue2"}' + invoking_event_iam_role_sample = { + "configurationItem": { + "relatedEvents": [], + "relationships": [], + "configuration": role_sample_configuration_abridged, + "tags": {}, + "configurationItemCaptureTime": "2018-07-02T03:37:52.418Z", + "awsAccountId": "123456789012", + "configurationItemStatus": "ResourceDiscovered", + "resourceType": "AWS::IAM::Role", + "resourceId": "some-resource-id", + "resourceName": "some-resource-name", + "ARN": "some-arn", + }, + "notificationCreationTime": "2018-07-02T23:05:34.445Z", + "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", + } - invoking_event_iam_role_sample = 
'{"configurationItem":{"relatedEvents":[],"relationships":[],"configuration":{},"tags":{},"configurationItemCaptureTime":"2018-07-02T03:37:52.418Z","awsAccountId":"123456789012","configurationItemStatus":"ResourceDiscovered","resourceType":"AWS::IAM::Role","resourceId":"some-resource-id","resourceName":"some-resource-name","ARN":"some-arn"},"notificationCreationTime":"2018-07-02T23:05:34.445Z","messageType":"ConfigurationItemChangeNotification"}' + list_roles_response = { + "Roles": [ + { + "Path": "/", + "RoleName": "testrole", + "RoleId": "some-role-id", + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), + "Description": "this is a test role", + "MaxSessionDuration": 123, + "Tags": [ + {"Key": "one_tag", "Value": "its_value"}, + ], + "RoleLastUsed": { + "LastUsedDate": datetime.datetime(2015, 1, 1), + "Region": "us-east-1", + }, + }, + ] + } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} def setUp(self): - pass + STS_CLIENT_MOCK.reset_mock() def test_sample(self): self.assertTrue(True) - #def test_sample_2(self): - # response = MODULE.lambda_handler(rdklib.build_lambda_configurationchange_event(self.invoking_event_iam_role_sample, self.rule_parameters), {}) - # resp_expected = [] - # resp_expected.append(rdklib.build_expected_response('NOT_APPLICABLE', 'some-resource-id', 'AWS::IAM::Role')) - # rdklib.assert_successful_evaluation(self, response, resp_expected) + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + 
logging.warning(f"Expected response: {vars(resp_expected[0])}")
+        rdklibtest.assert_successful_evaluation(self, response, resp_expected)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/rdk/template/runtime/python3.8/rule_code.py b/rdk/template/runtime/python3.8/rule_code.py
index 8063192e..682297b0 100644
--- a/rdk/template/runtime/python3.8/rule_code.py
+++ b/rdk/template/runtime/python3.8/rule_code.py
@@ -108,7 +108,7 @@ def get_client(service, event, region=None):
     )
 
 
-# This generate an evaluation for config
+# This generates an evaluation for config
 def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
     """Form an evaluation as a dictionary. Usually suited to report on scheduled rules.
 
@@ -221,7 +221,7 @@ def convert_api_configuration(configuration_item):
 # Based on the type of message get the configuration item
 # either from configurationItem in the invoking event
-# or using the getResourceConfigHistiry API in getConfiguration function.
+# or using the getResourceConfigHistory API in getConfiguration function.
 def get_configuration_item(invoking_event):
     check_defined(invoking_event, "invokingEvent")
     if is_oversized_changed_notification(invoking_event["messageType"]):
diff --git a/rdk/template/runtime/python3.9-lib/rule_test.py b/rdk/template/runtime/python3.9-lib/rule_test.py
index 8315d034..db0cf30c 100644
--- a/rdk/template/runtime/python3.9-lib/rule_test.py
+++ b/rdk/template/runtime/python3.9-lib/rule_test.py
@@ -1,7 +1,9 @@
+import datetime
+import json
+import logging
 import unittest
 from unittest.mock import patch, MagicMock
 from botocore.exceptions import ClientError
-import rdklib
 from rdklib import Evaluation, ComplianceType
 import rdklibtest
 
@@ -10,40 +12,146 @@
 ##############
 
 # Define the default resource to report to Config Rules
-RESOURCE_TYPE = 'AWS::::Account'
+# TODO - Replace with your resource type
+RESOURCE_TYPE = "AWS::IAM::Role"
 
 #############
 # Main Code #
 #############
 
-MODULE = __import__('<%RuleName%>')
-RULE = MODULE.<%RuleName%>()
+MODULE = __import__("<%RuleName%>")
+RULE = MODULE.<%RuleName%>()
 
 CLIENT_FACTORY = MagicMock()
 
-#example for mocking S3 API calls
-S3_CLIENT_MOCK = MagicMock()
+# example for mocking IAM API calls
+IAM_CLIENT_MOCK = MagicMock()
+# STS client for getting account ID
+STS_CLIENT_MOCK = MagicMock()
+
 
 def mock_get_client(client_name, *args, **kwargs):
-    if client_name == 's3':
-        return S3_CLIENT_MOCK
+    if client_name == "iam":
+        return IAM_CLIENT_MOCK
+    if client_name == "sts":
+        return STS_CLIENT_MOCK
     raise Exception("Attempting to create an unknown client")
 
-@patch.object(CLIENT_FACTORY, 'build_client', MagicMock(side_effect=mock_get_client))
+
+@patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client))
 class ComplianceTest(unittest.TestCase):
+    rule_parameters = {
+        "SomeParameterKey": "SomeParameterValue",
+        "SomeParameterKey2": "SomeParameterValue2",
+    }
+
+    role_sample_configuration_abridged = {"arn": "some-arn", "roleName": "testrole"}
 
-    rule_parameters = '{"SomeParameterKey":"SomeParameterValue","SomeParameterKey2":"SomeParameterValue2"}'
+    invoking_event_iam_role_sample = {
+        "configurationItem": {
+            "relatedEvents": [],
+            "relationships": [],
+            "configuration": role_sample_configuration_abridged,
+            "tags": {},
+            "configurationItemCaptureTime": "2018-07-02T03:37:52.418Z",
+            "awsAccountId": "123456789012",
+            "configurationItemStatus": "ResourceDiscovered",
+            "resourceType": "AWS::IAM::Role",
+            
"resourceId": "some-resource-id", + "resourceName": "some-resource-name", + "ARN": "some-arn", + }, + "notificationCreationTime": "2018-07-02T23:05:34.445Z", + "messageType": "ConfigurationItemChangeNotification", + "executionRoleArn": "arn:aws:dummy", + } - invoking_event_iam_role_sample = '{"configurationItem":{"relatedEvents":[],"relationships":[],"configuration":{},"tags":{},"configurationItemCaptureTime":"2018-07-02T03:37:52.418Z","awsAccountId":"123456789012","configurationItemStatus":"ResourceDiscovered","resourceType":"AWS::IAM::Role","resourceId":"some-resource-id","resourceName":"some-resource-name","ARN":"some-arn"},"notificationCreationTime":"2018-07-02T23:05:34.445Z","messageType":"ConfigurationItemChangeNotification"}' + list_roles_response = { + "Roles": [ + { + "Path": "/", + "RoleName": "testrole", + "RoleId": "some-role-id", + "Arn": "arn:aws:iam::111111111111:role/testrole", + "CreateDate": datetime.datetime(2015, 1, 1), + "Description": "this is a test role", + "MaxSessionDuration": 123, + "Tags": [ + {"Key": "one_tag", "Value": "its_value"}, + ], + "RoleLastUsed": { + "LastUsedDate": datetime.datetime(2015, 1, 1), + "Region": "us-east-1", + }, + }, + ] + } + test_account_id = "111111111111" + get_caller_identity_response = {"Account": test_account_id} def setUp(self): - pass + STS_CLIENT_MOCK.reset_mock() def test_sample(self): self.assertTrue(True) - #def test_sample_2(self): - # response = MODULE.lambda_handler(rdklib.build_lambda_configurationchange_event(self.invoking_event_iam_role_sample, self.rule_parameters), {}) - # resp_expected = [] - # resp_expected.append(rdklib.build_expected_response('NOT_APPLICABLE', 'some-resource-id', 'AWS::IAM::Role')) - # rdklib.assert_successful_evaluation(self, response, resp_expected) + # Example of how to evaluate a configuration change rule + def test_configurationchange_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + response = RULE.evaluate_change( + event=json.dumps(self.invoking_event_iam_role_sample), + client_factory=CLIENT_FACTORY, + configuration_item=self.role_sample_configuration_abridged, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + annotation="This is a configuration change rule's annotation.", + resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("resourceId", None), + resourceType=RESOURCE_TYPE, + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + # Example of how to mock the client response for a list_roles API call + def test_periodic_rule(self): + # Mock any usage of get_caller_identity + STS_CLIENT_MOCK.get_caller_identity = MagicMock( + return_value=self.get_caller_identity_response + ) + IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response) + # Example of how to evaluate a periodic rule + response = RULE.evaluate_periodic( + event=rdklibtest.create_test_scheduled_event(self.rule_parameters), + client_factory=CLIENT_FACTORY, + valid_rule_parameters=json.dumps(self.rule_parameters), + ) + resp_expected = [] + resp_expected.append( + Evaluation( + complianceType=ComplianceType.NOT_APPLICABLE, + 
resourceId=self.invoking_event_iam_role_sample.get( + "configurationItem", {} + ).get("awsAccountId", None), + resourceType="AWS::::Account", + annotation="This is a periodic rule's annotation.", + ) + ) + if vars(response[0]) != vars(resp_expected[0]): + logging.warning(f"Actual response: {vars(response[0])}") + logging.warning(f"Expected response: {vars(resp_expected[0])}") + rdklibtest.assert_successful_evaluation(self, response, resp_expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/rdk/template/runtime/python3.9/rule_code.py b/rdk/template/runtime/python3.9/rule_code.py index 8063192e..682297b0 100644 --- a/rdk/template/runtime/python3.9/rule_code.py +++ b/rdk/template/runtime/python3.9/rule_code.py @@ -108,7 +108,7 @@ def get_client(service, event, region=None): ) -# This generate an evaluation for config +# This generates an evaluation for config def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None): """Form an evaluation as a dictionary. Usually suited to report on scheduled rules. @@ -221,7 +221,7 @@ def convert_api_configuration(configuration_item): # Based on the type of message get the configuration item # either from configurationItem in the invoking event -# or using the getResourceConfigHistiry API in getConfiguration function. +# or using the getResourceConfigHistory API in getConfiguration function. def get_configuration_item(invoking_event): check_defined(invoking_event, "invokingEvent") if is_oversized_changed_notification(invoking_event["messageType"]): diff --git a/rdk/template/terraform/0.11/config_rule.tf b/rdk/template/terraform/0.11/config_rule.tf index 4df8f241..6e190217 100644 --- a/rdk/template/terraform/0.11/config_rule.tf +++ b/rdk/template/terraform/0.11/config_rule.tf @@ -9,13 +9,6 @@ data "aws_iam_policy" "read_only_access" { } data "aws_iam_policy_document" "config_iam_policy" { - - statement{ - actions=["s3:GetObject"] - resources =["arn:${data.aws_partition.current.partition}:s3:::${var.source_bucket}/${var.rule_name}.zip"] - effect = "Allow" - sid= "1" - } statement{ actions=[ "logs:CreateLogGroup", @@ -36,7 +29,6 @@ data "aws_iam_policy_document" "config_iam_policy" { statement{ actions=[ "iam:List*", - "iam:Describe*", "iam:Get*" ] resources = ["*"] diff --git a/rdk/template/terraform/0.12/config_rule.tf b/rdk/template/terraform/0.12/config_rule.tf index 13e2b6f6..ab384bdc 100644 --- a/rdk/template/terraform/0.12/config_rule.tf +++ b/rdk/template/terraform/0.12/config_rule.tf @@ -10,12 +10,6 @@ data "aws_iam_policy" "read_only_access" { data "aws_iam_policy_document" "config_iam_policy" { - statement{ - actions=["s3:GetObject"] - resources = [format("arn:%s:s3:::%s/%s",data.aws_partition.current.partition,var.source_bucket,local.rule_name_source)] - effect = "Allow" - sid= "1" - } statement{ actions=[ "logs:CreateLogGroup", @@ -36,7 +30,6 @@ data "aws_iam_policy_document" "config_iam_policy" { statement{ actions=[ "iam:List*", - "iam:Describe*", "iam:Get*" ] resources = ["*"] diff --git a/setup.py b/setup.py deleted file mode 100644 index 42ffceff..00000000 --- a/setup.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2017-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. 
This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -from setuptools import setup - -import rdk -from rdk import MY_VERSION - - -def readme(): - with open("README.rst") as f: - return f.read() - - -setup( - name="rdk", - version=MY_VERSION, - description="Rule Development Kit CLI for AWS Config", - long_description=readme(), - url="https://github.com/awslabs/aws-config-rdk/", - author="RDK maintainer", - author_email="rdk-maintainers@amazon.com", - license="Apache License Version 2.0", - packages=["rdk"], - install_requires=[ - "boto3", - "pyyaml", - ], - classifiers=[ - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - ], - entry_points={ - "console_scripts": [ - "rdk=rdk.cli:main", - ], - }, - zip_safe=False, - include_package_data=True, -) diff --git a/testing/linux-python3-buildspec.yaml b/testing/linux-python3-buildspec.yaml index 74930af5..715d06ac 100644 --- a/testing/linux-python3-buildspec.yaml +++ b/testing/linux-python3-buildspec.yaml @@ -14,7 +14,7 @@ phases: commands: - rdk create-region-set -o test-region - rdk -f test-region.yaml init - - rdk create MFA_ENABLED_RULE --runtime python3.8 --resource-types AWS::IAM::User + - rdk create MFA_ENABLED_RULE --runtime python3.10 --resource-types AWS::IAM::User - rdk -f test-region.yaml deploy MFA_ENABLED_RULE - sleep 30 - python3 testing/multi_region_execution_test.py @@ -22,22 +22,26 @@ phases: - rdk -f test-region.yaml undeploy --force MFA_ENABLED_RULE - python3 testing/partition_test.py - rdk init --generate-lambda-layer + - rdk create LP3_TestRule_P310_lib --runtime python3.10-lib --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P39_lib --runtime python3.9-lib --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P38_lib --runtime python3.8-lib --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P37_lib --runtime python3.7-lib --resource-types AWS::EC2::SecurityGroup + - rdk -f test-region.yaml deploy LP3_TestRule_P310_lib --generated-lambda-layer - rdk -f test-region.yaml deploy LP3_TestRule_P39_lib --generated-lambda-layer - rdk -f test-region.yaml deploy LP3_TestRule_P38_lib --generated-lambda-layer - rdk -f test-region.yaml deploy LP3_TestRule_P37_lib --generated-lambda-layer + - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P310_lib - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P39_lib - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P38_lib - yes | rdk -f test-region.yaml undeploy LP3_TestRule_P37_lib + - rdk create LP3_TestRule_P310 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P39 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P38 --runtime python3.8 --resource-types AWS::EC2::SecurityGroup - rdk create LP3_TestRule_P37 --runtime python3.7 --resource-types AWS::EC2::SecurityGroup - - rdk create LP3_TestRule_P3 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - - rdk create LP3_TestRule_EFSFS --runtime python3.9 --resource-types AWS::EFS::FileSystem - - rdk create LP3_TestRule_ECSTD --runtime python3.7 --resource-types AWS::ECS::TaskDefinition - - rdk create LP3_TestRule_ECSS --runtime python3.9 --resource-types AWS::ECS::Service + - rdk create 
LP3_TestRule_P3 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup + - rdk create LP3_TestRule_EFSFS --runtime python3.10 --resource-types AWS::EFS::FileSystem + - rdk create LP3_TestRule_ECSTD --runtime python3.10 --resource-types AWS::ECS::TaskDefinition + - rdk create LP3_TestRule_ECSS --runtime python3.10 --resource-types AWS::ECS::Service - rdk modify LP3_TestRule_P3 --input-parameters '{"TestParameter":"TestValue"}' - rdk create LP3_TestRule_P37_Periodic --runtime python3.7 --maximum-frequency One_Hour - rdk create LP3_TestRule_P37lib_Periodic --runtime python3.7-lib --maximum-frequency One_Hour @@ -45,6 +49,8 @@ phases: - rdk create LP3_TestRule_P38lib_Periodic --runtime python3.8-lib --maximum-frequency One_Hour - rdk create LP3_TestRule_P39_Periodic --runtime python3.9 --maximum-frequency One_Hour - rdk create LP3_TestRule_P39lib_Periodic --runtime python3.9-lib --maximum-frequency One_Hour + - rdk create LP3_TestRule_P310_Periodic --runtime python3.10 --maximum-frequency One_Hour + - rdk create LP3_TestRule_P310lib_Periodic --runtime python3.10-lib --maximum-frequency One_Hour - rdk test-local --all - rdk deploy --all - yes | rdk undeploy LP3_TestRule_P3 @@ -54,6 +60,8 @@ phases: - yes | rdk undeploy LP3_TestRule_P38_Periodic - yes | rdk undeploy LP3_TestRule_P39 - yes | rdk undeploy LP3_TestRule_P39_Periodic + - yes | rdk undeploy LP3_TestRule_P310 + - yes | rdk undeploy LP3_TestRule_P310_Periodic - sleep 30 - rdk logs LP3_TestRule_P3 - yes | rdk undeploy -a diff --git a/testing/windows-python2-buildspec.yaml b/testing/windows-python2-buildspec.yaml deleted file mode 100644 index 57dd3042..00000000 --- a/testing/windows-python2-buildspec.yaml +++ /dev/null @@ -1,13 +0,0 @@ -version: 0.1 - -phases: - install: - commands: - - apt-get update -y - build: - commands: - - echo Creating Windows build server and running tests - - bash testing/test_windows.sh 2 - post_build: - commands: - - echo Build completed on `date` diff --git a/testing/windows-python3-buildspec.yaml b/testing/windows-python3-buildspec.yaml index 4e385fd6..5c92b963 100644 --- a/testing/windows-python3-buildspec.yaml +++ b/testing/windows-python3-buildspec.yaml @@ -14,35 +14,41 @@ phases: commands: - rdk create-region-set -o test-region - rdk -f test-region.yaml init - - rdk create W_MFA_ENABLED_RULE --runtime python3.8 --resource-types AWS::IAM::User + - rdk create W_MFA_ENABLED_RULE --runtime python3.10 --resource-types AWS::IAM::User - rdk -f test-region.yaml deploy W_MFA_ENABLED_RULE - python testing/win_multi_region_execution_test.py - rdk -f test-region.yaml undeploy --force W_MFA_ENABLED_RULE - python testing/win_partition_test.py - rdk init --generate-lambda-layer + - rdk create WP3_TestRule_P310_lib --runtime python3.10-lib --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P39_lib --runtime python3.9-lib --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P38_lib --runtime python3.8-lib --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P37_lib --runtime python3.7-lib --resource-types AWS::EC2::SecurityGroup + - rdk -f test-region.yaml deploy WP3_TestRule_P310_lib --generated-lambda-layer - rdk -f test-region.yaml deploy WP3_TestRule_P39_lib --generated-lambda-layer - rdk -f test-region.yaml deploy WP3_TestRule_P38_lib --generated-lambda-layer - rdk -f test-region.yaml deploy WP3_TestRule_P37_lib --generated-lambda-layer + - rdk -f test-region.yaml undeploy WP3_TestRule_P310_lib --force - rdk -f test-region.yaml undeploy WP3_TestRule_P39_lib --force 
- rdk -f test-region.yaml undeploy WP3_TestRule_P38_lib --force - rdk -f test-region.yaml undeploy WP3_TestRule_P37_lib --force + - rdk create WP3_TestRule_P310 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P39 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P38 --runtime python3.8 --resource-types AWS::EC2::SecurityGroup - rdk create WP3_TestRule_P37 --runtime python3.7 --resource-types AWS::EC2::SecurityGroup - - rdk create WP3_TestRule_P3 --runtime python3.9 --resource-types AWS::EC2::SecurityGroup - - rdk create WP3_TestRule_EFSFS --runtime python3.9 --resource-types AWS::EFS::FileSystem - - rdk create WP3_TestRule_ECSTD --runtime python3.7 --resource-types AWS::ECS::TaskDefinition - - rdk create WP3_TestRule_ECSS --runtime python3.9 --resource-types AWS::ECS::Service - - rdk modify WP3_TestRule_P3 --runtime python3.8 + - rdk create WP3_TestRule_P3 --runtime python3.10 --resource-types AWS::EC2::SecurityGroup + - rdk create WP3_TestRule_EFSFS --runtime python3.10 --resource-types AWS::EFS::FileSystem + - rdk create WP3_TestRule_ECSTD --runtime python3.10 --resource-types AWS::ECS::TaskDefinition + - rdk create WP3_TestRule_ECSS --runtime python3.10 --resource-types AWS::ECS::Service + - rdk modify WP3_TestRule_P3 --runtime python3.10 - rdk create WP3_TestRule_P37_Periodic --runtime python3.7 --maximum-frequency One_Hour - rdk create WP3_TestRule_P37lib_Periodic --runtime python3.7-lib --maximum-frequency One_Hour - rdk create WP3_TestRule_P38_Periodic --runtime python3.8 --maximum-frequency One_Hour - rdk create WP3_TestRule_P38lib_Periodic --runtime python3.8-lib --maximum-frequency One_Hour - rdk create WP3_TestRule_P39_Periodic --runtime python3.9 --maximum-frequency One_Hour - rdk create WP3_TestRule_P39lib_Periodic --runtime python3.9-lib --maximum-frequency One_Hour + - rdk create WP3_TestRule_P310_Periodic --runtime python3.10 --maximum-frequency One_Hour + - rdk create WP3_TestRule_P310lib_Periodic --runtime python3.10-lib --maximum-frequency One_Hour - rdk test-local --all - rdk deploy --all - rdk undeploy WP3_TestRule_P3 --force @@ -51,7 +57,9 @@ phases: - rdk undeploy WP3_TestRule_P38 --force - rdk undeploy WP3_TestRule_P38_Periodic --force - rdk undeploy WP3_TestRule_P39 --force - - rdk undeploy WP3_TestRule_P39_Periodic --force + - rdk undeploy WP3_TestRule_P39_Periodic --force + - rdk undeploy WP3_TestRule_P310 --force + - rdk undeploy WP3_TestRule_P310_Periodic --force - rdk logs WP3_TestRule_P3 - rdk undeploy -a --force post_build: diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..66a14670 --- /dev/null +++ b/tox.ini @@ -0,0 +1,2 @@ +[flake8] +max-line-length=140 \ No newline at end of file From ddd6c18fe384e682c5266a1e630f8e1a1e55b6f4 Mon Sep 17 00:00:00 2001 From: Benjamin Morris Date: Thu, 25 May 2023 08:52:50 -0700 Subject: [PATCH 12/12] remove tox --- tox.ini | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 tox.ini diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 66a14670..00000000 --- a/tox.ini +++ /dev/null @@ -1,2 +0,0 @@ -[flake8] -max-line-length=140 \ No newline at end of file