diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml
new file mode 100644
index 0000000000..3da877a732
--- /dev/null
+++ b/.github/workflows/close-stale-issues.yml
@@ -0,0 +1,53 @@
+name: Close stale issues
+
+# Controls when the action will run.
+on:
+  schedule:
+    # Runs at 06:00 UTC, which is 11PM PDT (10PM PST)
+    - cron: "0 6 * * *"
+
+jobs:
+  cleanup:
+    runs-on: ubuntu-latest
+    name: Stale issue job
+    steps:
+      - uses: aws-actions/stale-issue-cleanup@v3
+        with:
+          issue-types: issues
+
+          # Setting messages to an empty string will cause the automation to skip
+          # that category
+          ancient-issue-message: |
+            This issue has not received any attention in 1 year.
+            If you want to keep this issue open, please leave a comment below and auto-close will be canceled.
+          stale-issue-message: |
+            This issue has not received a response in 14 days.
+            If you want to keep this issue open, please leave a comment below and auto-close will be canceled.
+          stale-pr-message: |
+            This PR has not received a response in 14 days.
+            If you want to keep this PR open, please leave a comment below and auto-close will be canceled.
+
+          # These labels are required
+          stale-issue-label: blocked/close-if-inactive
+          exempt-issue-labels: no-autoclose, stage/needs-attention
+          stale-pr-label: blocked/close-if-inactive
+          exempt-pr-labels: no-autoclose, type/feature
+          response-requested-label: blocked/more-info-needed
+
+          # Leave this unset if you don't want a label applied when closing issues
+          closed-for-staleness-label: stage/closed-for-inactivity
+
+          # Issue timing
+          days-before-stale: 14
+          days-before-close: 7
+          days-before-ancient: 365
+
+          # If you don't want to mark an issue as ancient based on a
+          # threshold of "upvotes", you can set this here. An "upvote" is
+          # the total number of +1, heart, hooray, and rocket reactions
+          # on an issue.
+          minimum-upvotes-to-exempt: 10
+
+          # A repo-scoped token is needed here so that this action can trigger other GitHub Actions
+          repo-token: ${{ secrets.STALE_BOT_PERSONAL_TOKEN }}
+
diff --git a/.github/workflows/closed-issue-message.yml b/.github/workflows/closed-issue-message.yml
new file mode 100644
index 0000000000..5128523727
--- /dev/null
+++ b/.github/workflows/closed-issue-message.yml
@@ -0,0 +1,18 @@
+name: Closed issue message
+
+on:
+  issues:
+    types: [ closed ]
+jobs:
+  auto_comment:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: aws-actions/closed-issue-message@v1
+        with:
+          # These inputs are both required
+          repo-token: "${{ secrets.GITHUB_TOKEN }}"
+          message: |
+            ### ⚠️COMMENT VISIBILITY WARNING⚠️
+            Comments on closed issues are hard for our team to see.
+            If you need more assistance, please either tag a team member or open a new issue that references this one.
+            If you wish to keep having a conversation with other community members under this issue, feel free to do so.
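To make the timing inputs above concrete, the intended lifecycle can be sketched as plain date arithmetic. This is an illustration only: the `stale-issue-cleanup` action implements the real logic internally, and the helper below is not part of this change.

```python
from datetime import date, timedelta

# Timing values copied from close-stale-issues.yml above.
DAYS_BEFORE_STALE = 14     # quiet days before the stale label is applied
DAYS_BEFORE_CLOSE = 7      # additional quiet days before the issue is closed
DAYS_BEFORE_ANCIENT = 365  # untouched issues are flagged as ancient after a year


def lifecycle(last_activity):
    """Illustrative timeline for an issue that receives no further activity."""
    stale_on = last_activity + timedelta(days=DAYS_BEFORE_STALE)
    closed_on = stale_on + timedelta(days=DAYS_BEFORE_CLOSE)
    ancient_on = last_activity + timedelta(days=DAYS_BEFORE_ANCIENT)
    return {"stale_on": stale_on, "closed_on": closed_on, "ancient_on": ancient_on}


print(lifecycle(date(2021, 5, 1)))
# An issue last touched on 2021-05-01 would be labeled stale on 2021-05-15
# and closed on 2021-05-22, unless a new comment resets the clock.
```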
diff --git a/.github/workflows/need-attention-label.yml b/.github/workflows/need-attention-label.yml
new file mode 100644
index 0000000000..a0261dcd4b
--- /dev/null
+++ b/.github/workflows/need-attention-label.yml
@@ -0,0 +1,22 @@
+name: Add need attention label
+
+on:
+  issue_comment:
+    types: [created, edited]
+
+jobs:
+  apply-label:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/github-script@v3
+        # skip comments posted by our bot, whose login is 'aws-sam-cli-stale-bot'
+        if: github.event.sender.login != 'aws-sam-cli-stale-bot'
+        with:
+          github-token: ${{secrets.GITHUB_TOKEN}}
+          script: |
+            github.issues.addLabels({
+              issue_number: context.issue.number,
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              labels: ['stage/needs-attention']
+            })
diff --git a/.gitignore b/.gitignore
index deb14508b0..3b9e300d5b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -389,6 +389,9 @@ tests/integration/testdata/buildcmd/Dotnetcore2.0/bin
 tests/integration/testdata/buildcmd/Dotnetcore2.0/obj
 tests/integration/testdata/buildcmd/Dotnetcore2.1/bin
 tests/integration/testdata/buildcmd/Dotnetcore2.1/obj
+tests/integration/testdata/buildcmd/Dotnetcore3.1/bin
+tests/integration/testdata/buildcmd/Dotnetcore3.1/obj
+tests/integration/testdata/invoke/credential_tests/inprocess/dotnet/STS/obj

 # End of https://www.gitignore.io/api/osx,node,macos,linux,python,windows,pycharm,intellij,sublimetext,visualstudiocode
diff --git a/DEVELOPMENT_GUIDE.md b/DEVELOPMENT_GUIDE.md
index b399682ae2..994c1e3220 100644
--- a/DEVELOPMENT_GUIDE.md
+++ b/DEVELOPMENT_GUIDE.md
@@ -1,5 +1,4 @@
-DEVELOPMENT GUIDE
-=================
+# AWS SAM CLI Development Guide

 **Welcome hacker!**

@@ -8,11 +7,8 @@ development environment, IDEs, tests, coding practices, or anything that will
 help you be more productive. If you found something is missing or
 inaccurate, update this guide and send a Pull Request.

-**Note**: `pyenv` currently only supports macOS and Linux. If you are a
-Windows users, consider using [pipenv](https://docs.pipenv.org/).
+## 1-Click Ready to Hack IDE (this section might be outdated, to be verified)

-1-Click Ready to Hack IDE
--------------------------
 For setting up a local development environment, we recommend using Gitpod - a service that allows you to spin up an in-browser Visual Studio Code-compatible editor, with everything set up and ready to go for development on this project. Just click the button below to create your private workspace:

[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/awslabs/aws-sam-cli)

@@ -21,125 +17,216 @@ This will start a new Gitpod workspace, and immediately kick off a build of the
 Gitpod is free for 50 hours per month - make sure to stop your workspace when you're done (you can always resume it later, and it won't need to run the build again).

-Environment Setup
------------------
+## Environment Setup

+### 1. Prerequisites (Python Virtual Environment)

-### 1. Install Python Versions
+AWS SAM CLI is mainly written in Python 3 and we support Python 3.6, 3.7 and 3.8,
+so a Python environment with one of the aforementioned versions is required.

-We support 3.6 and 3.7 versions. Our CI/CD pipeline is setup to run
-unit tests against both Python versions. So make sure you test it
-with both versions before sending a Pull Request.
-See [Unit testing with multiple Python versions](#unit-testing-with-multiple-python-versions).
+Having a dedicated Python virtual environment ensures it won't "pollute" or get "polluted"
+by other Python packages. Here we introduce two ways of setting up a Python virtual environment:
+(1) Python's built-in [`venv`](https://docs.python.org/3/tutorial/venv.html) and (2) [`pyenv`](https://github.com/pyenv/pyenv).

-[pyenv](https://github.com/pyenv/pyenv) is a great tool to
-easily setup multiple Python versions.
-
-> Note: For Windows, type
-> `export PATH="/c/Users//.pyenv/libexec:$PATH"` to add pyenv to
-> your path.
-
-1. Install PyEnv -
-   `curl -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-installer | bash`
-2. `pyenv install 3.6.8`
-3. `pyenv install 3.7.2`
-4. Make Python versions available in the project:
-   `pyenv local 3.6.8 3.7.2`
-
-### 2. Install Additional Tooling
-#### Black
-We format our code using [Black](https://github.com/python/black) and verify the source code is black compliant
-in Appveyor during PRs. Black will be installed automatically with `make init`.
-
-After installing, you can run our formatting through our Makefile by `make black` or integrating Black directly in your favorite IDE (instructions
-can be found [here](https://black.readthedocs.io/en/stable/editor_integration.html))
-
-##### (workaround) Integrating Black directly in your favorite IDE
-Since black is installed in virtualenv, when you follow [this instruction](https://black.readthedocs.io/en/stable/editor_integration.html), `which black` might give you this
+**Note**: `pyenv` currently only supports macOS and Linux. If you are a
+Windows user, consider using [pyenv-win](https://github.com/pyenv-win/pyenv-win).

-```bash
-(samcli37) $ where black
-/Users//.pyenv/shims/black
+| | `venv` | `pyenv` |
+| -- | -------- | ------------ |
+| Pick if you want ... | Easy setup | To develop and test SAM CLI in different Python versions |
+
+
+#### `venv` setup
+
+```sh
+python3 -m venv .venv # one time setup: create a virtual environment in the directory .venv
+source .venv/bin/activate # activate the virtual environment
 ```
+#### `pyenv` setup

-However, IDEs such PyChaim (using FileWatcher) will have a hard time invoking `/Users//.pyenv/shims/black`
-and this will happen:
+Install `pyenv` and the [`pyenv-virtualenv` plugin](https://github.com/pyenv/pyenv-virtualenv).
+On macOS, with [Homebrew](https://brew.sh/):
+
+```sh
+brew install pyenv
+brew install pyenv-virtualenv
 ```
-pyenv: black: command not found
-The `black' command exists in these Python versions:
-  3.7.2/envs/samcli37
-  samcli37
-```
+or using [pyenv-installer](https://github.com/pyenv/pyenv-installer) and git:

-A simple workaround is to use `/Users//.pyenv/versions/samcli37/bin/black`
-instead of `/Users//.pyenv/shims/black`.
+```sh
+curl https://pyenv.run | bash # https://github.com/pyenv/pyenv-installer
+exec $SHELL # restart your shell so the path changes take effect
+git clone https://github.com/pyenv/pyenv-virtualenv.git $(pyenv root)/plugins/pyenv-virtualenv
+exec $SHELL # restart your shell to enable pyenv-virtualenv
+```
+
+Next, set up a virtual environment and activate it:

-#### Pre-commit
-If you don't wish to manually run black on each pr or install black manually, we have integrated black into git hooks through [pre-commit](https://pre-commit.com/).
-After installing pre-commit, run `pre-commit install` in the root of the project. This will install black for you and run the black formatting on
-commit.
+```sh
+# Assuming you want to develop AWS SAM CLI in Python 3.8.9
+pyenv install 3.8.9 # install Python 3.8.9 using pyenv
+pyenv virtualenv 3.8.9 samcli38 # create a virtual environment using 3.8.9 named "samcli38"
+pyenv activate samcli38 # activate the virtual environment
+```

-### 3. Activate Virtualenv
+### 2. Initialize dependencies and make `samdev` available in `$PATH`

-Virtualenv allows you to install required libraries outside of the
-Python installation. A good practice is to setup a different virtualenv
-for each project. [pyenv](https://github.com/pyenv/pyenv) comes with a
-handy plugin that can create virtualenv.
+Clone the AWS SAM CLI repository to your local machine if you haven't done that yet.

-Depending on the python version, the following commands would change to
-be the appropriate python version.
+```sh
+# Using SSH
+git clone git@github.com:aws/aws-sam-cli.git
+```
+or
+```sh
+# Using HTTPS
+git clone https://github.com/aws/aws-sam-cli.git
+```

-1. `pyenv virtualenv 3.7.2 samcli37`
-2. `pyenv activate samcli37` for Python3.7
+(make sure you have the virtual environment activated)

-### 4. Install dev version of SAM CLI
+```sh
+cd aws-sam-cli
+make init # this will make a `samdev` executable available in $PATH
+```

-We will install a development version of SAM CLI from source into the
-virtualenv for you to try out the CLI as you make changes. We will
-install in a command called `samdev` to keep it separate from a global
-SAM CLI installation, if any.
+Now you can verify whether the dev AWS SAM CLI is available:

-1. Activate Virtualenv: `pyenv activate samcli37`
-2. Install dev CLI: `make init`
-3. Make sure installation succeeded: `which samdev`
+```sh
+samdev --version # this will print something like "SAM CLI, version x.xx.x"
+```

-### 5. (Optional) Install development version of SAM Transformer
+#### Try making a change to AWS SAM CLI (Optional)

-If you want to run the latest version of [SAM
-Transformer](https://github.com/awslabs/serverless-application-model/),
-you can clone it locally and install it in your pyenv. This is useful if
-you want to validate your templates against any new, unreleased SAM
-features ahead of time.
+```sh
+# Change the AWS SAM CLI version to 123.456.789
+echo '__version__ = "123.456.789"' >> samcli/__init__.py
+samdev --version # this will print "SAM CLI, version 123.456.789"
+```
+
+### 3. (Optional) Install development version of SAM Transformer

-This step is optional and will use the specified version of
-aws-sam-transformer from PyPi by default.
+If you want to run the latest version of [SAM Transformer](https://github.com/aws/serverless-application-model/)
+or work on it at the same time, you can clone it locally and install it in your virtual environment.
+This is useful if you want to validate your templates against any new, unreleased SAM features ahead of time.

-``cd ~/projects (cd into the directory where you usually place projects)``
+```sh
+# Make sure you are not inside the AWS SAM CLI repository

-``git clone https://github.com/awslabs/serverless-application-model/``
+# clone the AWS SAM repo
+git clone git@github.com:aws/serverless-application-model.git
+# or using HTTPS: git clone https://github.com/aws/serverless-application-model.git

-``git checkout develop ``
+cd serverless-application-model
+```

-Install the SAM Transformer in editable mode so that all changes you make to the SAM Transformer locally are immediately picked up for SAM CLI.
+Make sure you are in the same virtual environment as the one you are using with SAM CLI.
+```sh
+source /.venv/bin/activate # if you chose to use venv to set up the virtual environment
+# or
+pyenv activate samcli38 # if you chose to use pyenv to set up the virtual environment
+```

-``pip install -e . ``
+Install the SAM Transformer in editable mode so that
+all changes you make to the SAM Transformer locally are immediately picked up for SAM CLI.
+
+```sh
+pip install -e .
+```

 Move back to your SAM CLI directory and re-run init. If necessary,
 open requirements/base.txt and replace the version number of aws-sam-translator with the
 ``version number`` specified in your local version of `serverless-application-model/samtranslator/__init__.py`

-``cd ../aws-sam-cli``
-
-``make init``
+```sh
+# Make sure you are back in your SAM CLI directory
+make init
+```
+
+## Making a Pull Request
+
+The above demonstrates how to set up the environment, which is enough
+to play with the AWS SAM CLI source code. However, if you want to
+contribute to the repository, there are a few more things to consider.
+
+### Make Sure AWS SAM CLI Works in Multiple Python Versions
+
+We support 3.6, 3.7 and 3.8 versions. Our CI/CD pipeline is set up to run
+unit tests against all Python versions. So make sure you test it
+with all versions before sending a Pull Request.
 See [Unit testing with multiple Python versions](#unit-testing-with-multiple-python-versions).

-Running Tests
--------------
+If you chose to use `pyenv` in the previous section, setting up a
+different Python version should be easy:

-### Unit testing with one Python version
+(assuming you are in the virtual environment `samcli38`)
+
+```sh
+# Your shell prompt should now look like "(samcli38) $"
+pyenv deactivate samcli38 # "(samcli38)" will disappear
+pyenv install 3.7.10 # one time setup
+pyenv virtualenv 3.7.10 samcli37 # one time setup
+pyenv activate samcli37
+# Your shell prompt should now look like "(samcli37) $"
+
+# You can verify the version of Python
+python --version # Python 3.7.10
+
+make init # one time setup, this will make a `samdev` executable available in $PATH
+```
+
+### Format Python Code
+
+We format our code using [Black](https://github.com/python/black) and verify the source code is
+Black-compliant in AppVeyor during PRs. Black will be installed automatically with `make init`.
+
+There are generally 3 options to make sure your change is compliant with our formatting standard:
+
+#### (Option 1) Run `make black`
+
+```sh
+make black
+```
+
+#### (Option 2) Integrating Black directly in your favorite IDE
+
+Since Black is installed in a virtualenv, when you follow [these instructions](https://black.readthedocs.io/en/stable/editor_integration.html), `which black` might give you this:
+
+```
+/Users//.pyenv/shims/black
+```
+
+However, IDEs such as PyCharm (using FileWatcher) will have a hard time
+invoking `/Users//.pyenv/shims/black`
+and this will happen:
+
+```
+pyenv: black: command not found
+
+The `black' command exists in these Python versions:
+  3.8.9/envs/samcli38
+  samcli38
+```
+
+A simple workaround is to use `/Users//.pyenv/versions/samcli38/bin/black`
+instead of `/Users//.pyenv/shims/black`.
+
+#### (Option 3) Pre-commit
+
+We have integrated Black into git hooks through [pre-commit](https://pre-commit.com/).
+After installing pre-commit, run `pre-commit install` in the root of the project. This will install Black for you and run the Black formatting on commit.
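If the IDE integration above proves fiddly, the same check can be scripted against whichever virtualenv is currently active. A minimal sketch, not part of this diff: invoking Black as a module through the current interpreter resolves it inside that virtualenv, which sidesteps the pyenv shim problem described above. `samcli` and `tests` are the repository's real top-level directories; everything else here is illustrative.

```python
# Check Black compliance from the active virtualenv without any IDE setup.
# Written to stay compatible with Python 3.6, the oldest supported version.
import subprocess
import sys

# "--check" makes Black report files it would reformat without changing them.
result = subprocess.run(
    [sys.executable, "-m", "black", "--check", "samcli", "tests"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    universal_newlines=True,
)
print(result.stdout or result.stderr)
sys.exit(result.returncode)  # non-zero exit means some files need reformatting
```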
+
+### Do a Local PR Check
+
+This command will run the AWS SAM CLI code through various checks, including
+lint, formatter, unit tests, functional tests, and so on.
+```sh
+make pr
+```

-If you're trying to do a quick run, it's ok to use the current python version. Run `make pr`.
+We also suggest running `make pr` in all Python versions.

-### Unit testing with multiple Python versions
+#### Unit Testing with Multiple Python Versions (Optional)

 Currently, SAM CLI only supports Python3 versions (see setup.py for exact versions). For the most part, code that works in Python3.6
 will work in Python3.7. You only run into problems if you are
@@ -148,7 +235,7 @@ will not work in Python3.6).
 If you want to test in many versions, you can create
 each version and flip between them (sourcing the activate script). Typically, we run all tests in
 one python version locally and then have our CI (AppVeyor) run all supported versions.

-### Integration Test
+#### Integration Test (Optional)

 `make integ-test` - To run integration test against global SAM CLI
 installation. It looks for a command named `sam` in your shell.
@@ -158,8 +245,8 @@ development version of SAM CLI. This is useful if you are making changes to
 the CLI and want to verify that it works. It is a good practice to run
 integration tests before submitting a pull request.

-Code Conventions
-----------------
+## Other Topics
+### Code Conventions

 Please follow these code conventions when making your changes. This will
 align your code to the same conventions used in the rest of the package and
@@ -198,8 +285,7 @@ conventions are best practices that we have learnt over time.
 comments.

-Testing
--------
+### Our Testing Practices

 We need thorough test coverage to ensure the code change works today,
 and continues to work in the future. When you make a code change, use the
@@ -230,8 +316,7 @@ following framework to decide the kinds of tests to write:
     calling AWS APIs, spinning up Docker containers, mutating files etc.

-Design Document
----------------
+### Design Document

 A design document is a written description of the feature/capability you
 are building. We have a [design document
diff --git a/installer/pyinstaller/build-linux.sh b/installer/pyinstaller/build-linux.sh
index 09c8f20591..bdde24d294 100644
--- a/installer/pyinstaller/build-linux.sh
+++ b/installer/pyinstaller/build-linux.sh
@@ -2,7 +2,8 @@
 binary_zip_filename=$1
 python_library_zip_filename=$2
 python_version=$3
-nightly_build=$4
+build_binary_name=$4
+build_folder=$5

 if [ "$python_library_zip_filename" = "" ]; then
     python_library_zip_filename="python-libraries.zip";
 fi

@@ -12,8 +13,8 @@
 if [ "$python_version" = "" ]; then
     python_version="3.7.9";
 fi

-if ! [ "$nightly_build" = "" ]; then
-    echo "Building native installer with nightly build"
+if ! [ "$build_binary_name" = "" ]; then
+    echo "Building native installer with nightly/beta build"
     is_nightly="true"
 else
     echo "Building native installer with normal build"
@@ -58,8 +59,8 @@ echo "Installing PyInstaller"
 echo "Building Binary"
 cd src
 if [ "$is_nightly" = "true" ]; then
-    echo "Updating samcli.spec with nightly build"
-    sed -i.bak "s/'sam'/'sam-nightly'/g" installer/pyinstaller/samcli.spec
+    echo "Updating samcli.spec with nightly/beta build"
+    sed -i.bak "s/'sam'/'$build_binary_name'/g" installer/pyinstaller/samcli.spec
     rm installer/pyinstaller/samcli.spec.bak
 fi
 echo "samcli.spec content is:"
@@ -70,17 +71,17 @@ cat installer/pyinstaller/samcli.spec
 mkdir pyinstaller-output
 dist_folder="sam"
 if [ "$is_nightly" = "true" ]; then
-    echo "using dist_folder with nightly build"
-    dist_folder="sam-nightly"
+    echo "using dist_folder with nightly/beta build"
+    dist_folder=$build_binary_name
 fi
 echo "dist_folder=$dist_folder"
 mv "dist/$dist_folder" pyinstaller-output/dist
 cp installer/assets/* pyinstaller-output
 chmod 755 pyinstaller-output/install
 if [ "$is_nightly" = "true" ]; then
-    echo "Updating install script with nightly build"
-    sed -i.bak "s/\/usr\/local\/aws-sam-cli/\/usr\/local\/aws-sam-cli-nightly/g" pyinstaller-output/install
-    sed -i.bak 's/EXE_NAME=\"sam\"/EXE_NAME=\"sam-nightly\"/g' pyinstaller-output/install
+    echo "Updating install script with nightly/beta build"
+    sed -i.bak "s/\/usr\/local\/aws-sam-cli/\/usr\/local\/$build_folder/g" pyinstaller-output/install
+    sed -i.bak 's/EXE_NAME=\"sam\"/EXE_NAME=\"'$build_binary_name'\"/g' pyinstaller-output/install
     rm pyinstaller-output/install.bak
 fi
 echo "install script content is:"
diff --git a/mypy.ini b/mypy.ini
index 30040750a0..497c022c95 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -59,6 +59,6 @@ ignore_missing_imports=True
 ignore_missing_imports=True

 # progressively add typechecks; these modules already completed the process, let's keep them clean
-[mypy-samcli.commands.build,samcli.lib.build.*,samcli.commands.local.cli_common.invoke_context,samcli.commands.local.lib.local_lambda,samcli.lib.providers.*]
+[mypy-samcli.commands.build,samcli.lib.build.*,samcli.commands.local.cli_common.invoke_context,samcli.commands.local.lib.local_lambda,samcli.lib.providers.*,samcli.lib.utils.git_repo]
 disallow_untyped_defs=True
 disallow_incomplete_defs=True
\ No newline at end of file
diff --git a/requirements/base.txt b/requirements/base.txt
index 839399daa2..3586eef55d 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -6,12 +6,12 @@ boto3~=1.14
 jmespath~=0.10.0
 PyYAML~=5.3
 cookiecutter~=1.7.2
-aws-sam-translator==1.35.0
+aws-sam-translator==1.36.0
 #docker minor version updates can include breaking changes. Auto update micro version only.
docker~=4.2.0 dateparser~=0.7 requests==2.23.0 serverlessrepo==0.1.10 -aws_lambda_builders==1.3.0 +aws_lambda_builders==1.4.0 tomlkit==0.7.0 -watchdog==0.10.3 +watchdog==2.1.2 diff --git a/requirements/reproducible-linux.txt b/requirements/reproducible-linux.txt index aee4688122..a0bf9623b9 100644 --- a/requirements/reproducible-linux.txt +++ b/requirements/reproducible-linux.txt @@ -12,15 +12,15 @@ attrs==20.3.0 \ --hash=sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6 \ --hash=sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700 # via jsonschema -aws-lambda-builders==1.3.0 \ - --hash=sha256:1e1d66173f19a1c40e3db96588bdea07a5bd16cd98d9d20ea2088dca2e7299d7 \ - --hash=sha256:a84521eea781967eb0a146c746b27195d29e7ac0d896d0dfb27608c53400eebb \ - --hash=sha256:d19724b51939bf9a8d78364b9ad63d8a7aa958942a963e19e6eff096c02b05d2 +aws-lambda-builders==1.4.0 \ + --hash=sha256:3f885433bb71bae653b520e3cf4c31fe5f5b977cb770d42c631af155cd60fd2b \ + --hash=sha256:5d4e4ecb3d3290f0eec1f62b7b0d9d6b91160ae71447d95899eede392d05f75f \ + --hash=sha256:d32f79cf67b189a7598793f69797f284b2eb9a9fada562175b1e854187f95aed # via aws-sam-cli (setup.py) -aws-sam-translator==1.35.0 \ - --hash=sha256:2f8904fd4a631752bc441a8fd928c444ed98ceb86b94d25ed7b84982e2eff1cd \ - --hash=sha256:5cf7faab3566843f3b44ef1a42a9c106ffb50809da4002faab818076dcc7bff8 \ - --hash=sha256:c35075e7e804490d6025598ed4878ad3ab8668e37cafb7ae75120b1c37a6d212 +aws-sam-translator==1.36.0 \ + --hash=sha256:4195ae8196f04803e7f0384a2b5ccd8c2b06ce0d8dc408aa1f1ce96c23bcf39d \ + --hash=sha256:f7d51b661fe1f5613a882f4733d1c92eff4dac36a076eafd18031d209b178695 \ + --hash=sha256:fa1b990d9329d19052e7b91cf0b19371ed9d31a529054b616005884cd662b584 # via aws-sam-cli (setup.py) binaryornot==0.4.4 \ --hash=sha256:359501dfc9d40632edc9fac890e19542db1a287bbcfa58175b66658392018061 \ @@ -80,9 +80,9 @@ idna==2.10 \ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 # via requests -importlib-metadata==3.7.3 \ - --hash=sha256:742add720a20d0467df2f444ae41704000f50e1234f46174b51f9c6031a1bd71 \ - --hash=sha256:b74159469b464a99cb8cc3e21973e4d96e05d3024d337313fedb618a6e86e6f4 +importlib-metadata==4.0.1 \ + --hash=sha256:8c501196e49fb9df5df43833bdb1e4328f64847763ec8a50703148b73784d581 \ + --hash=sha256:d7eb1dea6d6a6086f8be21784cc9e3bcfa55872b52309bc5fad53a8ea444465d # via jsonschema itsdangerous==1.1.0 \ --hash=sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19 \ @@ -166,9 +166,6 @@ markupsafe==1.1.1 \ # via # cookiecutter # jinja2 -pathtools==0.1.2 \ - --hash=sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0 - # via watchdog poyo==0.5.0 \ --hash=sha256:3e2ca8e33fdc3c411cd101ca395668395dd5dc7ac775b8e809e3def9f9fe041a \ --hash=sha256:e26956aa780c45f011ca9886f044590e2d8fd8b61db7b1c1cf4e0869f48ed4dd @@ -303,10 +300,10 @@ tomlkit==0.7.0 \ --hash=sha256:6babbd33b17d5c9691896b0e68159215a9387ebfa938aa3ac42f4a4beeb2b831 \ --hash=sha256:ac57f29693fab3e309ea789252fcce3061e19110085aa31af5446ca749325618 # via aws-sam-cli (setup.py) -typing-extensions==3.7.4.3 \ - --hash=sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918 \ - --hash=sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c \ - --hash=sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f +typing-extensions==3.10.0.0 \ + --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 
\ + --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \ + --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84 # via # arrow # importlib-metadata @@ -320,8 +317,24 @@ urllib3==1.25.11 \ # via # botocore # requests -watchdog==0.10.3 \ - --hash=sha256:4214e1379d128b0588021880ccaf40317ee156d4603ac388b9adcf29165e0c04 +watchdog==2.1.2 \ + --hash=sha256:0237db4d9024859bea27d0efb59fe75eef290833fd988b8ead7a879b0308c2db \ + --hash=sha256:104266a778906ae0e971368d368a65c4cd032a490a9fca5ba0b78c6c7ae11720 \ + --hash=sha256:188145185c08c73c56f1478ccf1f0f0f85101191439679b35b6b100886ce0b39 \ + --hash=sha256:1a62a4671796dc93d1a7262286217d9e75823c63d4c42782912d39a506d30046 \ + --hash=sha256:255a32d44bbbe62e52874ff755e2eefe271b150e0ec240ad7718a62a7a7a73c4 \ + --hash=sha256:3d6405681471ebe0beb3aa083998c4870e48b57f8afdb45ea1b5957cc5cf1014 \ + --hash=sha256:4b219d46d89cfa49af1d73175487c14a318a74cb8c5442603fd13c6a5b418c86 \ + --hash=sha256:581e3548159fe7d2a9f377a1fbcb41bdcee46849cca8ab803c7ac2e5e04ec77c \ + --hash=sha256:58ebb1095ee493008a7789d47dd62e4999505d82be89fc884d473086fccc6ebd \ + --hash=sha256:598d772beeaf9c98d0df946fbabf0c8365dd95ea46a250c224c725fe0c4730bc \ + --hash=sha256:668391e6c32742d76e5be5db6bf95c455fa4b3d11e76a77c13b39bccb3a47a72 \ + --hash=sha256:6ef9fe57162c4c361692620e1d9167574ba1975ee468b24051ca11c9bba6438e \ + --hash=sha256:91387ee2421f30b75f7ff632c9d48f76648e56bf346a7c805c0a34187a93aab4 \ + --hash=sha256:a42e6d652f820b2b94cd03156c62559a2ea68d476476dfcd77d931e7f1012d4a \ + --hash=sha256:a6471517315a8541a943c00b45f1d252e36898a3ae963d2d52509b89a50cb2b9 \ + --hash=sha256:d34ce2261f118ecd57eedeef95fc2a495fc4a40b3ed7b3bf0bd7a8ccc1ab4f8f \ + --hash=sha256:edcd9ef3fd460bb8a98eb1fcf99941e9fd9f275f45f1a82cb1359ec92975d647 # via aws-sam-cli (setup.py) websocket-client==0.58.0 \ --hash=sha256:44b5df8f08c74c3d82d28100fdc81f4536809ce98a17f0757557813275fbb663 \ diff --git a/samcli/__init__.py b/samcli/__init__.py index 790cb5d197..727ea250c1 100644 --- a/samcli/__init__.py +++ b/samcli/__init__.py @@ -2,4 +2,4 @@ SAM CLI version """ -__version__ = "1.23.0" +__version__ = "1.24.0" diff --git a/samcli/commands/_utils/options.py b/samcli/commands/_utils/options.py index 0e23c7dc09..70ca59657c 100644 --- a/samcli/commands/_utils/options.py +++ b/samcli/commands/_utils/options.py @@ -14,7 +14,7 @@ from samcli.commands._utils.custom_options.option_nargs import OptionNargs from samcli.commands._utils.template import get_template_artifacts_format -_TEMPLATE_OPTION_DEFAULT_VALUE = "template.[yaml|yml]" +_TEMPLATE_OPTION_DEFAULT_VALUE = "template.[yaml|yml|json]" DEFAULT_STACK_NAME = "sam-app" LOG = logging.getLogger(__name__) @@ -35,7 +35,7 @@ def get_or_default_template_file_name(ctx, param, provided_value, include_build) original_template_path = os.path.abspath(provided_value) - search_paths = ["template.yaml", "template.yml"] + search_paths = ["template.yaml", "template.yml", "template.json"] if include_build: search_paths.insert(0, os.path.join(".aws-sam", "build", "template.yaml")) @@ -43,7 +43,7 @@ def get_or_default_template_file_name(ctx, param, provided_value, include_build) if provided_value == _TEMPLATE_OPTION_DEFAULT_VALUE: # "--template" is an alias of "--template-file", however, only the first option name "--template-file" in # ctx.default_map is used as default value of provided value. Here we add "--template"'s value as second - # default value in this option, so that the command line paramerters from config file can load it. 
+ # default value in this option, so that the command line parameters from config file can load it. if ctx and ctx.default_map.get("template", None): provided_value = ctx.default_map.get("template") else: diff --git a/samcli/commands/init/__init__.py b/samcli/commands/init/__init__.py index ebf1d18fff..ef92432511 100644 --- a/samcli/commands/init/__init__.py +++ b/samcli/commands/init/__init__.py @@ -163,7 +163,7 @@ def wrapped(*args, **kwargs): default=None, help="Lambda Image of your app", cls=Mutex, - not_required=["location", "app_template", "runtime"], + not_required=["location", "runtime"], ) @click.option( "-d", @@ -182,7 +182,7 @@ def wrapped(*args, **kwargs): help="Identifier of the managed application template you want to use. " "If not sure, call 'sam init' without options for an interactive workflow.", cls=Mutex, - not_required=["location", "base_image"], + not_required=["location"], ) @click.option( "--no-input", @@ -256,7 +256,6 @@ def do_cli( app_template, no_input, extra_context, - auto_clone=True, ): """ Implementation of the ``cli`` method @@ -274,17 +273,18 @@ def do_cli( image_bool = name and pt_explicit and base_image if location or zip_bool or image_bool: # need to turn app_template into a location before we generate - templates = InitTemplates(no_interactive, auto_clone) + templates = InitTemplates(no_interactive) if package_type == IMAGE and image_bool: base_image, runtime = _get_runtime_from_image(base_image) options = templates.init_options(package_type, runtime, base_image, dependency_manager) - if len(options) == 1: - app_template = options[0].get("appTemplate") - elif len(options) > 1: - raise LambdaImagesTemplateException( - "Multiple lambda image application templates found. " - "This should not be possible, please raise an issue." - ) + if not app_template: + if len(options) == 1: + app_template = options[0].get("appTemplate") + elif len(options) > 1: + raise LambdaImagesTemplateException( + "Multiple lambda image application templates found. " + "Please specify one using the --app-template parameter." 
+ ) if app_template and not location: location = templates.location_from_app_template( diff --git a/samcli/commands/init/init_templates.py b/samcli/commands/init/init_templates.py index 303a2632af..7b85ed3d26 100644 --- a/samcli/commands/init/init_templates.py +++ b/samcli/commands/init/init_templates.py @@ -4,12 +4,8 @@ import itertools import json -import os import logging -import platform -import shutil -import subprocess - +import os from pathlib import Path from typing import Dict @@ -17,12 +13,13 @@ from samcli.cli.main import global_cfg from samcli.commands.exceptions import UserException, AppTemplateUpdateException -from samcli.lib.utils import osutils -from samcli.lib.utils.osutils import rmtree_callback -from samcli.local.common.runtime_template import RUNTIME_DEP_TEMPLATE_MAPPING, get_local_lambda_images_location +from samcli.lib.utils.git_repo import GitRepo, CloneRepoException, CloneRepoUnstableStateException from samcli.lib.utils.packagetype import IMAGE +from samcli.local.common.runtime_template import RUNTIME_DEP_TEMPLATE_MAPPING, get_local_lambda_images_location LOG = logging.getLogger(__name__) +APP_TEMPLATES_REPO_URL = "https://github.com/aws/aws-sam-cli-app-templates" +APP_TEMPLATES_REPO_NAME = "aws-sam-cli-app-templates" class InvalidInitTemplateError(UserException): @@ -30,14 +27,9 @@ class InvalidInitTemplateError(UserException): class InitTemplates: - def __init__(self, no_interactive=False, auto_clone=True): - self._repo_url = "https://github.com/aws/aws-sam-cli-app-templates" - self._repo_name = "aws-sam-cli-app-templates" - self._temp_repo_name = "TEMP-aws-sam-cli-app-templates" - self.repo_path = None - self.clone_attempted = False + def __init__(self, no_interactive=False): self._no_interactive = no_interactive - self._auto_clone = auto_clone + self._git_repo: GitRepo = GitRepo(url=APP_TEMPLATES_REPO_URL) def prompt_for_location(self, package_type, runtime, base_image, dependency_manager): """ @@ -89,7 +81,7 @@ def prompt_for_location(self, package_type, runtime, base_image, dependency_mana if template_md.get("init_location") is not None: return (template_md["init_location"], template_md["appTemplate"]) if template_md.get("directory") is not None: - return (os.path.join(self.repo_path, template_md["directory"]), template_md["appTemplate"]) + return os.path.join(self._git_repo.local_path, template_md["directory"]), template_md["appTemplate"] raise InvalidInitTemplateError("Invalid template. This should not be possible, please raise an issue.") def location_from_app_template(self, package_type, runtime, base_image, dependency_manager, app_template): @@ -99,7 +91,7 @@ def location_from_app_template(self, package_type, runtime, base_image, dependen if template.get("init_location") is not None: return template["init_location"] if template.get("directory") is not None: - return os.path.join(self.repo_path, template["directory"]) + return os.path.normpath(os.path.join(self._git_repo.local_path, template["directory"])) raise InvalidInitTemplateError("Invalid template. This should not be possible, please raise an issue.") except StopIteration as ex: msg = "Can't find application template " + app_template + " - check valid values in interactive init." 
@@ -112,14 +104,23 @@ def _check_app_template(entry: Dict, app_template: str) -> bool:
         return bool(entry["appTemplate"] == app_template)

     def init_options(self, package_type, runtime, base_image, dependency_manager):
-        if not self.clone_attempted:
-            self._clone_repo()
-        if self.repo_path is None:
+        if not self._git_repo.clone_attempted:
+            shared_dir: Path = global_cfg.config_dir
+            try:
+                self._git_repo.clone(clone_dir=shared_dir, clone_name=APP_TEMPLATES_REPO_NAME, replace_existing=True)
+            except CloneRepoUnstableStateException as ex:
+                raise AppTemplateUpdateException(str(ex)) from ex
+            except (OSError, CloneRepoException):
+                # If the clone fails, try using an old clone from a previous run if one already exists
+                expected_previous_clone_local_path: Path = shared_dir.joinpath(APP_TEMPLATES_REPO_NAME)
+                if expected_previous_clone_local_path.exists():
+                    self._git_repo.local_path = expected_previous_clone_local_path
+        if self._git_repo.local_path is None:
             return self._init_options_from_bundle(package_type, runtime, dependency_manager)
         return self._init_options_from_manifest(package_type, runtime, base_image, dependency_manager)

     def _init_options_from_manifest(self, package_type, runtime, base_image, dependency_manager):
-        manifest_path = os.path.join(self.repo_path, "manifest.json")
+        manifest_path = os.path.join(self._git_repo.local_path, "manifest.json")
         with open(str(manifest_path)) as fp:
             body = fp.read()
             manifest_body = json.loads(body)
@@ -154,109 +155,6 @@ def _init_options_from_bundle(package_type, runtime, dependency_manager):
         )
         raise InvalidInitTemplateError(msg)

-    @staticmethod
-    def _shared_dir_check(shared_dir: Path) -> bool:
-        try:
-            shared_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
-            return True
-        except OSError as ex:
-            LOG.warning("WARN: Unable to create shared directory.", exc_info=ex)
-            return False
-
-    def _clone_repo(self):
-        if not self._auto_clone:
-            return  # Unit test escape hatch
-        # check if we have templates stored already
-        shared_dir = global_cfg.config_dir
-        if not self._shared_dir_check(shared_dir):
-            # Nothing we can do if we can't access the shared config directory, use bundled.
-            return
-        expected_path = os.path.normpath(os.path.join(shared_dir, self._repo_name))
-        if self._template_directory_exists(expected_path):
-            self._overwrite_existing_templates(expected_path)
-        else:
-            # simply create the app templates repo
-            self._clone_new_app_templates(shared_dir, expected_path)
-        self.clone_attempted = True
-
-    def _overwrite_existing_templates(self, expected_path: str):
-        self.repo_path = expected_path
-        # workflow to clone a copy to a new directory and overwrite
-        with osutils.mkdir_temp(ignore_errors=True) as tempdir:
-            try:
-                expected_temp_path = os.path.normpath(os.path.join(tempdir, self._repo_name))
-                LOG.info("\nCloning app templates from %s", self._repo_url)
-                subprocess.check_output(
-                    [self._git_executable(), "clone", self._repo_url, self._repo_name],
-                    cwd=tempdir,
-                    stderr=subprocess.STDOUT,
-                )
-                # Now we need to delete the old repo and move this one.
- self._replace_app_templates(expected_temp_path, expected_path) - self.repo_path = expected_path - except OSError as ex: - LOG.warning("WARN: Could not clone app template repo.", exc_info=ex) - except subprocess.CalledProcessError as clone_error: - output = clone_error.output.decode("utf-8") - if "not found" in output.lower(): - click.echo("WARN: Could not clone app template repo.") - - @staticmethod - def _replace_app_templates(temp_path: str, dest_path: str) -> None: - try: - LOG.debug("Removing old templates from %s", dest_path) - shutil.rmtree(dest_path, onerror=rmtree_callback) - LOG.debug("Copying templates from %s to %s", temp_path, dest_path) - shutil.copytree(temp_path, dest_path, ignore=shutil.ignore_patterns("*.git")) - except (OSError, shutil.Error) as ex: - # UNSTABLE STATE - # it's difficult to see how this scenario could happen except weird permissions, user will need to debug - raise AppTemplateUpdateException( - "Unstable state when updating app templates. " - "Check that you have permissions to create/delete files in the AWS SAM shared directory " - "or file an issue at https://github.com/awslabs/aws-sam-cli/issues" - ) from ex - - def _clone_new_app_templates(self, shared_dir, expected_path): - with osutils.mkdir_temp(ignore_errors=True) as tempdir: - expected_temp_path = os.path.normpath(os.path.join(tempdir, self._repo_name)) - try: - LOG.info("\nCloning app templates from %s", self._repo_url) - subprocess.check_output( - [self._git_executable(), "clone", self._repo_url], - cwd=tempdir, - stderr=subprocess.STDOUT, - ) - shutil.copytree(expected_temp_path, expected_path, ignore=shutil.ignore_patterns("*.git")) - self.repo_path = expected_path - except OSError as ex: - LOG.warning("WARN: Can't clone app repo, git executable not found", exc_info=ex) - except subprocess.CalledProcessError as clone_error: - output = clone_error.output.decode("utf-8") - if "not found" in output.lower(): - click.echo("WARN: Could not clone app template repo.") - - @staticmethod - def _template_directory_exists(expected_path: str) -> bool: - path = Path(expected_path) - return path.exists() - - @staticmethod - def _git_executable() -> str: - execname = "git" - if platform.system().lower() == "windows": - options = [execname, "{}.cmd".format(execname), "{}.exe".format(execname), "{}.bat".format(execname)] - else: - options = [execname] - for name in options: - try: - subprocess.Popen([name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - # No exception. Let's pick this - return name - except OSError as ex: - LOG.debug("Unable to find executable %s", name, exc_info=ex) - raise OSError("Cannot find git, was looking at executables: {}".format(options)) - def is_dynamic_schemas_template(self, package_type, app_template, runtime, base_image, dependency_manager): """ Check if provided template is dynamic template e.g: AWS Schemas template. 
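The hunks above replace roughly a hundred lines of hand-rolled clone, copy, and fallback logic (`subprocess`, `shutil`, and the Windows git-executable probing) with the shared `samcli.lib.utils.git_repo.GitRepo` helper. A condensed sketch of the new call pattern, lifted from `init_options` above; the `GitRepo` calls are exactly those used in this diff, while the surrounding script scaffolding is illustrative only:

```python
from pathlib import Path

from samcli.cli.main import global_cfg
from samcli.lib.utils.git_repo import GitRepo, CloneRepoException, CloneRepoUnstableStateException

APP_TEMPLATES_REPO_URL = "https://github.com/aws/aws-sam-cli-app-templates"
APP_TEMPLATES_REPO_NAME = "aws-sam-cli-app-templates"

git_repo = GitRepo(url=APP_TEMPLATES_REPO_URL)
shared_dir: Path = global_cfg.config_dir  # the shared SAM CLI config directory

try:
    # Clone (or re-clone) the app templates into the shared config directory.
    git_repo.clone(clone_dir=shared_dir, clone_name=APP_TEMPLATES_REPO_NAME, replace_existing=True)
except CloneRepoUnstableStateException:
    raise  # init_options surfaces this to the user as AppTemplateUpdateException
except (OSError, CloneRepoException):
    # Offline or git missing: fall back to a clone left over from a previous run.
    previous_clone = shared_dir.joinpath(APP_TEMPLATES_REPO_NAME)
    if previous_clone.exists():
        git_repo.local_path = previous_clone

if git_repo.local_path is not None:
    print("templates available at", git_repo.local_path)
else:
    print("no clone available; falling back to the bundled templates")
```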
diff --git a/samcli/commands/logs/command.py b/samcli/commands/logs/command.py
index 03723c08bc..7042970a3a 100644
--- a/samcli/commands/logs/command.py
+++ b/samcli/commands/logs/command.py
@@ -111,24 +111,13 @@ def do_cli(function_name, stack_name, filter_pattern, tailing, start_time, end_t
         filter_pattern=filter_pattern,
         start_time=start_time,
         end_time=end_time,
-        # output_file is not yet supported by CLI
-        output_file=None,
     ) as context:
         if tailing:
-            events_iterable = context.fetcher.tail(
-                context.log_group_name, filter_pattern=context.filter_pattern, start=context.start_time
-            )
+            context.fetcher.tail(start_time=context.start_time, filter_pattern=context.filter_pattern)
         else:
-            events_iterable = context.fetcher.fetch(
-                context.log_group_name,
+            context.fetcher.load_time_period(
+                start_time=context.start_time,
+                end_time=context.end_time,
                 filter_pattern=context.filter_pattern,
-                start=context.start_time,
-                end=context.end_time,
             )
-
-        formatted_events = context.formatter.do_format(events_iterable)
-
-        for event in formatted_events:
-            # New line is not necessary. It is already in the log events sent by CloudWatch
-            click.echo(event, nl=False)
diff --git a/samcli/commands/logs/console_consumers.py b/samcli/commands/logs/console_consumers.py
new file mode 100644
index 0000000000..2f77e34ab0
--- /dev/null
+++ b/samcli/commands/logs/console_consumers.py
@@ -0,0 +1,18 @@
+"""
+Consumers that print events to the console
+"""
+
+import click
+
+from samcli.lib.observability.cw_logs.cw_log_event import CWLogEvent
+from samcli.lib.observability.observability_info_puller import ObservabilityEventConsumer
+
+
+class CWConsoleEventConsumer(ObservabilityEventConsumer[CWLogEvent]):
+    """
+    Consumer implementation that consumes the given event by printing it to the console
+    """
+
+    # pylint: disable=R0201
+    def consume(self, event: CWLogEvent):
+        click.echo(event.message, nl=False)
diff --git a/samcli/commands/logs/logs_context.py b/samcli/commands/logs/logs_context.py
index 668cffb66d..5504895a70 100644
--- a/samcli/commands/logs/logs_context.py
+++ b/samcli/commands/logs/logs_context.py
@@ -3,13 +3,21 @@
 """

 import logging
+
 import boto3
 import botocore

 from samcli.commands.exceptions import UserException
-from samcli.lib.logs.fetcher import LogsFetcher
-from samcli.lib.logs.formatter import LogsFormatter, LambdaLogMsgFormatters, JSONMsgFormatter, KeywordHighlighter
-from samcli.lib.logs.provider import LogGroupProvider
+from samcli.commands.logs.console_consumers import CWConsoleEventConsumer
+from samcli.lib.observability.cw_logs.cw_log_formatters import (
+    CWColorizeErrorsFormatter,
+    CWJsonFormatter,
+    CWKeywordHighlighterFormatter,
+    CWPrettyPrintFormatter,
+)
+from samcli.lib.observability.cw_logs.cw_log_group_provider import LogGroupProvider
+from samcli.lib.observability.cw_logs.cw_log_puller import CWLogPuller
+from samcli.lib.observability.observability_info_puller import ObservabilityEventConsumerDecorator
 from samcli.lib.utils.colors import Colored
 from samcli.lib.utils.time import to_utc, parse_date
@@ -97,26 +105,20 @@ def __exit__(self, *args):
     @property
     def fetcher(self):
-        return LogsFetcher(self._logs_client)
-
-    @property
-    def formatter(self):
-        """
-        Creates and returns a Formatter capable of nicely formatting Lambda function logs
-
-        Returns
-        -------
-        LogsFormatter
-        """
-        formatter_chain = [
-            LambdaLogMsgFormatters.colorize_errors,
-            # Format JSON "before" highlighting the keywords. Otherwise, JSON will be invalid from all the
-            # ANSI color codes and fail to pretty print
-            JSONMsgFormatter.format_json,
-            KeywordHighlighter(self._filter_pattern).highlight_keywords,
-        ]
-
-        return LogsFormatter(self.colored, formatter_chain)
+        return CWLogPuller(
+            logs_client=self._logs_client,
+            consumer=ObservabilityEventConsumerDecorator(
+                mappers=[
+                    CWColorizeErrorsFormatter(self.colored),
+                    CWJsonFormatter(),
+                    CWKeywordHighlighterFormatter(self.colored, self._filter_pattern),
+                    CWPrettyPrintFormatter(self.colored),
+                ],
+                consumer=CWConsoleEventConsumer(),
+            ),
+            cw_log_group=self.log_group_name,
+            resource_name=self._function_name,
+        )

     @property
     def start_time(self):
diff --git a/samcli/lib/build/build_graph.py b/samcli/lib/build/build_graph.py
index 836a412a86..981e36c459 100644
--- a/samcli/lib/build/build_graph.py
+++ b/samcli/lib/build/build_graph.py
@@ -351,7 +351,7 @@ def __init__(
     def __str__(self) -> str:
         return (
             f"LayerBuildDefinition({self.name}, {self.codeuri}, {self.source_md5}, {self.uuid}, "
-            f"{self.build_method}, {self.compatible_runtimes}, {self.env_vars}, {self.layer.name})"
+            f"{self.build_method}, {self.compatible_runtimes}, {self.env_vars})"
         )

     def __eq__(self, other: Any) -> bool:
diff --git a/samcli/lib/build/build_strategy.py b/samcli/lib/build/build_strategy.py
index 829946b6bf..ecded3a743 100644
--- a/samcli/lib/build/build_strategy.py
+++ b/samcli/lib/build/build_strategy.py
@@ -214,7 +214,7 @@ def build_single_function_definition(self, build_definition: FunctionBuildDefini
             return self._delegate_build_strategy.build_single_function_definition(build_definition)

         code_dir = str(pathlib.Path(self._base_dir, cast(str, build_definition.codeuri)).resolve())
-        source_md5 = dir_checksum(code_dir)
+        source_md5 = dir_checksum(code_dir, ignore_list=[".aws-sam"])
         cache_function_dir = pathlib.Path(self._cache_dir, build_definition.uuid)
         function_build_results = {}
@@ -253,7 +253,7 @@ def build_single_layer_definition(self, layer_definition: LayerBuildDefinition)
         Builds single layer definition with caching
         """
         code_dir = str(pathlib.Path(self._base_dir, cast(str, layer_definition.codeuri)).resolve())
-        source_md5 = dir_checksum(code_dir)
+        source_md5 = dir_checksum(code_dir, ignore_list=[".aws-sam"])
         cache_function_dir = pathlib.Path(self._cache_dir, layer_definition.uuid)
         layer_build_result = {}
diff --git a/samcli/lib/cookiecutter/interactive_flow.py b/samcli/lib/cookiecutter/interactive_flow.py
index 486e8c4d30..996ac89ce3 100644
--- a/samcli/lib/cookiecutter/interactive_flow.py
+++ b/samcli/lib/cookiecutter/interactive_flow.py
@@ -1,5 +1,6 @@
 """A flow of questions to be asked to the user in an interactive way."""
 from typing import Any, Dict, Optional
+
 from .question import Question

@@ -40,7 +41,10 @@ def advance_to_next_question(self, current_answer: Optional[Any] = None) -> Opti
         self._current_question = self._questions.get(next_question_key) if next_question_key else None
         return self._current_question

-    def run(self, context: Dict) -> Dict:
+    def run(
+        self,
+        context: Dict,
+    ) -> Dict:
         """
         starts the flow, collects user's answers to the question and return a new copy of the passed
         context with the answers appended to the copy
@@ -49,14 +53,17 @@
         ----------
         context: Dict
             The cookiecutter context before prompting this flow's questions
+            The context can be used to provide default values, and supports both str keys and List[str] keys.
-        Returns: A new copy of the context with user's answers added to the copy such that each answer is
-        associated to the key of the corresponding question
+        Returns
+        -------
+        A new copy of the context with user's answers added to the copy such that each answer is
+        associated to the key of the corresponding question
         """
         context = context.copy()
         question = self.advance_to_next_question()
         while question:
-            answer = question.ask()
+            answer = question.ask(context=context)
             context[question.key] = answer
             question = self.advance_to_next_question(answer)
         return context
diff --git a/samcli/lib/cookiecutter/interactive_flow_creator.py b/samcli/lib/cookiecutter/interactive_flow_creator.py
index d1a227f1c8..d861174951 100644
--- a/samcli/lib/cookiecutter/interactive_flow_creator.py
+++ b/samcli/lib/cookiecutter/interactive_flow_creator.py
@@ -42,6 +42,19 @@ def create_flow(flow_definition_path: str, extra_context: Optional[Dict] = None)
                     "True": "key of the question to jump to if the user answered 'Yes'",
                     "False": "key of the question to jump to if the user answered 'No'",
                 }
+                "default": "default_answer",
+                # the default value can also be loaded from the cookiecutter context
+                # with a key path whose items can themselves be loaded from cookiecutter as well.
+                "default": {
+                    "keyPath": [
+                        {
+                            "valueOf": "key-of-another-question"
+                        },
+                        "pipeline_user"
+                    ]
+                }
+                # assuming the answer to "key-of-another-question" is "ABC",
+                # the default value will be loaded from the cookiecutter context with key "['ABC', 'pipeline_user']"
             },
             ...
         ]
@@ -63,15 +76,18 @@ def _load_questions(
         questions: Dict[str, Question] = {}
         questions_definition = InteractiveFlowCreator._parse_questions_definition(flow_definition_path, extra_context)

-        for question in questions_definition.get("questions"):
-            q = QuestionFactory.create_question_from_json(question)
-            if not first_question_key:
-                first_question_key = q.key
-            elif previous_question and not previous_question.default_next_question_key:
-                previous_question.set_default_next_question_key(q.key)
-            questions[q.key] = q
-            previous_question = q
-        return questions, first_question_key
+        try:
+            for question in questions_definition.get("questions"):
+                q = QuestionFactory.create_question_from_json(question)
+                if not first_question_key:
+                    first_question_key = q.key
+                elif previous_question and not previous_question.default_next_question_key:
+                    previous_question.set_default_next_question_key(q.key)
+                questions[q.key] = q
+                previous_question = q
+            return questions, first_question_key
+        except (KeyError, ValueError, AttributeError, TypeError) as ex:
+            raise QuestionsFailedParsingException(f"Failed to parse questions: {str(ex)}") from ex

     @staticmethod
     def _parse_questions_definition(file_path, extra_context: Optional[Dict] = None):
diff --git a/samcli/lib/cookiecutter/question.py b/samcli/lib/cookiecutter/question.py
index 71c30d98da..786836a400 100644
--- a/samcli/lib/cookiecutter/question.py
+++ b/samcli/lib/cookiecutter/question.py
@@ -1,6 +1,7 @@
 """ This module represents the questions to ask to the user to fulfill the cookiecutter context. """
 from enum import Enum
-from typing import Any, Dict, List, Optional, Type
+from typing import Any, Dict, List, Optional, Type, Union
+
 import click

@@ -26,8 +27,10 @@ class Question:
         The text to prompt to the user
     _required: bool
         Whether the user must provide an answer for this question or not.
-    _default_answer: Optional[str]
-        A default answer that is suggested to the user
+    _default_answer: Optional[Union[str, Dict]]
+        A default answer that is suggested to the user;
+        it can be provided directly (a string)
+        or resolved from the cookiecutter context (a Dict, in the form of {"keyPath": [...]})
     _next_question_map: Optional[Dict[str, str]]
         A simple branching mechanism, it refers to what is the next question to ask the user if he answered
         a particular answer to this question. this map is in the form of {answer: next-question-key}. this
@@ -48,7 +51,7 @@ def __init__(
         self,
         key: str,
         text: str,
-        default: Optional[str] = None,
+        default: Optional[Union[str, Dict]] = None,
         is_required: Optional[bool] = None,
         next_question_map: Optional[Dict[str, str]] = None,
         default_next_question_key: Optional[str] = None,
@@ -87,8 +90,21 @@ def next_question_map(self):
     def default_next_question_key(self):
         return self._default_next_question_key

-    def ask(self) -> Any:
-        return click.prompt(text=self._text, default=self._default_answer)
+    def ask(self, context: Dict) -> Any:
+        """
+        Prompt the user with this question.
+
+        Parameters
+        ----------
+        context
+            The cookiecutter context dictionary containing previous questions' answers and default values
+
+        Returns
+        -------
+        The user provided answer.
+        """
+        resolved_default_answer = self._resolve_default_answer(context)
+        return click.prompt(text=self._text, default=resolved_default_answer)

     def get_next_question_key(self, answer: Any) -> Optional[str]:
         # _next_question_map is a Dict[str(answer), str(next question key)]
@@ -99,14 +115,83 @@ def get_next_question_key(self, answer: Any) -> Optional[str]:
     def set_default_next_question_key(self, next_question_key):
         self._default_next_question_key = next_question_key

+    def _resolve_key_path(self, key_path: List, context: Dict) -> List[str]:
+        """
+        key_path is a list of str and dict elements.
+        When an element is a dict, in the form of { "valueOf": question_key },
+        it refers to the answer to another question.
+        _resolve_key_path() will replace such a dict with the actual question answer
+
+        Parameters
+        ----------
+        key_path
+            The key_path list containing str and dict
+        context
+            The cookiecutter context containing answers to previously answered questions
+        Returns
+        -------
+        The key_path list containing only str
+        """
+        resolved_key_path: List[str] = []
+        for unresolved_key in key_path:
+            if isinstance(unresolved_key, str):
+                resolved_key_path.append(unresolved_key)
+            elif isinstance(unresolved_key, dict):
+                if "valueOf" not in unresolved_key:
+                    raise KeyError(f'Missing key "valueOf" in question default keyPath element "{unresolved_key}".')
+                query_question_key: str = unresolved_key.get("valueOf", "")
+                if query_question_key not in context:
+                    raise KeyError(
+                        f'Invalid question key "{query_question_key}" referenced '
+                        f"in default answer of question {self.key}"
+                    )
+                resolved_key_path.append(context[query_question_key])
+            else:
+                raise ValueError(f'Invalid value "{unresolved_key}" in key path')
+        return resolved_key_path
+
+    def _resolve_default_answer(self, context: Dict) -> Optional[Any]:
+        """
+        A question may have a default answer provided directly through the "default_answer" value
+        or indirectly from the cookiecutter context using a key path
+
+        Parameters
+        ----------
+        context
+            Cookiecutter context used to resolve default values and answered questions' answers.
+ + Raises + ------ + KeyError + When default value depends on the answer to a non-existent question + ValueError + The default value is malformed + + Returns + ------- + Optional default answer, it might be resolved from cookiecutter context using specified key path. + + """ + if isinstance(self._default_answer, dict): + # load value using key path from cookiecutter + if "keyPath" not in self._default_answer: + raise KeyError(f'Missing key "keyPath" in question default "{self._default_answer}".') + unresolved_key_path = self._default_answer.get("keyPath", []) + if not isinstance(unresolved_key_path, list): + raise ValueError(f'Invalid default answer "{self._default_answer}" for question {self.key}') + + return context.get(str(self._resolve_key_path(unresolved_key_path, context))) + + return self._default_answer + class Info(Question): - def ask(self) -> None: + def ask(self, context: Dict) -> None: return click.echo(message=self._text) class Confirm(Question): - def ask(self) -> bool: + def ask(self, context: Dict) -> bool: return click.confirm(text=self._text) @@ -126,7 +211,8 @@ def __init__( self._options = options super().__init__(key, text, default, is_required, next_question_map, default_next_question_key) - def ask(self) -> str: + def ask(self, context: Dict) -> str: + resolved_default_answer = self._resolve_default_answer(context) click.echo(self._text) for index, option in enumerate(self._options): click.echo(f"\t{index + 1} - {option}") @@ -134,7 +220,7 @@ def ask(self) -> str: choices = list(map(str, options_indexes)) choice = click.prompt( text="Choice", - default=self._default_answer, + default=resolved_default_answer, show_choices=False, type=click.Choice(choices), ) @@ -145,7 +231,6 @@ def _get_options_indexes(self, base: int = 0) -> List[int]: class QuestionFactory: - question_classes: Dict[QuestionKind, Type[Question]] = { QuestionKind.info: Info, QuestionKind.choice: Choice, diff --git a/samcli/lib/logs/event.py b/samcli/lib/logs/event.py deleted file mode 100644 index 0c05232d33..0000000000 --- a/samcli/lib/logs/event.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -Represents CloudWatch Log Event -""" - -import logging - -from samcli.lib.utils.time import timestamp_to_iso - -LOG = logging.getLogger(__name__) - - -class LogEvent: - """ - Data object representing a CloudWatch Log Event - """ - - log_group_name = None - log_stream_name = None - timestamp = None - message = None - - def __init__(self, log_group_name, event_dict): - """ - Creates instance of the class - - Parameters - ---------- - log_group_name : str - The log group name - event_dict : dict - Dict of log event data returned by CloudWatch Logs API. - https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_FilteredLogEvent.html - """ - - self.log_group_name = log_group_name - - if not event_dict: - # If event is empty, just use default values for properties. We don't raise an error here because - # this class is a data wrapper to the `events_dict`. It doesn't try to be smart. - return - - self.log_stream_name = event_dict.get("logStreamName") - self.message = event_dict.get("message", "") - - self.timestamp_millis = event_dict.get("timestamp") - - # Convert the timestamp from epoch to readable ISO timestamp, easier for formatting. 
- if self.timestamp_millis: - self.timestamp = timestamp_to_iso(int(self.timestamp_millis)) - - def __eq__(self, other): - - if not isinstance(other, LogEvent): - return False - - return ( - self.log_group_name == other.log_group_name - and self.log_stream_name == other.log_stream_name - and self.timestamp == other.timestamp - and self.message == other.message - ) - - def __repr__(self): # pragma: no cover - # Used to print pretty diff when testing - return str( - { - "log_group_name": self.log_group_name, - "log_stream_name": self.log_stream_name, - "message": self.message, - "timestamp": self.timestamp, - } - ) diff --git a/samcli/lib/logs/fetcher.py b/samcli/lib/logs/fetcher.py deleted file mode 100644 index c6709fe28e..0000000000 --- a/samcli/lib/logs/fetcher.py +++ /dev/null @@ -1,145 +0,0 @@ -""" -Filters & fetches logs from CloudWatch Logs -""" - -import time -import logging - -from samcli.lib.utils.time import to_timestamp, to_datetime -from .event import LogEvent - - -LOG = logging.getLogger(__name__) - - -class LogsFetcher: - """ - Fetch logs from a CloudWatch Logs group with the ability to scope to a particular time, filter by - a pattern, and in the future possibly multiplex from from multiple streams together. - """ - - def __init__(self, cw_client=None): - """ - Initialize the fetcher - - Parameters - ---------- - cw_client - CloudWatch Logs Client from AWS SDK - """ - self.cw_client = cw_client - - def fetch(self, log_group_name, start=None, end=None, filter_pattern=None): - """ - Fetch logs from all streams under the given CloudWatch Log Group and yields in the output. Optionally, caller - can filter the logs using a pattern or a start/end time. - - Parameters - ---------- - log_group_name : string - Name of CloudWatch Logs Group to query. - start : datetime.datetime - Optional start time for logs. - end : datetime.datetime - Optional end time for logs. - filter_pattern : str - Expression to filter the logs by. This is passed directly to CloudWatch, so any expression supported by - CloudWatch Logs API is supported here. - - Yields - ------ - - samcli.lib.logs.event.LogEvent - Object containing the information from each log event returned by CloudWatch Logs - """ - - kwargs = {"logGroupName": log_group_name, "interleaved": True} - - if start: - kwargs["startTime"] = to_timestamp(start) - - if end: - kwargs["endTime"] = to_timestamp(end) - - if filter_pattern: - kwargs["filterPattern"] = filter_pattern - - while True: - LOG.debug("Fetching logs from CloudWatch with parameters %s", kwargs) - result = self.cw_client.filter_log_events(**kwargs) - - # Several events will be returned. Yield one at a time - for event in result.get("events", []): - yield LogEvent(log_group_name, event) - - # Keep iterating until there are no more logs left to query. - next_token = result.get("nextToken", None) - kwargs["nextToken"] = next_token - if not next_token: - break - - def tail(self, log_group_name, start=None, filter_pattern=None, max_retries=1000, poll_interval=0.3): - """ - ** This is a long blocking call ** - - Fetches logs from CloudWatch logs similar to the ``fetch`` method, but instead of stopping after all logs have - been fetched, this method continues to poll CloudWatch for new logs. So this essentially simulates the - ``tail -f`` bash command. - - If no logs are available, then it keep polling for ``timeout`` number of seconds before exiting. This method - polls CloudWatch at around ~3 Calls Per Second to stay below the 5TPS limit. 
- - Parameters - ---------- - log_group_name : str - Name of CloudWatch Logs Group to query. - start : datetime.datetime - Optional start time for logs. Defaults to '5m ago' - filter_pattern : str - Expression to filter the logs by. This is passed directly to CloudWatch, so any expression supported by - CloudWatch Logs API is supported here. - max_retries : int - When logs are not available, this value determines the number of times to retry fetching logs before giving - up. This counter is reset every time new logs are available. - poll_interval : float - Number of fractional seconds wait before polling again. Defaults to 300milliseconds. - If no new logs available, this method will stop polling after ``max_retries * poll_interval`` seconds - - Yields - ------ - samcli.lib.logs.event.LogEvent - Object containing the information from each log event returned by CloudWatch Logs - """ - - # On every poll, startTime of the API call is the timestamp of last record observed - latest_event_time = 0 # Start of epoch - if start: - latest_event_time = to_timestamp(start) - - counter = max_retries - while counter > 0: - - LOG.debug("Tailing logs from %s starting at %s", log_group_name, str(latest_event_time)) - - has_data = False - counter -= 1 - events_itr = self.fetch(log_group_name, start=to_datetime(latest_event_time), filter_pattern=filter_pattern) - - # Find the timestamp of the most recent log event. - for event in events_itr: - has_data = True - - if event.timestamp_millis > latest_event_time: - latest_event_time = event.timestamp_millis - - # Yield the event back so it behaves similar to ``fetch`` - yield event - - # This poll fetched logs. Reset the retry counter and set the timestamp for next poll - if has_data: - counter = max_retries - latest_event_time += 1 # one extra millisecond to fetch next log event - - # We already fetched logs once. Sleep for some time before querying again. - # This also helps us scoot under the TPS limit for CloudWatch API call. - time.sleep(poll_interval) diff --git a/samcli/lib/logs/formatter.py b/samcli/lib/logs/formatter.py deleted file mode 100644 index 6e21619f36..0000000000 --- a/samcli/lib/logs/formatter.py +++ /dev/null @@ -1,181 +0,0 @@ -""" -Format log events produced by CloudWatch Logs -""" - -import json -import functools - - -class LogsFormatter: - """ - Formats log messages returned by CloudWatch Logs service. - """ - - def __init__(self, colored, formatter_chain=None): - # the docstring contains an example function which contains another docstring, - # pylint is confused so disable it for this method. - # pylint: disable=missing-param-doc,differing-param-doc,differing-type-doc,redundant-returns-doc - """ - - ``formatter_chain`` is a list of methods that can format an event. Each method must take an - ``samcli.lib.logs.event.LogEvent`` object as input and return the same object back. This allows us to easily - chain formatter methods one after another. This class will apply all the formatters from this list on each - log event. - - After running the formatter chain, this class will convert the event object to string by appending - the timestamp to message. To skip all custom formatting and simply convert event to string, you can leave - the ``formatter_chain`` list empty. - - Formatter Method - ================ - Formatter method needs to accept two arguments at a minimum: ``event`` and ``colored``. It can make - modifications to the contents of ``event`` and must return the same object. - - Example: - .. 
code-block:: python - - def my_formatter(event, colored): - \""" - Example of a custom log formatter - - Parameters - ---------- - event : samcli.lib.logs.event.LogEvent - Log event to format - - colored : samcli.lib.utils.colors.Colored - Instance of ``Colored`` object to add colors to the message - - Returns - ------- - samcli.lib.logs.event.LogEvent - Object representing the log event that has been formatted. It could be the same event object passed - via input. - \""" - - # Do your formatting - - return event - - Parameters - ---------- - colored : samcli.lib.utils.colors.Colored - Used to add color to the string when pretty printing. Colors are useful only when pretty printing on a - Terminal. To turn off coloring, set the appropriate property when instantiating the - ``samcli.lib.utils.colors.Colored`` class. - - formatter_chain : List[str] - list of formatter methods - """ - - self.colored = colored - self.formatter_chain = formatter_chain or [] - - # At end of the chain, pretty print the Event object as string. - self.formatter_chain.append(LogsFormatter._pretty_print_event) - - def do_format(self, event_iterable): - """ - Formats the given CloudWatch Logs Event dictionary as necessary and returns an iterable that will - return the formatted string. This can be used to parse and format the events based on context - ie. In Lambda Function logs, a formatter may wish to color the "ERROR" keywords red, - or highlight a filter keyword separately etc. - - This method takes an iterable as input and returns an iterable. It does not immediately format the event. - Instead, it sets up the formatter chain appropriately and returns the iterable. Actual formatting happens - only when the iterable is used by the caller. - - Parameters - ---------- - event_iterable : iterable of samcli.lib.logs.event.LogEvent - Iterable that returns an object containing information about each log event. - - Returns - ------- - iterable of string - Iterable that returns a formatted event as a string. - """ - - for operation in self.formatter_chain: - - # Make sure the operation has access to certain basic objects like colored - partial_op = functools.partial(operation, colored=self.colored) - event_iterable = map(partial_op, event_iterable) - - return event_iterable - - @staticmethod - def _pretty_print_event(event, colored): - """ - Basic formatter to convert an event object to string - """ - event.timestamp = colored.yellow(event.timestamp) - event.log_stream_name = colored.cyan(event.log_stream_name) - - return " ".join([event.log_stream_name, event.timestamp, event.message]) - - -class LambdaLogMsgFormatters: - """ - Format logs printed by AWS Lambda functions. - - This class is a collection of static methods that can be used within a formatter chain. 
- """ - - @staticmethod - def colorize_errors(event, colored): - """ - Highlights some commonly known Lambda error cases in red: - - Nodejs process crashes - - Lambda function timeouts - """ - - nodejs_crash_msg = "Process exited before completing request" - timeout_msg = "Task timed out" - - if nodejs_crash_msg in event.message or timeout_msg in event.message: - event.message = colored.red(event.message) - - return event - - -class KeywordHighlighter: - """ - Highlight certain keywords in the log line - """ - - def __init__(self, keyword=None): - self.keyword = keyword - - def highlight_keywords(self, event, colored): - """ - Highlight the keyword in the log statement by drawing an underline - """ - if self.keyword: - highlight = colored.underline(self.keyword) - event.message = event.message.replace(self.keyword, highlight) - - return event - - -class JSONMsgFormatter: - """ - Pretty print JSONs within a message - """ - - @staticmethod - def format_json(event, colored): - """ - If the event message is a JSON string, then pretty print the JSON with 2 indents and sort the keys. This makes - it very easy to visually parse and search JSON data - """ - - try: - if event.message.startswith("{"): - msg_dict = json.loads(event.message) - event.message = json.dumps(msg_dict, indent=2) - except Exception: - # Skip if the event message was not JSON - pass - - return event diff --git a/samcli/lib/logs/__init__.py b/samcli/lib/observability/__init__.py similarity index 100% rename from samcli/lib/logs/__init__.py rename to samcli/lib/observability/__init__.py diff --git a/tests/unit/lib/logs/__init__.py b/samcli/lib/observability/cw_logs/__init__.py similarity index 100% rename from tests/unit/lib/logs/__init__.py rename to samcli/lib/observability/cw_logs/__init__.py diff --git a/samcli/lib/observability/cw_logs/cw_log_event.py b/samcli/lib/observability/cw_logs/cw_log_event.py new file mode 100644 index 0000000000..49b9a4e889 --- /dev/null +++ b/samcli/lib/observability/cw_logs/cw_log_event.py @@ -0,0 +1,40 @@ +""" +CloudWatch log event type +""" +from typing import Optional + +from samcli.lib.observability.observability_info_puller import ObservabilityEvent + + +class CWLogEvent(ObservabilityEvent[dict]): + """ + An event class which represents a Cloud Watch log + """ + + def __init__(self, cw_log_group: str, event: dict, resource_name: Optional[str] = None): + """ + Parameters + ---------- + cw_log_group : str + Name of the CloudWatch log group + event : dict + Event dictionary of the CloudWatch log event + resource_name : Optional[str] + Resource name that is related to this CloudWatch log event + """ + self.cw_log_group = cw_log_group + self.message: str = event.get("message", "") + self.log_stream_name: str = event.get("logStreamName", "") + timestamp: int = event.get("timestamp", 0) + super().__init__(event, timestamp, resource_name) + + def __eq__(self, other): + if not isinstance(other, CWLogEvent): + return False + + return ( + self.cw_log_group == other.cw_log_group + and self.log_stream_name == other.log_stream_name + and self.timestamp == other.timestamp + and self.message == other.message + ) diff --git a/samcli/lib/observability/cw_logs/cw_log_formatters.py b/samcli/lib/observability/cw_logs/cw_log_formatters.py new file mode 100644 index 0000000000..f0d35a18a6 --- /dev/null +++ b/samcli/lib/observability/cw_logs/cw_log_formatters.py @@ -0,0 +1,94 @@ +""" +Contains all mappers (formatters) for CloudWatch logs +""" +import json +import logging +from json import JSONDecodeError + +from 
samcli.lib.observability.cw_logs.cw_log_event import CWLogEvent +from samcli.lib.observability.observability_info_puller import ObservabilityEventMapper +from samcli.lib.utils.colors import Colored +from samcli.lib.utils.time import timestamp_to_iso + +LOG = logging.getLogger(__name__) + + +class CWKeywordHighlighterFormatter(ObservabilityEventMapper[CWLogEvent]): + """ + Mapper implementation which will highlight given keywords in CloudWatch logs + """ + + def __init__(self, colored: Colored, keyword=None): + """ + Parameters + ---------- + colored : Colored + Colored class that will be used to highlight the keywords in log event + keyword : str + Keyword that will be highlighted + """ + self._keyword = keyword + self._colored = colored + + def map(self, event: CWLogEvent) -> CWLogEvent: + if self._keyword: + highlight = self._colored.underline(self._keyword) + event.message = event.message.replace(self._keyword, highlight) + + return event + + +class CWColorizeErrorsFormatter(ObservabilityEventMapper[CWLogEvent]): + """ + Mapper implementation which will colorize some pre-defined error messages + """ + + # couple of pre-defined error messages for lambda functions which will be colorized when getting the logs + NODEJS_CRASH_MESSAGE = "Process exited before completing request" + TIMEOUT_MSG = "Task timed out" + + def __init__(self, colored: Colored): + self._colored = colored + + def map(self, event: CWLogEvent) -> CWLogEvent: + if ( + CWColorizeErrorsFormatter.NODEJS_CRASH_MESSAGE in event.message + or CWColorizeErrorsFormatter.TIMEOUT_MSG in event.message + ): + event.message = self._colored.red(event.message) + return event + + +class CWJsonFormatter(ObservabilityEventMapper[CWLogEvent]): + """ + Mapper implementation which will auto indent the input if the input is a JSON object + """ + + # pylint: disable=R0201 + # Pylint recommends converting this method to a static one but we want it to stay as it is + # since formatters/mappers are combined in an array of ObservabilityEventMapper class + def map(self, event: CWLogEvent) -> CWLogEvent: + try: + if event.message.startswith("{"): + msg_dict = json.loads(event.message) + event.message = json.dumps(msg_dict, indent=2) + except JSONDecodeError as err: + LOG.debug("Can't decode string (%s) as JSON. 
Error (%s)", event.message, err) + + return event + + +class CWPrettyPrintFormatter(ObservabilityEventMapper[CWLogEvent]): + """ + Mapper implementation which will format given CloudWatch log event into string with coloring + log stream name and timestamp + """ + + def __init__(self, colored: Colored): + self._colored = colored + + def map(self, event: CWLogEvent) -> CWLogEvent: + timestamp = self._colored.yellow(timestamp_to_iso(int(event.timestamp))) + log_stream_name = self._colored.cyan(event.log_stream_name) + event.message = f"{log_stream_name} {timestamp} {event.message}" + return event diff --git a/samcli/lib/logs/provider.py b/samcli/lib/observability/cw_logs/cw_log_group_provider.py similarity index 100% rename from samcli/lib/logs/provider.py rename to samcli/lib/observability/cw_logs/cw_log_group_provider.py diff --git a/samcli/lib/observability/cw_logs/cw_log_puller.py b/samcli/lib/observability/cw_logs/cw_log_puller.py new file mode 100644 index 0000000000..e7d8b7fb10 --- /dev/null +++ b/samcli/lib/observability/cw_logs/cw_log_puller.py @@ -0,0 +1,111 @@ +""" +CloudWatch log event puller implementation +""" +import logging +import time +from datetime import datetime +from typing import Optional, Any + +from samcli.lib.observability.cw_logs.cw_log_event import CWLogEvent +from samcli.lib.observability.observability_info_puller import ObservabilityPuller, ObservabilityEventConsumer +from samcli.lib.utils.time import to_timestamp, to_datetime + +LOG = logging.getLogger(__name__) + + +class CWLogPuller(ObservabilityPuller): + """ + Puller implementation that can pull events from CloudWatch log group + """ + + def __init__( + self, + logs_client: Any, + consumer: ObservabilityEventConsumer, + cw_log_group: str, + resource_name: Optional[str] = None, + max_retries: int = 1000, + poll_interval: int = 1, + ): + """ + Parameters + ---------- + logs_client: Any + boto3 logs client instance + consumer : ObservabilityEventConsumer + Consumer instance that will process pulled events + cw_log_group : str + CloudWatch log group name + resource_name : Optional[str] + Optional parameter to assign a resource name for each event. + max_retries: int + Optional parameter to set maximum retries when tailing. Default value is 1000 + poll_interval: int + Optional parameter to define sleep interval between pulling new log events when tailing. Default value is 1 + """ + self.logs_client = logs_client + self.consumer = consumer + self.cw_log_group = cw_log_group + self.resource_name = resource_name + self._max_retries = max_retries + self._poll_interval = poll_interval + self.latest_event_time = 0 + self.had_data = False + + def tail(self, start_time: Optional[datetime] = None, filter_pattern: Optional[str] = None): + if start_time: + self.latest_event_time = to_timestamp(start_time) + + counter = self._max_retries + while counter > 0: + LOG.debug("Tailing logs from %s starting at %s", self.cw_log_group, str(self.latest_event_time)) + + counter -= 1 + self.load_time_period(to_datetime(self.latest_event_time), filter_pattern=filter_pattern) + + # This poll fetched logs. Reset the retry counter and set the timestamp for next poll + if self.had_data: + counter = self._max_retries + self.latest_event_time += 1 # one extra millisecond to fetch next log event + self.had_data = False + + # We already fetched logs once. Sleep for some time before querying again. + # This also helps us scoot under the TPS limit for CloudWatch API call. 
+ time.sleep(self._poll_interval) + + def load_time_period( + self, + start_time: Optional[datetime] = None, + end_time: Optional[datetime] = None, + filter_pattern: Optional[str] = None, + ): + kwargs = {"logGroupName": self.cw_log_group, "interleaved": True} + + if start_time: + kwargs["startTime"] = to_timestamp(start_time) + + if end_time: + kwargs["endTime"] = to_timestamp(end_time) + + if filter_pattern: + kwargs["filterPattern"] = filter_pattern + + while True: + LOG.debug("Fetching logs from CloudWatch with parameters %s", kwargs) + result = self.logs_client.filter_log_events(**kwargs) + + # Several events will be returned. Yield one at a time + for event in result.get("events", []): + self.had_data = True + cw_event = CWLogEvent(self.cw_log_group, event, self.resource_name) + + if cw_event.timestamp > self.latest_event_time: + self.latest_event_time = cw_event.timestamp + + self.consumer.consume(cw_event) + + # Keep iterating until there are no more logs left to query. + next_token = result.get("nextToken", None) + kwargs["nextToken"] = next_token + if not next_token: + break diff --git a/samcli/lib/observability/observability_info_puller.py b/samcli/lib/observability/observability_info_puller.py new file mode 100644 index 0000000000..b6d6f2b906 --- /dev/null +++ b/samcli/lib/observability/observability_info_puller.py @@ -0,0 +1,143 @@ +""" +Interfaces and generic implementations for observability events (like CW logs) +""" +import logging +from abc import ABC, abstractmethod +from datetime import datetime +from typing import List, Optional, Generic, TypeVar, Any + +LOG = logging.getLogger(__name__) + +# Generic type for the internal observability event +InternalEventType = TypeVar("InternalEventType") + + +class ObservabilityEvent(Generic[InternalEventType]): + """ + Generic class that represents an observability event. + This keeps some common fields for filtering or sorting later on + """ + + def __init__(self, event: InternalEventType, timestamp: int, resource_name: Optional[str] = None): + """ + Parameters + ---------- + event : InternalEventType + Actual event object. This can be any type with a generic definition (dict, str, etc.) + timestamp : int + Timestamp of the event + resource_name : Optional[str] + Resource name related to this event. This is optional since not all events are connected to a single resource + """ + self.event = event + self.timestamp = timestamp + self.resource_name = resource_name + + +# Generic type for identifying different ObservabilityEvent +ObservabilityEventType = TypeVar("ObservabilityEventType", bound=ObservabilityEvent) + + +class ObservabilityPuller(ABC): + """ + Interface definition for pulling observability information.
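As a sketch of how the puller, mappers, and consumers introduced here might be wired together: the `PrintConsumer` and log group name below are illustrative only, the mapper and consumer interfaces are the ones defined in this file, and default AWS credentials are assumed for the boto3 client.

```python
# Hypothetical wiring of CWLogPuller with the mapper/consumer interfaces in this file.
from datetime import datetime, timedelta

import boto3

from samcli.lib.observability.cw_logs.cw_log_formatters import (
    CWColorizeErrorsFormatter,
    CWPrettyPrintFormatter,
)
from samcli.lib.observability.cw_logs.cw_log_puller import CWLogPuller
from samcli.lib.observability.observability_info_puller import (
    ObservabilityEventConsumer,
    ObservabilityEventConsumerDecorator,
)
from samcli.lib.utils.colors import Colored


class PrintConsumer(ObservabilityEventConsumer):
    """Minimal consumer that writes each mapped event's message to stdout."""

    def consume(self, event):
        print(event.message)


colored = Colored()
consumer = ObservabilityEventConsumerDecorator(
    mappers=[CWColorizeErrorsFormatter(colored), CWPrettyPrintFormatter(colored)],
    consumer=PrintConsumer(),
)
puller = CWLogPuller(
    logs_client=boto3.client("logs"),
    consumer=consumer,
    cw_log_group="/aws/lambda/HelloWorldFunction",  # hypothetical log group
)

# One-shot fetch of the last five minutes, then a blocking tail for new events.
puller.load_time_period(start_time=datetime.utcnow() - timedelta(minutes=5))
puller.tail(start_time=datetime.utcnow())
```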
+ """ + + @abstractmethod + def tail(self, start_time: Optional[datetime] = None, filter_pattern: Optional[str] = None): + """ + Parameters + ---------- + start_time : Optional[datetime] + Optional parameter to tail information from earlier time + filter_pattern : Optional[str] + Optional parameter to filter events with given string + """ + + @abstractmethod + def load_time_period( + self, + start_time: Optional[datetime] = None, + end_time: Optional[datetime] = None, + filter_pattern: Optional[str] = None, + ): + """ + Parameters + ---------- + start_time : Optional[datetime] + Optional parameter to load events from certain date time + end_time : Optional[datetime] + Optional parameter to load events until certain date time + filter_pattern : Optional[str] + Optional parameter to filter events with given string + """ + + +# pylint: disable=fixme +# fixme add ABC parent class back once we bump the pylint to a version 2.8.2 or higher +class ObservabilityEventMapper(Generic[ObservabilityEventType]): + """ + Interface definition to map/change any event to another object + This could be used by highlighting certain parts or formatting events before logging into console + """ + + @abstractmethod + def map(self, event: ObservabilityEventType) -> Any: + """ + Parameters + ---------- + event : ObservabilityEventType + Event object that will be mapped/converted to another event or any object + + Returns + ------- + Any + Return converted type + """ + + +class ObservabilityEventConsumer(Generic[ObservabilityEventType]): + """ + Consumer interface, which will consume any event. + An example is to output event into console. + """ + + @abstractmethod + def consume(self, event: ObservabilityEventType): + """ + Parameters + ---------- + event : ObservabilityEvent + Event that will be consumed + """ + + +class ObservabilityEventConsumerDecorator(ObservabilityEventConsumer): + """ + A decorator implementation for consumer, which can have mappers and decorated consumer within. 
Rather than the normal implementation, this will process the events through the mappers that have been + provided, and then pass them to the actual consumer + """ + + def __init__(self, mappers: List[ObservabilityEventMapper], consumer: ObservabilityEventConsumer): + """ + Parameters + ---------- + mappers : List[ObservabilityEventMapper] + List of event mappers which will be used to process events before passing them to the consumer + consumer : ObservabilityEventConsumer + Actual consumer which will handle the events after they are processed by the mappers + """ + super().__init__() + self._mappers = mappers + self._consumer = consumer + + def consume(self, event: ObservabilityEvent): + """ + See Also ObservabilityEventConsumerDecorator and ObservabilityEventConsumer + """ + for mapper in self._mappers: + LOG.debug("Calling mapper (%s) for event (%s)", mapper, event) + event = mapper.map(event) + LOG.debug("Calling consumer (%s) for event (%s)", self._consumer, event) + self._consumer.consume(event) diff --git a/samcli/lib/package/artifact_exporter.py b/samcli/lib/package/artifact_exporter.py index 8ef0652f47..c0f2b94576 100644 --- a/samcli/lib/package/artifact_exporter.py +++ b/samcli/lib/package/artifact_exporter.py @@ -36,7 +36,13 @@ ) from samcli.lib.package.s3_uploader import S3Uploader from samcli.lib.package.uploaders import Uploaders -from samcli.lib.package.utils import is_local_folder, make_abs_path, is_s3_url, is_local_file, mktempfile +from samcli.lib.package.utils import ( + is_local_folder, + make_abs_path, + is_local_file, + mktempfile, + is_s3_url, +) from samcli.lib.utils.packagetype import ZIP from samcli.yamlhelper import yaml_parse, yaml_dump @@ -62,12 +68,7 @@ def do_export(self, resource_id, resource_dict, parent_dir): template_path = resource_dict.get(self.PROPERTY_NAME, None) - if ( - template_path is None - or is_s3_url(template_path) - or template_path.startswith(self.uploader.s3.meta.endpoint_url) - or template_path.startswith("https://s3.amazonaws.com/") - ): + if template_path is None or is_s3_url(template_path): # Nothing to do return diff --git a/samcli/lib/package/packageable_resources.py b/samcli/lib/package/packageable_resources.py index edb99074d8..937b451a28 100644 --- a/samcli/lib/package/packageable_resources.py +++ b/samcli/lib/package/packageable_resources.py @@ -20,7 +20,7 @@ copy_to_temp_dir, upload_local_artifacts, upload_local_image_artifacts, - is_s3_url, + is_s3_protocol_url, is_path_value_valid, ) @@ -466,7 +466,7 @@ def include_transform_export_handler(template_dict, uploader, parent_dir): return template_dict include_location = template_dict.get("Parameters", {}).get("Location", None) - if not include_location or not is_path_value_valid(include_location) or is_s3_url(include_location): + if not include_location or not is_path_value_valid(include_location) or is_s3_protocol_url(include_location): # `include_location` is either empty, or not a string, or an S3 URI return template_dict diff --git a/samcli/lib/package/utils.py b/samcli/lib/package/utils.py index 54bb81ba97..6317c35a48 100644 --- a/samcli/lib/package/utils.py +++ b/samcli/lib/package/utils.py @@ -4,6 +4,7 @@ import logging import os import platform +import re import shutil import tempfile import uuid @@ -22,6 +23,29 @@ LOG = logging.getLogger(__name__) +# https://docs.aws.amazon.com/AmazonS3/latest/dev-retired/UsingBucket.html +_REGION_PATTERN = r"[a-zA-Z0-9-]+" +_DOT_AMAZONAWS_COM_PATTERN = r"\.amazonaws\.com" +_S3_URL_REGEXS = [ + # Path-Style (and ipv6 dualstack) + # -
https://s3.Region.amazonaws.com/bucket-name/key name + # - https://s3.amazonaws.com/bucket-name/key name (old, without region) + # - https://s3.dualstack.us-west-2.amazonaws.com/... + re.compile(rf"http(s)?://s3(.dualstack)?(\.{_REGION_PATTERN})?{_DOT_AMAZONAWS_COM_PATTERN}/.+/.+"), + # Virtual Hosted-Style (including two legacies) + # https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html + # - Virtual Hosted-Style: https://bucket-name.s3.Region.amazonaws.com/key name + # - Virtual Hosted-Style (Legacy: a dash between S3 and the Region): https://bucket-name.s3-Region.amazonaws.com/... + # - Virtual Hosted-Style (Legacy Global Endpoint): https://my-bucket.s3.amazonaws.com/... + re.compile(rf"http(s)?://.+\.s3((.|-){_REGION_PATTERN})?{_DOT_AMAZONAWS_COM_PATTERN}/.+"), + # S3 access point: + # - https://AccessPointName-AccountId.s3-accesspoint.region.amazonaws.com + re.compile(rf"http(s)?://.+-\d+\.s3-accesspoint\.{_REGION_PATTERN}{_DOT_AMAZONAWS_COM_PATTERN}/.+/.+"), + # S3 protocol URL: + # - s3://bucket-name/key-name + re.compile(r"s3://.+/.+"), +] + def is_path_value_valid(path): return isinstance(path, str) @@ -33,7 +57,10 @@ def make_abs_path(directory, path): return path -def is_s3_url(url): +def is_s3_protocol_url(url): + """ + Check whether url is a valid path in the form of "s3://..." + """ try: S3Uploader.parse_s3_url(url) return True @@ -41,6 +68,14 @@ return False +def is_s3_url(url: str) -> bool: + """ + Check whether a URL is an S3 access URL + specified at https://docs.aws.amazon.com/AmazonS3/latest/dev-retired/UsingBucket.html + """ + return any(regex.match(url) for regex in _S3_URL_REGEXS) + + def is_local_folder(path): return is_path_value_valid(path) and os.path.isdir(path) @@ -122,7 +157,7 @@ def upload_local_artifacts( # Build the root directory and upload to S3 local_path = parent_dir - if is_s3_url(local_path): + if is_s3_protocol_url(local_path): # A valid CloudFormation template will specify artifacts as S3 URLs. # This check is supporting the case where your resource does not # refer to local artifacts diff --git a/samcli/lib/providers/sam_stack_provider.py b/samcli/lib/providers/sam_stack_provider.py index 25053a1e52..01ef176916 100644 --- a/samcli/lib/providers/sam_stack_provider.py +++ b/samcli/lib/providers/sam_stack_provider.py @@ -163,8 +163,14 @@ def _convert_cfn_stack_resource( resource_properties: Dict, global_parameter_overrides: Optional[Dict] = None, ) -> Optional[Stack]: - template_url = resource_properties.get("TemplateURL", "") + template_url = resource_properties.get("TemplateURL") + if isinstance(template_url, dict): + # This happens when TemplateURL has unresolvable intrinsic functions + # and it usually happens in CDK-generated template files (#2832).
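A few illustrative checks that follow from the regexes and docstrings above (bucket and key names are made up); `is_s3_url` accepts the HTTP(S) access-URL styles as well as `s3://`, while `is_s3_protocol_url` accepts only the `s3://` form:

```python
from samcli.lib.package.utils import is_s3_protocol_url, is_s3_url

# Matched by the path-style, virtual hosted-style, and s3:// patterns respectively:
assert is_s3_url("https://s3.us-west-2.amazonaws.com/my-bucket/my-key")
assert is_s3_url("https://my-bucket.s3.us-west-2.amazonaws.com/my-key")
assert is_s3_url("s3://my-bucket/my-key")

# is_s3_protocol_url only accepts the "s3://..." form:
assert is_s3_protocol_url("s3://my-bucket/my-key")
assert not is_s3_protocol_url("https://my-bucket.s3.us-west-2.amazonaws.com/my-key")
```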
+ raise RemoteStackLocationNotSupported() + + template_url = cast(str, template_url) if SamLocalStackProvider.is_remote_url(template_url): raise RemoteStackLocationNotSupported() if template_url.startswith("file://"): diff --git a/samcli/lib/utils/git_repo.py b/samcli/lib/utils/git_repo.py new file mode 100644 index 0000000000..33e4597726 --- /dev/null +++ b/samcli/lib/utils/git_repo.py @@ -0,0 +1,160 @@ +""" Manage Git repo """ + +import logging +import os +import platform +import shutil +import subprocess +from pathlib import Path +from typing import Optional + +from samcli.lib.utils import osutils +from samcli.lib.utils.osutils import rmtree_callback + +LOG = logging.getLogger(__name__) + + +class CloneRepoException(Exception): + """ + Exception class when clone repo fails. + """ + + +class CloneRepoUnstableStateException(CloneRepoException): + """ + Exception class when clone repo enters an unstable state. + """ + + +class GitRepo: + """ + Class for managing a Git repo; currently it provides clone functionality only + + Attributes + ---------- + url: str + The URL of this Git repository, example "https://github.com/aws/aws-sam-cli" + local_path: Path + The path of the last local clone of this Git repository. Can be used in conjunction with clone_attempted + to avoid unnecessary multiple cloning of the repository. + clone_attempted: bool + Whether an attempt to clone this Git repository has taken place. Can be used in conjunction with local_path + to avoid unnecessary multiple cloning of the repository + + Methods + ------- + clone(self, clone_dir: Path, clone_name, replace_existing=False) -> Path: + Creates a local clone of this Git repository (more details in the method documentation). + """ + + def __init__(self, url: str) -> None: + self.url: str = url + self.local_path: Optional[Path] = None + self.clone_attempted: bool = False + + @staticmethod + def _ensure_clone_directory_exists(clone_dir: Path) -> None: + try: + clone_dir.mkdir(mode=0o700, parents=True, exist_ok=True) + except OSError as ex: + LOG.warning("WARN: Unable to create clone directory.", exc_info=ex) + raise + + @staticmethod + def _git_executable() -> str: + if platform.system().lower() == "windows": + executables = ["git", "git.cmd", "git.exe", "git.bat"] + else: + executables = ["git"] + + for executable in executables: + try: + subprocess.Popen([executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + # No exception. Let's pick this + return executable + except OSError as ex: + LOG.debug("Unable to find executable %s", executable, exc_info=ex) + + raise OSError("Cannot find git, was looking at executables: {}".format(executables)) + + def clone(self, clone_dir: Path, clone_name: str, replace_existing: bool = False) -> Path: + """ + Creates a local clone of this Git repository. + This method differs from the standard Git clone in the following ways: + 1. It accepts the path to clone into as a clone_dir (the parent directory to clone in) and a clone_name (The + name of the local folder) instead of accepting the full path (the join of both) in one parameter + 2. It removes the "*.git" files/directories so the clone is not a GitRepo any more + 3.
It has the option to replace the local folder (the destination) if it already exists + + Parameters + ---------- + clone_dir: Path + The directory to create the local clone inside + clone_name: str + The dirname of the local clone + replace_existing: bool + Whether to replace the current local clone directory if it already exists + + Returns + ------- + The path of the created local clone + + Raises + ------ + OSError: + On file management errors, e.g. when unable to mkdir, copytree, rmtree, etc. + CloneRepoException: + General errors, for example if an error occurred while running `git clone`, + or if the local clone already exists and replace_existing is not set + CloneRepoUnstableStateException: + When reaching an unstable state; for example, with the replace_existing flag set, an unstable state + can happen if the current local clone was removed but the new one failed to copy from the temp location to the destination + """ + + GitRepo._ensure_clone_directory_exists(clone_dir=clone_dir) + # clone to temp then move to the destination(repo_local_path) + with osutils.mkdir_temp(ignore_errors=True) as tempdir: + try: + temp_path = os.path.normpath(os.path.join(tempdir, clone_name)) + git_executable: str = GitRepo._git_executable() + LOG.info("\nCloning from %s", self.url) + subprocess.check_output( + [git_executable, "clone", self.url, clone_name], + cwd=tempdir, + stderr=subprocess.STDOUT, + ) + self.local_path = self._persist_local_repo(temp_path, clone_dir, clone_name, replace_existing) + return self.local_path + except OSError as ex: + LOG.warning("WARN: Could not clone repo %s", self.url, exc_info=ex) + raise + except subprocess.CalledProcessError as clone_error: + output = clone_error.output.decode("utf-8") + if "not found" in output.lower(): + LOG.warning("WARN: Could not clone repo %s", self.url, exc_info=clone_error) + raise CloneRepoException from clone_error + finally: + self.clone_attempted = True + + @staticmethod + def _persist_local_repo(temp_path: str, dest_dir: Path, dest_name: str, replace_existing: bool) -> Path: + dest_path = os.path.normpath(dest_dir.joinpath(dest_name)) + try: + if Path(dest_path).exists(): + if not replace_existing: + raise CloneRepoException(f"Cannot clone to {dest_path}, directory already exists") + LOG.debug("Removing old repo at %s", dest_path) + shutil.rmtree(dest_path, onerror=rmtree_callback) + + LOG.debug("Copying from %s to %s", temp_path, dest_path) + # Todo consider not removing the .git files/directories + shutil.copytree(temp_path, dest_path, ignore=shutil.ignore_patterns("*.git")) + return Path(dest_path) + except (OSError, shutil.Error) as ex: + # UNSTABLE STATE + # it's difficult to see how this scenario could happen except with unusual permissions; the user will need to debug + raise CloneRepoUnstableStateException( + "Unstable state when updating repo.
" + f"Check that you have permissions to create/delete files in {dest_dir} directory " + "or file an issue at https://github.com/aws/aws-sam-cli/issues" + ) from ex diff --git a/samcli/lib/utils/hash.py b/samcli/lib/utils/hash.py index ee080faf51..a9cbae1885 100644 --- a/samcli/lib/utils/hash.py +++ b/samcli/lib/utils/hash.py @@ -3,6 +3,7 @@ """ import os import hashlib +from typing import List, Optional BLOCK_SIZE = 4096 @@ -37,25 +38,32 @@ def file_checksum(file_name: str) -> str: return md5.hexdigest() -def dir_checksum(directory: str, followlinks: bool = True) -> str: +def dir_checksum(directory: str, followlinks: bool = True, ignore_list: Optional[List[str]] = None) -> str: """ Parameters ---------- directory : A directory with an absolute path followlinks: Follow symbolic links through the given directory + ignore_list: The list of file/directory names to ignore in checksum Returns ------- md5 checksum of the directory. """ + ignore_set = set(ignore_list or []) md5_dir = hashlib.md5() files = list() # Walk through given directory and find all directories and files. - for dirpath, _, filenames in os.walk(directory, followlinks=followlinks): + for dirpath, dirnames, filenames in os.walk(directory, followlinks=followlinks): + # > When topdown is True, the caller can modify the dirnames list in-place + # > (perhaps using del or slice assignment) and walk() will only recurse + # > into the subdirectories whose names remain in dirnames + # > https://docs.python.org/library/os.html#os.walk + dirnames[:] = [dirname for dirname in dirnames if dirname not in ignore_set] # Go through every file in the directory and sub-directory. - for filepath in [os.path.join(dirpath, filename) for filename in filenames]: + for filepath in [os.path.join(dirpath, filename) for filename in filenames if filename not in ignore_set]: # Look at filename and contents. # Encode file's checksum to be utf-8 and bytes. 
files.append(filepath) diff --git a/samcli/local/lambdafn/runtime.py b/samcli/local/lambdafn/runtime.py index 495174fe75..163f9670b9 100644 --- a/samcli/local/lambdafn/runtime.py +++ b/samcli/local/lambdafn/runtime.py @@ -1,20 +1,21 @@ """ Classes representing a local Lambda runtime """ - +import copy import os import shutil import tempfile import signal import logging import threading -from typing import Optional +from typing import Optional, Union, Dict from samcli.local.docker.lambda_container import LambdaContainer from samcli.lib.utils.file_observer import LambdaFunctionObserver from samcli.lib.utils.packagetype import ZIP from samcli.lib.telemetry.metric import capture_parameter from .zip import unzip +from ...lib.providers.provider import LayerVersion from ...lib.utils.stream_writer import StreamWriter LOG = logging.getLogger(__name__) @@ -68,6 +69,7 @@ def create(self, function_config, debug_context=None, container_host=None, conta env_vars = function_config.env_vars.resolve() code_dir = self._get_code_dir(function_config.code_abs_path) + layers = [self._unarchived_layer(layer) for layer in function_config.layers] container = LambdaContainer( function_config.runtime, function_config.imageuri, @@ -75,7 +77,7 @@ def create(self, function_config, debug_context=None, container_host=None, conta function_config.packagetype, function_config.imageconfig, code_dir, - function_config.layers, + layers, self._image_builder, memory_mb=function_config.memory, env_vars=env_vars, @@ -250,9 +252,9 @@ def signal_handler(sig, frame): timer.start() return timer - def _get_code_dir(self, code_path): + def _get_code_dir(self, code_path: str) -> str: """ - Method to get a path to a directory where the Lambda function code is available. This directory will + Method to get a path to a directory where the function/layer code is available. This directory will be mounted directly inside the Docker container. This method handles a few different cases for ``code_path``: @@ -274,13 +276,34 @@ def _get_code_dir(self, code_path): """ if code_path and os.path.isfile(code_path) and code_path.endswith(self.SUPPORTED_ARCHIVE_EXTENSIONS): - decompressed_dir = _unzip_file(code_path) + decompressed_dir: str = _unzip_file(code_path) + self._temp_uncompressed_paths_to_be_cleaned += [decompressed_dir] return decompressed_dir LOG.debug("Code %s is not a zip/jar file", code_path) return code_path + def _unarchived_layer(self, layer: Union[str, Dict, LayerVersion]) -> Union[str, Dict, LayerVersion]: + """ + If the layer's content uri points to a supported local archive file, use self._get_code_dir() to + un-archive it so that it can be mounted directly inside the Docker container.
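A hypothetical illustration of the layer un-archiving described above, assuming a `LayerVersion(arn, codeuri)` constructor from `samcli.lib.providers.provider`, an existing zip file at the given path, and an already-constructed runtime instance:

```python
from samcli.lib.providers.provider import LayerVersion

layer = LayerVersion(
    arn="arn:aws:lambda:us-west-2:123456789012:layer:my-layer:1",  # hypothetical ARN
    codeuri="./my-layer.zip",  # assumed to exist on disk
)
unarchived = lambda_runtime._unarchived_layer(layer)  # lambda_runtime assumed to exist

# The original layer still points at the zip; the returned deep copy points at a
# temporary directory that can be mounted directly into the container.
assert layer.codeuri.endswith(".zip")
```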
+ Parameters + ---------- + layer + a str, dict or a LayerVersion object representing a layer + + Returns + ------- + as it is (if no archived file is identified) + or a LayerVersion with ContentUri pointing to an unarchived directory + """ + if isinstance(layer, LayerVersion) and isinstance(layer.codeuri, str): + unarchived_layer = copy.deepcopy(layer) + unarchived_layer.codeuri = self._get_code_dir(layer.codeuri) + return unarchived_layer if unarchived_layer.codeuri != layer.codeuri else layer + + return layer + def _clean_decompressed_paths(self): """ Clean the temporary decompressed code dirs diff --git a/tests/functional/commands/validate/lib/models/all_policy_templates.yaml b/tests/functional/commands/validate/lib/models/all_policy_templates.yaml index 5216a65f2c..c46cfaecb9 100644 --- a/tests/functional/commands/validate/lib/models/all_policy_templates.yaml +++ b/tests/functional/commands/validate/lib/models/all_policy_templates.yaml @@ -168,3 +168,9 @@ Resources: - EventBridgePutEventsPolicy: EventBusName: name + + - AcmGetCertificatePolicy: + CertificateArn: arn + + - Route53ChangeResourceRecordSetsPolicy: + HostedZoneId: test diff --git a/tests/functional/commands/validate/lib/models/api_with_swagger_authorizer_none.yaml b/tests/functional/commands/validate/lib/models/api_with_swagger_authorizer_none.yaml new file mode 100644 index 0000000000..eb0ae32bea --- /dev/null +++ b/tests/functional/commands/validate/lib/models/api_with_swagger_authorizer_none.yaml @@ -0,0 +1,117 @@ +Resources: + MyApiWithCognitoAuth: + Type: "AWS::Serverless::Api" + Properties: + StageName: Prod + Auth: + Authorizers: + MyCognitoAuth: + UserPoolArn: !GetAtt MyUserPool.Arn + DefaultAuthorizer: MyCognitoAuth + + MyApiWithLambdaTokenAuth: + Type: "AWS::Serverless::Api" + Properties: + StageName: Prod + Auth: + Authorizers: + MyLambdaTokenAuth: + FunctionArn: !GetAtt MyAuthFn.Arn + DefaultAuthorizer: MyLambdaTokenAuth + + MyApiWithLambdaRequestAuth: + Type: "AWS::Serverless::Api" + Properties: + StageName: Prod + DefinitionBody: + swagger: 2.0 + info: + version: '1.0' + title: !Ref AWS::StackName + schemes: + - https + paths: + "/lambda-request": + get: + x-amazon-apigateway-integration: + httpMethod: POST + type: aws_proxy + uri: !Sub arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${MyFn.Arn}/invocations + passthroughBehavior: when_no_match + responses: {} + Auth: + Authorizers: + MyLambdaRequestAuth: + FunctionPayloadType: REQUEST + FunctionArn: !GetAtt MyAuthFn.Arn + Identity: + Headers: + - Authorization1 + DefaultAuthorizer: MyLambdaRequestAuth + + MyAuthFn: + Type: AWS::Serverless::Function + Properties: + InlineCode: | + exports.handler = async (event) => { + return { + statusCode: 200, + body: JSON.stringify(event), + headers: {} + } + } + Handler: index.handler + Runtime: nodejs8.10 + + MyFn: + Type: AWS::Serverless::Function + Properties: + InlineCode: | + exports.handler = async (event) => { + return { + statusCode: 200, + body: JSON.stringify(event), + headers: {} + } + } + Handler: index.handler + Runtime: nodejs8.10 + Events: + Cognito: + Type: Api + Properties: + RestApiId: !Ref MyApiWithCognitoAuth + Method: get + Auth: + Authorizer: NONE + Path: /cognito + LambdaToken: + Type: Api + Properties: + RestApiId: !Ref MyApiWithLambdaTokenAuth + Method: get + Auth: + Authorizer: NONE + Path: /lambda-token + LambdaRequest: + Type: Api + Properties: + RestApiId: !Ref MyApiWithLambdaRequestAuth + Auth: + Authorizer: NONE + Method: get + Path: /lambda-request + + MyUserPool: + Type: 
AWS::Cognito::UserPool + Properties: + UserPoolName: UserPoolName + Policies: + PasswordPolicy: + MinimumLength: 8 + UsernameAttributes: + - email + Schema: + - AttributeDataType: String + Name: email + Required: false \ No newline at end of file diff --git a/tests/functional/commands/validate/lib/models/api_with_usageplans_shared_attributes_three.yaml b/tests/functional/commands/validate/lib/models/api_with_usageplans_shared_attributes_three.yaml new file mode 100644 index 0000000000..aed811ca0a --- /dev/null +++ b/tests/functional/commands/validate/lib/models/api_with_usageplans_shared_attributes_three.yaml @@ -0,0 +1,102 @@ +Globals: + Api: + Auth: + ApiKeyRequired: true + UsagePlan: + CreateUsagePlan: SHARED + +Conditions: + C1: + Fn::Equals: + - test + - test + C2: + Fn::Equals: + - test + - test + +Resources: + MyApiOne: + Type: AWS::Serverless::Api + Condition: C1 + UpdateReplacePolicy: Delete + Properties: + StageName: Prod + + MyApiTwo: + Type: AWS::Serverless::Api + Condition: C2 + UpdateReplacePolicy: Snapshot + Properties: + StageName: Prod + + MyApiThree: + Type: AWS::Serverless::Api + Properties: + StageName: Prod + + MyFunctionOne: + Type: AWS::Serverless::Function + Properties: + Handler: index.handler + Runtime: nodejs12.x + InlineCode: | + exports.handler = async (event) => { + return { + statusCode: 200, + body: JSON.stringify(event), + headers: {} + } + } + Events: + ApiKey: + Type: Api + Properties: + RestApiId: + Ref: MyApiOne + Method: get + Path: /path/one + + MyFunctionTwo: + Type: AWS::Serverless::Function + Properties: + Handler: index.handler + Runtime: nodejs12.x + InlineCode: | + exports.handler = async (event) => { + return { + statusCode: 200, + body: JSON.stringify(event), + headers: {} + } + } + Events: + ApiKey: + Type: Api + Properties: + RestApiId: + Ref: MyApiTwo + Method: get + Path: /path/two + + MyFunctionThree: + Type: AWS::Serverless::Function + Properties: + Handler: index.handler + Runtime: nodejs12.x + InlineCode: | + exports.handler = async (event) => { + return { + statusCode: 200, + body: JSON.stringify(event), + headers: {} + } + } + Events: + ApiKey: + Type: Api + Properties: + RestApiId: + Ref: MyApiThree + Method: get + Path: /path/three \ No newline at end of file diff --git a/tests/functional/commands/validate/lib/models/api_with_usageplans_shared_attributes_two.yaml b/tests/functional/commands/validate/lib/models/api_with_usageplans_shared_attributes_two.yaml new file mode 100644 index 0000000000..36c5bab657 --- /dev/null +++ b/tests/functional/commands/validate/lib/models/api_with_usageplans_shared_attributes_two.yaml @@ -0,0 +1,75 @@ +Globals: + Api: + Auth: + ApiKeyRequired: true + UsagePlan: + CreateUsagePlan: SHARED + +Conditions: + C1: + Fn::Equals: + - test + - test + C2: + Fn::Equals: + - test + - test + +Resources: + MyApiOne: + Type: AWS::Serverless::Api + DeletionPolicy: Delete + Condition: C1 + Properties: + StageName: Prod + + MyApiTwo: + Type: AWS::Serverless::Api + DeletionPolicy: Retain + Condition: C2 + Properties: + StageName: Prod + + MyFunctionOne: + Type: AWS::Serverless::Function + Properties: + Handler: index.handler + Runtime: nodejs12.x + InlineCode: | + exports.handler = async (event) => { + return { + statusCode: 200, + body: JSON.stringify(event), + headers: {} + } + } + Events: + ApiKey: + Type: Api + Properties: + RestApiId: + Ref: MyApiOne + Method: get + Path: /path/one + + MyFunctionTwo: + Type: AWS::Serverless::Function + Properties: + Handler: index.handler + Runtime: nodejs12.x + InlineCode: | + 
exports.handler = async (event) => { + return { + statusCode: 200, + body: JSON.stringify(event), + headers: {} + } + } + Events: + ApiKey: + Type: Api + Properties: + RestApiId: + Ref: MyApiTwo + Method: get + Path: /path/two \ No newline at end of file diff --git a/tests/functional/commands/validate/lib/models/function_with_deployment_preference_alarms_intrinsic_if.yaml b/tests/functional/commands/validate/lib/models/function_with_deployment_preference_alarms_intrinsic_if.yaml new file mode 100644 index 0000000000..f392f10628 --- /dev/null +++ b/tests/functional/commands/validate/lib/models/function_with_deployment_preference_alarms_intrinsic_if.yaml @@ -0,0 +1,23 @@ +Conditions: + MyCondition: + Fn::Equals: + - true + - false +Resources: + MinimalFunction: + Type: "AWS::Serverless::Function" + Properties: + CodeUri: s3://sam-demo-bucket/hello.zip + Handler: hello.handler + Runtime: python2.7 + AutoPublishAlias: live + DeploymentPreference: + Type: Linear10PercentEvery3Minutes + Alarms: + Fn::If: + - MyCondition + - - Alarm1 + - Alarm2 + - Alarm3 + - - Alarm1 + - Alarm5 diff --git a/tests/functional/commands/validate/lib/models/implicit_api_deletion_policy_precedence.yaml b/tests/functional/commands/validate/lib/models/implicit_api_deletion_policy_precedence.yaml new file mode 100644 index 0000000000..643b9ac477 --- /dev/null +++ b/tests/functional/commands/validate/lib/models/implicit_api_deletion_policy_precedence.yaml @@ -0,0 +1,32 @@ +Resources: + RestApiFunction: + Type: AWS::Serverless::Function + DeletionPolicy: Delete + UpdateReplacePolicy: Retain + Properties: + CodeUri: s3://sam-demo-bucket/todo_list.zip + Handler: index.restapi + Runtime: nodejs12.x + Policies: AmazonDynamoDBFullAccess + Events: + GetHtml: + Type: Api + Properties: + Path: /{proxy+} + Method: any + + GetHtmlFunction: + Type: AWS::Serverless::Function + DeletionPolicy: Retain + UpdateReplacePolicy: Retain + Properties: + CodeUri: s3://sam-demo-bucket/todo_list.zip + Handler: index.gethtml + Runtime: nodejs12.x + Policies: AmazonDynamoDBReadOnlyAccess + Events: + GetHtml: + Type: Api + Properties: + Path: /{proxy++} + Method: any diff --git a/tests/functional/commands/validate/lib/models/layer_deletion_policy_precedence.yaml b/tests/functional/commands/validate/lib/models/layer_deletion_policy_precedence.yaml new file mode 100644 index 0000000000..a967ed6212 --- /dev/null +++ b/tests/functional/commands/validate/lib/models/layer_deletion_policy_precedence.yaml @@ -0,0 +1,18 @@ +Resources: + MinimalLayer: + Type: 'AWS::Serverless::LayerVersion' + DeletionPolicy: Delete + Properties: + ContentUri: s3://sam-demo-bucket/layer.zip + RetentionPolicy: Retain + + MinimalLayer2: + Type: 'AWS::Serverless::LayerVersion' + DeletionPolicy: Delete + Properties: + ContentUri: s3://sam-demo-bucket/layer.zip + + MinimalLayer3: + Type: 'AWS::Serverless::LayerVersion' + Properties: + ContentUri: s3://sam-demo-bucket/layer.zip \ No newline at end of file diff --git a/tests/functional/commands/validate/lib/models/state_machine_with_xray_policies.yaml b/tests/functional/commands/validate/lib/models/state_machine_with_xray_policies.yaml new file mode 100644 index 0000000000..719d5874ab --- /dev/null +++ b/tests/functional/commands/validate/lib/models/state_machine_with_xray_policies.yaml @@ -0,0 +1,22 @@ +Resources: + MyFunction: + Type: "AWS::Serverless::Function" + Properties: + CodeUri: s3://sam-demo-bucket/hello.zip + Handler: hello.handler + Runtime: python2.7 + + StateMachine: + Type: AWS::Serverless::StateMachine + Properties: 
+ Name: MyBasicStateMachine + Type: STANDARD + DefinitionUri: s3://sam-demo-bucket/my-state-machine.asl.json + Tracing: + Enabled: true + Policies: + - Version: "2012-10-17" + Statement: + - Effect: Allow + Action: lambda:InvokeFunction + Resource: !GetAtt MyFunction.Arn diff --git a/tests/functional/commands/validate/lib/models/state_machine_with_xray_role.yaml b/tests/functional/commands/validate/lib/models/state_machine_with_xray_role.yaml new file mode 100644 index 0000000000..f5e56e7294 --- /dev/null +++ b/tests/functional/commands/validate/lib/models/state_machine_with_xray_role.yaml @@ -0,0 +1,10 @@ +Resources: + StateMachine: + Type: AWS::Serverless::StateMachine + Properties: + Name: MyStateMachineWithXRayTracing + Type: STANDARD + DefinitionUri: s3://sam-demo-bucket/my-state-machine.asl.json + Role: arn:aws:iam::123456123456:role/service-role/SampleRole + Tracing: + Enabled: true diff --git a/tests/functional/commands/validate/lib/models/version_deletion_policy_precedence.yaml b/tests/functional/commands/validate/lib/models/version_deletion_policy_precedence.yaml new file mode 100644 index 0000000000..bf868f9a6e --- /dev/null +++ b/tests/functional/commands/validate/lib/models/version_deletion_policy_precedence.yaml @@ -0,0 +1,19 @@ +Resources: + MinimalFunction: + Type: 'AWS::Serverless::Function' + Properties: + CodeUri: s3://sam-demo-bucket/hello.zip + Handler: hello.handler + Runtime: python2.7 + AutoPublishAlias: live + VersionDescription: sam-testing + + MinimalFunction2: + Type: 'AWS::Serverless::Function' + DeletionPolicy: Delete + Properties: + CodeUri: s3://sam-demo-bucket/hello.zip + Handler: hello.handler + Runtime: python2.7 + AutoPublishAlias: live + VersionDescription: sam-testing \ No newline at end of file diff --git a/tests/integration/buildcmd/test_build_cmd.py b/tests/integration/buildcmd/test_build_cmd.py index 427123f681..fd03c1aeb7 100644 --- a/tests/integration/buildcmd/test_build_cmd.py +++ b/tests/integration/buildcmd/test_build_cmd.py @@ -89,22 +89,25 @@ class TestBuildCommand_PythonFunctions(BuildIntegBase): @parameterized.expand( [ - ("python2.7", False), - ("python3.6", False), - ("python3.7", False), - ("python3.8", False), - ("python2.7", "use_container"), - ("python3.6", "use_container"), - ("python3.7", "use_container"), - ("python3.8", "use_container"), + ("python2.7", "Python", False), + ("python3.6", "Python", False), + ("python3.7", "Python", False), + ("python3.8", "Python", False), + # numpy 1.20.3 (in PythonPEP600/requirements.txt) only support python 3.7+ + ("python3.7", "PythonPEP600", False), + ("python3.8", "PythonPEP600", False), + ("python2.7", "Python", "use_container"), + ("python3.6", "Python", "use_container"), + ("python3.7", "Python", "use_container"), + ("python3.8", "Python", "use_container"), ] ) @pytest.mark.flaky(reruns=3) - def test_with_default_requirements(self, runtime, use_container): + def test_with_default_requirements(self, runtime, codeuri, use_container): if use_container and SKIP_DOCKER_TESTS: self.skipTest(SKIP_DOCKER_MESSAGE) - overrides = {"Runtime": runtime, "CodeUri": "Python", "Handler": "main.handler"} + overrides = {"Runtime": runtime, "CodeUri": codeuri, "Handler": "main.handler"} cmdlist = self.get_command_list(use_container=use_container, parameter_overrides=overrides) LOG.info("Running Command: {}".format(cmdlist)) @@ -334,7 +337,11 @@ def _prepare_application_environment(self): class TestBuildCommand_Java(BuildIntegBase): EXPECTED_FILES_PROJECT_MANIFEST_GRADLE = {"aws", "lib", "META-INF"} 
EXPECTED_FILES_PROJECT_MANIFEST_MAVEN = {"aws", "lib"} - EXPECTED_DEPENDENCIES = {"annotations-2.1.0.jar", "aws-lambda-java-core-1.1.0.jar"} + EXPECTED_GRADLE_DEPENDENCIES = {"annotations-2.1.0.jar", "aws-lambda-java-core-1.1.0.jar"} + EXPECTED_MAVEN_DEPENDENCIES = { + "software.amazon.awssdk.annotations-2.1.0.jar", + "com.amazonaws.aws-lambda-java-core-1.1.0.jar", + } FUNCTION_LOGICAL_ID = "Function" USING_GRADLE_PATH = os.path.join("Java", "gradle") @@ -344,60 +351,70 @@ class TestBuildCommand_Java(BuildIntegBase): @parameterized.expand( [ - ("java8", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8", USING_GRADLE_KOTLIN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN), - ("java8", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8.al2", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8.al2", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8.al2", USING_GRADLE_KOTLIN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8.al2", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN), - ("java8.al2", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java11", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java11", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java11", USING_GRADLE_KOTLIN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java11", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN), - ("java11", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), + ("java8", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java8", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java8", USING_GRADLE_KOTLIN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java8", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN, EXPECTED_MAVEN_DEPENDENCIES), + ("java8", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java8.al2", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java8.al2", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ( + "java8.al2", + USING_GRADLE_KOTLIN_PATH, + EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, + EXPECTED_GRADLE_DEPENDENCIES, + ), + ("java8.al2", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN, EXPECTED_MAVEN_DEPENDENCIES), + ("java8.al2", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java11", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java11", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java11", USING_GRADLE_KOTLIN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java11", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN, EXPECTED_MAVEN_DEPENDENCIES), + ("java11", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), ] ) @skipIf(SKIP_DOCKER_TESTS, SKIP_DOCKER_MESSAGE) @pytest.mark.flaky(reruns=3) - def test_building_java_in_container(self, runtime, code_path, expected_files): - self._test_with_building_java(runtime, code_path, expected_files, "use_container") + def test_building_java_in_container(self, runtime, 
code_path, expected_files, expected_dependencies): + self._test_with_building_java(runtime, code_path, expected_files, expected_dependencies, "use_container") @parameterized.expand( [ - ("java8", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8", USING_GRADLE_KOTLIN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN), - ("java8", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8.al2", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8.al2", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8.al2", USING_GRADLE_KOTLIN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java8.al2", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN), - ("java8.al2", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), + ("java8", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java8", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java8", USING_GRADLE_KOTLIN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java8", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN, EXPECTED_MAVEN_DEPENDENCIES), + ("java8", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java8.al2", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java8.al2", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ( + "java8.al2", + USING_GRADLE_KOTLIN_PATH, + EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, + EXPECTED_GRADLE_DEPENDENCIES, + ), + ("java8.al2", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN, EXPECTED_MAVEN_DEPENDENCIES), + ("java8.al2", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), ] ) @pytest.mark.flaky(reruns=3) - def test_building_java8_in_process(self, runtime, code_path, expected_files): - self._test_with_building_java(runtime, code_path, expected_files, False) + def test_building_java8_in_process(self, runtime, code_path, expected_files, expected_dependencies): + self._test_with_building_java(runtime, code_path, expected_files, expected_dependencies, False) @parameterized.expand( [ - ("java11", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java11", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java11", USING_GRADLE_KOTLIN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), - ("java11", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN), - ("java11", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE), + ("java11", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java11", USING_GRADLEW_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java11", USING_GRADLE_KOTLIN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), + ("java11", USING_MAVEN_PATH, EXPECTED_FILES_PROJECT_MANIFEST_MAVEN, EXPECTED_MAVEN_DEPENDENCIES), + ("java11", USING_GRADLE_PATH, EXPECTED_FILES_PROJECT_MANIFEST_GRADLE, EXPECTED_GRADLE_DEPENDENCIES), ] ) @pytest.mark.flaky(reruns=3) - def test_building_java11_in_process(self, runtime, code_path, expected_files): - self._test_with_building_java(runtime, code_path, expected_files, False) + def test_building_java11_in_process(self, runtime, 
code_path, expected_files, expected_dependencies): + self._test_with_building_java(runtime, code_path, expected_files, expected_dependencies, False) - def _test_with_building_java(self, runtime, code_path, expected_files, use_container): + def _test_with_building_java(self, runtime, code_path, expected_files, expected_dependencies, use_container): if use_container and SKIP_DOCKER_TESTS: self.skipTest(SKIP_DOCKER_MESSAGE) @@ -411,7 +428,7 @@ def _test_with_building_java(self, runtime, code_path, expected_files, use_conta run_command(cmdlist, cwd=self.working_dir) self._verify_built_artifact( - self.default_build_dir, self.FUNCTION_LOGICAL_ID, expected_files, self.EXPECTED_DEPENDENCIES + self.default_build_dir, self.FUNCTION_LOGICAL_ID, expected_files, expected_dependencies ) self._verify_resource_property( diff --git a/tests/integration/local/invoke/test_integrations_cli.py b/tests/integration/local/invoke/test_integrations_cli.py index a9bbf4d824..a3d6206c40 100644 --- a/tests/integration/local/invoke/test_integrations_cli.py +++ b/tests/integration/local/invoke/test_integrations_cli.py @@ -13,7 +13,7 @@ from tests.integration.local.invoke.layer_utils import LayerUtils from .invoke_integ_base import InvokeIntegBase -from tests.testing_utils import IS_WINDOWS, RUNNING_ON_CI, RUNNING_TEST_FOR_MASTER_ON_CI, RUN_BY_CANARY +from tests.testing_utils import IS_WINDOWS, RUNNING_ON_CI, RUNNING_TEST_FOR_MASTER_ON_CI, RUN_BY_CANARY, run_command # Layers tests require credentials and Appveyor will only add credentials to the env if the PR is from the same repo. # This is to restrict layers tests to run outside of Appveyor, when the branch is not master and tests are not run by Canary. @@ -884,6 +884,24 @@ def test_caching_two_layers_with_layer_cache_env_set(self): self.assertEqual(2, len(os.listdir(str(self.layer_cache)))) +@skipIf(SKIP_LAYERS_TESTS, "Skip layers tests in Appveyor only") +class TestLocalZipLayerVersion(InvokeIntegBase): + template = Path("layers", "local-zip-layer-template.yml") + + def test_local_zip_layers( + self, + ): + command_list = self.get_command_list( + "OneLayerVersionServerlessFunction", + template_path=self.template_path, + no_event=True, + ) + + execute = run_command(command_list) + self.assertEqual(0, execute.process.returncode) + self.assertEqual('"Layer1"', execute.stdout.decode()) + + @skipIf(SKIP_LAYERS_TESTS, "Skip layers tests in Appveyor only") class TestLayerVersionThatDoNotCreateCache(InvokeIntegBase): template = Path("layers", "layer-template.yml") diff --git a/tests/integration/testdata/buildcmd/PyLayer/requirements.txt b/tests/integration/testdata/buildcmd/PyLayer/requirements.txt index bf8549f936..ce4af48039 100644 --- a/tests/integration/testdata/buildcmd/PyLayer/requirements.txt +++ b/tests/integration/testdata/buildcmd/PyLayer/requirements.txt @@ -1,6 +1,7 @@ # These are some hard packages to build. Using them here helps us verify that building works on various platforms -numpy~=1.15 +# NOTE: Fixing to <1.20.3 as numpy1.20.3 started to use a new wheel naming convention (PEP 600) +numpy<1.20.3 # `cryptography` has a dependency on `pycparser` which, for some reason doesn't build inside a Docker container. 
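The new TestLocalZipLayerVersion test above asserts that a local invoke of a function using a locally-zipped layer prints exactly '"Layer1"', i.e. the handler's return value comes from code shipped inside the layer zip. A hypothetical sketch of the handler/layer pair implied by that assertion (the real layer-main.py and layer1.zip live in the test data and are not part of this diff; all names here are illustrative):

```python
# Sketch of two files collapsed into one module for readability.

# (1) python/layer1.py inside the layer zip -- Lambda mounts layer contents
#     onto the runtime's import path:
def get_layer_name():
    return "Layer1"


# (2) layer-main.py in the function's CodeUri -- the handler surfaces the
#     layer-provided value; `sam local invoke` prints the JSON-encoded
#     return value, which is why the test expects stdout '"Layer1"':
def one_layer_handler(event, context):
    return get_layer_name()  # in the real layout: from layer1 import ...
```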
# Turning this off until we resolve this issue: https://github.com/awslabs/aws-lambda-builders/issues/29 # cryptography~=2.4 diff --git a/tests/integration/testdata/buildcmd/PyLayerMake/requirements.txt b/tests/integration/testdata/buildcmd/PyLayerMake/requirements.txt index bf8549f936..ce4af48039 100644 --- a/tests/integration/testdata/buildcmd/PyLayerMake/requirements.txt +++ b/tests/integration/testdata/buildcmd/PyLayerMake/requirements.txt @@ -1,6 +1,7 @@ # These are some hard packages to build. Using them here helps us verify that building works on various platforms -numpy~=1.15 +# NOTE: Fixing to <1.20.3 as numpy1.20.3 started to use a new wheel naming convention (PEP 600) +numpy<1.20.3 # `cryptography` has a dependency on `pycparser` which, for some reason doesn't build inside a Docker container. # Turning this off until we resolve this issue: https://github.com/awslabs/aws-lambda-builders/issues/29 # cryptography~=2.4 diff --git a/tests/integration/testdata/buildcmd/Python/requirements.txt b/tests/integration/testdata/buildcmd/Python/requirements.txt index bf8549f936..ce4af48039 100644 --- a/tests/integration/testdata/buildcmd/Python/requirements.txt +++ b/tests/integration/testdata/buildcmd/Python/requirements.txt @@ -1,6 +1,7 @@ # These are some hard packages to build. Using them here helps us verify that building works on various platforms -numpy~=1.15 +# NOTE: Fixing to <1.20.3 as numpy1.20.3 started to use a new wheel naming convention (PEP 600) +numpy<1.20.3 # `cryptography` has a dependency on `pycparser` which, for some reason doesn't build inside a Docker container. # Turning this off until we resolve this issue: https://github.com/awslabs/aws-lambda-builders/issues/29 # cryptography~=2.4 diff --git a/tests/integration/testdata/buildcmd/PythonImage/requirements.txt b/tests/integration/testdata/buildcmd/PythonImage/requirements.txt index bf8549f936..ce4af48039 100644 --- a/tests/integration/testdata/buildcmd/PythonImage/requirements.txt +++ b/tests/integration/testdata/buildcmd/PythonImage/requirements.txt @@ -1,6 +1,7 @@ # These are some hard packages to build. Using them here helps us verify that building works on various platforms -numpy~=1.15 +# NOTE: Fixing to <1.20.3 as numpy1.20.3 started to use a new wheel naming convention (PEP 600) +numpy<1.20.3 # `cryptography` has a dependency on `pycparser` which, for some reason doesn't build inside a Docker container. 
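The numpy pin in these requirements files exists, per the NOTE above, because numpy 1.20.3 began shipping wheels named under PEP 600 ("perennial manylinux", e.g. manylinux_2_17_x86_64 instead of the legacy manylinux2014_x86_64 alias), which older pip/wheel tooling does not accept. A small illustrative check, assuming the `packaging` library, for whether an interpreter's tag set includes PEP 600 tags:

```python
# Illustrative only: enumerate the platform tags this interpreter accepts
# (pip's own resolution is equivalent in spirit). On Linux, the absence of
# any manylinux_2_* tag means a PEP 600-only wheel cannot be installed.
from packaging.tags import sys_tags

accepted = {str(tag) for tag in sys_tags()}
print("accepts PEP 600 manylinux wheels:", any("manylinux_2_" in t for t in accepted))
```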
# Turning this off until we resolve this issue: https://github.com/awslabs/aws-lambda-builders/issues/29 # cryptography~=2.4 diff --git a/tests/integration/testdata/buildcmd/PythonPEP600/__init__.py b/tests/integration/testdata/buildcmd/PythonPEP600/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/testdata/buildcmd/PythonPEP600/main.py b/tests/integration/testdata/buildcmd/PythonPEP600/main.py new file mode 100644 index 0000000000..b636d9d592 --- /dev/null +++ b/tests/integration/testdata/buildcmd/PythonPEP600/main.py @@ -0,0 +1,19 @@ +import numpy + + +# from cryptography.fernet import Fernet + + +def handler(event, context): + # Try using some of the modules to make sure they work & don't crash the process + # print(Fernet.generate_key()) + + return {"pi": "{0:.2f}".format(numpy.pi)} + + +def first_function_handler(event, context): + return "Hello World" + + +def second_function_handler(event, context): + return "Hello Mars" diff --git a/tests/integration/testdata/buildcmd/PythonPEP600/requirements.txt b/tests/integration/testdata/buildcmd/PythonPEP600/requirements.txt new file mode 100644 index 0000000000..a58b87977c --- /dev/null +++ b/tests/integration/testdata/buildcmd/PythonPEP600/requirements.txt @@ -0,0 +1,6 @@ +# These are some hard packages to build. Using them here helps us verify that building works on various platforms + +# these dependency versions use PEP600 +numpy==1.20.3 +greenlet==1.1.0 +sqlalchemy==1.4.15 diff --git a/tests/integration/testdata/invoke/layers/local-zip-layer-template.yml b/tests/integration/testdata/invoke/layers/local-zip-layer-template.yml new file mode 100644 index 0000000000..466ff9791a --- /dev/null +++ b/tests/integration/testdata/invoke/layers/local-zip-layer-template.yml @@ -0,0 +1,19 @@ +AWSTemplateFormatVersion : '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Description: A hello world application. + +Resources: + LayerOne: + Type: AWS::Lambda::LayerVersion + Properties: + Content: ../layer_zips/layer1.zip + + OneLayerVersionServerlessFunction: + Type: AWS::Serverless::Function + Properties: + Handler: layer-main.one_layer_hanlder + Runtime: python3.6 + CodeUri: . 
+ Timeout: 20 + Layers: + - !Ref LayerOne diff --git a/tests/integration/testdata/validate/default_json/template.json b/tests/integration/testdata/validate/default_json/template.json new file mode 100644 index 0000000000..f9da3fc2d3 --- /dev/null +++ b/tests/integration/testdata/validate/default_json/template.json @@ -0,0 +1,15 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Transform": "AWS::Serverless-2016-10-31", + + "Resources": { + "HelloWorldFunction": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "hello-world/", + "Handler": "app.lambdaHandler", + "Runtime": "nodejs14.x" + } + } + } +} diff --git a/tests/integration/testdata/validate/default_yaml/template.yaml b/tests/integration/testdata/validate/default_yaml/template.yaml new file mode 100644 index 0000000000..acb6a8cf26 --- /dev/null +++ b/tests/integration/testdata/validate/default_yaml/template.yaml @@ -0,0 +1,10 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 + +Resources: + HelloWorldFunction: + Type: AWS::Serverless::Function + Properties: + CodeUri: HelloWorldFunction + Handler: app.lambdaHandler + Runtime: nodejs14.x diff --git a/tests/integration/testdata/validate/multiple_files/template.json b/tests/integration/testdata/validate/multiple_files/template.json new file mode 100644 index 0000000000..f9da3fc2d3 --- /dev/null +++ b/tests/integration/testdata/validate/multiple_files/template.json @@ -0,0 +1,15 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Transform": "AWS::Serverless-2016-10-31", + + "Resources": { + "HelloWorldFunction": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "hello-world/", + "Handler": "app.lambdaHandler", + "Runtime": "nodejs14.x" + } + } + } +} diff --git a/tests/integration/testdata/validate/multiple_files/template.yaml b/tests/integration/testdata/validate/multiple_files/template.yaml new file mode 100644 index 0000000000..acb6a8cf26 --- /dev/null +++ b/tests/integration/testdata/validate/multiple_files/template.yaml @@ -0,0 +1,10 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 + +Resources: + HelloWorldFunction: + Type: AWS::Serverless::Function + Properties: + CodeUri: HelloWorldFunction + Handler: app.lambdaHandler + Runtime: nodejs14.x diff --git a/tests/integration/testdata/validate/with_build/.aws-sam/build/template.yaml b/tests/integration/testdata/validate/with_build/.aws-sam/build/template.yaml new file mode 100644 index 0000000000..acb6a8cf26 --- /dev/null +++ b/tests/integration/testdata/validate/with_build/.aws-sam/build/template.yaml @@ -0,0 +1,10 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 + +Resources: + HelloWorldFunction: + Type: AWS::Serverless::Function + Properties: + CodeUri: HelloWorldFunction + Handler: app.lambdaHandler + Runtime: nodejs14.x diff --git a/tests/integration/testdata/validate/with_build/template.json b/tests/integration/testdata/validate/with_build/template.json new file mode 100644 index 0000000000..f9da3fc2d3 --- /dev/null +++ b/tests/integration/testdata/validate/with_build/template.json @@ -0,0 +1,15 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Transform": "AWS::Serverless-2016-10-31", + + "Resources": { + "HelloWorldFunction": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "hello-world/", + "Handler": "app.lambdaHandler", + "Runtime": "nodejs14.x" + } + } + } +} diff --git a/tests/integration/validate/__init__.py b/tests/integration/validate/__init__.py 
new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/validate/test_validate_command.py b/tests/integration/validate/test_validate_command.py new file mode 100644 index 0000000000..c5b62bdaa1 --- /dev/null +++ b/tests/integration/validate/test_validate_command.py @@ -0,0 +1,75 @@ +""" +Integration tests for sam validate +""" + +import os +import re +from enum import Enum, auto +from pathlib import Path +from typing import List, Optional +from unittest import TestCase +from unittest.case import skipIf + +from parameterized import parameterized +from tests.testing_utils import RUN_BY_CANARY, RUNNING_ON_CI, RUNNING_TEST_FOR_MASTER_ON_CI, run_command + +# Validate tests require credentials and CI/CD will only add credentials to the env if the PR is from the same repo. +# This is to restrict validate tests to run outside of CI/CD, when the branch is not master and tests are not run by Canary +SKIP_VALIDATE_TESTS = RUNNING_ON_CI and RUNNING_TEST_FOR_MASTER_ON_CI and not RUN_BY_CANARY + + +class TemplateFileTypes(Enum): + JSON = auto() + YAML = auto() + + +@skipIf(SKIP_VALIDATE_TESTS, "Skip validate tests in CI/CD only") +class TestValidate(TestCase): + @classmethod + def setUpClass(cls): + cls.patterns = { + TemplateFileTypes.JSON: re.compile(r"^/.+/template[.]json is a valid SAM Template$"), + TemplateFileTypes.YAML: re.compile(r"^/.+/template[.]yaml is a valid SAM Template$"), + } + + @staticmethod + def base_command() -> str: + return "samdev" if os.getenv("SAM_CLI_DEV") else "sam" + + def command_list( + self, + template_file: Optional[Path] = None, + profile: Optional[str] = None, + region: Optional[str] = None, + config_file: Optional[Path] = None, + ) -> List[str]: + command_list = [self.base_command(), "validate"] + if template_file: + command_list += ["--template-file", str(template_file)] + if profile: + command_list += ["--profile", profile] + if region: + command_list += ["--region", region] + if config_file: + command_list += ["--config-file", str(config_file)] + return command_list + + @parameterized.expand( + [ + ("default_yaml", TemplateFileTypes.YAML), # project with template.yaml + ("default_json", TemplateFileTypes.JSON), # project with template.json + ("multiple_files", TemplateFileTypes.YAML), # project with both template.yaml and template.json + ( + "with_build", + TemplateFileTypes.JSON, + ), # project with template.json and standard build directory .aws-sam/build/template.yaml + ] + ) + def test_default_template_file_choice(self, relative_folder: str, expected_file: TemplateFileTypes): + test_data_path = Path(__file__).resolve().parents[2] / "integration" / "testdata" / "validate" + process_dir = test_data_path / relative_folder + command_result = run_command(self.command_list(), cwd=str(process_dir)) + pattern = self.patterns[expected_file] # type: ignore + output = command_result.stdout.decode("utf-8") + self.assertEqual(command_result.process.returncode, 0) + self.assertRegex(output, pattern) diff --git a/tests/unit/commands/_utils/test_options.py b/tests/unit/commands/_utils/test_options.py index bbc6701049..ea82e5cdbf 100644 --- a/tests/unit/commands/_utils/test_options.py +++ b/tests/unit/commands/_utils/test_options.py @@ -53,7 +53,18 @@ def test_must_return_yml_extension(self, os_mock): def test_must_return_yaml_extension(self, os_mock): expected = "template.yaml" - os_mock.path.exists.return_value = True + os_mock.path.exists.side_effect = lambda file_name: file_name == expected + os_mock.path.abspath.return_value = "absPath" + + result = 
get_or_default_template_file_name(None, None, _TEMPLATE_OPTION_DEFAULT_VALUE, include_build=False) + self.assertEqual(result, "absPath") + os_mock.path.abspath.assert_called_with(expected) + + @patch("samcli.commands._utils.options.os") + def test_must_return_json_extension(self, os_mock): + expected = "template.json" + + os_mock.path.exists.side_effect = lambda file_name: file_name == expected os_mock.path.abspath.return_value = "absPath" result = get_or_default_template_file_name(None, None, _TEMPLATE_OPTION_DEFAULT_VALUE, include_build=False) diff --git a/tests/unit/commands/init/test_cli.py b/tests/unit/commands/init/test_cli.py index d80b3022a7..5d61386acc 100644 --- a/tests/unit/commands/init/test_cli.py +++ b/tests/unit/commands/init/test_cli.py @@ -1,3 +1,5 @@ +import os +from pathlib import Path from unittest import TestCase from unittest.mock import patch, ANY @@ -5,22 +7,23 @@ import click from click.testing import CliRunner -from samcli.commands.init.init_templates import InitTemplates +from samcli.commands.exceptions import UserException from samcli.commands.init import cli as init_cmd from samcli.commands.init import do_cli as init_cli +from samcli.commands.init.init_templates import InitTemplates, APP_TEMPLATES_REPO_URL from samcli.lib.init import GenerateProjectFailedError -from samcli.commands.exceptions import UserException +from samcli.lib.utils.git_repo import GitRepo from samcli.lib.utils.packagetype import IMAGE, ZIP class MockInitTemplates: - def __init__(self, no_interactive=False, auto_clone=True): - self._repo_url = "https://github.com/awslabs/aws-sam-cli-app-templates.git" - self._repo_name = "aws-sam-cli-app-templates" - self.repo_path = "repository" - self.clone_attempted = True + def __init__(self, no_interactive=False): self._no_interactive = no_interactive - self._auto_clone = auto_clone + self._git_repo: GitRepo = GitRepo( + url=APP_TEMPLATES_REPO_URL, + ) + self._git_repo.clone_attempted = True + self._git_repo.local_path = Path("repository") class TestCli(TestCase): @@ -40,9 +43,9 @@ def setUp(self): self.extra_context = '{"project_name": "testing project", "runtime": "python3.6"}' self.extra_context_as_json = {"project_name": "testing project", "runtime": "python3.6"} - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") + @patch("samcli.lib.utils.git_repo.GitRepo.clone") @patch("samcli.commands.init.init_generator.generate_project") - def test_init_cli(self, generate_project_patch, sd_mock): + def test_init_cli(self, generate_project_patch, git_repo_clone_mock): # GIVEN generate_project successfully created a project # WHEN a project name has been passed init_cli( @@ -59,7 +62,6 @@ def test_init_cli(self, generate_project_patch, sd_mock): app_template=self.app_template, no_input=self.no_input, extra_context=None, - auto_clone=False, ) # THEN we should receive no errors @@ -75,9 +77,9 @@ def test_init_cli(self, generate_project_patch, sd_mock): self.extra_context_as_json, ) - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") + @patch("samcli.lib.utils.git_repo.GitRepo.clone") @patch("samcli.commands.init.init_generator.generate_project") - def test_init_image_cli(self, generate_project_patch, sd_mock): + def test_init_image_cli(self, generate_project_patch, git_repo_clone_mock): # GIVEN generate_project successfully created a project # WHEN a project name has been passed init_cli( @@ -94,7 +96,6 @@ def test_init_image_cli(self, generate_project_patch, sd_mock): app_template=None, 
no_input=self.no_input, extra_context=None, - auto_clone=False, ) # THEN we should receive no errors @@ -110,9 +111,9 @@ def test_init_image_cli(self, generate_project_patch, sd_mock): {"runtime": "nodejs12.x", "project_name": "testing project"}, ) - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") + @patch("samcli.lib.utils.git_repo.GitRepo.clone") @patch("samcli.commands.init.init_generator.generate_project") - def test_init_image_java_cli(self, generate_project_patch, sd_mock): + def test_init_image_java_cli(self, generate_project_patch, git_repo_clone_mock): # GIVEN generate_project successfully created a project # WHEN a project name has been passed init_cli( @@ -129,7 +130,6 @@ def test_init_image_java_cli(self, generate_project_patch, sd_mock): app_template=None, no_input=self.no_input, extra_context=None, - auto_clone=False, ) # THEN we should receive no errors @@ -145,8 +145,8 @@ def test_init_image_java_cli(self, generate_project_patch, sd_mock): {"runtime": "java11", "project_name": "testing project"}, ) - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") - def test_init_fails_invalid_template(self, sd_mock): + @patch("samcli.lib.utils.git_repo.GitRepo.clone") + def test_init_fails_invalid_template(self, git_repo_clone_mock): # WHEN an unknown app template is passed in # THEN an exception should be raised with self.assertRaises(UserException): @@ -164,11 +164,10 @@ def test_init_fails_invalid_template(self, sd_mock): app_template="wrong-and-bad", no_input=self.no_input, extra_context=None, - auto_clone=False, ) - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") - def test_init_fails_invalid_dep_mgr(self, sd_mock): + @patch("samcli.lib.utils.git_repo.GitRepo.clone") + def test_init_fails_invalid_dep_mgr(self, git_repo_clone_mock): # WHEN an unknown app template is passed in # THEN an exception should be raised with self.assertRaises(UserException): @@ -186,12 +185,11 @@ def test_init_fails_invalid_dep_mgr(self, sd_mock): app_template=self.app_template, no_input=self.no_input, extra_context=None, - auto_clone=False, ) - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") + @patch("samcli.lib.utils.git_repo.GitRepo.clone") @patch("samcli.commands.init.init_generator.generate_project") - def test_init_cli_generate_project_fails(self, generate_project_patch, sd_mock): + def test_init_cli_generate_project_fails(self, generate_project_patch, git_repo_clone_mock): # GIVEN generate_project fails to create a project generate_project_patch.side_effect = GenerateProjectFailedError( project=self.name, provider_error="Something wrong happened" @@ -214,16 +212,15 @@ def test_init_cli_generate_project_fails(self, generate_project_patch, sd_mock): app_template=None, no_input=self.no_input, extra_context=None, - auto_clone=False, ) generate_project_patch.assert_called_with( self.location, self.runtime, self.dependency_manager, self.output_dir, self.name, self.no_input ) - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") + @patch("samcli.lib.utils.git_repo.GitRepo.clone") @patch("samcli.commands.init.init_generator.generate_project") - def test_init_cli_generate_project_image_fails(self, generate_project_patch, sd_mock): + def test_init_cli_generate_project_image_fails(self, generate_project_patch, git_repo_clone_mock): # GIVEN generate_project fails to create a project generate_project_patch.side_effect = GenerateProjectFailedError( project=self.name, 
provider_error="Something wrong happened" @@ -246,7 +243,6 @@ def test_init_cli_generate_project_image_fails(self, generate_project_patch, sd_ app_template=None, no_input=self.no_input, extra_context=None, - auto_clone=False, ) generate_project_patch.assert_called_with( @@ -271,7 +267,6 @@ def test_init_cli_with_extra_context_parameter_not_passed(self, generate_project app_template=self.app_template, no_input=self.no_input, extra_context=None, - auto_clone=False, ) # THEN we should receive no errors @@ -297,7 +292,6 @@ def test_init_cli_with_extra_context_parameter_passed(self, generate_project_pat app_template=self.app_template, no_input=self.no_input, extra_context='{"schema_name":"events", "schema_type":"aws"}', - auto_clone=False, ) # THEN we should receive no errors and right extra_context should be passed @@ -330,7 +324,6 @@ def test_init_cli_with_extra_context_not_overriding_default_parameter(self, gene app_template=self.app_template, no_input=self.no_input, extra_context='{"project_name": "my_project", "runtime": "java8", "schema_name":"events", "schema_type": "aws"}', - auto_clone=False, ) # THEN extra_context should have not overridden default_parameters(name, runtime) @@ -363,7 +356,6 @@ def test_init_cli_with_extra_context_input_as_wrong_json_raises_exception(self): app_template=self.app_template, no_input=self.no_input, extra_context='{"project_name", "my_project", "runtime": "java8", "schema_name":"events", "schema_type": "aws"}', - auto_clone=False, ) @patch("samcli.commands.init.init_generator.generate_project") @@ -384,7 +376,6 @@ def test_init_cli_must_set_default_context_when_location_is_provided(self, gener app_template=None, no_input=None, extra_context='{"schema_name":"events", "schema_type": "aws"}', - auto_clone=False, ) # THEN should set default parameter(name, runtime) as extra_context @@ -417,7 +408,6 @@ def test_init_cli_must_only_set_passed_project_name_when_location_is_provided(se app_template=None, no_input=None, extra_context='{"schema_name":"events", "schema_type": "aws"}', - auto_clone=False, ) # THEN extra_context should be without runtime @@ -450,7 +440,6 @@ def test_init_cli_must_only_set_passed_runtime_when_location_is_provided(self, g app_template=None, no_input=None, extra_context='{"schema_name":"events", "schema_type": "aws"}', - auto_clone=False, ) # THEN extra_context should be without name @@ -485,7 +474,6 @@ def test_init_cli_with_extra_context_parameter_passed_as_escaped(self, generate_ # fmt: off extra_context='{\"schema_name\":\"events\", \"schema_type\":\"aws\"}', # fmt: on - auto_clone=False, ) # THEN we should receive no errors and right extra_context should be passed @@ -1035,7 +1023,6 @@ def test_init_passes_dynamic_event_bridge_template(self, generate_project_patch, app_template="eventBridge-schema-app", no_input=self.no_input, extra_context=None, - auto_clone=False, ) generate_project_patch.assert_called_once_with( @@ -1050,9 +1037,9 @@ def test_init_passes_dynamic_event_bridge_template(self, generate_project_patch, self.extra_context_as_json, ) - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") + @patch("samcli.lib.utils.git_repo.GitRepo._ensure_clone_directory_exists") @patch("samcli.commands.init.init_generator.generate_project") - def test_init_cli_int_from_location(self, generate_project_patch, sd_mock): + def test_init_cli_int_from_location(self, generate_project_patch, cd_mock): # WHEN the user follows interactive init prompts # 2: selecting custom location @@ -1079,9 +1066,9 @@ def 
test_init_cli_int_from_location(self, generate_project_patch, sd_mock): None, ) - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") + @patch("samcli.lib.utils.git_repo.GitRepo._ensure_clone_directory_exists") @patch("samcli.commands.init.init_generator.generate_project") - def test_init_cli_no_package_type(self, generate_project_patch, sd_mock): + def test_init_cli_no_package_type(self, generate_project_patch, cd_mock): # WHEN the user follows interactive init prompts # 1: selecting template source @@ -1115,3 +1102,251 @@ def test_init_cli_no_package_type(self, generate_project_patch, sd_mock): True, ANY, ) + + @patch.object(InitTemplates, "__init__", MockInitTemplates.__init__) + @patch("samcli.commands.init.init_templates.InitTemplates._init_options_from_manifest") + def test_init_cli_image_pool_with_base_image_having_multiple_managed_template_but_no_app_template_provided( + self, + init_options_from_manifest_mock, + ): + init_options_from_manifest_mock.return_value = [ + { + "directory": "python3.8-image/cookiecutter-aws-sam-hello-python-lambda-image", + "displayName": "Hello World Lambda Image Example", + "dependencyManager": "pip", + "appTemplate": "hello-world-lambda-image", + "packageType": "Image", + }, + { + "directory": "python3.8-image/cookiecutter-ml-apigw-pytorch", + "displayName": "PyTorch Machine Learning Inference API", + "dependencyManager": "pip", + "appTemplate": "ml-apigw-pytorch", + "packageType": "Image", + }, + ] + with self.assertRaises(UserException): + init_cli( + ctx=self.ctx, + no_interactive=self.no_interactive, + pt_explicit=self.pt_explicit, + package_type="Image", + base_image="amazon/python3.8-base", + dependency_manager="pip", + app_template=None, + name=self.name, + output_dir=self.output_dir, + location=None, + runtime=None, + no_input=self.no_input, + extra_context=self.extra_context, + ) + + @patch.object(InitTemplates, "__init__", MockInitTemplates.__init__) + @patch("samcli.commands.init.init_templates.InitTemplates._init_options_from_manifest") + def test_init_cli_image_pool_with_base_image_having_multiple_managed_template_and_provided_app_template_not_matching_any_managed_templates( + self, + init_options_from_manifest_mock, + ): + init_options_from_manifest_mock.return_value = [ + { + "directory": "python3.8-image/cookiecutter-aws-sam-hello-python-lambda-image", + "displayName": "Hello World Lambda Image Example", + "dependencyManager": "pip", + "appTemplate": "hello-world-lambda-image", + "packageType": "Image", + }, + { + "directory": "python3.8-image/cookiecutter-ml-apigw-pytorch", + "displayName": "PyTorch Machine Learning Inference API", + "dependencyManager": "pip", + "appTemplate": "ml-apigw-pytorch", + "packageType": "Image", + }, + ] + with self.assertRaises(UserException): + init_cli( + ctx=self.ctx, + no_interactive=self.no_interactive, + pt_explicit=self.pt_explicit, + package_type="Image", + base_image="amazon/python3.8-base", + dependency_manager="pip", + app_template="Not-ml-apigw-pytorch", # different value than appTemplates shown in the manifest above + name=self.name, + output_dir=self.output_dir, + location=None, + runtime=None, + no_input=self.no_input, + extra_context=self.extra_context, + ) + + @patch.object(InitTemplates, "__init__", MockInitTemplates.__init__) + @patch("samcli.commands.init.init_templates.InitTemplates._init_options_from_manifest") + @patch("samcli.commands.init.init_generator.generate_project") + def 
test_init_cli_image_pool_with_base_image_having_multiple_managed_template_with_matching_app_template_provided( + self, + generate_project_patch, + init_options_from_manifest_mock, + ): + init_options_from_manifest_mock.return_value = [ + { + "directory": "python3.8-image/cookiecutter-aws-sam-hello-python-lambda-image", + "displayName": "Hello World Lambda Image Example", + "dependencyManager": "pip", + "appTemplate": "hello-world-lambda-image", + "packageType": "Image", + }, + { + "directory": "python3.8-image/cookiecutter-ml-apigw-pytorch", + "displayName": "PyTorch Machine Learning Inference API", + "dependencyManager": "pip", + "appTemplate": "ml-apigw-pytorch", + "packageType": "Image", + }, + ] + init_cli( + ctx=self.ctx, + no_interactive=True, + pt_explicit=True, + package_type="Image", + base_image="amazon/python3.8-base", + dependency_manager="pip", + app_template="ml-apigw-pytorch", # same value as one appTemplate in the manifest above + name=self.name, + output_dir=None, + location=None, + runtime=None, + no_input=None, + extra_context=None, + ) + generate_project_patch.assert_called_once_with( + os.path.normpath("repository/python3.8-image/cookiecutter-ml-apigw-pytorch"), # location + "Image", # package_type + "python3.8", # runtime + "pip", # dependency_manager + self.output_dir, + self.name, + True, # no_input + ANY, + ) + + @patch.object(InitTemplates, "__init__", MockInitTemplates.__init__) + @patch("samcli.commands.init.init_templates.InitTemplates._init_options_from_manifest") + @patch("samcli.commands.init.init_generator.generate_project") + def test_init_cli_image_pool_with_base_image_having_one_managed_template_does_not_need_app_template_argument( + self, + generate_project_patch, + init_options_from_manifest_mock, + ): + init_options_from_manifest_mock.return_value = [ + { + "directory": "python3.8-image/cookiecutter-ml-apigw-pytorch", + "displayName": "PyTorch Machine Learning Inference API", + "dependencyManager": "pip", + "appTemplate": "ml-apigw-pytorch", + "packageType": "Image", + }, + ] + init_cli( + ctx=self.ctx, + no_interactive=True, + pt_explicit=True, + package_type="Image", + base_image="amazon/python3.8-base", + dependency_manager="pip", + app_template=None, + name=self.name, + output_dir=None, + location=None, + runtime=None, + no_input=None, + extra_context=None, + ) + generate_project_patch.assert_called_once_with( + os.path.normpath("repository/python3.8-image/cookiecutter-ml-apigw-pytorch"), # location + "Image", # package_type + "python3.8", # runtime + "pip", # dependency_manager + self.output_dir, + self.name, + True, # no_input + ANY, + ) + + @patch.object(InitTemplates, "__init__", MockInitTemplates.__init__) + @patch("samcli.commands.init.init_templates.InitTemplates._init_options_from_manifest") + @patch("samcli.commands.init.init_generator.generate_project") + def test_init_cli_image_pool_with_base_image_having_one_managed_template_with_provided_app_template_matching_the_managed_template( + self, + generate_project_patch, + init_options_from_manifest_mock, + ): + init_options_from_manifest_mock.return_value = [ + { + "directory": "python3.8-image/cookiecutter-ml-apigw-pytorch", + "displayName": "PyTorch Machine Learning Inference API", + "dependencyManager": "pip", + "appTemplate": "ml-apigw-pytorch", + "packageType": "Image", + }, + ] + init_cli( + ctx=self.ctx, + no_interactive=True, + pt_explicit=True, + package_type="Image", + base_image="amazon/python3.8-base", + dependency_manager="pip", + app_template="ml-apigw-pytorch", # same value 
as appTemplate indicated in the manifest above + name=self.name, + output_dir=None, + location=None, + runtime=None, + no_input=None, + extra_context=None, + ) + generate_project_patch.assert_called_once_with( + os.path.normpath("repository/python3.8-image/cookiecutter-ml-apigw-pytorch"), # location + "Image", # package_type + "python3.8", # runtime + "pip", # dependency_manager + self.output_dir, + self.name, + True, # no_input + ANY, + ) + + @patch.object(InitTemplates, "__init__", MockInitTemplates.__init__) + @patch("samcli.commands.init.init_templates.InitTemplates._init_options_from_manifest") + @patch("samcli.commands.init.init_generator.generate_project") + def test_init_cli_image_pool_with_base_image_having_one_managed_template_with_provided_app_template_not_matching_the_managed_template( + self, + generate_project_patch, + init_options_from_manifest_mock, + ): + init_options_from_manifest_mock.return_value = [ + { + "directory": "python3.8-image/cookiecutter-ml-apigw-pytorch", + "displayName": "PyTorch Machine Learning Inference API", + "dependencyManager": "pip", + "appTemplate": "ml-apigw-pytorch", + "packageType": "Image", + }, + ] + with self.assertRaises(UserException): + init_cli( + ctx=self.ctx, + no_interactive=True, + pt_explicit=True, + package_type="Image", + base_image="amazon/python3.8-base", + dependency_manager="pip", + app_template="NOT-ml-apigw-pytorch", # different value than appTemplate shown in the manifest above + name=self.name, + output_dir=None, + location=None, + runtime=None, + no_input=None, + extra_context=None, + ) diff --git a/tests/unit/commands/init/test_templates.py b/tests/unit/commands/init/test_templates.py index b422b5dbf0..0e11d6aed9 100644 --- a/tests/unit/commands/init/test_templates.py +++ b/tests/unit/commands/init/test_templates.py @@ -1,23 +1,20 @@ import json import subprocess -import click - -from unittest.mock import mock_open, patch, PropertyMock, MagicMock +from pathlib import Path from re import search from unittest import TestCase -from samcli.lib.utils.packagetype import IMAGE, ZIP - -from pathlib import Path +from unittest.mock import mock_open, patch, PropertyMock, MagicMock from samcli.commands.init.init_templates import InitTemplates +from samcli.lib.utils.packagetype import IMAGE, ZIP class TestTemplates(TestCase): @patch("subprocess.check_output") - @patch("samcli.commands.init.init_templates.InitTemplates._git_executable") - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") + @patch("samcli.lib.utils.git_repo.GitRepo._git_executable") + @patch("samcli.lib.utils.git_repo.GitRepo._ensure_clone_directory_exists") @patch("shutil.copytree") - def test_location_from_app_template_zip(self, subprocess_mock, git_exec_mock, sd_mock, copy_mock): + def test_location_from_app_template_zip(self, subprocess_mock, git_exec_mock, cd_mock, copy_mock): it = InitTemplates(True) manifest = { @@ -35,16 +32,16 @@ def test_location_from_app_template_zip(self, subprocess_mock, git_exec_mock, sd m = mock_open(read_data=manifest_json) with patch("samcli.cli.global_config.GlobalConfig.config_dir", new_callable=PropertyMock) as mock_cfg: - mock_cfg.return_value = "/tmp/test-sam" + mock_cfg.return_value = Path("/tmp/test-sam") with patch("samcli.commands.init.init_templates.open", m): location = it.location_from_app_template(ZIP, "ruby2.5", None, "bundler", "hello-world") self.assertTrue(search("mock-ruby-template", location)) @patch("subprocess.check_output") - 
@patch("samcli.commands.init.init_templates.InitTemplates._git_executable") - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") + @patch("samcli.lib.utils.git_repo.GitRepo._git_executable") + @patch("samcli.lib.utils.git_repo.GitRepo._ensure_clone_directory_exists") @patch("shutil.copytree") - def test_location_from_app_template_image(self, subprocess_mock, git_exec_mock, sd_mock, copy_mock): + def test_location_from_app_template_image(self, subprocess_mock, git_exec_mock, cd_mock, copy_mock): it = InitTemplates(True) manifest = { @@ -62,63 +59,37 @@ def test_location_from_app_template_image(self, subprocess_mock, git_exec_mock, m = mock_open(read_data=manifest_json) with patch("samcli.cli.global_config.GlobalConfig.config_dir", new_callable=PropertyMock) as mock_cfg: - mock_cfg.return_value = "/tmp/test-sam" + mock_cfg.return_value = Path("/tmp/test-sam") with patch("samcli.commands.init.init_templates.open", m): location = it.location_from_app_template( IMAGE, None, "ruby2.5-image", "bundler", "hello-world-lambda-image" ) self.assertTrue(search("mock-ruby-image-template", location)) - @patch("samcli.commands.init.init_templates.InitTemplates._git_executable") + @patch("samcli.lib.utils.git_repo.GitRepo._git_executable") @patch("click.prompt") - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") - def test_fallback_options(self, git_exec_mock, prompt_mock, sd_mock): + @patch("samcli.lib.utils.git_repo.GitRepo._ensure_clone_directory_exists") + def test_fallback_options(self, git_exec_mock, prompt_mock, cd_mock): prompt_mock.return_value = "1" with patch("subprocess.check_output", new_callable=MagicMock) as mock_sub: with patch("samcli.cli.global_config.GlobalConfig.config_dir", new_callable=PropertyMock) as mock_cfg: mock_sub.side_effect = OSError("Fail") - mock_cfg.return_value = "/tmp/test-sam" + mock_cfg.return_value = Path("/tmp/test-sam") it = InitTemplates(True) location, app_template = it.prompt_for_location(ZIP, "ruby2.5", None, "bundler") self.assertTrue(search("cookiecutter-aws-sam-hello-ruby", location)) self.assertEqual("hello-world", app_template) - @patch("samcli.commands.init.init_templates.InitTemplates._git_executable") + @patch("samcli.lib.utils.git_repo.GitRepo._git_executable") @patch("click.prompt") - @patch("samcli.commands.init.init_templates.InitTemplates._shared_dir_check") - def test_fallback_process_error(self, git_exec_mock, prompt_mock, sd_mock): + @patch("samcli.lib.utils.git_repo.GitRepo._ensure_clone_directory_exists") + def test_fallback_process_error(self, git_exec_mock, prompt_mock, cd_mock): prompt_mock.return_value = "1" with patch("subprocess.check_output", new_callable=MagicMock) as mock_sub: with patch("samcli.cli.global_config.GlobalConfig.config_dir", new_callable=PropertyMock) as mock_cfg: mock_sub.side_effect = subprocess.CalledProcessError("fail", "fail", "not found".encode("utf-8")) - mock_cfg.return_value = "/tmp/test-sam" + mock_cfg.return_value = Path("/tmp/test-sam") it = InitTemplates(True) location, app_template = it.prompt_for_location(ZIP, "ruby2.5", None, "bundler") self.assertTrue(search("cookiecutter-aws-sam-hello-ruby", location)) self.assertEqual("hello-world", app_template) - - def test_git_executable_windows(self): - with patch("platform.system", new_callable=MagicMock) as mock_platform: - mock_platform.return_value = "Windows" - with patch("subprocess.Popen", new_callable=MagicMock) as mock_popen: - it = InitTemplates(True) - executable = it._git_executable() - 
self.assertEqual(executable, "git") - - def test_git_executable_fails(self): - with patch("subprocess.Popen", new_callable=MagicMock) as mock_popen: - mock_popen.side_effect = OSError("fail") - it = InitTemplates(True) - with self.assertRaises(OSError): - executable = it._git_executable() - - def test_shared_dir_check(self): - it = InitTemplates(True, False) - shared_dir_mock = MagicMock() - self.assertTrue(it._shared_dir_check(shared_dir_mock)) - - def test_shared_dir_failure(self): - it = InitTemplates(True, False) - shared_dir_mock = MagicMock() - shared_dir_mock.mkdir.side_effect = OSError("fail") - self.assertFalse(it._shared_dir_check(shared_dir_mock)) diff --git a/tests/unit/commands/logs/test_command.py b/tests/unit/commands/logs/test_command.py index b895428f19..3a48600ae0 100644 --- a/tests/unit/commands/logs/test_command.py +++ b/tests/unit/commands/logs/test_command.py @@ -1,5 +1,5 @@ from unittest import TestCase -from unittest.mock import Mock, patch, call +from unittest.mock import Mock, patch from samcli.commands.logs.command import do_cli @@ -13,67 +13,46 @@ def setUp(self): self.start_time = "start" self.end_time = "end" - @patch("samcli.commands.logs.command.click") @patch("samcli.commands.logs.logs_context.LogsCommandContext") - def test_without_tail(self, LogsCommandContextMock, click_mock): + def test_without_tail(self, logs_command_context_mock): tailing = False - events_iterable = [1, 2, 3] - formatted_events = [4, 5, 6] context_mock = Mock() - LogsCommandContextMock.return_value.__enter__.return_value = context_mock - - context_mock.fetcher.fetch.return_value = events_iterable - context_mock.formatter.do_format.return_value = formatted_events + logs_command_context_mock.return_value.__enter__.return_value = context_mock do_cli(self.function_name, self.stack_name, self.filter_pattern, tailing, self.start_time, self.end_time) - LogsCommandContextMock.assert_called_with( + logs_command_context_mock.assert_called_with( self.function_name, stack_name=self.stack_name, filter_pattern=self.filter_pattern, start_time=self.start_time, end_time=self.end_time, - output_file=None, ) - context_mock.fetcher.fetch.assert_called_with( - context_mock.log_group_name, + context_mock.fetcher.load_time_period.assert_called_with( filter_pattern=context_mock.filter_pattern, - start=context_mock.start_time, - end=context_mock.end_time, + start_time=context_mock.start_time, + end_time=context_mock.end_time, ) - context_mock.formatter.do_format.assert_called_with(events_iterable) - click_mock.echo.assert_has_calls([call(v, nl=False) for v in formatted_events]) - - @patch("samcli.commands.logs.command.click") @patch("samcli.commands.logs.logs_context.LogsCommandContext") - def test_with_tailing(self, LogsCommandContextMock, click_mock): + def test_with_tailing(self, logs_command_context_mock): tailing = True - events_iterable = [1, 2, 3] - formatted_events = [4, 5, 6] context_mock = Mock() - LogsCommandContextMock.return_value.__enter__.return_value = context_mock - - context_mock.fetcher.tail.return_value = events_iterable - context_mock.formatter.do_format.return_value = formatted_events + logs_command_context_mock.return_value.__enter__.return_value = context_mock do_cli(self.function_name, self.stack_name, self.filter_pattern, tailing, self.start_time, self.end_time) - LogsCommandContextMock.assert_called_with( + logs_command_context_mock.assert_called_with( self.function_name, stack_name=self.stack_name, filter_pattern=self.filter_pattern, start_time=self.start_time, 
end_time=self.end_time, - output_file=None, ) context_mock.fetcher.tail.assert_called_with( - context_mock.log_group_name, filter_pattern=context_mock.filter_pattern, start=context_mock.start_time + filter_pattern=context_mock.filter_pattern, start_time=context_mock.start_time ) - - context_mock.formatter.do_format.assert_called_with(events_iterable) - click_mock.echo.assert_has_calls([call(v, nl=False) for v in formatted_events]) diff --git a/tests/unit/commands/logs/test_console_consumers.py b/tests/unit/commands/logs/test_console_consumers.py new file mode 100644 index 0000000000..ab824ca769 --- /dev/null +++ b/tests/unit/commands/logs/test_console_consumers.py @@ -0,0 +1,15 @@ +from unittest import TestCase +from unittest.mock import patch, Mock + +from samcli.commands.logs.console_consumers import CWConsoleEventConsumer + + +class TestCWConsoleEventConsumer(TestCase): + def setUp(self): + self.consumer = CWConsoleEventConsumer() + + @patch("samcli.commands.logs.console_consumers.click") + def test_consume_with_event(self, patched_click): + event = Mock() + self.consumer.consume(event) + patched_click.echo.assert_called_with(event.message, nl=False) diff --git a/tests/unit/commands/logs/test_logs_context.py b/tests/unit/commands/logs/test_logs_context.py index fe37d4e1c7..abcd792b27 100644 --- a/tests/unit/commands/logs/test_logs_context.py +++ b/tests/unit/commands/logs/test_logs_context.py @@ -1,11 +1,11 @@ -import botocore.session -from botocore.stub import Stubber - from unittest import TestCase from unittest.mock import Mock, patch, ANY -from samcli.commands.logs.logs_context import LogsCommandContext +import botocore.session +from botocore.stub import Stubber + from samcli.commands.exceptions import UserException +from samcli.commands.logs.logs_context import LogsCommandContext class TestLogsCommandContext(TestCase): @@ -30,13 +30,6 @@ def test_basic_properties(self): self.assertEqual(self.context.filter_pattern, self.filter_pattern) self.assertIsNone(self.context.output_file_handle) # before setting context handle will be null - @patch("samcli.commands.logs.logs_context.LogsFetcher") - def test_fetcher_property(self, LogsFetcherMock): - LogsFetcherMock.return_value = Mock() - - self.assertEqual(self.context.fetcher, LogsFetcherMock.return_value) - LogsFetcherMock.assert_called_with(self.context._logs_client) - @patch("samcli.commands.logs.logs_context.Colored") def test_colored_property(self, ColoredMock): ColoredMock.return_value = Mock() @@ -61,15 +54,6 @@ def test_colored_property_without_output_file(self, ColoredMock): self.assertEqual(ctx.colored, ColoredMock.return_value) ColoredMock.assert_called_with(colorize=True) # Must enable colors - @patch("samcli.commands.logs.logs_context.LogsFormatter") - @patch("samcli.commands.logs.logs_context.Colored") - def test_formatter_property(self, ColoredMock, LogsFormatterMock): - LogsFormatterMock.return_value = Mock() - ColoredMock.return_value = Mock() - - self.assertEqual(self.context.formatter, LogsFormatterMock.return_value) - LogsFormatterMock.assert_called_with(ColoredMock.return_value, ANY) - @patch("samcli.commands.logs.logs_context.LogGroupProvider") @patch.object(LogsCommandContext, "_get_resource_id_from_stack") def test_log_group_name_property_with_stack_name(self, get_resource_id_mock, LogGroupProviderMock): diff --git a/tests/unit/lib/build_module/test_build_graph.py b/tests/unit/lib/build_module/test_build_graph.py index d1979f3c89..7b326beea1 100644 --- a/tests/unit/lib/build_module/test_build_graph.py +++ 
b/tests/unit/lib/build_module/test_build_graph.py @@ -26,7 +26,7 @@ InvalidBuildGraphException, LayerBuildDefinition, ) -from samcli.lib.providers.provider import Function +from samcli.lib.providers.provider import Function, LayerVersion from samcli.lib.utils import osutils from samcli.lib.utils.packagetype import ZIP @@ -47,10 +47,13 @@ def generate_function( layers="layers", events="events", codesign_config_arn="codesign_config_arn", - metadata={}, + metadata=None, inlinecode=None, stack_path="", ): + if metadata is None: + metadata = {} + return Function( name, function_name, @@ -73,6 +76,21 @@ def generate_function( ) +def generate_layer( + arn="arn:aws:lambda:region:account-id:layer:layer-name:1", + codeuri="codeuri", + compatible_runtimes=None, + metadata=None, + stack_path="", +): + if compatible_runtimes is None: + compatible_runtimes = ["runtime"] + if metadata is None: + metadata = {} + + return LayerVersion(arn, codeuri, compatible_runtimes, metadata, stack_path) + + class TestConversionFunctions(TestCase): def test_function_build_definition_to_toml_table(self): build_definition = FunctionBuildDefinition( @@ -151,8 +169,11 @@ def test_toml_table_to_layer_build_definition(self): class TestBuildGraph(TestCase): CODEURI = "hello_world_python/" + LAYER_CODEURI = "sum_layer/" + LAYER_NAME = "SumLayer" ZIP = ZIP RUNTIME = "python3.8" + LAYER_RUNTIME = "nodejs12.x" METADATA = {"Test": "hello", "Test2": "world"} UUID = "3c1c254e-cd4b-4d94-8c74-7ab870b36063" LAYER_UUID = "7dnc257e-cd4b-4d94-8c74-7ab870b3abc3" @@ -175,10 +196,10 @@ class TestBuildGraph(TestCase): [layer_build_definitions] [layer_build_definitions.{LAYER_UUID}] - layer_name = "SumLayer" - codeuri = "sum_layer/" - build_method = "nodejs12.x" - compatible_runtimes = ["nodejs12.x"] + layer_name = "{LAYER_NAME}" + codeuri = "{LAYER_CODEURI}" + build_method = "{LAYER_RUNTIME}" + compatible_runtimes = ["{LAYER_RUNTIME}"] source_md5 = "{SOURCE_MD5}" layer = "SumLayer" [layer_build_definitions.{LAYER_UUID}.env_vars] @@ -197,6 +218,7 @@ def test_should_instantiate_first_time(self): self.assertEqual( build_graph1.get_function_build_definitions(), build_graph2.get_function_build_definitions() ) + self.assertEqual(build_graph1.get_layer_build_definitions(), build_graph2.get_layer_build_definitions()) def test_should_instantiate_first_time_and_update(self): with osutils.mkdir_temp() as temp_base_dir: @@ -205,7 +227,7 @@ def test_should_instantiate_first_time_and_update(self): # create a build graph and persist it build_graph1 = BuildGraph(str(build_dir)) - build_definition1 = FunctionBuildDefinition( + function_build_definition1 = FunctionBuildDefinition( TestBuildGraph.RUNTIME, TestBuildGraph.CODEURI, TestBuildGraph.ZIP, @@ -216,7 +238,22 @@ def test_should_instantiate_first_time_and_update(self): function1 = generate_function( runtime=TestBuildGraph.RUNTIME, codeuri=TestBuildGraph.CODEURI, metadata=TestBuildGraph.METADATA ) - build_graph1.put_function_build_definition(build_definition1, function1) + build_graph1.put_function_build_definition(function_build_definition1, function1) + layer_build_definition1 = LayerBuildDefinition( + TestBuildGraph.LAYER_NAME, + TestBuildGraph.LAYER_CODEURI, + TestBuildGraph.LAYER_RUNTIME, + [TestBuildGraph.LAYER_RUNTIME], + TestBuildGraph.SOURCE_MD5, + TestBuildGraph.ENV_VARS, + ) + layer1 = generate_layer( + compatible_runtimes=[TestBuildGraph.RUNTIME], + codeuri=TestBuildGraph.LAYER_CODEURI, + metadata=TestBuildGraph.METADATA, + ) + build_graph1.put_layer_build_definition(layer_build_definition1, 
layer1) + build_graph1.clean_redundant_definitions_and_update(True) # read previously persisted graph and compare @@ -224,10 +261,17 @@ def test_should_instantiate_first_time_and_update(self): self.assertEqual( len(build_graph1.get_function_build_definitions()), len(build_graph2.get_function_build_definitions()) ) + self.assertEqual( + len(build_graph1.get_layer_build_definitions()), len(build_graph2.get_layer_build_definitions()) + ) self.assertEqual( list(build_graph1.get_function_build_definitions())[0], list(build_graph2.get_function_build_definitions())[0], ) + self.assertEqual( + list(build_graph1.get_layer_build_definitions())[0], + list(build_graph2.get_layer_build_definitions())[0], + ) def test_should_read_existing_build_graph(self): with osutils.mkdir_temp() as temp_base_dir: @@ -238,13 +282,20 @@ def test_should_read_existing_build_graph(self): build_graph_path.write_text(TestBuildGraph.BUILD_GRAPH_CONTENTS) build_graph = BuildGraph(str(build_dir)) - for build_definition in build_graph.get_function_build_definitions(): - self.assertEqual(build_definition.codeuri, TestBuildGraph.CODEURI) - self.assertEqual(build_definition.runtime, TestBuildGraph.RUNTIME) - self.assertEqual(build_definition.packagetype, TestBuildGraph.ZIP) - self.assertEqual(build_definition.metadata, TestBuildGraph.METADATA) - self.assertEqual(build_definition.source_md5, TestBuildGraph.SOURCE_MD5) - self.assertEqual(build_definition.env_vars, TestBuildGraph.ENV_VARS) + for function_build_definition in build_graph.get_function_build_definitions(): + self.assertEqual(function_build_definition.codeuri, TestBuildGraph.CODEURI) + self.assertEqual(function_build_definition.runtime, TestBuildGraph.RUNTIME) + self.assertEqual(function_build_definition.packagetype, TestBuildGraph.ZIP) + self.assertEqual(function_build_definition.metadata, TestBuildGraph.METADATA) + self.assertEqual(function_build_definition.source_md5, TestBuildGraph.SOURCE_MD5) + self.assertEqual(function_build_definition.env_vars, TestBuildGraph.ENV_VARS) + + for layer_build_definition in build_graph.get_layer_build_definitions(): + self.assertEqual(layer_build_definition.name, TestBuildGraph.LAYER_NAME) + self.assertEqual(layer_build_definition.codeuri, TestBuildGraph.LAYER_CODEURI) + self.assertEqual(layer_build_definition.build_method, TestBuildGraph.LAYER_RUNTIME) + self.assertEqual(layer_build_definition.compatible_runtimes, [TestBuildGraph.LAYER_RUNTIME]) + self.assertEqual(layer_build_definition.env_vars, TestBuildGraph.ENV_VARS) def test_functions_should_be_added_existing_build_graph(self): with osutils.mkdir_temp() as temp_base_dir: @@ -265,15 +316,17 @@ def test_functions_should_be_added_existing_build_graph(self): TestBuildGraph.ENV_VARS, ) function1 = generate_function( - runtime=TestBuildGraph.RUNTIME, codeuri=TestBuildGraph.CODEURI, metadata=TestBuildGraph.METADATA + runtime=TestBuildGraph.RUNTIME, + codeuri=TestBuildGraph.CODEURI, + metadata=TestBuildGraph.METADATA, ) build_graph.put_function_build_definition(build_definition1, function1) - self.assertTrue(len(build_graph.get_function_build_definitions()), 1) - for build_definition in build_graph.get_function_build_definitions(): - self.assertTrue(len(build_definition.functions), 1) - self.assertTrue(build_definition.functions[0], function1) - self.assertEqual(build_definition.uuid, TestBuildGraph.UUID) + build_definitions = build_graph.get_function_build_definitions() + self.assertEqual(len(build_definitions), 1) + self.assertEqual(len(build_definitions[0].functions), 1) + 
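The equality assertions in these build-graph tests ultimately depend on build.toml round-tripping: a definition persisted by one BuildGraph instance must parse back into an equal definition in a second instance (BUILD_GRAPH_CONTENTS above shows the on-disk shape). A self-contained sketch of that round-trip idea, using the third-party `toml` package for brevity rather than whatever serializer BuildGraph itself uses; the keys and UUID mirror the test fixture:

```python
# Illustrative round trip: dump a definition to TOML text and parse it back;
# field-for-field equality is what the tests above assert via __eq__.
import toml

original = {
    "function_build_definitions": {
        "3c1c254e-cd4b-4d94-8c74-7ab870b36063": {
            "codeuri": "hello_world_python/",
            "runtime": "python3.8",
            "packagetype": "Zip",
        }
    }
}
assert toml.loads(toml.dumps(original)) == original
```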
self.assertEqual(build_definitions[0].functions[0], function1) + self.assertEqual(build_definitions[0].uuid, TestBuildGraph.UUID) build_definition2 = FunctionBuildDefinition( "another_runtime", @@ -285,7 +338,56 @@ def test_functions_should_be_added_existing_build_graph(self): ) function2 = generate_function(name="another_function") build_graph.put_function_build_definition(build_definition2, function2) - self.assertTrue(len(build_graph.get_function_build_definitions()), 2) + + build_definitions = build_graph.get_function_build_definitions() + self.assertEqual(len(build_definitions), 2) + self.assertEqual(len(build_definitions[1].functions), 1) + self.assertEqual(build_definitions[1].functions[0], function2) + + def test_layers_should_be_added_existing_build_graph(self): + with osutils.mkdir_temp() as temp_base_dir: + build_dir = Path(temp_base_dir, ".aws-sam", "build") + build_dir.mkdir(parents=True) + + build_graph_path = Path(build_dir.parent, "build.toml") + build_graph_path.write_text(TestBuildGraph.BUILD_GRAPH_CONTENTS) + + build_graph = BuildGraph(str(build_dir)) + + build_definition1 = LayerBuildDefinition( + TestBuildGraph.LAYER_NAME, + TestBuildGraph.LAYER_CODEURI, + TestBuildGraph.LAYER_RUNTIME, + [TestBuildGraph.LAYER_RUNTIME], + TestBuildGraph.SOURCE_MD5, + TestBuildGraph.ENV_VARS, + ) + layer1 = generate_layer( + compatible_runtimes=[TestBuildGraph.RUNTIME], + codeuri=TestBuildGraph.LAYER_CODEURI, + metadata=TestBuildGraph.METADATA, + ) + build_graph.put_layer_build_definition(build_definition1, layer1) + + build_definitions = build_graph.get_layer_build_definitions() + self.assertEqual(len(build_definitions), 1) + self.assertEqual(build_definitions[0].layer, layer1) + self.assertEqual(build_definitions[0].uuid, TestBuildGraph.LAYER_UUID) + + build_definition2 = LayerBuildDefinition( + "another_layername", + "another_codeuri", + "another_runtime", + ["another_runtime"], + "another_source_md5", + {"env_vars": "value2"}, + ) + layer2 = generate_layer(arn="arn:aws:lambda:region:account-id:layer:another-layer-name:1") + build_graph.put_layer_build_definition(build_definition2, layer2) + + build_definitions = build_graph.get_layer_build_definitions() + self.assertEqual(len(build_definitions), 2) + self.assertEqual(build_definitions[1].layer, layer2) class TestBuildDefinition(TestCase): diff --git a/tests/unit/lib/cookiecutter/test_interactive_flow.py b/tests/unit/lib/cookiecutter/test_interactive_flow.py index ed52626451..47ed0ec2b6 100644 --- a/tests/unit/lib/cookiecutter/test_interactive_flow.py +++ b/tests/unit/lib/cookiecutter/test_interactive_flow.py @@ -1,3 +1,4 @@ +from pathlib import Path from unittest import TestCase from unittest.mock import patch from samcli.lib.cookiecutter.interactive_flow import InteractiveFlow @@ -49,3 +50,26 @@ def test_run(self, mock_3rd_q, mock_2nd_q, mock_1st_q): mock_3rd_q.assert_called_once() self.assertEqual(expected_context, actual_context) self.assertIsNot(actual_context, initial_context) # shouldn't modify the input, it should copy and return new + + @patch.object(Question, "ask") + @patch.object(Confirm, "ask") + @patch.object(Choice, "ask") + def test_run_with_preloaded_default_values(self, mock_3rd_q, mock_2nd_q, mock_1st_q): + + mock_1st_q.return_value = "answer1" + mock_2nd_q.return_value = False + mock_3rd_q.return_value = "option1" + + initial_context = {"key": "value", "['beta', 'bootstrap', 'x']": "y"} + + actual_context = self.flow.run(initial_context) + + mock_1st_q.assert_called_once() + mock_2nd_q.assert_called_once() + 
mock_3rd_q.assert_called_once() + + self.assertEqual( + {"1st": "answer1", "2nd": False, "3rd": "option1", "['beta', 'bootstrap', 'x']": "y", "key": "value"}, + actual_context, + ) + self.assertIsNot(actual_context, initial_context) # shouldn't modify the input, it should copy and return new diff --git a/tests/unit/lib/cookiecutter/test_question.py b/tests/unit/lib/cookiecutter/test_question.py index e59a76b782..c46a37fa43 100644 --- a/tests/unit/lib/cookiecutter/test_question.py +++ b/tests/unit/lib/cookiecutter/test_question.py @@ -1,5 +1,9 @@ +from typing import List, Union, Dict from unittest import TestCase -from unittest.mock import ANY, patch +from unittest.mock import ANY, patch, Mock + +from parameterized import parameterized + from samcli.lib.cookiecutter.question import Question, QuestionKind, Choice, Confirm, Info, QuestionFactory @@ -27,6 +31,18 @@ def setUp(self): default_next_question_key=self._ANY_DEFAULT_NEXT_QUESTION_KEY, ) + def get_question_with_default_from_cookiecutter_context_using_keypath( + self, key_path: List[Union[str, Dict]] + ) -> Question: + return Question( + text=self._ANY_TEXT, + key=self._ANY_KEY, + default={"keyPath": key_path}, + is_required=True, + next_question_map=self._ANY_NEXT_QUESTION_MAP, + default_next_question_key=self._ANY_DEFAULT_NEXT_QUESTION_KEY, + ) + def test_creating_questions(self): q = Question(text=self._ANY_TEXT, key=self._ANY_KEY) self.assertEqual(q.text, self._ANY_TEXT) @@ -61,10 +77,80 @@ def test_get_next_question_key(self): @patch("samcli.lib.cookiecutter.question.click") def test_ask(self, mock_click): mock_click.prompt.return_value = self._ANY_ANSWER - answer = self.question.ask() + answer = self.question.ask({}) self.assertEqual(answer, self._ANY_ANSWER) mock_click.prompt.assert_called_once_with(text=self.question.text, default=self.question.default_answer) + @patch("samcli.lib.cookiecutter.question.click") + def test_ask_resolves_from_cookiecutter_context(self, mock_click): + # Setup + expected_default_value = Mock() + previous_question_key = "this is a question" + previous_question_answer = "this is an answer" + context = { + "['x', 'this is an answer']": expected_default_value, + previous_question_key: previous_question_answer, + } + question = self.get_question_with_default_from_cookiecutter_context_using_keypath( + ["x", {"valueOf": previous_question_key}] + ) + + # Trigger + question.ask(context=context) + + # Verify + mock_click.prompt.assert_called_once_with(text=self.question.text, default=expected_default_value) + + @patch("samcli.lib.cookiecutter.question.click") + def test_ask_resolves_from_cookiecutter_context_non_exist_key_path(self, mock_click): + # Setup + context = {} + question = self.get_question_with_default_from_cookiecutter_context_using_keypath(["y"]) + + # Trigger + question.ask(context=context) + + # Verify + mock_click.prompt.assert_called_once_with(text=self.question.text, default=None) + + def test_ask_resolves_from_cookiecutter_context_non_exist_question_key(self): + # Setup + expected_default_value = Mock() + previous_question_key = "this is a question" + previous_question_answer = "this is an answer" + context = { + "['x', 'this is an answer']": expected_default_value, + previous_question_key: previous_question_answer, + } + question = self.get_question_with_default_from_cookiecutter_context_using_keypath( + ["x", {"valueOf": "non_exist_question_key"}] + ) + + # Trigger + with self.assertRaises(KeyError): + question.ask(context=context) + + @parameterized.expand([("this should have been a 
list"), ([1],), ({},)]) + def test_ask_resolves_from_cookiecutter_context_with_key_path_not_a_list(self, key_path): + # Setup + context = {} + question = self.get_question_with_default_from_cookiecutter_context_using_keypath(key_path) + + # Trigger + with self.assertRaises(ValueError): + question.ask(context=context) + + @parameterized.expand([({"keyPath123": Mock()},), ({"keyPath": [{"valueOf123": Mock()}]},)]) + def test_ask_resolves_from_cookiecutter_context_with_default_object_missing_keys(self, default_object): + # Setup + context = {} + question = self.get_question_with_default_from_cookiecutter_context_using_keypath([]) + question._default_answer = default_object + + # Trigger + with self.assertRaises(KeyError): + question.ask(context=context) + class TestChoice(TestCase): def setUp(self): @@ -99,7 +185,7 @@ def test_get_options_indexes_with_different_bases(self): @patch("samcli.lib.cookiecutter.question.click") def test_ask(self, mock_click, mock_choice): mock_click.prompt.return_value = 2 - answer = self.question.ask() + answer = self.question.ask({}) self.assertEqual(answer, TestQuestion._ANY_OPTIONS[1]) # we deduct one from user's choice (base 1 vs base 0) mock_click.prompt.assert_called_once_with( text="Choice", default=self.question.default_answer, show_choices=False, type=ANY @@ -112,7 +198,7 @@ class TestInfo(TestCase): def test_ask(self, mock_click): q = Info(text=TestQuestion._ANY_TEXT, key=TestQuestion._ANY_KEY) mock_click.echo.return_value = None - answer = q.ask() + answer = q.ask({}) self.assertIsNone(answer) mock_click.echo.assert_called_once_with(message=q.text) @@ -122,7 +208,7 @@ class TestConfirm(TestCase): def test_ask(self, mock_click): q = Confirm(text=TestQuestion._ANY_TEXT, key=TestQuestion._ANY_KEY) mock_click.confirm.return_value = True - answer = q.ask() + answer = q.ask({}) self.assertTrue(answer) mock_click.confirm.assert_called_once_with(text=q.text) diff --git a/tests/unit/lib/logs/test_fetcher.py b/tests/unit/lib/logs/test_fetcher.py deleted file mode 100644 index c0b634c008..0000000000 --- a/tests/unit/lib/logs/test_fetcher.py +++ /dev/null @@ -1,255 +0,0 @@ -import copy -import datetime -import botocore.session - -from unittest import TestCase -from unittest.mock import Mock, patch, call, ANY -from botocore.stub import Stubber - -from samcli.lib.logs.fetcher import LogsFetcher -from samcli.lib.logs.event import LogEvent -from samcli.lib.utils.time import to_timestamp, to_datetime - - -class TestLogsFetcher_fetch(TestCase): - def setUp(self): - - real_client = botocore.session.get_session().create_client("logs", region_name="us-east-1") - self.client_stubber = Stubber(real_client) - self.fetcher = LogsFetcher(real_client) - - self.log_group_name = "name" - self.stream_name = "stream name" - self.timestamp = to_timestamp(datetime.datetime.utcnow()) - - self.mock_api_response = { - "events": [ - { - "eventId": "id1", - "ingestionTime": 0, - "logStreamName": self.stream_name, - "message": "message 1", - "timestamp": self.timestamp, - }, - { - "eventId": "id2", - "ingestionTime": 0, - "logStreamName": self.stream_name, - "message": "message 2", - "timestamp": self.timestamp, - }, - ] - } - - self.expected_events = [ - LogEvent( - self.log_group_name, - { - "eventId": "id1", - "ingestionTime": 0, - "logStreamName": self.stream_name, - "message": "message 1", - "timestamp": self.timestamp, - }, - ), - LogEvent( - self.log_group_name, - { - "eventId": "id2", - "ingestionTime": 0, - "logStreamName": self.stream_name, - "message": "message 2", - "timestamp": 
self.timestamp, - }, - ), - ] - - def test_must_fetch_logs_for_log_group(self): - expected_params = {"logGroupName": self.log_group_name, "interleaved": True} - - # Configure the stubber to return the configured response. The stubber also verifies - # that input params were provided as expected - self.client_stubber.add_response("filter_log_events", self.mock_api_response, expected_params) - - with self.client_stubber: - events_iterable = self.fetcher.fetch(self.log_group_name) - - actual_result = list(events_iterable) - self.assertEqual(self.expected_events, actual_result) - - def test_must_fetch_logs_with_all_params(self): - pattern = "foobar" - start = datetime.datetime.utcnow() - end = datetime.datetime.utcnow() - - expected_params = { - "logGroupName": self.log_group_name, - "interleaved": True, - "startTime": to_timestamp(start), - "endTime": to_timestamp(end), - "filterPattern": pattern, - } - - self.client_stubber.add_response("filter_log_events", self.mock_api_response, expected_params) - - with self.client_stubber: - events_iterable = self.fetcher.fetch(self.log_group_name, start=start, end=end, filter_pattern=pattern) - - actual_result = list(events_iterable) - self.assertEqual(self.expected_events, actual_result) - - def test_must_paginate_using_next_token(self): - """Make three API calls, first two returns a nextToken and last does not.""" - token = "token" - expected_params = {"logGroupName": self.log_group_name, "interleaved": True} - expected_params_with_token = {"logGroupName": self.log_group_name, "interleaved": True, "nextToken": token} - - mock_response_with_token = copy.deepcopy(self.mock_api_response) - mock_response_with_token["nextToken"] = token - - # Call 1 returns a token. Also when first call is made, token is **not** passed as API params - self.client_stubber.add_response("filter_log_events", mock_response_with_token, expected_params) - - # Call 2 returns a token - self.client_stubber.add_response("filter_log_events", mock_response_with_token, expected_params_with_token) - - # Call 3 DOES NOT return a token. This will terminate the loop. 
- self.client_stubber.add_response("filter_log_events", self.mock_api_response, expected_params_with_token) - - # Same data was returned in each API call - expected_events_result = self.expected_events + self.expected_events + self.expected_events - - with self.client_stubber: - events_iterable = self.fetcher.fetch(self.log_group_name) - - actual_result = list(events_iterable) - self.assertEqual(expected_events_result, actual_result) - - -class TestLogsFetcher_tail(TestCase): - def setUp(self): - - self.fetcher = LogsFetcher(Mock()) - - self.log_group_name = "name" - self.filter_pattern = "pattern" - - self.start_time = to_datetime(10) - self.max_retries = 3 - self.poll_interval = 1 - - self.mock_events1 = [ - LogEvent(self.log_group_name, {"timestamp": 11}), - LogEvent(self.log_group_name, {"timestamp": 12}), - ] - self.mock_events2 = [ - LogEvent(self.log_group_name, {"timestamp": 13}), - LogEvent(self.log_group_name, {"timestamp": 14}), - ] - self.mock_events_empty = [] - - @patch("samcli.lib.logs.fetcher.time") - def test_must_tail_logs_with_single_data_fetch(self, time_mock): - - self.fetcher.fetch = Mock() - - self.fetcher.fetch.side_effect = [ - self.mock_events1, - # Return empty data for `max_retries` number of polls - self.mock_events_empty, - self.mock_events_empty, - self.mock_events_empty, - ] - - expected_fetch_calls = [ - # First fetch returns data - call(ANY, start=self.start_time, filter_pattern=self.filter_pattern), - # Three empty fetches - call(ANY, start=to_datetime(13), filter_pattern=self.filter_pattern), - call(ANY, start=to_datetime(13), filter_pattern=self.filter_pattern), - call(ANY, start=to_datetime(13), filter_pattern=self.filter_pattern), - ] - - # One per poll - expected_sleep_calls = [call(self.poll_interval) for i in expected_fetch_calls] - - result_itr = self.fetcher.tail( - self.log_group_name, - start=self.start_time, - filter_pattern=self.filter_pattern, - max_retries=self.max_retries, - poll_interval=self.poll_interval, - ) - - self.assertEqual(self.mock_events1, list(result_itr)) - self.assertEqual(expected_fetch_calls, self.fetcher.fetch.call_args_list) - self.assertEqual(expected_sleep_calls, time_mock.sleep.call_args_list) - - @patch("samcli.lib.logs.fetcher.time") - def test_must_tail_logs_with_multiple_data_fetches(self, time_mock): - - self.fetcher.fetch = Mock() - - self.fetcher.fetch.side_effect = [ - self.mock_events1, - # Just one empty fetch - self.mock_events_empty, - # This fetch returns data - self.mock_events2, - # Return empty data for `max_retries` number of polls - self.mock_events_empty, - self.mock_events_empty, - self.mock_events_empty, - ] - - expected_fetch_calls = [ - # First fetch returns data - call(ANY, start=self.start_time, filter_pattern=self.filter_pattern), - # This fetch was empty - call(ANY, start=to_datetime(13), filter_pattern=self.filter_pattern), - # This fetch returned data - call(ANY, start=to_datetime(13), filter_pattern=self.filter_pattern), - # Three empty fetches - call(ANY, start=to_datetime(15), filter_pattern=self.filter_pattern), - call(ANY, start=to_datetime(15), filter_pattern=self.filter_pattern), - call(ANY, start=to_datetime(15), filter_pattern=self.filter_pattern), - ] - - # One per poll - expected_sleep_calls = [call(self.poll_interval) for i in expected_fetch_calls] - - result_itr = self.fetcher.tail( - self.log_group_name, - start=self.start_time, - filter_pattern=self.filter_pattern, - max_retries=self.max_retries, - poll_interval=self.poll_interval, - ) - - self.assertEqual(self.mock_events1 
+ self.mock_events2, list(result_itr)) - self.assertEqual(expected_fetch_calls, self.fetcher.fetch.call_args_list) - self.assertEqual(expected_sleep_calls, time_mock.sleep.call_args_list) - - @patch("samcli.lib.logs.fetcher.time") - def test_without_start_time(self, time_mock): - - self.fetcher.fetch = Mock() - - self.fetcher.fetch.return_value = self.mock_events_empty - - expected_fetch_calls = [ - # Three empty fetches, all with default start time - call(ANY, start=to_datetime(0), filter_pattern=ANY), - call(ANY, start=to_datetime(0), filter_pattern=ANY), - call(ANY, start=to_datetime(0), filter_pattern=ANY), - ] - - result_itr = self.fetcher.tail( - self.log_group_name, - filter_pattern=self.filter_pattern, - max_retries=self.max_retries, - poll_interval=self.poll_interval, - ) - - self.assertEqual([], list(result_itr)) - self.assertEqual(expected_fetch_calls, self.fetcher.fetch.call_args_list) diff --git a/tests/unit/lib/logs/test_formatter.py b/tests/unit/lib/logs/test_formatter.py deleted file mode 100644 index b30fd49c71..0000000000 --- a/tests/unit/lib/logs/test_formatter.py +++ /dev/null @@ -1,164 +0,0 @@ -import json - -from unittest import TestCase -from unittest.mock import Mock, patch, call -from parameterized import parameterized - -from samcli.lib.logs.formatter import LogsFormatter, LambdaLogMsgFormatters, KeywordHighlighter, JSONMsgFormatter -from samcli.lib.logs.event import LogEvent - - -class TestLogsFormatter_pretty_print_event(TestCase): - def setUp(self): - self.colored_mock = Mock() - self.group_name = "group name" - self.stream_name = "stream name" - self.message = "message" - self.event_dict = {"timestamp": 1, "message": self.message, "logStreamName": self.stream_name} - - def test_must_serialize_event(self): - colored_timestamp = "colored timestamp" - colored_stream_name = "colored stream name" - self.colored_mock.yellow.return_value = colored_timestamp - self.colored_mock.cyan.return_value = colored_stream_name - - event = LogEvent(self.group_name, self.event_dict) - - expected = " ".join([colored_stream_name, colored_timestamp, self.message]) - result = LogsFormatter._pretty_print_event(event, self.colored_mock) - - self.assertEqual(expected, result) - self.colored_mock.yellow.has_calls() - self.colored_mock.cyan.assert_called_with(self.stream_name) - - -def _passthru_formatter(event, colored): - return event - - -class TestLogsFormatter_do_format(TestCase): - def setUp(self): - self.colored_mock = Mock() - - # Set formatter chain method to return the input unaltered. 
- self.chain_method1 = Mock(wraps=_passthru_formatter) - self.chain_method2 = Mock(wraps=_passthru_formatter) - self.chain_method3 = Mock(wraps=_passthru_formatter) - - self.formatter_chain = [self.chain_method1, self.chain_method2, self.chain_method3] - - @patch.object(LogsFormatter, "_pretty_print_event", wraps=_passthru_formatter) - def test_must_map_formatters_sequentially(self, pretty_print_mock): - - events_iterable = [1, 2, 3] - expected_result = [1, 2, 3] - expected_call_order = [ - call(1, colored=self.colored_mock), - call(2, colored=self.colored_mock), - call(3, colored=self.colored_mock), - ] - - formatter = LogsFormatter(self.colored_mock, self.formatter_chain) - - result_iterable = formatter.do_format(events_iterable) - self.assertEqual(list(result_iterable), expected_result) - - self.chain_method1.assert_has_calls(expected_call_order) - self.chain_method2.assert_has_calls(expected_call_order) - self.chain_method3.assert_has_calls(expected_call_order) - pretty_print_mock.assert_has_calls(expected_call_order) # Pretty Printer must always be called - - @patch.object(LogsFormatter, "_pretty_print_event", wraps=_passthru_formatter) - def test_must_work_without_formatter_chain(self, pretty_print_mock): - - events_iterable = [1, 2, 3] - expected_result = [1, 2, 3] - expected_call_order = [ - call(1, colored=self.colored_mock), - call(2, colored=self.colored_mock), - call(3, colored=self.colored_mock), - ] - - # No formatter chain. - formatter = LogsFormatter(self.colored_mock) - - result_iterable = formatter.do_format(events_iterable) - self.assertEqual(list(result_iterable), expected_result) - - # Pretty Print is always called, even if there are no other formatters in the chain. - pretty_print_mock.assert_has_calls(expected_call_order) - self.chain_method1.assert_not_called() - self.chain_method2.assert_not_called() - self.chain_method3.assert_not_called() - - -class TestLambdaLogMsgFormatters_colorize_crashes(TestCase): - @parameterized.expand( - [ - "Task timed out", - "Something happened. Task timed out. 
Something else happend", - "Process exited before completing request", - ] - ) - def test_must_color_crash_messages(self, input_msg): - color_result = "colored messaage" - colored = Mock() - colored.red.return_value = color_result - event = LogEvent("group_name", {"message": input_msg}) - - result = LambdaLogMsgFormatters.colorize_errors(event, colored) - self.assertEqual(result.message, color_result) - colored.red.assert_called_with(input_msg) - - def test_must_ignore_other_messages(self): - colored = Mock() - event = LogEvent("group_name", {"message": "some msg"}) - - result = LambdaLogMsgFormatters.colorize_errors(event, colored) - self.assertEqual(result.message, "some msg") - colored.red.assert_not_called() - - -class TestKeywordHighlight_highlight_keyword(TestCase): - def test_must_highlight_all_keywords(self): - input_msg = "this keyword some keyword other keyword" - keyword = "keyword" - color_result = "colored" - expected_msg = "this colored some colored other colored" - - colored = Mock() - colored.underline.return_value = color_result - event = LogEvent("group_name", {"message": input_msg}) - - result = KeywordHighlighter(keyword).highlight_keywords(event, colored) - self.assertEqual(result.message, expected_msg) - colored.underline.assert_called_with(keyword) - - def test_must_ignore_if_keyword_is_absent(self): - colored = Mock() - input_msg = "this keyword some keyword other keyword" - event = LogEvent("group_name", {"message": input_msg}) - - result = KeywordHighlighter().highlight_keywords(event, colored) - self.assertEqual(result.message, input_msg) - colored.underline.assert_not_called() - - -class TestJSONMsgFormatter_format_json(TestCase): - def test_must_pretty_print_json(self): - data = {"a": "b"} - input_msg = '{"a": "b"}' - expected_msg = json.dumps(data, indent=2) - - event = LogEvent("group_name", {"message": input_msg}) - - result = JSONMsgFormatter.format_json(event, None) - self.assertEqual(result.message, expected_msg) - - @parameterized.expand(["this is not json", '{"not a valid json"}']) - def test_ignore_non_json(self, input_msg): - - event = LogEvent("group_name", {"message": input_msg}) - - result = JSONMsgFormatter.format_json(event, None) - self.assertEqual(result.message, input_msg) diff --git a/tests/unit/lib/observability/__init__.py b/tests/unit/lib/observability/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/lib/observability/cw_logs/__init__.py b/tests/unit/lib/observability/cw_logs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/lib/logs/test_event.py b/tests/unit/lib/observability/cw_logs/test_cw_log_event.py similarity index 51% rename from tests/unit/lib/logs/test_event.py rename to tests/unit/lib/observability/cw_logs/test_cw_log_event.py index c093edf0e2..62968f0d71 100644 --- a/tests/unit/lib/logs/test_event.py +++ b/tests/unit/lib/observability/cw_logs/test_cw_log_event.py @@ -1,9 +1,9 @@ from unittest import TestCase -from samcli.lib.logs.event import LogEvent +from samcli.lib.observability.cw_logs.cw_log_event import CWLogEvent -class TestLogEvent(TestCase): +class TestCWLogEvent(TestCase): def setUp(self): self.group_name = "log group name" self.stream_name = "stream name" @@ -12,44 +12,56 @@ def setUp(self): self.timestamp_str = "2018-07-06T13:09:54" def test_must_extract_fields_from_event(self): - event = LogEvent( + event = CWLogEvent( self.group_name, {"timestamp": self.timestamp, "logStreamName": self.stream_name, "message": self.message} ) - 
self.assertEqual(event.log_group_name, self.group_name) + self.assertEqual(event.cw_log_group, self.group_name) self.assertEqual(event.log_stream_name, self.stream_name) self.assertEqual(event.message, self.message) - self.assertEqual(self.timestamp_str, event.timestamp) + self.assertEqual(self.timestamp, event.timestamp) def test_must_ignore_if_some_fields_are_empty(self): - event = LogEvent(self.group_name, {"logStreamName": "stream name"}) + event = CWLogEvent(self.group_name, {"logStreamName": "stream name"}) - self.assertEqual(event.log_group_name, self.group_name) + self.assertEqual(event.cw_log_group, self.group_name) self.assertEqual(event.log_stream_name, self.stream_name) self.assertEqual(event.message, "") - self.assertIsNone(event.timestamp) + self.assertEqual(event.timestamp, 0) def test_must_ignore_if_event_is_empty(self): - event = LogEvent(self.group_name, {}) + event = CWLogEvent(self.group_name, {}) - self.assertEqual(event.log_group_name, self.group_name) - self.assertIsNone(event.log_stream_name) - self.assertIsNone(event.message) - self.assertIsNone(event.timestamp) + self.assertEqual(event.cw_log_group, self.group_name) + self.assertEqual(event.log_stream_name, "") + self.assertEqual(event.message, "") + self.assertEqual(event.timestamp, 0) def test_check_for_equality(self): - event = LogEvent( + event = CWLogEvent( self.group_name, {"timestamp": self.timestamp, "logStreamName": self.stream_name, "message": self.message} ) - other = LogEvent( + other = CWLogEvent( self.group_name, {"timestamp": self.timestamp, "logStreamName": self.stream_name, "message": self.message} ) self.assertEqual(event, other) + def test_check_for_inequality(self): + event = CWLogEvent( + self.group_name, + {"timestamp": self.timestamp + 1, "logStreamName": self.stream_name, "message": self.message}, + ) + + other = CWLogEvent( + self.group_name, {"timestamp": self.timestamp, "logStreamName": self.stream_name, "message": self.message} + ) + + self.assertNotEqual(event, other) + def test_check_for_equality_with_other_data_types(self): - event = LogEvent(self.group_name, {}) + event = CWLogEvent(self.group_name, {}) other = "this is not an event" self.assertNotEqual(event, other) diff --git a/tests/unit/lib/observability/cw_logs/test_cw_log_formatters.py b/tests/unit/lib/observability/cw_logs/test_cw_log_formatters.py new file mode 100644 index 0000000000..f864ff1fe7 --- /dev/null +++ b/tests/unit/lib/observability/cw_logs/test_cw_log_formatters.py @@ -0,0 +1,120 @@ +import json +from unittest import TestCase +from unittest.mock import Mock + +from parameterized import parameterized + +from samcli.lib.observability.cw_logs.cw_log_event import CWLogEvent +from samcli.lib.observability.cw_logs.cw_log_formatters import ( + CWPrettyPrintFormatter, + CWColorizeErrorsFormatter, + CWKeywordHighlighterFormatter, + CWJsonFormatter, +) + + +class TestCWPrettyPrintFormatter(TestCase): + def setUp(self): + self.colored = Mock() + self.pretty_print_formatter = CWPrettyPrintFormatter(self.colored) + self.group_name = "group name" + self.stream_name = "stream name" + self.message = "message" + self.event_dict = {"timestamp": 1, "message": self.message, "logStreamName": self.stream_name} + + def test_must_serialize_event(self): + colored_timestamp = "colored timestamp" + colored_stream_name = "colored stream name" + self.colored.yellow.return_value = colored_timestamp + self.colored.cyan.return_value = colored_stream_name + + event = CWLogEvent(self.group_name, self.event_dict) + + expected = " 
".join([colored_stream_name, colored_timestamp, self.message]) + result = self.pretty_print_formatter.map(event) + + self.assertEqual(expected, result.message) + self.colored.yellow.has_calls() + self.colored.cyan.assert_called_with(self.stream_name) + + +class TestCWColorizeErrorsFormatter(TestCase): + def setUp(self): + self.colored = Mock() + self.formatter = CWColorizeErrorsFormatter(self.colored) + + @parameterized.expand( + [ + "Task timed out", + "Something happened. Task timed out. Something else happend", + "Process exited before completing request", + ] + ) + def test_must_color_crash_messages(self, input_msg): + color_result = "colored messaage" + self.colored.red.return_value = color_result + event = CWLogEvent("group_name", {"message": input_msg}) + + result = self.formatter.map(event) + self.assertEqual(result.message, color_result) + self.colored.red.assert_called_with(input_msg) + + def test_must_ignore_other_messages(self): + event = CWLogEvent("group_name", {"message": "some msg"}) + + result = self.formatter.map(event) + self.assertEqual(result.message, "some msg") + self.colored.red.assert_not_called() + + +class CWCWKeywordHighlighterFormatter(TestCase): + def setUp(self): + self.colored = Mock() + + def test_must_highlight_all_keywords(self): + input_msg = "this keyword some keyword other keyword" + keyword = "keyword" + color_result = "colored" + expected_msg = "this colored some colored other colored" + + formatter = CWKeywordHighlighterFormatter(self.colored, keyword) + + self.colored.underline.return_value = color_result + event = CWLogEvent("group_name", {"message": input_msg}) + + result = formatter.map(event) + self.assertEqual(result.message, expected_msg) + self.colored.underline.assert_called_with(keyword) + + def test_must_ignore_if_keyword_is_absent(self): + input_msg = "this keyword some keyword other keyword" + event = CWLogEvent("group_name", {"message": input_msg}) + + formatter = CWKeywordHighlighterFormatter(self.colored) + + result = formatter.map(event) + self.assertEqual(result.message, input_msg) + self.colored.underline.assert_not_called() + + +class TestCWJsonFormatter(TestCase): + def setUp(self): + self.formatter = CWJsonFormatter() + + def test_must_pretty_print_json(self): + data = {"a": "b"} + input_msg = '{"a": "b"}' + expected_msg = json.dumps(data, indent=2) + + event = CWLogEvent("group_name", {"message": input_msg}) + + result = self.formatter.map(event) + self.assertEqual(result.message, expected_msg) + + @parameterized.expand(["this is not json", '{"not a valid json"}']) + def test_ignore_non_json(self, input_msg): + + event = CWLogEvent("group_name", {"message": input_msg}) + + result = self.formatter.map(event) + self.assertEqual(result.message, input_msg) diff --git a/tests/unit/lib/logs/test_provider.py b/tests/unit/lib/observability/cw_logs/test_cw_log_group_provider.py similarity index 78% rename from tests/unit/lib/logs/test_provider.py rename to tests/unit/lib/observability/cw_logs/test_cw_log_group_provider.py index 59da01928c..295ad6d898 100644 --- a/tests/unit/lib/logs/test_provider.py +++ b/tests/unit/lib/observability/cw_logs/test_cw_log_group_provider.py @@ -1,6 +1,6 @@ from unittest import TestCase -from samcli.lib.logs.provider import LogGroupProvider +from samcli.lib.observability.cw_logs.cw_log_group_provider import LogGroupProvider class TestLogGroupProvider_for_lambda_function(TestCase): diff --git a/tests/unit/lib/observability/cw_logs/test_cw_log_puller.py 
b/tests/unit/lib/observability/cw_logs/test_cw_log_puller.py new file mode 100644 index 0000000000..98f4e6d3de --- /dev/null +++ b/tests/unit/lib/observability/cw_logs/test_cw_log_puller.py @@ -0,0 +1,322 @@ +import copy +from datetime import datetime +from unittest import TestCase +from unittest.mock import Mock, call, patch, ANY + +import botocore.session +from botocore.stub import Stubber + +from samcli.lib.observability.cw_logs.cw_log_event import CWLogEvent +from samcli.lib.observability.cw_logs.cw_log_puller import CWLogPuller +from samcli.lib.utils.time import to_timestamp, to_datetime + + +class TestCWLogPuller_load_time_period(TestCase): + def setUp(self): + self.log_group_name = "name" + self.stream_name = "stream name" + self.timestamp = to_timestamp(datetime.utcnow()) + + real_client = botocore.session.get_session().create_client("logs", region_name="us-east-1") + self.client_stubber = Stubber(real_client) + self.consumer = Mock() + self.fetcher = CWLogPuller(real_client, self.consumer, self.log_group_name) + + self.mock_api_response = { + "events": [ + { + "eventId": "id1", + "ingestionTime": 0, + "logStreamName": self.stream_name, + "message": "message 1", + "timestamp": self.timestamp, + }, + { + "eventId": "id2", + "ingestionTime": 0, + "logStreamName": self.stream_name, + "message": "message 2", + "timestamp": self.timestamp, + }, + ] + } + + self.expected_events = [ + CWLogEvent( + self.log_group_name, + { + "eventId": "id1", + "ingestionTime": 0, + "logStreamName": self.stream_name, + "message": "message 1", + "timestamp": self.timestamp, + }, + ), + CWLogEvent( + self.log_group_name, + { + "eventId": "id2", + "ingestionTime": 0, + "logStreamName": self.stream_name, + "message": "message 2", + "timestamp": self.timestamp, + }, + ), + ] + + def test_must_fetch_logs_for_log_group(self): + expected_params = {"logGroupName": self.log_group_name, "interleaved": True} + + # Configure the stubber to return the configured response. The stubber also verifies + # that input params were provided as expected + self.client_stubber.add_response("filter_log_events", self.mock_api_response, expected_params) + + with self.client_stubber: + self.fetcher.load_time_period() + + call_args = [args[0] for (args, _) in self.consumer.consume.call_args_list] + for event in self.expected_events: + self.assertIn(event, call_args) + + def test_must_fetch_logs_with_all_params(self): + pattern = "foobar" + start = datetime.utcnow() + end = datetime.utcnow() + + expected_params = { + "logGroupName": self.log_group_name, + "interleaved": True, + "startTime": to_timestamp(start), + "endTime": to_timestamp(end), + "filterPattern": pattern, + } + + self.client_stubber.add_response("filter_log_events", self.mock_api_response, expected_params) + + with self.client_stubber: + self.fetcher.load_time_period(start_time=start, end_time=end, filter_pattern=pattern) + + call_args = [args[0] for (args, _) in self.consumer.consume.call_args_list] + for event in self.expected_events: + self.assertIn(event, call_args) + + def test_must_paginate_using_next_token(self): + """Make three API calls; the first two return a nextToken and the last does not.""" + token = "token" + expected_params = {"logGroupName": self.log_group_name, "interleaved": True} + expected_params_with_token = {"logGroupName": self.log_group_name, "interleaved": True, "nextToken": token} + + mock_response_with_token = copy.deepcopy(self.mock_api_response) + mock_response_with_token["nextToken"] = token + + # Call 1 returns a token. 
Also when first call is made, token is **not** passed as API params + self.client_stubber.add_response("filter_log_events", mock_response_with_token, expected_params) + + # Call 2 returns a token + self.client_stubber.add_response("filter_log_events", mock_response_with_token, expected_params_with_token) + + # Call 3 DOES NOT return a token. This will terminate the loop. + self.client_stubber.add_response("filter_log_events", self.mock_api_response, expected_params_with_token) + + # Same data was returned in each API call + expected_events_result = self.expected_events + self.expected_events + self.expected_events + + with self.client_stubber: + self.fetcher.load_time_period() + + call_args = [args[0] for (args, _) in self.consumer.consume.call_args_list] + for event in expected_events_result: + self.assertIn(event, call_args) + + +class TestCWLogPuller_tail(TestCase): + def setUp(self): + self.log_group_name = "name" + self.filter_pattern = "pattern" + self.start_time = to_datetime(10) + self.max_retries = 3 + self.poll_interval = 1 + + real_client = botocore.session.get_session().create_client("logs", region_name="us-east-1") + self.client_stubber = Stubber(real_client) + self.consumer = Mock() + self.fetcher = CWLogPuller( + real_client, + self.consumer, + self.log_group_name, + max_retries=self.max_retries, + poll_interval=self.poll_interval, + ) + + self.mock_api_empty_response = {"events": []} + self.mock_api_response_1 = { + "events": [ + { + "timestamp": 11, + }, + { + "timestamp": 12, + }, + ] + } + self.mock_api_response_2 = { + "events": [ + { + "timestamp": 13, + }, + { + "timestamp": 14, + }, + ] + } + + self.mock_events1 = [ + CWLogEvent(self.log_group_name, {"timestamp": 11}), + CWLogEvent(self.log_group_name, {"timestamp": 12}), + ] + self.mock_events2 = [ + CWLogEvent(self.log_group_name, {"timestamp": 13}), + CWLogEvent(self.log_group_name, {"timestamp": 14}), + ] + self.mock_events_empty = [] + + @patch("samcli.lib.observability.cw_logs.cw_log_puller.time") + def test_must_tail_logs_with_single_data_fetch(self, time_mock): + expected_params = { + "logGroupName": self.log_group_name, + "interleaved": True, + "startTime": 10, + "filterPattern": self.filter_pattern, + } + expected_params_second_try = { + "logGroupName": self.log_group_name, + "interleaved": True, + "startTime": 13, + "filterPattern": self.filter_pattern, + } + + # first successful return + self.client_stubber.add_response("filter_log_events", self.mock_api_response_1, expected_params) + # 3 empty returns as the number of max retries + self.client_stubber.add_response("filter_log_events", self.mock_api_empty_response, expected_params_second_try) + self.client_stubber.add_response("filter_log_events", self.mock_api_empty_response, expected_params_second_try) + self.client_stubber.add_response("filter_log_events", self.mock_api_empty_response, expected_params_second_try) + + with patch.object( + self.fetcher, "load_time_period", wraps=self.fetcher.load_time_period + ) as patched_load_time_period: + with self.client_stubber: + self.fetcher.tail( + start_time=self.start_time, + filter_pattern=self.filter_pattern, + ) + + expected_load_time_period_calls = [ + # First fetch returns data + call(self.start_time, filter_pattern=self.filter_pattern), + # Three empty fetches + call(to_datetime(13), filter_pattern=self.filter_pattern), + call(to_datetime(13), filter_pattern=self.filter_pattern), + call(to_datetime(13), filter_pattern=self.filter_pattern), + ] + + # One per poll + expected_sleep_calls = 
[call(self.poll_interval) for _ in expected_load_time_period_calls] + + consumer_call_args = [args[0] for (args, _) in self.consumer.consume.call_args_list] + + self.assertEqual(self.mock_events1, consumer_call_args) + self.assertEqual(expected_sleep_calls, time_mock.sleep.call_args_list) + self.assertEqual(expected_load_time_period_calls, patched_load_time_period.call_args_list) + + @patch("samcli.lib.observability.cw_logs.cw_log_puller.time") + def test_must_tail_logs_with_multiple_data_fetches(self, time_mock): + expected_params = { + "logGroupName": self.log_group_name, + "interleaved": True, + "startTime": 10, + "filterPattern": self.filter_pattern, + } + expected_params_second_try = { + "logGroupName": self.log_group_name, + "interleaved": True, + "startTime": 13, + "filterPattern": self.filter_pattern, + } + expected_params_third_try = { + "logGroupName": self.log_group_name, + "interleaved": True, + "startTime": 15, + "filterPattern": self.filter_pattern, + } + + self.client_stubber.add_response("filter_log_events", self.mock_api_response_1, expected_params) + self.client_stubber.add_response("filter_log_events", self.mock_api_empty_response, expected_params_second_try) + self.client_stubber.add_response("filter_log_events", self.mock_api_response_2, expected_params_second_try) + self.client_stubber.add_response("filter_log_events", self.mock_api_empty_response, expected_params_third_try) + self.client_stubber.add_response("filter_log_events", self.mock_api_empty_response, expected_params_third_try) + self.client_stubber.add_response("filter_log_events", self.mock_api_empty_response, expected_params_third_try) + + expected_load_time_period_calls = [ + # First fetch returns data + call(self.start_time, filter_pattern=self.filter_pattern), + # This fetch was empty + call(to_datetime(13), filter_pattern=self.filter_pattern), + # This fetch returned data + call(to_datetime(13), filter_pattern=self.filter_pattern), + # Three empty fetches + call(to_datetime(15), filter_pattern=self.filter_pattern), + call(to_datetime(15), filter_pattern=self.filter_pattern), + call(to_datetime(15), filter_pattern=self.filter_pattern), + ] + + # One per poll + expected_sleep_calls = [call(self.poll_interval) for _ in expected_load_time_period_calls] + + with patch.object( + self.fetcher, "load_time_period", wraps=self.fetcher.load_time_period + ) as patched_load_time_period: + with self.client_stubber: + self.fetcher.tail(start_time=self.start_time, filter_pattern=self.filter_pattern) + + expected_consumer_call_args = [args[0] for (args, _) in self.consumer.consume.call_args_list] + + self.assertEqual(self.mock_events1 + self.mock_events2, expected_consumer_call_args) + self.assertEqual(expected_load_time_period_calls, patched_load_time_period.call_args_list) + self.assertEqual(expected_sleep_calls, time_mock.sleep.call_args_list) + + @patch("samcli.lib.observability.cw_logs.cw_log_puller.time") + def test_without_start_time(self, time_mock): + expected_params = { + "logGroupName": self.log_group_name, + "interleaved": True, + "startTime": 0, + "filterPattern": self.filter_pattern, + } + self.client_stubber.add_response("filter_log_events", self.mock_api_empty_response, expected_params) + self.client_stubber.add_response("filter_log_events", self.mock_api_empty_response, expected_params) + self.client_stubber.add_response("filter_log_events", self.mock_api_empty_response, expected_params) + + expected_load_time_period_calls = [ + # Three empty fetches, all with default start time + call(to_datetime(0), 
filter_pattern=ANY), + call(to_datetime(0), filter_pattern=ANY), + ] + + # One per poll + expected_sleep_calls = [call(self.poll_interval) for _ in expected_load_time_period_calls] + + with patch.object( + self.fetcher, "load_time_period", wraps=self.fetcher.load_time_period + ) as patched_load_time_period: + with self.client_stubber: + self.fetcher.tail( + filter_pattern=self.filter_pattern, + ) + + expected_consumer_call_args = [args[0] for (args, _) in self.consumer.consume.call_args_list] + + self.assertEqual([], expected_consumer_call_args) + self.assertEqual(expected_load_time_period_calls, patched_load_time_period.call_args_list) + self.assertEqual(expected_sleep_calls, time_mock.sleep.call_args_list) diff --git a/tests/unit/lib/observability/test_observability_info_puller.py b/tests/unit/lib/observability/test_observability_info_puller.py new file mode 100644 index 0000000000..3fbbb9fe34 --- /dev/null +++ b/tests/unit/lib/observability/test_observability_info_puller.py @@ -0,0 +1,50 @@ +from unittest import TestCase +from unittest.mock import Mock + +from parameterized import parameterized, param + +from samcli.lib.observability.observability_info_puller import ObservabilityEventConsumerDecorator + + +class TestObservabilityEventConsumerDecorator(TestCase): + def test_decorator(self): + actual_consumer = Mock() + event = Mock() + + consumer_decorator = ObservabilityEventConsumerDecorator([], actual_consumer) + consumer_decorator.consume(event) + + actual_consumer.consume.assert_called_with(event) + + def test_decorator_with_mapper(self): + actual_consumer = Mock() + event = Mock() + mapped_event = Mock() + mapper = Mock() + mapper.map.return_value = mapped_event + + consumer_decorator = ObservabilityEventConsumerDecorator([mapper], actual_consumer) + consumer_decorator.consume(event) + + mapper.map.assert_called_with(event) + actual_consumer.consume.assert_called_with(mapped_event) + + @parameterized.expand( + [ + param([Mock()]), + param([Mock(), Mock()]), + param([Mock(), Mock(), Mock()]), + ] + ) + def test_decorator_with_mappers(self, mappers): + actual_consumer = Mock() + event = Mock() + for mapper in mappers: + mapper.map.return_value = event + + consumer_decorator = ObservabilityEventConsumerDecorator(mappers, actual_consumer) + consumer_decorator.consume(event) + + actual_consumer.consume.assert_called_with(event) + for mapper in mappers: + mapper.map.assert_called_with(event) diff --git a/tests/unit/lib/package/test_artifact_exporter.py b/tests/unit/lib/package/test_artifact_exporter.py index f1a793ecf3..7cc20f6be7 100644 --- a/tests/unit/lib/package/test_artifact_exporter.py +++ b/tests/unit/lib/package/test_artifact_exporter.py @@ -24,7 +24,7 @@ ServerlessApplicationResource, ) from samcli.lib.package.packageable_resources import ( - is_s3_url, + is_s3_protocol_url, is_local_file, upload_local_artifacts, Resource, @@ -198,10 +198,10 @@ def test_is_s3_url(self): self._assert_is_invalid_s3_url(url) def _assert_is_valid_s3_url(self, url): - self.assertTrue(is_s3_url(url), "{0} should be valid".format(url)) + self.assertTrue(is_s3_protocol_url(url), "{0} should be valid".format(url)) def _assert_is_invalid_s3_url(self, url): - self.assertFalse(is_s3_url(url), "{0} should be valid".format(url)) + self.assertFalse(is_s3_protocol_url(url), "{0} should be invalid".format(url)) def test_parse_s3_url(self): diff --git a/tests/unit/lib/package/test_utils.py b/tests/unit/lib/package/test_utils.py new file mode 100644 index 
0000000000..2907d7c479 --- /dev/null +++ b/tests/unit/lib/package/test_utils.py @@ -0,0 +1,44 @@ +from unittest import TestCase + +from parameterized import parameterized + +from samcli.lib.package import utils + + +class TestPackageUtils(TestCase): + @parameterized.expand( + [ + # path like + "https://s3.us-west-2.amazonaws.com/bucket-name/some/path/object.html", + "http://s3.amazonaws.com/bucket-name/some/path/object.html", + "https://s3.dualstack.us-west-2.amazonaws.com/bucket-name/some/path/object.html", + # virtual host + "http://bucket-name.s3.us-west-2.amazonaws.com/some/path/object.html", + "https://bucket-name.s3-us-west-2.amazonaws.com/some/path/object.html", + "https://bucket-name.s3.amazonaws.com/some/path/object.html", + # access point + "https://access-name-123456.s3-accesspoint.us-west-2.amazonaws.com/some/path/object.html", + "http://access-name-899889.s3-accesspoint.us-east-1.amazonaws.com/some/path/object.html", + # s3:// + "s3://bucket-name/path/to/object", + ] + ) + def test_is_s3_url(self, url): + self.assertTrue(utils.is_s3_url(url)) + + @parameterized.expand( + [ + # path like + "https://s3.$region.amazonaws.com/bucket-name/some/path/object.html", # invalid region + "https://s3.amazonaws.com/object.html", # no bucket + # virtual host + "https://bucket-name.s3-us-west-2.amazonaws.com/", # no object + # access point + "https://access-name.s3-accesspoint.us-west-2.amazonaws.com/some/path/object.html", # no account id + # s3:// + "s3://bucket-name", # no object + "s3:://bucket-name", # typo + ] + ) + def test_is_not_s3_url(self, url): + self.assertFalse(utils.is_s3_url(url)) diff --git a/tests/unit/lib/utils/test_git_repo.py b/tests/unit/lib/utils/test_git_repo.py new file mode 100644 index 0000000000..645dc5c2de --- /dev/null +++ b/tests/unit/lib/utils/test_git_repo.py @@ -0,0 +1,188 @@ +import subprocess +from pathlib import Path +from unittest import TestCase +from unittest.mock import patch, MagicMock, ANY, call +import os +from samcli.lib.utils.git_repo import GitRepo, rmtree_callback, CloneRepoException, CloneRepoUnstableStateException + +REPO_URL = "REPO URL" +REPO_NAME = "REPO NAME" +CLONE_DIR = os.path.normpath("/tmp/local/clone/dir") +EXPECTED_DEFAULT_CLONE_PATH = os.path.normpath(os.path.join(CLONE_DIR, REPO_NAME)) + + +class TestGitRepo(TestCase): + def setUp(self): + self.repo = GitRepo(url=REPO_URL) + self.local_clone_dir = MagicMock() + self.local_clone_dir.joinpath.side_effect = lambda sub_dir: os.path.normpath(os.path.join(CLONE_DIR, sub_dir)) + + def test_ensure_clone_directory_exists(self): + self.repo._ensure_clone_directory_exists(self.local_clone_dir) # No exception is thrown + self.local_clone_dir.mkdir.assert_called_once_with(mode=0o700, parents=True, exist_ok=True) + + def test_ensure_clone_directory_exists_fail(self): + self.local_clone_dir.mkdir.side_effect = OSError + with self.assertRaises(OSError): + self.repo._ensure_clone_directory_exists(self.local_clone_dir) + + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_git_executable_not_windows(self, mock_platform, mock_popen): + mock_platform.return_value = "Not Windows" + executable = self.repo._git_executable() + self.assertEqual(executable, "git") + + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_git_executable_windows(self, mock_platform, mock_popen): + mock_platform.return_value = "Windows" + executable = self.repo._git_executable() + 
self.assertEqual(executable, "git") + + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + def test_git_executable_fails(self, mock_popen): + mock_popen.side_effect = OSError("fail") + with self.assertRaises(OSError): + self.repo._git_executable() + + @patch("samcli.lib.utils.git_repo.Path.exists") + @patch("samcli.lib.utils.git_repo.shutil") + @patch("samcli.lib.utils.git_repo.subprocess.check_output") + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_clone_happy_case(self, platform_mock, popen_mock, check_output_mock, shutil_mock, path_exist_mock): + path_exist_mock.return_value = False + self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME) + self.local_clone_dir.mkdir.assert_called_once_with(mode=0o700, parents=True, exist_ok=True) + popen_mock.assert_called_once_with(["git"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + check_output_mock.assert_has_calls( + [call(["git", "clone", self.repo.url, REPO_NAME], cwd=ANY, stderr=subprocess.STDOUT)] + ) + shutil_mock.rmtree.assert_not_called() + shutil_mock.copytree.assert_called_with(ANY, EXPECTED_DEFAULT_CLONE_PATH, ignore=ANY) + shutil_mock.ignore_patterns.assert_called_with("*.git") + + @patch("samcli.lib.utils.git_repo.Path.exists") + @patch("samcli.lib.utils.git_repo.shutil") + @patch("samcli.lib.utils.git_repo.subprocess.check_output") + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_clone_create_new_local_repo( + self, platform_mock, popen_mock, check_output_mock, shutil_mock, path_exist_mock + ): + path_exist_mock.return_value = False + self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME) + shutil_mock.rmtree.assert_not_called() + shutil_mock.copytree.assert_called_with(ANY, EXPECTED_DEFAULT_CLONE_PATH, ignore=ANY) + shutil_mock.ignore_patterns.assert_called_with("*.git") + + @patch("samcli.lib.utils.git_repo.Path.exists") + @patch("samcli.lib.utils.git_repo.shutil") + @patch("samcli.lib.utils.git_repo.subprocess.check_output") + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_clone_replace_current_local_repo_if_replace_existing_flag_is_set( + self, platform_mock, popen_mock, check_output_mock, shutil_mock, path_exist_mock + ): + path_exist_mock.return_value = True + self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME, replace_existing=True) + self.local_clone_dir.mkdir.assert_called_once_with(mode=0o700, parents=True, exist_ok=True) + shutil_mock.rmtree.assert_called_with(EXPECTED_DEFAULT_CLONE_PATH, onerror=rmtree_callback) + shutil_mock.copytree.assert_called_with(ANY, EXPECTED_DEFAULT_CLONE_PATH, ignore=ANY) + shutil_mock.ignore_patterns.assert_called_with("*.git") + + @patch("samcli.lib.utils.git_repo.Path.exists") + @patch("samcli.lib.utils.git_repo.subprocess.check_output") + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_clone_fail_if_current_local_repo_exists_and_replace_existing_flag_is_not_set( + self, platform_mock, popen_mock, check_output_mock, path_exist_mock + ): + path_exist_mock.return_value = True + with self.assertRaises(CloneRepoException): + self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME) # replace_existing=False by default + + @patch("samcli.lib.utils.git_repo.shutil") + @patch("samcli.lib.utils.git_repo.subprocess.check_output") + 
@patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_clone_attempt_is_set_to_true_after_clone(self, platform_mock, popen_mock, check_output_mock, shutil_mock): + self.assertFalse(self.repo.clone_attempted) + self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME) + self.assertTrue(self.repo.clone_attempted) + + @patch("samcli.lib.utils.git_repo.shutil") + @patch("samcli.lib.utils.git_repo.subprocess.check_output") + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_clone_attempt_is_set_to_true_even_if_clone_failed( + self, platform_mock, popen_mock, check_output_mock, shutil_mock + ): + check_output_mock.side_effect = subprocess.CalledProcessError("fail", "fail", "not found".encode("utf-8")) + self.assertFalse(self.repo.clone_attempted) + try: + with self.assertRaises(CloneRepoException): + self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME) + except: + pass + self.assertTrue(self.repo.clone_attempted) + + @patch("samcli.lib.utils.git_repo.shutil") + @patch("samcli.lib.utils.git_repo.subprocess.check_output") + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_clone_failed_to_create_the_clone_directory( + self, platform_mock, popen_mock, check_output_mock, shutil_mock + ): + self.local_clone_dir.mkdir.side_effect = OSError + try: + with self.assertRaises(OSError): + self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME) + except: + pass + self.local_clone_dir.mkdir.assert_called_once_with(mode=0o700, parents=True, exist_ok=True) + popen_mock.assert_not_called() + check_output_mock.assert_not_called() + shutil_mock.assert_not_called() + + @patch("samcli.lib.utils.git_repo.shutil") + @patch("samcli.lib.utils.git_repo.subprocess.check_output") + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_clone_when_the_subprocess_fail(self, platform_mock, popen_mock, check_output_mock, shutil_mock): + check_output_mock.side_effect = subprocess.CalledProcessError("fail", "fail", "any reason".encode("utf-8")) + with self.assertRaises(CloneRepoException): + self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME) + + @patch("samcli.lib.utils.git_repo.LOG") + @patch("samcli.lib.utils.git_repo.subprocess.check_output") + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_clone_when_the_git_repo_not_found(self, platform_mock, popen_mock, check_output_mock, log_mock): + check_output_mock.side_effect = subprocess.CalledProcessError("fail", "fail", "not found".encode("utf-8")) + try: + with self.assertRaises(CloneRepoException): + self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME) + except Exception: + pass + log_mock.warning.assert_called() + + @patch("samcli.lib.utils.git_repo.Path.exists") + @patch("samcli.lib.utils.git_repo.shutil") + @patch("samcli.lib.utils.git_repo.subprocess.check_output") + @patch("samcli.lib.utils.git_repo.subprocess.Popen") + @patch("samcli.lib.utils.git_repo.platform.system") + def test_clone_when_failed_to_move_cloned_repo_from_temp_to_final_destination( + self, platform_mock, popen_mock, check_output_mock, shutil_mock, path_exist_mock + ): + path_exist_mock.return_value = True + shutil_mock.copytree.side_effect = OSError + try: + with 
self.assertRaises(CloneRepoUnstableStateException): + self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME, replace_existing=True) + except Exception: + pass + shutil_mock.rmtree.assert_called_once_with(EXPECTED_DEFAULT_CLONE_PATH, onerror=rmtree_callback) + shutil_mock.copytree.assert_called_once_with(ANY, EXPECTED_DEFAULT_CLONE_PATH, ignore=ANY) + shutil_mock.ignore_patterns.assert_called_once_with("*.git") diff --git a/tests/unit/lib/utils/test_hash.py b/tests/unit/lib/utils/test_hash.py index 1f16bb393e..388b3c96da 100644 --- a/tests/unit/lib/utils/test_hash.py +++ b/tests/unit/lib/utils/test_hash.py @@ -40,11 +40,11 @@ def test_dir_hash_independent_of_file_order(self): mockwalk.return_value = [ ( self.temp_dir, - (), - ( + [], + [ file1.name, file2.name, - ), + ], ), ] dir_checksums["first"] = dir_checksum(self.temp_dir) @@ -53,11 +53,11 @@ def test_dir_hash_independent_of_file_order(self): mockwalk.return_value = [ ( self.temp_dir, - (), - ( + [], + [ file2.name, file1.name, - ), + ], ), ] dir_checksums["second"] = dir_checksum(self.temp_dir) @@ -73,6 +73,27 @@ def test_dir_hash_same_contents_diff_file_per_directory(self): checksum_after = dir_checksum(os.path.dirname(_file.name)) self.assertNotEqual(checksum_before, checksum_after) + def test_dir_hash_with_ignore_list(self): + _file = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir) + _file.write(b"Testfile") + _file.close() + + dir_path = os.path.dirname(_file.name) + checksum_before = dir_checksum(dir_path) + + # add a file to .aws-sam/ + aws_sam_dir_path = os.path.join(dir_path, ".aws-sam") + os.mkdir(aws_sam_dir_path) + _new_file = tempfile.NamedTemporaryFile(delete=False, dir=aws_sam_dir_path) + _new_file.write(b"dummy") + _new_file.close() + + checksum_after = dir_checksum(os.path.dirname(_file.name)) + self.assertNotEqual(checksum_before, checksum_after) + + checksum_after_with_ignore_list = dir_checksum(os.path.dirname(_file.name), ignore_list=[".aws-sam"]) + self.assertEqual(checksum_before, checksum_after_with_ignore_list) + def test_dir_cyclic_links(self): _file = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir) _file.write(b"Testfile") diff --git a/tests/unit/local/lambdafn/test_runtime.py b/tests/unit/local/lambdafn/test_runtime.py index ed848478de..38ba59110f 100644 --- a/tests/unit/local/lambdafn/test_runtime.py +++ b/tests/unit/local/lambdafn/test_runtime.py @@ -514,6 +514,28 @@ def test_must_return_a_valid_file(self, unzip_file_mock, shutil_mock, os_mock): shutil_mock.rmtree.assert_not_called() +class TestLambdaRuntime_unarchived_layer(TestCase): + def setUp(self): + self.manager_mock = Mock() + self.layer_downloader = Mock() + self.runtime = LambdaRuntime(self.manager_mock, self.layer_downloader) + + @parameterized.expand([(LayerVersion("arn", "file.zip"),)]) + @patch("samcli.local.lambdafn.runtime.LambdaRuntime._get_code_dir") + def test_unarchived_layer(self, layer, get_code_dir_mock): + new_url = get_code_dir_mock.return_value = Mock() + result = self.runtime._unarchived_layer(layer) + self.assertNotEqual(layer, result) + self.assertEqual(new_url, result.codeuri) + + @parameterized.expand([("arn",), (LayerVersion("arn", "folder"),), ({"Name": "hi", "Version": "x.y.z"},)]) + @patch("samcli.local.lambdafn.runtime.LambdaRuntime._get_code_dir") + def test_unarchived_layer_not_local_archive_file(self, layer, get_code_dir_mock): + get_code_dir_mock.side_effect = lambda x: x # directly return the input + result = self.runtime._unarchived_layer(layer) + self.assertEqual(layer, 
result) + + class TestWarmLambdaRuntime_invoke(TestCase): DEFAULT_MEMORY = 128