Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

18 flask 1x upgrade #455

Merged
merged 7 commits into from
Sep 10, 2018
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,17 @@

Records breaking changes from major version bumps


## 44.0.0

PR [#455](https://github.com/alphagov/digitalmarketplace-utils/pull/455)

Upgrade Flask from 0.12.4 to 1.0.2. This has breaking changes for Flask apps and therefore has breaking changes for users relying on init_app.

Apps should upgrade to `Flask==1.0.2`; see the changelog at http://flask.pocoo.org/docs/1.0/changelog/#version-1-0-2, taking care to note
the breaking changes in [v1.0](http://flask.pocoo.org/docs/1.0/changelog/#version-1-0).


## 43.0.0

PR [#447](https://github.com/alphagov/digitalmarketplace-utils/pull/447)
Expand Down
2 changes: 1 addition & 1 deletion dmutils/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@
from .flask_init import init_app, init_manager


__version__ = '43.4.0'
__version__ = '44.0.0'
7 changes: 3 additions & 4 deletions dmutils/logging.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,10 +101,9 @@ def after_request(response):
if app.config['DM_APP_NAME'] != 'search-api':
loggers.append(logging.getLogger('urllib3.util.retry'))

for logger in loggers:
logger.addHandler(handler)
logger.setLevel(loglevel)

for logger_ in loggers:
logger_.addHandler(handler)
logger_.setLevel(loglevel)
app.logger.info('Logging configured')


Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
install_requires=[
'Flask-Script==2.0.6',
'Flask-WTF==0.14.2',
'Flask==0.12.4',
'Flask==1.0.2',
'Flask-Login>=0.2.11',
'boto3==1.7.83',
'botocore<1.11.0',
Expand Down
50 changes: 39 additions & 11 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,30 @@
import tempfile

import mock
import os
import pytest

from flask import Flask
import mock
from io import StringIO
from logging import Logger, StreamHandler

from boto.ec2.cloudwatch import CloudWatchConnection

from dmutils.logging import init_app


@pytest.fixture(scope='session', autouse=True)
def log_to_null():
    """Session-wide, automatically-applied fixture: send stdout logging to /dev/null.

    dmutils.logging attaches a stdout handler by default (useful for dev
    environments); for the duration of the test session point that handler's
    stream at os.devnull so test output is less noisy. Any other handlers are
    unaffected.

    See digitalmarketplace-utils/dmutils/logging.py::get_handler
    """
    with open(os.devnull, 'w') as null, mock.patch('dmutils.logging.sys.stdout', null):
        yield


@pytest.fixture
def app(request):
app = Flask(__name__)
Expand All @@ -17,15 +34,26 @@ def app(request):
return app


# it's deceptively difficult to capture & inspect *actual* log output in pytest (no, capfd doesn't seem to work)
@pytest.fixture
def app_with_stream_logger(request):
    """Yield an ``(app, stream)`` pair whose log output is captured in ``stream``.

    ``dmutils.logging.get_handler`` defaults to ``StreamHandler(sys.stdout)``;
    patching the ``StreamHandler`` class makes the handler it constructs write
    to our StringIO instead, so tests can check the stream's contents for the
    expected log entries.
    """
    stream = StringIO()
    fixed_handler = StreamHandler(stream)
    with mock.patch('dmutils.logging.logging.StreamHandler', return_value=fixed_handler):
        # reuse the `app` fixture function above to build the app itself
        yield app(request), stream


@pytest.fixture
def app_with_mocked_logger(request):
    """Yield an app whose ``app.logger`` is a mock (specced on ``Logger``).

    Flask builds its logger via ``create_logger``; patching that factory to
    return a mock means the mock is what ends up exposed as ``app.logger``,
    so tests can assert on the exact log calls made.
    """
    mocked_logger = mock.Mock(spec=Logger('flask.app'), handlers=[])
    with mock.patch('flask.app.create_logger', return_value=mocked_logger):
        # reuse the `app` fixture function above to build the app itself
        yield app(request)


@pytest.yield_fixture
Expand Down
187 changes: 100 additions & 87 deletions tests/test_logging.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ def test_init_app_only_adds_handlers_to_defined_loggers(app):
and isinstance(v.handlers[0], logging.StreamHandler)
}

assert loggers == {'conftest', 'dmutils', 'dmapiclient', 'flask_wtf.csrf', 'urllib3.util.retry'}
assert loggers == {'flask.app', 'dmutils', 'dmapiclient', 'flask_wtf.csrf', 'urllib3.util.retry'}


def test_urllib3_retry_logger_not_configured_for_search_api(app):
Expand All @@ -119,87 +119,105 @@ class _Request(app.request_class):
app.request_class = _Request


@pytest.mark.parametrize("is_sampled", (None, False))
def test_app_request_logs_responses_with_info_level_not_sampled(app_with_mocked_logger, is_sampled):
    """An unsampled request should produce exactly one INFO-level response log entry."""
    if is_sampled is not None:
        _set_request_class_is_sampled(app_with_mocked_logger, is_sampled)

    app_with_mocked_logger.test_client().get('/')

    expected_log_call = mock.call(
        logging.INFO,
        '{method} {url} {status}',
        extra={
            "url": "http://localhost/",
            "status": 404,
            "method": "GET",
            "endpoint": None,
            "duration_real": RestrictedAny(lambda value: isinstance(value, float) and 0 < value),
            "duration_process": RestrictedAny(lambda value: isinstance(value, float) and 0 < value),
            "process_": RestrictedAny(lambda value: isinstance(value, int)),
            "thread_": RestrictedAny(lambda value: isinstance(value, (str, bytes,))),
        },
    )

    assert app_with_mocked_logger.logger.log.call_args_list == [expected_log_call]


def test_app_request_logs_responses_with_info_level_sampled(app_with_mocked_logger):
    """A sampled request should additionally log a DEBUG 'Received request' entry."""
    _set_request_class_is_sampled(app_with_mocked_logger, True)

    app_with_mocked_logger.test_client().get('/')

    expected_debug_call = mock.call(
        logging.DEBUG,
        'Received request {method} {url}',
        extra={
            "url": "http://localhost/",
            "method": "GET",
            "endpoint": None,
            "process_": RestrictedAny(lambda value: isinstance(value, int)),
            "thread_": RestrictedAny(lambda value: isinstance(value, (str, bytes,))),
        },
    )

    assert expected_debug_call in app_with_mocked_logger.logger.log.call_args_list


@pytest.mark.parametrize("is_sampled", (None, False,))
def test_app_request_logs_5xx_responses_with_error_level_not_sampled(app_with_mocked_logger, is_sampled):
    """An unsampled 5xx response should produce exactly one ERROR-level log entry."""
    if is_sampled is not None:
        _set_request_class_is_sampled(app_with_mocked_logger, is_sampled)

    @app_with_mocked_logger.route('/')
    def error_route():
        time.sleep(0.05)
        return 'error', 500

    app_with_mocked_logger.test_client().get('/')

    expected_error_call = mock.call(
        logging.ERROR,
        '{method} {url} {status}',
        extra={
            "url": "http://localhost/",
            "status": 500,
            "method": "GET",
            "endpoint": "error_route",
            "duration_real": RestrictedAny(lambda value: isinstance(value, float) and 0.05 <= value),
            "duration_process": RestrictedAny(lambda value: isinstance(value, float) and 0 < value),
            "process_": RestrictedAny(lambda value: isinstance(value, int)),
            "thread_": RestrictedAny(lambda value: isinstance(value, (str, bytes,))),
        },
    )

    assert app_with_mocked_logger.logger.log.call_args_list == [expected_error_call]


def test_app_request_logs_5xx_responses_with_error_level_sampled(app_with_mocked_logger):
    """A sampled 5xx request should additionally log a DEBUG 'Received request' entry."""
    _set_request_class_is_sampled(app_with_mocked_logger, True)

    @app_with_mocked_logger.route('/')
    def error_route():
        time.sleep(0.05)
        return 'error', 500

    app_with_mocked_logger.test_client().get('/')

    expected_debug_call = mock.call(
        logging.DEBUG,
        'Received request {method} {url}',
        extra={
            "url": "http://localhost/",
            "method": "GET",
            "endpoint": "error_route",
            "process_": RestrictedAny(lambda value: isinstance(value, int)),
            "thread_": RestrictedAny(lambda value: isinstance(value, (str, bytes,))),
        },
    )

    assert expected_debug_call in app_with_mocked_logger.logger.log.call_args_list


class TestJSONFormatter(object):
Expand Down Expand Up @@ -287,20 +305,18 @@ def test_failed_log_message_formatting_logs_an_error(self):


@pytest.mark.parametrize("is_sampled", (False, True,))
def test_log_context_handling_in_initialized_app_high_level(app_logtofile, is_sampled):
def test_log_context_handling_in_initialized_app_high_level(app_with_stream_logger, is_sampled):
app, stream = app_with_stream_logger

def _logging_call_site(app):
app.logger.info(
"Charming day {ankles}, {underleaves}, {parent_span_id}",
extra={"underleaves": "ample"},
)

with open(app_logtofile.config["DM_LOG_PATH"], "r") as log_file:
# consume log initialization line
log_file.read()

_set_request_class_is_sampled(app_logtofile, is_sampled)
_set_request_class_is_sampled(app_with_stream_logger, is_sampled)

with app_logtofile.test_request_context('/'):
with app.test_request_context('/'):
test_extra_log_context = {
"ankles": "thinsocked",
"span_id": "beesWaxed",
Expand All @@ -311,12 +327,9 @@ def _logging_call_site(app):

# we perform the log call in a specifically designated & named function to exercise & be able to reliably
# assert the behaviour of the introspective aspects of our logging
_logging_call_site(app_logtofile)

# ensure buffers are flushed
logging.shutdown()
_logging_call_site(app)

all_lines = tuple(json.loads(line) for line in log_file.read().splitlines())
all_lines = tuple(json.loads(line) for line in stream.read().splitlines())

assert all_lines == (
AnySupersetOf({
Expand Down
Loading