From 57890abf2c7919eddfec36178b1136cd44ffe883 Mon Sep 17 00:00:00 2001
From: Troy Giorshev
Date: Thu, 4 Jun 2020 11:49:09 -0400
Subject: [PATCH] Remove two unneeded tests

Test 1 is a duplicate of test_size() later in the file. Inexplicably,
this test does not work on macOS, whereas test_size() does.

Test 2 is problematic for two reasons. First, it always fails with an
invalid checksum, which is probably not what was intended. Second,
it's not defined at this layer what the behavior should be.
Hypothetically, if this test were fixed so that it gave messages with
valid checksums, the message would pass successfully through the
network layer and fail only in the processing layer. A priori, the
network layer has no idea what the size of a message "actually" is.

The answer to "Why does behavior change at 78 bytes?" is the following:

    print(len(node.p2p.build_message(msg)))  # 125
    => payload size = 125 - 24 = 101

If we take 77 bytes, then there are 101 - 77 = 24 bytes left. That's
exactly the size of a header, so bitcoind deserializes those bytes as a
header and rejects it for some other reason (almost always an invalid
size, i.e. too large). But if we take 78 bytes, then there are
101 - 78 = 23 bytes left. That's not enough to fill a header, so the
socket stays open waiting for more data. That's why we sometimes have
to push additional data in order for the peer to disconnect.

Additionally, both of these tests use the "conn" variable. For fun, go
look at where it's declared. (Hint: test_large_inv(). Don't we all
love Python's idea of scope?)
---
 test/functional/p2p_invalid_messages.py | 86 ------------------------
 1 file changed, 86 deletions(-)

diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index 81302374c9d97..244a5a136dde7 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -5,7 +5,6 @@
 """Test node responses to invalid network messages."""
 import asyncio
 import struct
-import sys
 
 from test_framework.messages import (
     CBlockHeader,
@@ -50,11 +49,6 @@ def run_test(self):
         . Test msg header
         0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see
            that it isn't an effective DoS against the node.
-
-        1. Send an oversized (4MB+) message and check that we're disconnected.
-
-        2. Send a few messages with an incorrect data size in the header, ensure the
-           messages are ignored.
         """
         self.test_magic_bytes()
         self.test_checksum()
@@ -95,66 +89,6 @@ def run_test(self):
         node.p2p.sync_with_ping(timeout=400)
         assert node.p2p.is_connected
 
-        #
-        # 1.
-        #
-        # Send an oversized message, ensure we're disconnected.
-        #
-        # Under macOS this test is skipped due to an unexpected error code
-        # returned from the closing socket which python/asyncio does not
-        # yet know how to handle.
-        #
-        if sys.platform != 'darwin':
-            msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1))
-            assert len(msg_over_size.serialize()) == (msg_limit + 1)
-
-            # An unknown message type (or *any* message type) over
-            # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect.
-            node.p2p.send_message(msg_over_size)
-            node.p2p.wait_for_disconnect(timeout=4)
-
-            node.disconnect_p2ps()
-            conn = node.add_p2p_connection(P2PDataStore())
-            conn.wait_for_verack()
-        else:
-            self.log.info("Skipping test p2p_invalid_messages/1 (oversized message) under macOS")
-
-        #
-        # 2.
-        #
-        # Send messages with an incorrect data size in the header.
-        #
-        actual_size = 100
-        msg = msg_unrecognized(str_data="b" * actual_size)
-
-        # TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect.
-        for wrong_size in (2, 77, 78, 79):
-            self.log.info("Sending a message with incorrect size of {}".format(wrong_size))
-
-            # Unmodified message should submit okay.
-            node.p2p.send_and_ping(msg)
-
-            # A message lying about its data size results in a disconnect when the incorrect
-            # data size is less than the actual size.
-            #
-            # TODO: why does behavior change at 78 bytes?
-            #
-            node.p2p.send_raw_message(self._tweak_msg_data_size(msg, wrong_size))
-
-            # For some reason unknown to me, we sometimes have to push additional data to the
-            # peer in order for it to realize a disconnect.
-            try:
-                node.p2p.send_message(msg_ping(nonce=123123))
-            except IOError:
-                pass
-
-            node.p2p.wait_for_disconnect(timeout=10)
-            node.disconnect_p2ps()
-            node.add_p2p_connection(P2PDataStore())
-
-        # Node is still up.
-        conn = node.add_p2p_connection(P2PDataStore())
-
     def test_magic_bytes(self):
         conn = self.nodes[0].add_p2p_connection(P2PDataStore())
 
@@ -225,26 +159,6 @@ def test_large_inv(self):
             conn.send_and_ping(msg)
         self.nodes[0].disconnect_p2ps()
 
-    def _tweak_msg_data_size(self, message, wrong_size):
-        """
-        Return a raw message based on another message but with an incorrect data size in
-        the message header.
-        """
-        raw_msg = self.node.p2p.build_message(message)
-
-        bad_size_bytes = struct.pack("<I", wrong_size)
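To make the size arithmetic in the commit message concrete, here is a minimal
standalone sketch. It is not part of the test framework: build_message() below
is a simplified stand-in for the framework helper of the same name, and
MAGIC_REGTEST and lie_size are names invented for this example. It only
assumes the standard 24-byte P2P header layout (4-byte magic, 12-byte command,
4-byte little-endian length, 4-byte double-SHA256 checksum) and the 101-byte
payload used by the removed test.

#!/usr/bin/env python3
# Standalone sketch: why a header that lies about its payload size behaves
# differently at 77 vs 78 bytes. Not framework code.
import struct
from hashlib import sha256

HEADER_SIZE = 4 + 12 + 4 + 4          # magic + command + length + checksum = 24
MAGIC_REGTEST = b"\xfa\xbf\xb5\xda"   # regtest network magic

def build_message(command, payload, lie_size=None):
    """Serialize a P2P message, optionally lying in the header's length field."""
    size = len(payload) if lie_size is None else lie_size
    checksum = sha256(sha256(payload).digest()).digest()[:4]
    return (MAGIC_REGTEST
            + command.ljust(12, b"\x00")
            + struct.pack("<I", size)   # the field _tweak_msg_data_size() overwrote
            + checksum                  # still computed over all 101 bytes, so a
            + payload)                  # lied-about size always fails the checksum

payload = b"b" * 101                    # 125-byte message minus the 24-byte header
assert len(build_message(b"badmsg", payload)) == 125

for lie_size in (77, 78):
    leftover = len(payload) - lie_size  # bytes remaining after the receiver
                                        # consumes 24 + lie_size bytes
    if leftover >= HEADER_SIZE:
        # 101 - 77 = 24: exactly one header's worth remains, so bitcoind parses
        # it as a new (garbage) header and rejects it, dropping the connection.
        print(lie_size, "->", leftover, "bytes left: parsed as a bogus header, rejected")
    else:
        # 101 - 78 = 23: too short for a header, so the node keeps the socket
        # open and waits for more data; the peer is not disconnected yet.
        print(lie_size, "->", leftover, "bytes left: incomplete header, node keeps waiting")

Under those assumptions, this mirrors why the removed test 2 always tripped the
checksum check, and why sizes of 78 and above needed an extra pushed message
before the disconnect showed up.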