http2: no stream destroy while its data is on the wire
This fixes a crash that occurred when an `Http2Stream` write
completed after the stream had already been destroyed.

Instead, don’t destroy the stream in that case and wait for
GC to take over.

Backport-PR-URL: #19185
PR-URL: #19002
Fixes: #18973
Reviewed-By: James M Snell <[email protected]>
Reviewed-By: Anatoli Papirovski <[email protected]>
addaleax authored and MylesBorins committed Mar 7, 2018
1 parent 420d56c commit 50d1233
Showing 3 changed files with 80 additions and 5 deletions.
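
For context, here is a minimal standalone sketch (not part of the commit, and much simpler than the regression test included in the diff below) of the situation the commit message describes: a server-side `Http2Stream` is destroyed while data it wrote is still being flushed to the underlying socket. The buffer size, the `setImmediate()` timing, and the no-op error handlers are illustrative choices only; reliably reproducing the original crash requires the more precise write-timing control that the actual test sets up.

'use strict';
const http2 = require('http2');

const server = http2.createServer();
server.on('stream', (stream) => {
  stream.respond({ ':status': 200 });
  // Ignore the cancellation error that the in-flight write may report once
  // the stream has been destroyed.
  stream.on('error', () => {});
  // Queue a sizable write so that part of it can still be on the wire ...
  stream.write(Buffer.alloc(40000));
  // ... and destroy the stream while that write may still be pending.
  setImmediate(() => stream.destroy());
});

server.listen(0, () => {
  const client = http2.connect(`http://localhost:${server.address().port}`);
  const req = client.request({ ':path': '/' });
  req.on('error', () => {}); // the server may reset the stream mid-response
  req.on('close', () => {
    client.close();
    server.close();
  });
  req.resume();
});

With the fix below, the native stream object is simply left for the garbage collector while any of its data is still queued on the socket, instead of being deleted out from under the pending write.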
20 changes: 15 additions & 5 deletions src/node_http2.cc
@@ -1700,6 +1700,14 @@ void Http2Session::OnStreamRead(ssize_t nread, const uv_buf_t& buf) {
  stream_buf_ = uv_buf_init(nullptr, 0);
}

+bool Http2Session::HasWritesOnSocketForStream(Http2Stream* stream) {
+  for (const nghttp2_stream_write& wr : outgoing_buffers_) {
+    if (wr.req_wrap != nullptr && wr.req_wrap->stream() == stream)
+      return true;
+  }
+  return false;
+}
+
// Every Http2Session session is tightly bound to a single i/o StreamBase
// (typically a net.Socket or tls.TLSSocket). The lifecycle of the two is
// tightly coupled with all data transfer between the two happening at the
@@ -1753,13 +1761,11 @@ Http2Stream::Http2Stream(


Http2Stream::~Http2Stream() {
+  DEBUG_HTTP2STREAM(this, "tearing down stream");
  if (session_ != nullptr) {
    session_->RemoveStream(this);
    session_ = nullptr;
  }
-
-  if (!object().IsEmpty())
-    ClearWrap(object());
}

// Notify the Http2Stream that a new block of HEADERS is being processed.
@@ -1837,15 +1843,19 @@ inline void Http2Stream::Destroy() {
    Http2Stream* stream = static_cast<Http2Stream*>(data);
    // Free any remaining outgoing data chunks here. This should be done
    // here because it's possible for destroy to have been called while
-    // we still have qeueued outbound writes.
+    // we still have queued outbound writes.
    while (!stream->queue_.empty()) {
      nghttp2_stream_write& head = stream->queue_.front();
      if (head.req_wrap != nullptr)
        head.req_wrap->Done(UV_ECANCELED);
      stream->queue_.pop();
    }

-    delete stream;
+    // We can destroy the stream now if there are no writes for it
+    // already on the socket. Otherwise, we'll wait for the garbage collector
+    // to take care of cleaning up.
+    if (!stream->session()->HasWritesOnSocketForStream(stream))
+      delete stream;
  }, this, this->object());

  statistics_.end_time = uv_hrtime();
3 changes: 3 additions & 0 deletions src/node_http2.h
@@ -878,6 +878,9 @@ class Http2Session : public AsyncWrap, public StreamListener {
  // Removes a stream instance from this session
  inline void RemoveStream(Http2Stream* stream);

+  // Indicates whether there currently exist outgoing buffers for this stream.
+  bool HasWritesOnSocketForStream(Http2Stream* stream);
+
  // Write data to the session
  inline ssize_t Write(const uv_buf_t* bufs, size_t nbufs);

62 changes: 62 additions & 0 deletions test/parallel/test-http2-write-finishes-after-stream-destroy.js
@@ -0,0 +1,62 @@
// Flags: --expose-gc
'use strict';
const common = require('../common');
if (!common.hasCrypto)
  common.skip('missing crypto');
const assert = require('assert');
const http2 = require('http2');
const makeDuplexPair = require('../common/duplexpair');

// Make sure the Http2Stream destructor works, since we don't clean the
// stream up like we would otherwise do.
process.on('exit', global.gc);

{
  const { clientSide, serverSide } = makeDuplexPair();

  let serverSideHttp2Stream;
  let serverSideHttp2StreamDestroyed = false;
  const server = http2.createServer();
  server.on('stream', common.mustCall((stream, headers) => {
    serverSideHttp2Stream = stream;
    stream.respond({
      'content-type': 'text/html',
      ':status': 200
    });

    const originalWrite = serverSide._write;
    serverSide._write = (buf, enc, cb) => {
      if (serverSideHttp2StreamDestroyed) {
        serverSide.destroy();
        serverSide.write = () => {};
      } else {
        setImmediate(() => {
          originalWrite.call(serverSide, buf, enc, () => setImmediate(cb));
        });
      }
    };

    // Enough data to fit into a single *session* window,
    // not enough data to fit into a single *stream* window.
    stream.write(Buffer.alloc(40000));
  }));

  server.emit('connection', serverSide);

  const client = http2.connect('http://localhost:80', {
    createConnection: common.mustCall(() => clientSide)
  });

  const req = client.request({ ':path': '/' });

  req.on('response', common.mustCall((headers) => {
    assert.strictEqual(headers[':status'], 200);
  }));

  req.on('data', common.mustCallAtLeast(() => {
    if (!serverSideHttp2StreamDestroyed) {
      serverSideHttp2Stream.destroy();
      serverSideHttp2StreamDestroyed = true;
    }
  }));
}
