fix(provider): ensure postcard buffers are appropriately sized
dignifiedquire committed Mar 16, 2023
1 parent 564152e · commit d0af2c6
Showing 2 changed files with 7 additions and 14 deletions.
src/protocol.rs: 4 changes (2 additions & 2 deletions)

@@ -40,12 +40,12 @@ pub(crate) struct Request {
     pub name: Hash,
 }
 
-#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone)]
+#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone, MaxSize)]
 pub(crate) struct Response {
     pub data: Res,
 }
 
-#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone)]
+#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone, MaxSize)]
 pub(crate) enum Res {
     NotFound,
     // If found, a stream of bao data is sent as next message.
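
A minimal sketch of what deriving `MaxSize` provides, assuming postcard's experimental `max_size` API and its derive feature (exact feature names may differ). The types below are hypothetical stand-ins for the real `Response`/`Res`, chosen so every field has a bounded encoding:

```rust
use postcard::experimental::max_size::MaxSize;
use serde::{Deserialize, Serialize};

// Hypothetical stand-ins for the real types: every field has a bounded
// encoding, so the derive can compute a compile-time upper bound.
#[derive(Serialize, Deserialize, MaxSize)]
struct Response {
    data: Res,
}

#[derive(Serialize, Deserialize, MaxSize)]
enum Res {
    NotFound,
    Found { size: u64, outboard_len: u64 },
}

fn main() {
    // POSTCARD_MAX_SIZE is the largest number of bytes the postcard
    // encoding of this type can ever occupy.
    println!("max encoded size: {}", Response::POSTCARD_MAX_SIZE);
}
```

That constant is what src/provider.rs below uses to size the reusable response buffer instead of a hard-coded 1024.
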
src/provider.rs: 17 changes (5 additions & 12 deletions)

Expand Up @@ -24,6 +24,7 @@ use anyhow::{ensure, Context, Result};
use bytes::{Bytes, BytesMut};
use futures::future;
use futures::Stream;
use postcard::experimental::max_size::MaxSize;
use quic_rpc::server::RpcChannel;
use quic_rpc::transport::flume::FlumeConnection;
use quic_rpc::transport::misc::DummyServerEndpoint;
@@ -922,7 +923,6 @@ async fn create_collection_inner(
     let mut db = HashMap::with_capacity(data_sources.len() + 1);
     let mut blobs = Vec::with_capacity(data_sources.len());
     let mut total_blobs_size: u64 = 0;
-    let mut blobs_encoded_size_estimate = 0;
 
     // compute outboards in parallel, using tokio's blocking thread pool
     let outboards = data_sources.into_iter().map(|data| {
@@ -974,7 +974,6 @@
                 .unwrap_or_default()
                 .to_string()
         });
-        blobs_encoded_size_estimate += name.len() + 32;
         blobs.push(Blob { name, hash });
     }
 
@@ -983,14 +982,8 @@
         blobs,
         total_blobs_size,
     };
-    blobs_encoded_size_estimate += c.name.len();
-
-    // NOTE: we can't use the postcard::MaxSize to estimate the encoding buffer size
-    // because the Collection and Blobs have `String` fields.
-    // So instead, we are tracking the filename + hash sizes of each blob, plus an extra 1024
-    // to account for any postcard encoding data.
-    let mut buffer = BytesMut::zeroed(blobs_encoded_size_estimate + 1024);
-    let data = postcard::to_slice(&c, &mut buffer)?;
+
+    let data = postcard::to_stdvec(&c).context("blob encoding")?;
     let (outboard, hash) = abao::encode::outboard(&data);
     let hash = Hash::from(hash);
     db.insert(
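
The collection type contains `String` fields, so it has no compile-time size bound; switching to `postcard::to_stdvec` lets the encoder grow a `Vec<u8>` to exactly the size it needs, removing the hand-rolled estimate. A rough sketch with a hypothetical collection type, assuming postcard's `use-std` feature:

```rust
use serde::{Deserialize, Serialize};

// Hypothetical stand-in for the real `Collection`: the `String` field means
// there is no compile-time upper bound on the encoded size.
#[derive(Serialize, Deserialize)]
struct Collection {
    name: String,
    total_blobs_size: u64,
}

fn main() -> Result<(), postcard::Error> {
    let c = Collection {
        name: "example".into(),
        total_blobs_size: 42,
    };

    // to_stdvec allocates exactly as much as the encoding needs, so there is
    // no size estimate and no risk of an undersized buffer.
    let data: Vec<u8> = postcard::to_stdvec(&c)?;
    assert!(!data.is_empty());
    Ok(())
}
```
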
@@ -1009,8 +1002,8 @@ async fn write_response<W: AsyncWrite + Unpin>(
     let response = Response { data: res };
 
     // TODO: do not transfer blob data as part of the responses
-    if buffer.len() < 1024 {
-        buffer.resize(1024, 0u8);
+    if buffer.len() < Response::POSTCARD_MAX_SIZE {
+        buffer.resize(Response::POSTCARD_MAX_SIZE, 0u8);
     }
     let used = postcard::to_slice(&response, buffer)?;

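Since `Response` now derives `MaxSize`, the reusable buffer can be grown once to `POSTCARD_MAX_SIZE` and then encoded into with `postcard::to_slice`, which is guaranteed to fit. A minimal sketch of that pattern, using a hypothetical response type rather than the real one:

```rust
use bytes::BytesMut;
use postcard::experimental::max_size::MaxSize;
use serde::Serialize;

// Hypothetical fixed-size response standing in for the real `Response`.
#[derive(Serialize, MaxSize)]
struct Response {
    code: u32,
}

// Grow the reused buffer up to the compile-time upper bound so `to_slice`
// can never run out of room for this type, then report the bytes used.
fn encode_into(buffer: &mut BytesMut, response: &Response) -> postcard::Result<usize> {
    if buffer.len() < Response::POSTCARD_MAX_SIZE {
        buffer.resize(Response::POSTCARD_MAX_SIZE, 0u8);
    }
    let used = postcard::to_slice(response, buffer)?;
    Ok(used.len())
}

fn main() {
    let mut buffer = BytesMut::new();
    let n = encode_into(&mut buffer, &Response { code: 7 }).unwrap();
    println!("encoded {n} bytes");
}
```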
