Remove support for Poplar1. (#3482)
DAP-11 removed multi-collection and, with it, the ability to use Poplar1
usefully. This commit makes the following changes:

* Application-level support for Poplar1 is removed.
* Unit tests that used Poplar1 now use libprio-rs' dummy::Vdaf (a minimal
  sketch of this migration pattern follows the list).
* A test-only ability to configure the fake dummy::Vdaf for taskprov is
  introduced; this is required to migrate some taskprov unit tests.
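
As a minimal sketch of that migration pattern, assuming libprio-rs'
prio::vdaf::dummy module as exercised in the diff below (dummy::Vdaf::new,
dummy::AggregationParam, and plain u8 measurements); the fixture function
itself is hypothetical and exists only for illustration:

use prio::vdaf::dummy;

// Hypothetical fixture mirroring the migrated tests: a dummy VDAF configured
// for two prepare rounds, so an aggregation job cannot finish on its first
// step. Multi-step behavior is the property Poplar1 previously provided here.
fn multi_step_dummy_fixture() -> (dummy::Vdaf, dummy::AggregationParam, u8) {
    // Two rounds, matching VdafInstance::Fake { rounds: 2 } in the diff.
    let vdaf = dummy::Vdaf::new(2);
    // Arbitrary aggregation parameter and measurement, as used by the tests.
    let aggregation_param = dummy::AggregationParam(7);
    let measurement: u8 = 13;
    (vdaf, aggregation_param, measurement)
}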
branlwyd authored Nov 14, 2024
1 parent 111ee9c commit 6f18526
Showing 14 changed files with 609 additions and 977 deletions.
23 changes: 0 additions & 23 deletions aggregator/src/aggregator.rs
@@ -90,9 +90,7 @@ use prio::{
topology::ping_pong::{PingPongState, PingPongTopology},
vdaf::{
self,
poplar1::Poplar1,
prio3::{Prio3, Prio3Count, Prio3Histogram, Prio3Sum, Prio3SumVec},
xof::XofTurboShake128,
},
};
use rand::{thread_rng, Rng};
@@ -1004,12 +1002,6 @@ impl<C: Clock> TaskAggregator<C> {
}
},

VdafInstance::Poplar1 { bits } => {
let vdaf = Poplar1::new_turboshake128(*bits);
let verify_key = task.vdaf_verify_key()?;
VdafOps::Poplar1(Arc::new(vdaf), verify_key)
}

#[cfg(feature = "test-util")]
VdafInstance::Fake { rounds } => VdafOps::Fake(Arc::new(dummy::Vdaf::new(*rounds))),

@@ -1300,10 +1292,6 @@ enum VdafOps {
VerifyKey<VERIFY_KEY_LENGTH>,
vdaf_ops_strategies::Prio3FixedPointBoundedL2VecSum,
),
Poplar1(
Arc<Poplar1<XofTurboShake128, 16>>,
VerifyKey<VERIFY_KEY_LENGTH>,
),
#[cfg(feature = "test-util")]
Fake(Arc<dummy::Vdaf>),
}
@@ -1458,17 +1446,6 @@ macro_rules! vdaf_ops_dispatch {
}
}

crate::aggregator::VdafOps::Poplar1(vdaf, verify_key) => {
let $vdaf = vdaf;
let $verify_key = verify_key;
type $Vdaf = ::prio::vdaf::poplar1::Poplar1<::prio::vdaf::xof::XofTurboShake128, 16>;
const $VERIFY_KEY_LENGTH: usize = ::janus_core::vdaf::VERIFY_KEY_LENGTH;
type $DpStrategy = janus_core::dp::NoDifferentialPrivacy;
let $dp_strategy = &Arc::new(janus_core::dp::NoDifferentialPrivacy);
let body = $body;
body
}

#[cfg(feature = "test-util")]
crate::aggregator::VdafOps::Fake(vdaf) => {
let $vdaf = vdaf;
36 changes: 13 additions & 23 deletions aggregator/src/aggregator/aggregate_init_tests.rs
@@ -30,12 +30,7 @@ use janus_messages::{
};
use prio::{
codec::Encode,
idpf::IdpfInput,
vdaf::{
self, dummy,
poplar1::{Poplar1, Poplar1AggregationParam},
xof::XofTurboShake128,
},
vdaf::{self, dummy},
};
use rand::random;
use serde_json::json;
@@ -179,16 +174,12 @@ pub(super) async fn setup_aggregate_init_test() -> AggregationJobInitTestCase<0,
.await
}

async fn setup_poplar1_aggregate_init_test(
) -> AggregationJobInitTestCase<16, Poplar1<XofTurboShake128, 16>> {
let aggregation_param =
Poplar1AggregationParam::try_from_prefixes(Vec::from([IdpfInput::from_bools(&[false])]))
.unwrap();
async fn setup_multi_step_aggregate_init_test() -> AggregationJobInitTestCase<0, dummy::Vdaf> {
setup_aggregate_init_test_for_vdaf(
Poplar1::new_turboshake128(1),
VdafInstance::Poplar1 { bits: 1 },
aggregation_param,
IdpfInput::from_bools(&[true]),
dummy::Vdaf::new(2),
VdafInstance::Fake { rounds: 2 },
dummy::AggregationParam(7),
13,
)
.await
}
@@ -501,8 +492,9 @@ async fn aggregation_job_mutation_report_shares() {

#[tokio::test]
async fn aggregation_job_mutation_report_aggregations() {
// We must run Poplar1 in this test so that the aggregation job won't finish on the first step
let test_case = setup_poplar1_aggregate_init_test().await;
// We set up a multi-step VDAF in this test so that the aggregation job won't finish on the
// first step.
let test_case = setup_multi_step_aggregate_init_test().await;

// Generate some new reports using the existing reports' metadata, but varying the measurement
// values such that the prepare state computed during aggregation initialization won't match the
@@ -514,10 +506,7 @@
.map(|s| {
test_case
.prepare_init_generator
.next_with_metadata(
s.report_share().metadata().clone(),
&IdpfInput::from_bools(&[false]),
)
.next_with_metadata(s.report_share().metadata().clone(), &13)
.0
})
.collect();
@@ -614,8 +603,9 @@ async fn aggregation_job_intolerable_clock_skew() {

#[tokio::test]
async fn aggregation_job_init_two_step_vdaf_idempotence() {
// We must run Poplar1 in this test so that the aggregation job won't finish on the first step
let test_case = setup_poplar1_aggregate_init_test().await;
// We set up a multi-step VDAF in this test so that the aggregation job won't finish on the
// first step.
let test_case = setup_multi_step_aggregate_init_test().await;

// Send the aggregation job init request again. We should get an identical response back.
let mut response = put_aggregation_job(
95 changes: 36 additions & 59 deletions aggregator/src/aggregator/aggregation_job_continue.rs
@@ -422,21 +416,16 @@ mod tests {
use janus_core::{
test_util::{install_test_trace_subscriber, runtime::TestRuntime},
time::{IntervalExt, MockClock},
vdaf::{VdafInstance, VERIFY_KEY_LENGTH},
vdaf::VdafInstance,
};
use janus_messages::{
batch_mode::TimeInterval, AggregationJobContinueReq, AggregationJobId,
AggregationJobInitializeReq, AggregationJobResp, AggregationJobStep, Interval,
PartialBatchSelector, PrepareContinue, PrepareResp, PrepareStepResult, Role,
};
use prio::{
codec::Encode,
idpf::IdpfInput,
vdaf::{
poplar1::{Poplar1, Poplar1AggregationParam},
xof::XofTurboShake128,
Aggregator,
},
codec::Encode as _,
vdaf::{dummy, Aggregator},
};
use rand::random;
use serde_json::json;
Expand All @@ -461,41 +456,37 @@ mod tests {

/// Set up a helper with an aggregation job in step 0
#[allow(clippy::unit_arg)]
async fn setup_aggregation_job_continue_test(
) -> AggregationJobContinueTestCase<VERIFY_KEY_LENGTH, Poplar1<XofTurboShake128, 16>> {
async fn setup_aggregation_job_continue_test() -> AggregationJobContinueTestCase<0, dummy::Vdaf>
{
// Prepare datastore & request.
install_test_trace_subscriber();

let aggregation_job_id = random();
let task =
TaskBuilder::new(BatchMode::TimeInterval, VdafInstance::Poplar1 { bits: 1 }).build();
TaskBuilder::new(BatchMode::TimeInterval, VdafInstance::Fake { rounds: 2 }).build();
let helper_task = task.helper_view().unwrap();
let clock = MockClock::default();
let ephemeral_datastore = ephemeral_datastore().await;
let meter = noop_meter();
let datastore = Arc::new(ephemeral_datastore.datastore(clock.clone()).await);
let keypair = datastore.put_global_hpke_key().await.unwrap();

let aggregation_parameter = Poplar1AggregationParam::try_from_prefixes(Vec::from([
IdpfInput::from_bools(&[false]),
]))
.unwrap();
let aggregation_parameter = dummy::AggregationParam(7);
let prepare_init_generator = PrepareInitGenerator::new(
clock.clone(),
helper_task.clone(),
keypair.config().clone(),
Poplar1::new_turboshake128(1),
aggregation_parameter.clone(),
dummy::Vdaf::new(2),
aggregation_parameter,
);

let (prepare_init, transcript) =
prepare_init_generator.next(&IdpfInput::from_bools(&[true]));
let (prepare_init, transcript) = prepare_init_generator.next(&13);

datastore
.run_unnamed_tx(|tx| {
let (task, aggregation_param, prepare_init, transcript) = (
helper_task.clone(),
aggregation_parameter.clone(),
aggregation_parameter,
prepare_init.clone(),
transcript.clone(),
);
@@ -506,11 +497,7 @@
.await
.unwrap();

tx.put_aggregation_job(&AggregationJob::<
VERIFY_KEY_LENGTH,
TimeInterval,
Poplar1<XofTurboShake128, 16>,
>::new(
tx.put_aggregation_job(&AggregationJob::<0, TimeInterval, dummy::Vdaf>::new(
*task.id(),
aggregation_job_id,
aggregation_param,
@@ -522,21 +509,18 @@
.await
.unwrap();

tx.put_report_aggregation::<VERIFY_KEY_LENGTH, Poplar1<XofTurboShake128, 16>>(
&ReportAggregation::new(
*task.id(),
aggregation_job_id,
*prepare_init.report_share().metadata().id(),
*prepare_init.report_share().metadata().time(),
0,
None,
ReportAggregationState::WaitingHelper {
prepare_state: transcript.helper_prepare_transitions[0]
.prepare_state()
.clone(),
},
),
)
tx.put_report_aggregation::<0, dummy::Vdaf>(&ReportAggregation::new(
*task.id(),
aggregation_job_id,
*prepare_init.report_share().metadata().id(),
*prepare_init.report_share().metadata().time(),
0,
None,
ReportAggregationState::WaitingHelper {
prepare_state: *transcript.helper_prepare_transitions[0]
.prepare_state(),
},
))
.await
.unwrap();

@@ -583,7 +567,7 @@ mod tests {
/// Set up a helper with an aggregation job in step 1.
#[allow(clippy::unit_arg)]
async fn setup_aggregation_job_continue_step_recovery_test(
) -> AggregationJobContinueTestCase<VERIFY_KEY_LENGTH, Poplar1<XofTurboShake128, 16>> {
) -> AggregationJobContinueTestCase<0, dummy::Vdaf> {
let mut test_case = setup_aggregation_job_continue_test().await;

let first_continue_response = post_aggregation_job_and_decode(
@@ -728,9 +712,8 @@ mod tests {
async fn aggregation_job_continue_step_recovery_mutate_continue_request() {
let test_case = setup_aggregation_job_continue_step_recovery_test().await;

let (unrelated_prepare_init, unrelated_transcript) = test_case
.prepare_init_generator
.next(&IdpfInput::from_bools(&[false]));
let (unrelated_prepare_init, unrelated_transcript) =
test_case.prepare_init_generator.next(&13);

let (before_aggregation_job, before_report_aggregations) = test_case
.datastore
@@ -746,16 +729,16 @@
.unwrap();

let aggregation_job = tx
.get_aggregation_job::<VERIFY_KEY_LENGTH, TimeInterval, Poplar1<XofTurboShake128, 16>>(
.get_aggregation_job::<0, TimeInterval, dummy::Vdaf>(
&task_id,
&aggregation_job_id,
)
.await
.unwrap();

let report_aggregations = tx
.get_report_aggregations_for_aggregation_job::<VERIFY_KEY_LENGTH, Poplar1<XofTurboShake128, 16>>(
&Poplar1::new_turboshake128(1),
.get_report_aggregations_for_aggregation_job::<0, dummy::Vdaf>(
&dummy::Vdaf::new(2),
&Role::Helper,
&task_id,
&aggregation_job_id,
@@ -798,16 +781,16 @@ mod tests {
(*test_case.task.id(), test_case.aggregation_job_id);
Box::pin(async move {
let aggregation_job = tx
.get_aggregation_job::<VERIFY_KEY_LENGTH, TimeInterval, Poplar1<XofTurboShake128, 16>>(
.get_aggregation_job::<0, TimeInterval, dummy::Vdaf>(
&task_id,
&aggregation_job_id,
)
.await
.unwrap();

let report_aggregations = tx
.get_report_aggregations_for_aggregation_job::<VERIFY_KEY_LENGTH, Poplar1<XofTurboShake128, 16>>(
&Poplar1::new_turboshake128(1),
.get_report_aggregations_for_aggregation_job::<0, dummy::Vdaf>(
&dummy::Vdaf::new(2),
&Role::Helper,
&task_id,
&aggregation_job_id,
@@ -835,12 +818,8 @@ mod tests {
let (task_id, aggregation_job_id) =
(*test_case.task.id(), test_case.aggregation_job_id);
Box::pin(async move {
// This is a cheat: dummy_vdaf only has a single step, so we artificially force
// this job into step 2 so that we can send a request for step 1 and force a
// step mismatch error instead of tripping the check for a request to continue
// to step 0.
let aggregation_job = tx
.get_aggregation_job::<VERIFY_KEY_LENGTH, TimeInterval, Poplar1<XofTurboShake128, 16>>(
.get_aggregation_job::<0, TimeInterval, dummy::Vdaf>(
&task_id,
&aggregation_job_id,
)
@@ -931,7 +910,7 @@ mod tests {
(*test_case.task.id(), test_case.aggregation_job_id);
Box::pin(async move {
let aggregation_job = tx
.get_aggregation_job::<VERIFY_KEY_LENGTH, TimeInterval, Poplar1<XofTurboShake128, 16>>(
.get_aggregation_job::<0, TimeInterval, dummy::Vdaf>(
&task_id,
&aggregation_job_id,
)
@@ -948,9 +927,7 @@
.unwrap();

// Subsequent attempts to initialize the job should fail.
let (prep_init, _) = test_case
.prepare_init_generator
.next(&IdpfInput::from_bools(&[true]));
let (prep_init, _) = test_case.prepare_init_generator.next(&13);
let init_req = AggregationJobInitializeReq::new(
test_case.aggregation_parameter.get_encoded().unwrap(),
PartialBatchSelector::new_time_interval(),
