diff --git a/crates/accelerate/src/sabre_swap/layer.rs b/crates/accelerate/src/sabre_swap/layer.rs index 6c792b5364cd..30d8cb8012cf 100644 --- a/crates/accelerate/src/sabre_swap/layer.rs +++ b/crates/accelerate/src/sabre_swap/layer.rs @@ -15,17 +15,24 @@ use indexmap::IndexMap; use ndarray::prelude::*; use rustworkx_core::petgraph::prelude::*; -use crate::nlayout::{NLayout, VirtualQubit}; +use crate::nlayout::PhysicalQubit; /// A container for the current non-routable parts of the front layer. This only ever holds /// two-qubit gates; the only reason a 0q- or 1q operation can be unroutable is because it has an /// unsatisfied 2q predecessor, which disqualifies it from being in the front layer. +/// +/// It would be more algorithmically natural for this struct to work in terms of virtual qubits, +/// because then a swap insertion would not change the data contained. However, for each swap we +/// insert, we score tens or hundreds, yet the subsequent update only affects two qubits. This +/// makes it more efficient to do everything in terms of physical qubits, so the conversion between +/// physical and virtual qubits via the layout happens once per inserted swap and on layer +/// extension, not for every swap trialled. pub struct FrontLayer { /// Map of the (index to the) node to the qubits it acts on. - nodes: IndexMap, + nodes: IndexMap, /// Map of each qubit to the node that acts on it and the other qubit that node acts on, if this /// qubit is active (otherwise `None`). - qubits: Vec>, + qubits: Vec>, } impl FrontLayer { @@ -42,7 +49,7 @@ impl FrontLayer { } /// Add a node into the front layer, with the two qubits it operates on. - pub fn insert(&mut self, index: NodeIndex, qubits: [VirtualQubit; 2]) { + pub fn insert(&mut self, index: NodeIndex, qubits: [PhysicalQubit; 2]) { let [a, b] = qubits; self.qubits[a.index()] = Some((index, b)); self.qubits[b.index()] = Some((index, a)); @@ -51,20 +58,20 @@ impl FrontLayer { /// Remove a node from the front layer. pub fn remove(&mut self, index: &NodeIndex) { - let [q0, q1] = self.nodes.remove(index).unwrap(); - self.qubits[q0.index()] = None; - self.qubits[q1.index()] = None; + let [a, b] = self.nodes.remove(index).unwrap(); + self.qubits[a.index()] = None; + self.qubits[b.index()] = None; } /// Query whether a qubit has an active node. #[inline] - pub fn is_active(&self, qubit: VirtualQubit) -> bool { + pub fn is_active(&self, qubit: PhysicalQubit) -> bool { self.qubits[qubit.index()].is_some() } /// Calculate the score _difference_ caused by this swap, compared to not making the swap. #[inline] - pub fn score(&self, swap: [VirtualQubit; 2], layout: &NLayout, dist: &ArrayView2) -> f64 { + pub fn score(&self, swap: [PhysicalQubit; 2], dist: &ArrayView2) -> f64 { if self.is_empty() { return 0.0; } @@ -77,25 +84,21 @@ impl FrontLayer { let [a, b] = swap; let mut total = 0.0; if let Some((_, c)) = self.qubits[a.index()] { - let p_c = c.to_phys(layout); - total += dist[[b.to_phys(layout).index(), p_c.index()]] - - dist[[a.to_phys(layout).index(), p_c.index()]] + total += dist[[b.index(), c.index()]] - dist[[a.index(), c.index()]] } if let Some((_, c)) = self.qubits[b.index()] { - let p_c = c.to_phys(layout); - total += dist[[a.to_phys(layout).index(), p_c.index()]] - - dist[[b.to_phys(layout).index(), p_c.index()]] + total += dist[[a.index(), c.index()]] - dist[[b.index(), c.index()]] } total / self.nodes.len() as f64 } /// Calculate the total absolute of the current front layer on the given layer. 
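To make the effect of this change concrete, here is a self-contained sketch of the swap-scoring arithmetic once everything is expressed in physical indices: no `to_phys` lookup is needed per trialled swap. Plain `usize` indices and nested `Vec`s stand in for the crate's `PhysicalQubit` and `ArrayView2<f64>`, and the normalisation by front-layer size is omitted, so this illustrates the idea rather than reproducing the patch's code.

```rust
// Standalone sketch (illustrative types, not the patch's `FrontLayer`): the score
// delta of a trial swap reduces to distance-matrix lookups on physical indices.
type Phys = usize;

/// `partners[q]` is `Some(other)` when a front-layer 2q gate acts on `q` and `other`.
fn swap_score_delta(partners: &[Option<Phys>], dist: &[Vec<f64>], swap: [Phys; 2]) -> f64 {
    let [a, b] = swap;
    let mut total = 0.0;
    // If `a` is active, its partner `c` stays put while the state on `a` moves to `b`.
    if let Some(c) = partners[a] {
        total += dist[b][c] - dist[a][c];
    }
    // Symmetrically for `b`.
    if let Some(c) = partners[b] {
        total += dist[a][c] - dist[b][c];
    }
    total
}

fn main() {
    // Straight line 0-1-2-3, so distance is |i - j|.
    let dist: Vec<Vec<f64>> = (0..4)
        .map(|i| (0..4).map(|j| (i as f64 - j as f64).abs()).collect::<Vec<f64>>())
        .collect();
    // A single gate waiting on physical qubits 0 and 3.
    let partners = vec![Some(3), None, None, Some(0)];
    // Swapping 0 and 1 moves the pair one step closer together.
    assert_eq!(swap_score_delta(&partners, &dist, [0, 1]), -1.0);
    println!("ok");
}
```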
- pub fn total_score(&self, layout: &NLayout, dist: &ArrayView2) -> f64 { + pub fn total_score(&self, dist: &ArrayView2) -> f64 { if self.is_empty() { return 0.0; } self.iter() - .map(|(_, &[a, b])| dist[[a.to_phys(layout).index(), b.to_phys(layout).index()]]) + .map(|(_, &[a, b])| dist[[a.index(), b.index()]]) .sum::() / self.nodes.len() as f64 } @@ -105,29 +108,46 @@ impl FrontLayer { pub fn routable_after( &self, routable: &mut Vec, - swap: &[VirtualQubit; 2], - layout: &NLayout, + swap: &[PhysicalQubit; 2], coupling: &DiGraph<(), ()>, ) { let [a, b] = *swap; if let Some((node, c)) = self.qubits[a.index()] { - if coupling.contains_edge( - NodeIndex::new(b.to_phys(layout).index()), - NodeIndex::new(c.to_phys(layout).index()), - ) { + if coupling.contains_edge(NodeIndex::new(b.index()), NodeIndex::new(c.index())) { routable.push(node); } } if let Some((node, c)) = self.qubits[b.index()] { - if coupling.contains_edge( - NodeIndex::new(a.to_phys(layout).index()), - NodeIndex::new(c.to_phys(layout).index()), - ) { + if coupling.contains_edge(NodeIndex::new(a.index()), NodeIndex::new(c.index())) { routable.push(node); } } } + /// Apply a physical swap to the current layout data structure. + pub fn apply_swap(&mut self, swap: [PhysicalQubit; 2]) { + let [a, b] = swap; + match (self.qubits[a.index()], self.qubits[b.index()]) { + (Some((index1, _)), Some((index2, _))) if index1 == index2 => { + let entry = self.nodes.get_mut(&index1).unwrap(); + *entry = [entry[1], entry[0]]; + return; + } + _ => {} + } + if let Some((index, c)) = self.qubits[a.index()] { + self.qubits[c.index()] = Some((index, b)); + let entry = self.nodes.get_mut(&index).unwrap(); + *entry = if *entry == [a, c] { [b, c] } else { [c, b] }; + } + if let Some((index, c)) = self.qubits[b.index()] { + self.qubits[c.index()] = Some((index, a)); + let entry = self.nodes.get_mut(&index).unwrap(); + *entry = if *entry == [b, c] { [a, c] } else { [c, a] }; + } + self.qubits.swap(a.index(), b.index()); + } + /// True if there are no nodes in the current layer. #[inline] pub fn is_empty(&self) -> bool { @@ -135,7 +155,7 @@ impl FrontLayer { } /// Iterator over the nodes and the pair of qubits they act on. - pub fn iter(&self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator { self.nodes.iter() } @@ -145,47 +165,41 @@ impl FrontLayer { } /// Iterator over the qubits that have active nodes on them. - pub fn iter_active(&self) -> impl Iterator { + pub fn iter_active(&self) -> impl Iterator { self.nodes.values().flatten() } } -/// This is largely similar to the `FrontLayer` struct but can have more than one node on each active -/// qubit. This does not have `remove` method (and its data structures aren't optimised for fast -/// removal), since the extended set is built from scratch each time a new gate is routed. +/// This structure is currently reconstructed after each gate is routed, so there's no need to +/// worry about tracking gate indices or anything like that. We track length manually just to +/// avoid a summation. pub struct ExtendedSet { - nodes: IndexMap, - qubits: Vec>, + qubits: Vec>, + len: usize, } impl ExtendedSet { - pub fn new(num_qubits: u32, max_size: usize) -> Self { + pub fn new(num_qubits: u32) -> Self { ExtendedSet { - nodes: IndexMap::with_capacity_and_hasher(max_size, ahash::RandomState::default()), qubits: vec![Vec::new(); num_qubits as usize], + len: 0, } } /// Add a node and its active qubits to the extended set. 
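The new `FrontLayer::apply_swap` above is what keeps the physical-indexed bookkeeping valid once a swap is committed. A minimal sketch of the same retargeting logic, using a bare partner table rather than the patch's `nodes`/`qubits` maps (illustrative names and types only; the real structure also carries the owning node index in each slot):

```rust
// Sketch of the retargeting idea behind `apply_swap`, on a bare partner table.
fn apply_swap(partners: &mut [Option<usize>], swap: [usize; 2]) {
    let [a, b] = swap;
    // If `a` and `b` are the two qubits of the same gate, nothing else refers to them.
    if partners[a] == Some(b) {
        return;
    }
    // Otherwise any third parties must be retargeted before the two slots move.
    if let Some(c) = partners[a] {
        partners[c] = Some(b);
    }
    if let Some(c) = partners[b] {
        partners[c] = Some(a);
    }
    partners.swap(a, b);
}

fn main() {
    // Gates on (0, 2) and (1, 3).
    let mut partners = vec![Some(2), Some(3), Some(0), Some(1)];
    apply_swap(&mut partners, [0, 1]);
    // After swapping physical qubits 0 and 1, the gates sit on (1, 2) and (0, 3).
    assert_eq!(partners, vec![Some(3), Some(2), Some(1), Some(0)]);
    println!("ok");
}
```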
- pub fn insert(&mut self, index: NodeIndex, qubits: &[VirtualQubit; 2]) -> bool { - let [a, b] = *qubits; - if self.nodes.insert(index, *qubits).is_none() { - self.qubits[a.index()].push(b); - self.qubits[b.index()].push(a); - true - } else { - false - } + pub fn push(&mut self, qubits: [PhysicalQubit; 2]) { + let [a, b] = qubits; + self.qubits[a.index()].push(b); + self.qubits[b.index()].push(a); + self.len += 1; } /// Calculate the score of applying the given swap, relative to not applying it. - pub fn score(&self, swap: [VirtualQubit; 2], layout: &NLayout, dist: &ArrayView2) -> f64 { - if self.nodes.is_empty() { + pub fn score(&self, swap: [PhysicalQubit; 2], dist: &ArrayView2) -> f64 { + if self.len == 0 { return 0.0; } let [a, b] = swap; - let p_a = a.to_phys(layout); - let p_b = b.to_phys(layout); let mut total = 0.0; for other in self.qubits[a.index()].iter() { // If the other qubit is also active then the score won't have changed, but since the @@ -193,42 +207,68 @@ impl ExtendedSet { if *other == b { continue; } - let p_other = other.to_phys(layout); - total += dist[[p_b.index(), p_other.index()]] - dist[[p_a.index(), p_other.index()]]; + total += dist[[b.index(), other.index()]] - dist[[a.index(), other.index()]]; } for other in self.qubits[b.index()].iter() { if *other == a { continue; } - let p_other = other.to_phys(layout); - total += dist[[p_a.index(), p_other.index()]] - dist[[p_b.index(), p_other.index()]]; + total += dist[[a.index(), other.index()]] - dist[[b.index(), other.index()]]; } - total / self.nodes.len() as f64 + total / self.len as f64 } /// Calculate the total absolute score of this set of nodes over the given layout. - pub fn total_score(&self, layout: &NLayout, dist: &ArrayView2) -> f64 { - if self.nodes.is_empty() { + pub fn total_score(&self, dist: &ArrayView2) -> f64 { + if self.len == 0 { return 0.0; } - self.nodes - .values() - .map(|&[a, b]| dist[[a.to_phys(layout).index(), b.to_phys(layout).index()]]) + self.qubits + .iter() + .enumerate() + .map(move |(a_index, others)| { + others + .iter() + .map(|b| { + let b_index = b.index(); + if a_index <= b_index { + dist[[a_index, b_index]] + } else { + 0.0 + } + }) + .sum::() + }) .sum::() - / self.nodes.len() as f64 + / self.len as f64 } /// Clear all nodes from the extended set. pub fn clear(&mut self) { - for &[a, b] in self.nodes.values() { - self.qubits[a.index()].clear(); - self.qubits[b.index()].clear(); + for others in self.qubits.iter_mut() { + others.clear() } - self.nodes.clear() + self.len = 0; } /// Number of nodes in the set. pub fn len(&self) -> usize { - self.nodes.len() + self.len + } + + /// Apply a physical swap to the current layout data structure. 
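Because each lookahead pair is recorded under both of its qubits, a total over the structure must avoid double counting; the hunk above does this by only scoring entries whose first index is not larger than the second. A standalone sketch of that bookkeeping, with plain `Vec`s in place of the patch's `ExtendedSet` (hypothetical names, not the crate's types):

```rust
// Sketch: every pair is stored under both of its qubits, so a total over the
// structure counts only the `a <= b` copy, and the pair count is tracked directly.
struct Lookahead {
    qubits: Vec<Vec<usize>>, // qubits[q] holds the partners of q across recorded pairs
    len: usize,              // number of pairs, tracked to avoid a summation later
}

impl Lookahead {
    fn new(num_qubits: usize) -> Self {
        Lookahead {
            qubits: vec![Vec::new(); num_qubits],
            len: 0,
        }
    }

    fn push(&mut self, [a, b]: [usize; 2]) {
        self.qubits[a].push(b);
        self.qubits[b].push(a);
        self.len += 1;
    }

    fn total_score(&self, dist: &[Vec<f64>]) -> f64 {
        if self.len == 0 {
            return 0.0;
        }
        let mut total = 0.0;
        for (a, others) in self.qubits.iter().enumerate() {
            for &b in others {
                if a <= b {
                    total += dist[a][b]; // the mirrored (b, a) entry is skipped
                }
            }
        }
        total / self.len as f64
    }
}

fn main() {
    let dist: Vec<Vec<f64>> = (0..4)
        .map(|i| (0..4).map(|j| (i as f64 - j as f64).abs()).collect::<Vec<f64>>())
        .collect();
    let mut set = Lookahead::new(4);
    set.push([0, 3]);
    set.push([1, 2]);
    // (3 + 1) over 2 recorded pairs.
    assert_eq!(set.total_score(&dist), 2.0);
    println!("ok");
}
```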
+ pub fn apply_swap(&mut self, swap: [PhysicalQubit; 2]) { + let [a, b] = swap; + for other in self.qubits[a.index()].iter_mut() { + if *other == b { + *other = a + } + } + for other in self.qubits[b.index()].iter_mut() { + if *other == a { + *other = b + } + } + self.qubits.swap(a.index(), b.index()); } } diff --git a/crates/accelerate/src/sabre_swap/mod.rs b/crates/accelerate/src/sabre_swap/mod.rs index 6a1aa9b7bc4e..d4345a02e2c1 100644 --- a/crates/accelerate/src/sabre_swap/mod.rs +++ b/crates/accelerate/src/sabre_swap/mod.rs @@ -38,7 +38,7 @@ use rustworkx_core::token_swapper::token_swapper; use std::cmp::Ordering; use crate::getenv_use_multiple_threads; -use crate::nlayout::{NLayout, PhysicalQubit, VirtualQubit}; +use crate::nlayout::{NLayout, PhysicalQubit}; use layer::{ExtendedSet, FrontLayer}; use neighbor_table::NeighborTable; @@ -120,7 +120,7 @@ impl NodeBlockResults { pub struct BlockResult { #[pyo3(get)] pub result: SabreResult, - pub swap_epilogue: Vec<[VirtualQubit; 2]>, + pub swap_epilogue: Vec<[PhysicalQubit; 2]>, } #[pymethods] @@ -145,19 +145,15 @@ impl BlockResult { fn obtain_swaps<'a>( front_layer: &'a FrontLayer, neighbors: &'a NeighborTable, - layout: &'a NLayout, -) -> impl Iterator + 'a { - front_layer.iter_active().flat_map(move |&v| { - neighbors[v.to_phys(layout)] - .iter() - .filter_map(move |p_neighbor| { - let neighbor = p_neighbor.to_virt(layout); - if neighbor > v || !front_layer.is_active(neighbor) { - Some([v, neighbor]) - } else { - None - } - }) +) -> impl Iterator + 'a { + front_layer.iter_active().flat_map(move |&p| { + neighbors[p].iter().filter_map(move |&neighbor| { + if neighbor > p || !front_layer.is_active(neighbor) { + Some([p, neighbor]) + } else { + None + } + }) }) } @@ -168,6 +164,7 @@ fn populate_extended_set( extended_set: &mut ExtendedSet, dag: &SabreDAG, front_layer: &FrontLayer, + layout: &NLayout, required_predecessors: &mut [u32], ) { let mut to_visit = front_layer.iter_nodes().copied().collect::>(); @@ -183,7 +180,7 @@ fn populate_extended_set( if required_predecessors[successor_index] == 0 { if !dag.node_blocks.contains_key(&successor_index) { if let [a, b] = dag.dag[successor_node].qubits[..] { - extended_set.insert(successor_node, &[a, b]); + extended_set.push([a.to_phys(layout), b.to_phys(layout)]); } } to_visit.push(successor_node); @@ -328,10 +325,10 @@ fn swap_map_trial( initial_layout: &NLayout, ) -> (SabreResult, NLayout) { let max_iterations_without_progress = 10 * num_qubits as usize; - let mut out_map: HashMap> = HashMap::new(); + let mut out_map: HashMap> = HashMap::new(); let mut gate_order = Vec::with_capacity(dag.dag.node_count()); let mut front_layer = FrontLayer::new(num_qubits); - let mut extended_set = ExtendedSet::new(num_qubits, EXTENDED_SET_SIZE); + let mut extended_set = ExtendedSet::new(num_qubits); let mut required_predecessors: Vec = vec![0; dag.dag.node_count()]; let mut layout = initial_layout.clone(); let mut num_search_steps: u8 = 0; @@ -378,6 +375,7 @@ fn swap_map_trial( &mut extended_set, dag, &front_layer, + &layout, &mut required_predecessors, ); // Main logic loop; the front layer only becomes empty when all nodes have been routed. At @@ -386,16 +384,15 @@ fn swap_map_trial( // Reusable allocated storage space for choosing the best swap. This is owned outside of the // `choose_best_swap` function so that we don't need to reallocate and then re-grow the // collection on every entry. 
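For reference, the candidate generation in `obtain_swaps` above amounts to a neighbour scan over the active physical qubits, with an ordering trick so a swap between two active qubits is yielded only once. A rough sketch with plain adjacency lists rather than the crate's `NeighborTable` (hypothetical helper, not the patch's code):

```rust
// Rough sketch of the candidate-swap enumeration on plain adjacency lists.
fn obtain_swaps(active: &[bool], neighbors: &[Vec<usize>]) -> Vec<[usize; 2]> {
    let mut swaps = Vec::new();
    for p in 0..active.len() {
        if !active[p] {
            continue;
        }
        for &q in &neighbors[p] {
            // A swap between two active qubits would otherwise be generated from
            // both sides; the `q > p` ordering keeps exactly one copy.
            if q > p || !active[q] {
                swaps.push([p, q]);
            }
        }
    }
    swaps
}

fn main() {
    // Line 0-1-2-3 with a gate waiting on (1, 2): both endpoints are active.
    let neighbors = vec![vec![1], vec![0, 2], vec![1, 3], vec![2]];
    let active = vec![false, true, true, false];
    assert_eq!(obtain_swaps(&active, &neighbors), vec![[1, 0], [1, 2], [2, 3]]);
    println!("ok");
}
```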
- let mut swap_scratch = Vec::<[VirtualQubit; 2]>::new(); + let mut swap_scratch = Vec::<[PhysicalQubit; 2]>::new(); while !front_layer.is_empty() { - let mut current_swaps: Vec<[VirtualQubit; 2]> = Vec::new(); + let mut current_swaps: Vec<[PhysicalQubit; 2]> = Vec::new(); // Swap-mapping loop. This is the main part of the algorithm, which we repeat until we // either successfully route a node, or exceed the maximum number of attempts. while routable_nodes.is_empty() && current_swaps.len() <= max_iterations_without_progress { let best_swap = choose_best_swap( &front_layer, &extended_set, - &layout, neighbor_table, dist, &qubits_decay, @@ -403,16 +400,18 @@ fn swap_map_trial( &mut rng, &mut swap_scratch, ); - front_layer.routable_after(&mut routable_nodes, &best_swap, &layout, coupling_graph); + front_layer.routable_after(&mut routable_nodes, &best_swap, coupling_graph); + front_layer.apply_swap(best_swap); + extended_set.apply_swap(best_swap); + layout.swap_physical(best_swap[0], best_swap[1]); current_swaps.push(best_swap); - layout.swap_virtual(best_swap[0], best_swap[1]); num_search_steps += 1; if num_search_steps >= DECAY_RESET_INTERVAL { qubits_decay.fill(1.); num_search_steps = 0; } else { - qubits_decay[best_swap[0].to_phys(&layout).index()] += DECAY_RATE; - qubits_decay[best_swap[1].to_phys(&layout).index()] += DECAY_RATE; + qubits_decay[best_swap[0].index()] += DECAY_RATE; + qubits_decay[best_swap[1].index()] += DECAY_RATE; } } // If we exceeded the number of allowed attempts without successfully routing a node, we @@ -423,11 +422,18 @@ fn swap_map_trial( // ideally never be taken, and it doesn't matter if it's not the speediest---it's better to // keep the other path faster. if routable_nodes.is_empty() { - undo_swaps(&mut current_swaps, &mut layout); - let (node, qubits) = closest_operation(&front_layer, &layout, dist); - swaps_to_route(&mut current_swaps, &qubits, &layout, coupling_graph); + undo_swaps( + &mut current_swaps, + &mut front_layer, + &mut extended_set, + &mut layout, + ); + let (&node, &qubits) = closest_operation(&front_layer, dist); + swaps_to_route(&mut current_swaps, &qubits, coupling_graph); for &[a, b] in current_swaps.iter() { - layout.swap_virtual(a, b); + front_layer.apply_swap([a, b]); + extended_set.apply_swap([a, b]); + layout.swap_physical(a, b); } routable_nodes.push(node); } @@ -468,12 +474,12 @@ fn swap_map_trial( fn update_route( seed: u64, nodes: &[NodeIndex], - swaps: Vec<[VirtualQubit; 2]>, + swaps: Vec<[PhysicalQubit; 2]>, dag: &SabreDAG, layout: &NLayout, coupling: &DiGraph<(), ()>, gate_order: &mut Vec, - out_map: &mut HashMap>, + out_map: &mut HashMap>, front_layer: &mut FrontLayer, extended_set: &mut ExtendedSet, required_predecessors: &mut [u32], @@ -504,7 +510,13 @@ fn update_route( // its construction strongly to the iteration order through the front layer, it's not easy to // do better than just emptying it and rebuilding. 
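One consequence of working in physical indices is visible in the decay update above: the penalised entries are simply the swap's two elements. A sketch of that bookkeeping in isolation; the constants are placeholders and not necessarily the crate's `DECAY_RATE`/`DECAY_RESET_INTERVAL` values:

```rust
// Sketch of the decay bookkeeping in isolation (placeholder constants).
const DECAY_RATE: f64 = 0.001;
const DECAY_RESET_INTERVAL: u8 = 5;

fn note_swap(qubits_decay: &mut [f64], num_search_steps: &mut u8, swap: [usize; 2]) {
    *num_search_steps += 1;
    if *num_search_steps >= DECAY_RESET_INTERVAL {
        // Periodically forget the history so the penalty cannot grow without bound.
        qubits_decay.fill(1.0);
        *num_search_steps = 0;
    } else {
        // The swap is already expressed in physical indices, so the penalised
        // entries are just its two elements; no layout translation is needed.
        qubits_decay[swap[0]] += DECAY_RATE;
        qubits_decay[swap[1]] += DECAY_RATE;
    }
}

fn main() {
    let mut decay = vec![1.0; 4];
    let mut steps = 0u8;
    note_swap(&mut decay, &mut steps, [0, 1]);
    note_swap(&mut decay, &mut steps, [1, 2]);
    assert!((decay[1] - 1.002).abs() < 1e-12);
    println!("ok");
}
```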
extended_set.clear(); - populate_extended_set(extended_set, dag, front_layer, required_predecessors); + populate_extended_set( + extended_set, + dag, + front_layer, + layout, + required_predecessors, + ); } fn gen_swap_epilogue( @@ -512,7 +524,7 @@ fn gen_swap_epilogue( mut from_layout: NLayout, to_layout: &NLayout, seed: u64, -) -> Vec<[VirtualQubit; 2]> { +) -> Vec<[PhysicalQubit; 2]> { // Map physical location in from_layout to physical location in to_layout let mapping: HashMap = from_layout .iter_physical() @@ -538,9 +550,8 @@ fn gen_swap_epilogue( .map(|(l, r)| { let p_l = PhysicalQubit::new(l.index().try_into().unwrap()); let p_r = PhysicalQubit::new(r.index().try_into().unwrap()); - let ret = [p_l.to_virt(&from_layout), p_r.to_virt(&from_layout)]; from_layout.swap_physical(p_l, p_r); - ret + [p_l, p_r] }) .collect() } @@ -601,7 +612,7 @@ fn route_reachable_nodes( { // 2Q op that cannot be placed. Add it to the front layer // and move on. - front_layer.insert(node_id, [a, b]); + front_layer.insert(node_id, [a.to_phys(layout), b.to_phys(layout)]); continue; } _ => {} @@ -621,65 +632,68 @@ fn route_reachable_nodes( } /// Walk through the swaps in the given vector, undoing them on the layout and removing them. -fn undo_swaps(swaps: &mut Vec<[VirtualQubit; 2]>, layout: &mut NLayout) { - swaps - .drain(..) - .rev() - .for_each(|swap| layout.swap_virtual(swap[0], swap[1])); +fn undo_swaps( + swaps: &mut Vec<[PhysicalQubit; 2]>, + front_layer: &mut FrontLayer, + extended_set: &mut ExtendedSet, + layout: &mut NLayout, +) { + swaps.drain(..).rev().for_each(|swap| { + front_layer.apply_swap(swap); + extended_set.apply_swap(swap); + layout.swap_physical(swap[0], swap[1]); + }); } /// Find the node index and its associated virtual qubits that is currently the closest to being /// routable in terms of number of swaps. -fn closest_operation( - front_layer: &FrontLayer, - layout: &NLayout, - dist: &ArrayView2, -) -> (NodeIndex, [VirtualQubit; 2]) { - let (&node, qubits) = front_layer +fn closest_operation<'a>( + front_layer: &'a FrontLayer, + dist: &'_ ArrayView2, +) -> (&'a NodeIndex, &'a [PhysicalQubit; 2]) { + front_layer .iter() - .map(|(node, qubits)| (node, [qubits[0].to_phys(layout), qubits[1].to_phys(layout)])) .min_by(|(_, qubits_a), (_, qubits_b)| { dist[[qubits_a[0].index(), qubits_a[1].index()]] .partial_cmp(&dist[[qubits_b[0].index(), qubits_b[1].index()]]) .unwrap_or(Ordering::Equal) }) - .unwrap(); - (node, [qubits[0].to_virt(layout), qubits[1].to_virt(layout)]) + .unwrap() } /// Add the minimal set of swaps to the `swaps` vector that bring the two `qubits` together so that /// a 2q gate on them could be routed. 
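The `undo_swaps` rework above relies on a swap being its own inverse, so draining the recorded swaps in reverse order restores whatever structures they were applied to. A generic sketch of that pattern, where a bare position table stands in for the layout, front layer and extended set:

```rust
// Generic sketch of the undo pattern: replay the recorded swaps in reverse.
fn undo_swaps(swaps: &mut Vec<[usize; 2]>, positions: &mut [usize]) {
    swaps.drain(..).rev().for_each(|[a, b]| positions.swap(a, b));
}

fn main() {
    let mut positions: Vec<usize> = (0..4).collect();
    let original = positions.clone();
    let mut swaps = vec![[0, 1], [1, 2], [2, 3]];
    for &[a, b] in &swaps {
        positions.swap(a, b);
    }
    assert_ne!(positions, original);
    undo_swaps(&mut swaps, &mut positions);
    assert_eq!(positions, original);
    assert!(swaps.is_empty());
    println!("ok");
}
```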
fn swaps_to_route( - swaps: &mut Vec<[VirtualQubit; 2]>, - qubits: &[VirtualQubit; 2], - layout: &NLayout, + swaps: &mut Vec<[PhysicalQubit; 2]>, + qubits: &[PhysicalQubit; 2], coupling_graph: &DiGraph<(), ()>, ) { let mut shortest_paths: DictMap> = DictMap::new(); (dijkstra( coupling_graph, - NodeIndex::new(qubits[0].to_phys(layout).index()), - Some(NodeIndex::new(qubits[1].to_phys(layout).index())), + NodeIndex::new(qubits[0].index()), + Some(NodeIndex::new(qubits[1].index())), |_| Ok(1.), Some(&mut shortest_paths), ) as PyResult>>) .unwrap(); let shortest_path = shortest_paths - .get(&NodeIndex::new(qubits[1].to_phys(layout).index())) + .get(&NodeIndex::new(qubits[1].index())) .unwrap() .iter() .map(|n| PhysicalQubit::new(n.index() as u32)) .collect::>(); - // Insert greedy swaps along that shortest path + // Insert greedy swaps along that shortest path, splitting them between moving the left side + // and moving the right side to minimise the depth. One side needs to move up to the split + // point and the other can stop one short because the gate will be routable then. let split: usize = shortest_path.len() / 2; - let forwards = &shortest_path[1..split]; - let backwards = &shortest_path[split..shortest_path.len() - 1]; swaps.reserve(shortest_path.len() - 2); - for swap in forwards { - swaps.push([qubits[0], swap.to_virt(layout)]); + for i in 0..split { + swaps.push([shortest_path[i], shortest_path[i + 1]]); } - for swap in backwards.iter().rev() { - swaps.push([qubits[1], swap.to_virt(layout)]); + for i in 0..split - 1 { + let end = shortest_path.len() - 1 - i; + swaps.push([shortest_path[end], shortest_path[end - 1]]); } } @@ -687,37 +701,33 @@ fn swaps_to_route( fn choose_best_swap( layer: &FrontLayer, extended_set: &ExtendedSet, - layout: &NLayout, neighbor_table: &NeighborTable, dist: &ArrayView2, qubits_decay: &[f64], heuristic: &Heuristic, rng: &mut Pcg64Mcg, - best_swaps: &mut Vec<[VirtualQubit; 2]>, -) -> [VirtualQubit; 2] { + best_swaps: &mut Vec<[PhysicalQubit; 2]>, +) -> [PhysicalQubit; 2] { best_swaps.clear(); let mut min_score = f64::MAX; // The decay heuristic is the only one that actually needs the absolute score. 
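The split-path insertion above shares the swap distance between both endpoints instead of dragging one qubit the whole way. The sketch below reproduces the idea with a hard-coded path and loop bounds chosen to emit exactly `len - 2` swaps; it is not copied from the patch, and in the pass the path comes from the Dijkstra run over the coupling graph:

```rust
// Simplified sketch of the split-path idea (loop bounds differ from the patch).
fn swaps_along_path(path: &[usize]) -> Vec<[usize; 2]> {
    let n = path.len();
    let split = n / 2;
    let mut swaps = Vec::with_capacity(n.saturating_sub(2));
    // Walk the left state from path[0] up to path[split - 1] ...
    for i in 0..split.saturating_sub(1) {
        swaps.push([path[i], path[i + 1]]);
    }
    // ... and the right state from path[n - 1] down to path[split], which leaves
    // the two travelling states adjacent so the gate becomes routable.
    for i in (split + 1..n).rev() {
        swaps.push([path[i], path[i - 1]]);
    }
    swaps
}

fn main() {
    // A path of six physical qubits: two swaps from the left, two from the right.
    let path = vec![0, 1, 2, 3, 4, 5];
    assert_eq!(swaps_along_path(&path), vec![[0, 1], [1, 2], [5, 4], [4, 3]]);
    println!("ok");
}
```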
let absolute_score = match heuristic { Heuristic::Decay => { - layer.total_score(layout, dist) - + EXTENDED_SET_WEIGHT * extended_set.total_score(layout, dist) + layer.total_score(dist) + EXTENDED_SET_WEIGHT * extended_set.total_score(dist) } _ => 0.0, }; - for swap in obtain_swaps(layer, neighbor_table, layout) { + for swap in obtain_swaps(layer, neighbor_table) { let score = match heuristic { - Heuristic::Basic => layer.score(swap, layout, dist), + Heuristic::Basic => layer.score(swap, dist), Heuristic::Lookahead => { - layer.score(swap, layout, dist) - + EXTENDED_SET_WEIGHT * extended_set.score(swap, layout, dist) + layer.score(swap, dist) + EXTENDED_SET_WEIGHT * extended_set.score(swap, dist) } Heuristic::Decay => { - qubits_decay[swap[0].to_phys(layout).index()] - .max(qubits_decay[swap[1].to_phys(layout).index()]) + qubits_decay[swap[0].index()].max(qubits_decay[swap[1].index()]) * (absolute_score - + layer.score(swap, layout, dist) - + EXTENDED_SET_WEIGHT * extended_set.score(swap, layout, dist)) + + layer.score(swap, dist) + + EXTENDED_SET_WEIGHT * extended_set.score(swap, dist)) } }; if score < min_score - BEST_EPSILON { diff --git a/crates/accelerate/src/sabre_swap/swap_map.rs b/crates/accelerate/src/sabre_swap/swap_map.rs index eafecaf1d189..1947022345e1 100644 --- a/crates/accelerate/src/sabre_swap/swap_map.rs +++ b/crates/accelerate/src/sabre_swap/swap_map.rs @@ -14,13 +14,13 @@ use hashbrown::HashMap; use pyo3::exceptions::PyIndexError; use pyo3::prelude::*; -use crate::nlayout::VirtualQubit; +use crate::nlayout::PhysicalQubit; /// A container for required swaps before a gate qubit #[pyclass(module = "qiskit._accelerate.sabre_swap")] #[derive(Clone, Debug)] pub struct SwapMap { - pub map: HashMap>, + pub map: HashMap>, } #[pymethods] @@ -34,7 +34,7 @@ impl SwapMap { self.map.contains_key(&object) } - pub fn __getitem__(&self, object: usize) -> PyResult> { + pub fn __getitem__(&self, object: usize) -> PyResult> { match self.map.get(&object) { Some(val) => Ok(val.clone()), None => Err(PyIndexError::new_err(format!( diff --git a/qiskit/transpiler/passes/routing/sabre_swap.py b/qiskit/transpiler/passes/routing/sabre_swap.py index d7298f5370d6..cc67a5c9bd25 100644 --- a/qiskit/transpiler/passes/routing/sabre_swap.py +++ b/qiskit/transpiler/passes/routing/sabre_swap.py @@ -356,14 +356,8 @@ def empty_dag(block): def apply_swaps(dest_dag, swaps, layout): for a, b in swaps: - # The swaps that come out of Sabre are already in terms of the virtual qubits of the - # outermost DAG, since the scope binding occurred as the `SabreDAG` objects were built - # up; they're all provided to Sabre routing as full-width already. 
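For orientation, the three heuristics in the `choose_best_swap` hunk above combine per candidate swap as sketched below; only the shape of the formula follows the pass, and the weight constant here is illustrative rather than the crate's value:

```rust
// Sketch of how the per-swap scores combine for each heuristic.
const EXTENDED_SET_WEIGHT: f64 = 0.5;

enum Heuristic {
    Basic,
    Lookahead,
    Decay,
}

fn swap_score(
    heuristic: &Heuristic,
    basic: f64,      // front-layer score delta for this swap
    lookahead: f64,  // extended-set (lookahead) score delta for this swap
    absolute: f64,   // current total score, only needed by `Decay`
    decay: [f64; 2], // decay factors of the swap's two physical qubits
) -> f64 {
    match heuristic {
        Heuristic::Basic => basic,
        Heuristic::Lookahead => basic + EXTENDED_SET_WEIGHT * lookahead,
        Heuristic::Decay => {
            decay[0].max(decay[1]) * (absolute + basic + EXTENDED_SET_WEIGHT * lookahead)
        }
    }
}

fn main() {
    let (basic, lookahead, absolute, decay) = (-1.0, 2.0, 10.0, [1.0, 1.001]);
    assert_eq!(swap_score(&Heuristic::Basic, basic, lookahead, absolute, decay), -1.0);
    assert_eq!(swap_score(&Heuristic::Lookahead, basic, lookahead, absolute, decay), 0.0);
    let decayed = swap_score(&Heuristic::Decay, basic, lookahead, absolute, decay);
    assert!((decayed - 1.001 * 10.0).abs() < 1e-9);
    println!("ok");
}
```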
- qubits = ( - physical_qubits[layout.virtual_to_physical(a)], - physical_qubits[layout.virtual_to_physical(b)], - ) - layout.swap_virtual(a, b) + qubits = (physical_qubits[a], physical_qubits[b]) + layout.swap_physical(a, b) dest_dag.apply_operation_back(SwapGate(), qubits, (), check=False) def recurse(dest_dag, source_dag, result, root_logical_map, layout): diff --git a/test/python/transpiler/test_sabre_layout.py b/test/python/transpiler/test_sabre_layout.py index ff9788521c16..7edd9e2346e7 100644 --- a/test/python/transpiler/test_sabre_layout.py +++ b/test/python/transpiler/test_sabre_layout.py @@ -60,7 +60,7 @@ def test_5q_circuit_20q_coupling(self): pass_.run(dag) layout = pass_.property_set["layout"] - self.assertEqual([layout[q] for q in circuit.qubits], [16, 7, 11, 12, 13]) + self.assertEqual([layout[q] for q in circuit.qubits], [11, 10, 16, 5, 17]) def test_6q_circuit_20q_coupling(self): """Test finds layout for 6q circuit on 20q device.""" @@ -158,7 +158,7 @@ def test_layout_with_classical_bits(self): self.assertIsInstance(res, QuantumCircuit) layout = res._layout.initial_layout self.assertEqual( - [layout[q] for q in qc.qubits], [13, 10, 11, 12, 17, 14, 22, 26, 5, 16, 25, 19, 7, 8] + [layout[q] for q in qc.qubits], [11, 19, 18, 16, 26, 8, 21, 1, 5, 15, 3, 12, 14, 13] ) # pylint: disable=line-too-long diff --git a/test/python/transpiler/test_sabre_swap.py b/test/python/transpiler/test_sabre_swap.py index 39ca93628a75..61c680ecc0fe 100644 --- a/test/python/transpiler/test_sabre_swap.py +++ b/test/python/transpiler/test_sabre_swap.py @@ -692,9 +692,9 @@ def test_pre_intra_post_if_else(self): qc.cx(0, 2) qc.x(1) qc.measure(0, 0) - true_body = QuantumCircuit(qreg, creg[[0]]) + true_body = QuantumCircuit(qreg[:], creg[[0]]) true_body.cx(0, 2) - false_body = QuantumCircuit(qreg, creg[[0]]) + false_body = QuantumCircuit(qreg[:], creg[[0]]) false_body.cx(0, 4) qc.if_else((creg[0], 0), true_body, false_body, qreg, creg[[0]]) qc.h(3) @@ -724,11 +724,11 @@ def test_pre_intra_post_if_else(self): efalse_body.swap(2, 3) expected.if_else((creg[0], 0), etrue_body, efalse_body, qreg[[1, 2, 3, 4]], creg[[0]]) - expected.h(3) expected.swap(1, 2) + expected.h(3) expected.cx(3, 2) expected.barrier() - expected.measure(qreg, creg[[1, 2, 0, 3, 4]]) + expected.measure(qreg[[2, 0, 1, 3, 4]], creg) self.assertEqual(dag_to_circuit(cdag), expected) def test_if_expr(self):