
Commit

KVM: arm64: Take a pointer to walker data in kvm_dereference_pteref()
Rather than passing through the state of the KVM_PGTABLE_WALK_SHARED
flag, just take a pointer to the whole walker structure instead. Move
around struct kvm_pgtable and the RCU indirection such that the
associated ifdeffery remains in one place while ensuring the walker +
flags definitions precede their use.

No functional change intended.

Signed-off-by: Oliver Upton <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
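In short, the helper's signature changes as follows (a condensed view; the full definitions appear in the diff below):

/* Before: each caller passed the shared state explicitly. */
static inline kvm_pte_t *kvm_dereference_pteref(kvm_pteref_t pteref, bool shared);

/* After: the walker carries its own flags, so callers just hand over the walker. */
static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						 kvm_pteref_t pteref);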
oupton authored and Marc Zyngier committed Nov 17, 2022
1 parent 1577cb5 commit d312e58
Showing 2 changed files with 76 additions and 74 deletions.
144 changes: 73 additions & 71 deletions arch/arm64/include/asm/kvm_pgtable.h
@@ -37,54 +37,6 @@ static inline u64 kvm_get_parange(u64 mmfr0)

typedef u64 kvm_pte_t;

-/*
- * RCU cannot be used in a non-kernel context such as the hyp. As such, page
- * table walkers used in hyp do not call into RCU and instead use other
- * synchronization mechanisms (such as a spinlock).
- */
-#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
-
-typedef kvm_pte_t *kvm_pteref_t;
-
-static inline kvm_pte_t *kvm_dereference_pteref(kvm_pteref_t pteref, bool shared)
-{
-	return pteref;
-}
-
-static inline void kvm_pgtable_walk_begin(void) {}
-static inline void kvm_pgtable_walk_end(void) {}
-
-static inline bool kvm_pgtable_walk_lock_held(void)
-{
-	return true;
-}
-
-#else
-
-typedef kvm_pte_t __rcu *kvm_pteref_t;
-
-static inline kvm_pte_t *kvm_dereference_pteref(kvm_pteref_t pteref, bool shared)
-{
-	return rcu_dereference_check(pteref, !shared);
-}
-
-static inline void kvm_pgtable_walk_begin(void)
-{
-	rcu_read_lock();
-}
-
-static inline void kvm_pgtable_walk_end(void)
-{
-	rcu_read_unlock();
-}
-
-static inline bool kvm_pgtable_walk_lock_held(void)
-{
-	return rcu_read_lock_held();
-}
-
-#endif

#define KVM_PTE_VALID BIT(0)

#define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
@@ -212,29 +164,6 @@ enum kvm_pgtable_prot
typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
enum kvm_pgtable_prot prot);

-/**
- * struct kvm_pgtable - KVM page-table.
- * @ia_bits:		Maximum input address size, in bits.
- * @start_level:	Level at which the page-table walk starts.
- * @pgd:		Pointer to the first top-level entry of the page-table.
- * @mm_ops:		Memory management callbacks.
- * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
- * @flags:		Stage-2 page-table flags.
- * @force_pte_cb:	Function that returns true if page level mappings must
- *			be used instead of block mappings.
- */
-struct kvm_pgtable {
-	u32					ia_bits;
-	u32					start_level;
-	kvm_pteref_t				pgd;
-	struct kvm_pgtable_mm_ops		*mm_ops;
-
-	/* Stage-2 only */
-	struct kvm_s2_mmu			*mmu;
-	enum kvm_pgtable_stage2_flags		flags;
-	kvm_pgtable_force_pte_cb_t		force_pte_cb;
-};

/**
* enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
* @KVM_PGTABLE_WALK_LEAF: Visit leaf entries, including invalid
@@ -285,6 +214,79 @@ struct kvm_pgtable_walker {
const enum kvm_pgtable_walk_flags flags;
};

+/*
+ * RCU cannot be used in a non-kernel context such as the hyp. As such, page
+ * table walkers used in hyp do not call into RCU and instead use other
+ * synchronization mechanisms (such as a spinlock).
+ */
+#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
+
+typedef kvm_pte_t *kvm_pteref_t;
+
+static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
+						 kvm_pteref_t pteref)
+{
+	return pteref;
+}
+
+static inline void kvm_pgtable_walk_begin(void) {}
+static inline void kvm_pgtable_walk_end(void) {}
+
+static inline bool kvm_pgtable_walk_lock_held(void)
+{
+	return true;
+}
+
+#else
+
+typedef kvm_pte_t __rcu *kvm_pteref_t;
+
+static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
+						 kvm_pteref_t pteref)
+{
+	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
+}
+
+static inline void kvm_pgtable_walk_begin(void)
+{
+	rcu_read_lock();
+}
+
+static inline void kvm_pgtable_walk_end(void)
+{
+	rcu_read_unlock();
+}
+
+static inline bool kvm_pgtable_walk_lock_held(void)
+{
+	return rcu_read_lock_held();
+}
+
+#endif
+
+/**
+ * struct kvm_pgtable - KVM page-table.
+ * @ia_bits:		Maximum input address size, in bits.
+ * @start_level:	Level at which the page-table walk starts.
+ * @pgd:		Pointer to the first top-level entry of the page-table.
+ * @mm_ops:		Memory management callbacks.
+ * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
+ * @flags:		Stage-2 page-table flags.
+ * @force_pte_cb:	Function that returns true if page level mappings must
+ *			be used instead of block mappings.
+ */
+struct kvm_pgtable {
+	u32					ia_bits;
+	u32					start_level;
+	kvm_pteref_t				pgd;
+	struct kvm_pgtable_mm_ops		*mm_ops;
+
+	/* Stage-2 only */
+	struct kvm_s2_mmu			*mmu;
+	enum kvm_pgtable_stage2_flags		flags;
+	kvm_pgtable_force_pte_cb_t		force_pte_cb;
+};

/**
* kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
* @pgt: Uninitialised page-table structure to initialise.
6 changes: 3 additions & 3 deletions arch/arm64/kvm/hyp/pgtable.c
@@ -188,7 +188,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
				      kvm_pteref_t pteref, u32 level)
{
	enum kvm_pgtable_walk_flags flags = data->walker->flags;
-	kvm_pte_t *ptep = kvm_dereference_pteref(pteref, flags & KVM_PGTABLE_WALK_SHARED);
+	kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
	struct kvm_pgtable_visit_ctx ctx = {
		.ptep	= ptep,
		.old	= READ_ONCE(*ptep),
@@ -558,7 +558,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
-	pgt->mm_ops->put_page(kvm_dereference_pteref(pgt->pgd, false));
+	pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
	pgt->pgd = NULL;
}

@@ -1241,7 +1241,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
-	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(pgt->pgd, false), pgd_sz);
+	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
	pgt->pgd = NULL;
}

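In both destroy paths above, the walker handed to kvm_dereference_pteref() does not set KVM_PGTABLE_WALK_SHARED, so in the kernel (non-hyp) build the call reduces to the same check as the old "shared = false" argument. A minimal sketch of that expansion, based on the definition added to kvm_pgtable.h above:

/*
 * Sketch only: what kvm_dereference_pteref(&walker, pgt->pgd) evaluates to
 * in the non-hyp build.
 */
kvm_pte_t *pgd = rcu_dereference_check(pgt->pgd,
				       !(walker.flags & KVM_PGTABLE_WALK_SHARED));
/*
 * The destroy walkers do not set KVM_PGTABLE_WALK_SHARED, so the condition
 * is true and the dereference is permitted outside an RCU read-side
 * critical section, matching the old "shared = false" behaviour.
 */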
