vdpa: Make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macro
[ Upstream commit 729ce5a ]

It is a nice refactor to make use of the
PFN_PHYS/PFN_UP/PFN_DOWN helper macros.

Signed-off-by: Cai Huoqing <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Acked-by: Jason Wang <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Stable-dep-of: 0823dc6 ("vhost-vdpa: switch to use vmf_insert_pfn() in the fault handler")
Signed-off-by: Sasha Levin <[email protected]>
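
For reference, the helpers this commit swaps in live in include/linux/pfn.h. A simplified sketch of their definitions (paraphrased here, not copied verbatim from any particular kernel release):

#define PFN_DOWN(x)   ((x) >> PAGE_SHIFT)                   /* pfn containing address/size x; rounds down */
#define PFN_UP(x)     (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) /* pages needed to cover x; rounds up */
#define PFN_PHYS(x)   ((phys_addr_t)(x) << PAGE_SHIFT)      /* physical address of pfn x */

Besides readability, PFN_PHYS() casts to phys_addr_t before shifting, which avoids truncating the result on configurations where physical addresses are wider than unsigned long.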
caihuoqing1990 authored and gregkh committed Aug 19, 2024
1 parent b21ea49 commit 06e9e6a
Showing 1 changed file with 12 additions and 12 deletions.
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -519,15 +519,15 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
         unsigned long pfn, pinned;
 
         while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
-                pinned = map->size >> PAGE_SHIFT;
-                for (pfn = map->addr >> PAGE_SHIFT;
+                pinned = PFN_DOWN(map->size);
+                for (pfn = PFN_DOWN(map->addr);
                      pinned > 0; pfn++, pinned--) {
                         page = pfn_to_page(pfn);
                         if (map->perm & VHOST_ACCESS_WO)
                                 set_page_dirty_lock(page);
                         unpin_user_page(page);
                 }
-                atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+                atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
                 vhost_iotlb_map_free(iotlb, map);
         }
 }
@@ -589,7 +589,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
         if (r)
                 vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
         else
-                atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+                atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
 
         return r;
 }
@@ -643,15 +643,15 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
         if (msg->perm & VHOST_ACCESS_WO)
                 gup_flags |= FOLL_WRITE;
 
-        npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+        npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
         if (!npages) {
                 ret = -EINVAL;
                 goto free;
         }
 
         mmap_read_lock(dev->mm);
 
-        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+        lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
         if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
                 ret = -ENOMEM;
                 goto unlock;
@@ -685,9 +685,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 
                 if (last_pfn && (this_pfn != last_pfn + 1)) {
                         /* Pin a contiguous chunk of memory */
-                        csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+                        csize = PFN_PHYS(last_pfn - map_pfn + 1);
                         ret = vhost_vdpa_map(v, iova, csize,
-                                             map_pfn << PAGE_SHIFT,
+                                             PFN_PHYS(map_pfn),
                                              msg->perm);
                         if (ret) {
                                 /*
@@ -711,13 +711,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                         last_pfn = this_pfn;
                 }
 
-                cur_base += pinned << PAGE_SHIFT;
+                cur_base += PFN_PHYS(pinned);
                 npages -= pinned;
         }
 
         /* Pin the rest chunk */
-        ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
-                             map_pfn << PAGE_SHIFT, msg->perm);
+        ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+                             PFN_PHYS(map_pfn), msg->perm);
 out:
         if (ret) {
                 if (nchunks) {
@@ -961,7 +961,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
 
         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
         if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
-                            notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+                            PFN_DOWN(notify.addr), PAGE_SIZE,
                             vma->vm_page_prot))
                 return VM_FAULT_SIGBUS;
 
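
As a sanity check of the substitutions in vhost_vdpa_process_iotlb_update(), here is a small userspace sketch (hypothetical values, PAGE_SHIFT assumed to be 12, macro definitions mirroring the kernel helpers shown above) demonstrating that PFN_UP() yields the same page count as the open-coded PAGE_ALIGN()-then-shift expression, and PFN_PHYS() the same byte size as the open-coded shift:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define PFN_DOWN(x)   ((x) >> PAGE_SHIFT)
#define PFN_UP(x)     (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_PHYS(x)   ((uint64_t)(x) << PAGE_SHIFT)

int main(void)
{
        uint64_t size = 0x3456;            /* hypothetical msg->size */
        uint64_t iova = 0x10000789;        /* hypothetical IOVA, not page aligned */
        uint64_t off  = iova & ~PAGE_MASK; /* offset into the first page */

        /* old: PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT
         * new: PFN_UP(msg->size + (iova & ~PAGE_MASK)) */
        assert((PAGE_ALIGN(size + off) >> PAGE_SHIFT) == PFN_UP(size + off));

        /* old: (last_pfn - map_pfn + 1) << PAGE_SHIFT
         * new: PFN_PHYS(last_pfn - map_pfn + 1) */
        uint64_t map_pfn = 0x1234, last_pfn = 0x1237; /* hypothetical pfns */
        assert(((last_pfn - map_pfn + 1) << PAGE_SHIFT) ==
               PFN_PHYS(last_pfn - map_pfn + 1));

        printf("npages = %llu, chunk bytes = %llu\n",
               (unsigned long long)PFN_UP(size + off),
               (unsigned long long)PFN_PHYS(last_pfn - map_pfn + 1));
        return 0;
}

In other words, the conversion is mechanical; the only behavioral nuance is the phys_addr_t cast inside PFN_PHYS() noted earlier.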
