author	Zhang, Yanmin <yanmin.zhang@intel.com>	2006-07-10 04:44:49 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-10 13:24:21 -0700
commit	b6174df5eec9cdfd598c03d6d0807e344e109213 (patch)
tree	d61c8627138a8feee31de8320e337251d567fca9 /fs
parent	d6b7d3b62069be60d5b13358bac8670dacdd7a81 (diff)
[PATCH] mmap zero-length hugetlb file with PROT_NONE to protect a hugetlb virtual area
Sometimes, applications need the call sequence below to succeed even though
"/mnt/hugepages/file1" doesn't exist yet:

	fd = open("/mnt/hugepages/file1", O_CREAT|O_RDWR, 0755);
	*addr = mmap(NULL, 0x1024*1024*256, PROT_NONE, 0, fd, 0);

For regular pages (or files) the call above works, but for huge pages it
fails, because hugetlbfs_file_mmap() rejects the mapping when
(!(vma->vm_flags & VM_WRITE) && len > inode->i_size).

This capability is useful on ia64 when a process wants to protect an area
in region 4 so that other threads can't read or write it.  A well-known JVM
(Java Virtual Machine) implementation on IA64 needs this capability.

Signed-off-by: Zhang Yanmin <yanmin.zhang@intel.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: Hugh Dickins <hugh@veritas.com>
[ Expand-on-mmap semantics again... this time matching normal fs's.  wli ]
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
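For context, a minimal userspace sketch of the call sequence this patch enables
might look like the following; the mount point /mnt/hugepages, the 256 MB length
and the MAP_SHARED flag are illustrative assumptions, and the length must be a
multiple of the system's huge page size:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Assumed hugetlbfs mount point; the file does not have to exist yet. */
	const char *path = "/mnt/hugepages/file1";
	/* Assumed reservation size; must be a multiple of the huge page size. */
	size_t len = 256UL * 1024 * 1024;

	int fd = open(path, O_CREAT | O_RDWR, 0755);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Map the still-empty hugetlb file with PROT_NONE to reserve and
	 * protect the virtual area.  Before this patch the call failed,
	 * because len > inode->i_size on a mapping without VM_WRITE.
	 */
	void *addr = mmap(NULL, len, PROT_NONE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("reserved %zu bytes at %p\n", len, addr);

	munmap(addr, len);
	close(fd);
	return 0;
}

The mapping is then simply left inaccessible, serving as the protected guard
area described in the ia64 region 4 use case above.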
Diffstat (limited to 'fs')
-rw-r--r--	fs/hugetlbfs/inode.c |  4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6449cb69796..c3920c96dad 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -83,8 +83,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
ret = -ENOMEM;
len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
- if (!(vma->vm_flags & VM_WRITE) && len > inode->i_size)
- goto out;
if (vma->vm_flags & VM_MAYSHARE &&
hugetlb_reserve_pages(inode, vma->vm_pgoff >> (HPAGE_SHIFT-PAGE_SHIFT),
@@ -93,7 +91,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
ret = 0;
hugetlb_prefault_arch_hook(vma->vm_mm);
- if (inode->i_size < len)
+ if (vma->vm_flags & VM_WRITE && inode->i_size < len)
inode->i_size = len;
out:
mutex_unlock(&inode->i_mutex);
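For illustration only, a hedged sketch of the resulting i_size semantics as seen
from userspace (the path and size are assumptions, error handling is omitted for
brevity, and the writable mapping assumes enough huge pages are configured): a
PROT_NONE mapping no longer grows the file, while a writable shared mapping still
expands it, matching the expand-on-mmap behaviour of normal filesystems.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	/* Assumed size; must be a multiple of the huge page size. */
	size_t len = 256UL * 1024 * 1024;
	int fd = open("/mnt/hugepages/file2", O_CREAT | O_RDWR, 0755);

	/* With this patch applied, a PROT_NONE mapping leaves i_size at 0. */
	mmap(NULL, len, PROT_NONE, MAP_SHARED, fd, 0);
	fstat(fd, &st);
	printf("after PROT_NONE map:  size=%lld\n", (long long)st.st_size);

	/* A writable shared mapping still expands the file to len. */
	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	fstat(fd, &st);
	printf("after PROT_WRITE map: size=%lld\n", (long long)st.st_size);

	close(fd);
	return 0;
}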