truncate: new helpers
Introduce new truncate helpers truncate_pagecache and inode_newsize_ok.
vmtruncate is also consolidated from mm/memory.c and mm/nommu.c into
mm/truncate.c.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
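As a rough sketch of how the two new helpers compose (illustrative only; example_setsize is an invented name, and only inode_newsize_ok() and truncate_pagecache() are introduced by this commit), a filesystem truncate path would validate the new size, update i_size, then drop the now-stale pagecache:

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Hypothetical truncate path built on the new helpers. The name
 * example_setsize and the overall shape are assumptions for
 * illustration, not code from this commit.
 */
static int example_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;
	int error;

	/* Enforce RLIMIT_FSIZE and sb->s_maxbytes before changing anything. */
	error = inode_newsize_ok(inode, newsize);
	if (error)
		return error;

	i_size_write(inode, newsize);

	/* Unmap mappings and discard pagecache beyond the new end-of-file
	 * (a no-op when the file grows, since newsize >= oldsize). */
	truncate_pagecache(inode, oldsize, newsize);
	return 0;
}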
---
 mm/memory.c | 62 +++-----------------------------------------------------------

diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -297,7 +297,8 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		unsigned long addr = vma->vm_start;
 
 		/*
-		 * Hide vma from rmap and vmtruncate before freeing pgtables
+		 * Hide vma from rmap and truncate_pagecache before freeing
+		 * pgtables
 		 */
 		anon_vma_unlink(vma);
 		unlink_file_vma(vma);
@@ -2407,7 +2408,7 @@ restart:
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file.  This will be rounded down to a PAGE_SIZE
- * boundary.  Note that this is different from vmtruncate(), which
+ * boundary.  Note that this is different from truncate_pagecache(), which
  * must keep the partial page.  In contrast, we must get rid of
  * partial pages.
  * @holelen: size of prospective hole in bytes.  This will be rounded
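To make the documented rounding contract concrete, here is a brief sketch of a caller punching a hole in a mapped file (the helper name and values are invented; the final even_cows argument, where 1 means private COWed pages are zapped too, is an assumption about the signature being documented above):

/*
 * Illustrative only: zap every mapping of the byte range
 * [start, start + len) of a file. Per the kerneldoc above,
 * unmap_mapping_range() itself rounds start down to a PAGE_SIZE
 * boundary, and the partial page is discarded, not kept.
 */
static void example_zap_range(struct inode *inode, loff_t start, loff_t len)
{
	unmap_mapping_range(inode->i_mapping, start, len, 1);
}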
@@ -2458,63 +2459,6 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-/**
- * vmtruncate - unmap mappings "freed" by truncate() syscall
- * @inode: inode of the file used
- * @offset: file offset to start truncating
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page.  Ugly, but necessary.
- */
-int vmtruncate(struct inode * inode, loff_t offset)
-{
-	if (inode->i_size < offset) {
-		unsigned long limit;
-
-		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-		if (limit != RLIM_INFINITY && offset > limit)
-			goto out_sig;
-		if (offset > inode->i_sb->s_maxbytes)
-			goto out_big;
-		i_size_write(inode, offset);
-	} else {
-		struct address_space *mapping = inode->i_mapping;
-
-		/*
-		 * truncation of in-use swapfiles is disallowed - it would
-		 * cause subsequent swapout to scribble on the now-freed
-		 * blocks.
-		 */
-		if (IS_SWAPFILE(inode))
-			return -ETXTBSY;
-		i_size_write(inode, offset);
-
-		/*
-		 * unmap_mapping_range is called twice, first simply for
-		 * efficiency so that truncate_inode_pages does fewer
-		 * single-page unmaps.  However after this first call, and
-		 * before truncate_inode_pages finishes, it is possible for
-		 * private pages to be COWed, which remain after
-		 * truncate_inode_pages finishes, hence the second
-		 * unmap_mapping_range call must be made for correctness.
-		 */
-		unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-		truncate_inode_pages(mapping, offset);
-		unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-	}
-
-	if (inode->i_op->truncate)
-		inode->i_op->truncate(inode);
-	return 0;
-
-out_sig:
-	send_sig(SIGXFSZ, current, 0);
-out_big:
-	return -EFBIG;
-}
-EXPORT_SYMBOL(vmtruncate);
-
 int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 {
 	struct address_space *mapping = inode->i_mapping;
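The 57 lines deleted above are not lost: per the commit message they are consolidated into mm/truncate.c. As a hedged reconstruction of the split (the mm/truncate.c side is not shown in this hunk, so treat this as a sketch of the shape rather than the verbatim result), the RLIMIT_FSIZE/s_maxbytes checks become inode_newsize_ok(), while the unmap/truncate/unmap sequence, including the second unmap_mapping_range() call needed because private pages can be COWed between the first pass and truncate_inode_pages(), becomes truncate_pagecache():

/*
 * Sketch of the shrink half of the old vmtruncate() recast as the new
 * helper. This mirrors the code removed above; the real body lives in
 * mm/truncate.c and may differ in detail.
 */
void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
{
	if (new < old) {
		struct address_space *mapping = inode->i_mapping;

		/* First pass is for efficiency, so truncate_inode_pages
		 * does fewer single-page unmaps. */
		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
		truncate_inode_pages(mapping, new);
		/* Second pass catches private pages COWed in between;
		 * it is required for correctness. */
		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	}
}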