vm_area_operations: kill ->migrate()
The only instance this method has ever grown was one in kernfs - one that calls ->migrate() of another vm_ops if it exists.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
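For reference, the one instance being killed is a pure pass-through. The sketch below is condensed from the removed kernfs code in the diff that follows (the identifiers are the kernel's own; only the shortened control flow is an editorial summary, not new API):

static int kernfs_vma_migrate(struct vm_area_struct *vma,
			      const nodemask_t *from, const nodemask_t *to,
			      unsigned long flags)
{
	struct kernfs_open_file *of = kernfs_of(vma->vm_file);
	int ret = 0;

	/* nothing to forward to, or the kernfs node is going away */
	if (!of->vm_ops || !kernfs_get_active(of->kn))
		return 0;
	/* relay to the wrapped vm_ops; no migration work of its own */
	if (of->vm_ops->migrate)
		ret = of->vm_ops->migrate(vma, from, to, flags);
	kernfs_put_active(of->kn);
	return ret;
}

With that gone, the ->migrate hook in vm_operations_struct and its only caller, migrate_vmas(), can be removed as well, as the diff below does.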
@@ -448,27 +448,6 @@ static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
 	return pol;
 }
 
-static int kernfs_vma_migrate(struct vm_area_struct *vma,
-			      const nodemask_t *from, const nodemask_t *to,
-			      unsigned long flags)
-{
-	struct file *file = vma->vm_file;
-	struct kernfs_open_file *of = kernfs_of(file);
-	int ret;
-
-	if (!of->vm_ops)
-		return 0;
-
-	if (!kernfs_get_active(of->kn))
-		return 0;
-
-	ret = 0;
-	if (of->vm_ops->migrate)
-		ret = of->vm_ops->migrate(vma, from, to, flags);
-
-	kernfs_put_active(of->kn);
-	return ret;
-}
 #endif
 
 static const struct vm_operations_struct kernfs_vm_ops = {
@@ -479,7 +458,6 @@ static const struct vm_operations_struct kernfs_vm_ops = {
 #ifdef CONFIG_NUMA
 	.set_policy	= kernfs_vma_set_policy,
 	.get_policy	= kernfs_vma_get_policy,
-	.migrate	= kernfs_vma_migrate,
 #endif
 };
 
@@ -36,9 +36,6 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 
 extern int migrate_prep(void);
 extern int migrate_prep_local(void);
-extern int migrate_vmas(struct mm_struct *mm,
-		const nodemask_t *from, const nodemask_t *to,
-		unsigned long flags);
 extern void migrate_page_copy(struct page *newpage, struct page *page);
 extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page);
@@ -57,13 +54,6 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
 
-static inline int migrate_vmas(struct mm_struct *mm,
-			const nodemask_t *from, const nodemask_t *to,
-			unsigned long flags)
-{
-	return -ENOSYS;
-}
-
 static inline void migrate_page_copy(struct page *newpage,
 				     struct page *page) {}
 
@@ -286,8 +286,6 @@ struct vm_operations_struct {
 	 */
 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 					unsigned long addr);
-	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
-		const nodemask_t *to, unsigned long flags);
 #endif
 	/* called by sys_remap_file_pages() to populate non-linear mapping */
 	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
@@ -1047,10 +1047,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 
 	down_read(&mm->mmap_sem);
 
-	err = migrate_vmas(mm, from, to, flags);
-	if (err)
-		goto out;
-
 	/*
 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
@@ -1130,7 +1126,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 		if (err < 0)
 			break;
 	}
-out:
 	up_read(&mm->mmap_sem);
 	if (err < 0)
 		return err;
mm/migrate.c
@@ -1536,27 +1536,6 @@ out:
 	return err;
 }
 
-/*
- * Call migration functions in the vma_ops that may prepare
- * memory in a vm for migration. migration functions may perform
- * the migration for vmas that do not have an underlying page struct.
- */
-int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
-	const nodemask_t *from, unsigned long flags)
-{
-	struct vm_area_struct *vma;
-	int err = 0;
-
-	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
-		if (vma->vm_ops && vma->vm_ops->migrate) {
-			err = vma->vm_ops->migrate(vma, to, from, flags);
-			if (err)
-				break;
-		}
-	}
-	return err;
-}
-
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * Returns true if this is a safe migration target node for misplaced NUMA