mm: clean up and kernelify shrinker registration
I can never remember what the function to register to receive VM pressure
is called.  I have to trace down from __alloc_pages() to find it.

It's called "set_shrinker()", and it needs Your Help.

1) Don't hide struct shrinker.  It contains no magic.
2) Don't allocate "struct shrinker".  It's not helpful.
3) Call them "register_shrinker" and "unregister_shrinker".
4) Call the function "shrink" not "shrinker".
5) Reduce the 17 lines of waffly comments to 13, but document it properly.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: David Chinner <dgc@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 8e1f936b73
parent 5ad333eb66
committed by Linus Torvalds
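Illustrative only, not part of the patch: under the new API each cache owns a plain struct shrinker and registers it, instead of having the VM allocate one behind set_shrinker().  A minimal sketch of the conversion pattern the hunks below apply to the dcache, dquot, icache and other caches (all "my_cache" names are made up; the old calls appear in comments):

    #include <linux/init.h>
    #include <linux/mm.h>   /* struct shrinker, register_shrinker(), DEFAULT_SEEKS */

    static int my_cache_objects;    /* illustrative count of cached objects */

    /* Scan up to 'nr_to_scan' least-recently-used entries; return how many
     * objects remain in the cache.  nr_to_scan == 0 is only a size query. */
    static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
    {
        if (nr_to_scan) {
            /* ... free up to nr_to_scan objects here ... */
        }
        return my_cache_objects;
    }

    /* The caller now allocates the struct shrinker itself, typically statically. */
    static struct shrinker my_cache_shrinker = {
        .shrink = my_cache_shrink,
        .seeks  = DEFAULT_SEEKS,
    };

    static int __init my_cache_init(void)
    {
        /* was: my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink); */
        register_shrinker(&my_cache_shrinker);
        return 0;
    }

    static void __exit my_cache_exit(void)
    {
        /* was: remove_shrinker(my_shrinker); */
        unregister_shrinker(&my_cache_shrinker);
    }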
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -883,6 +883,11 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
 	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker dcache_shrinker = {
+	.shrink = shrink_dcache_memory,
+	.seeks = DEFAULT_SEEKS,
+};
+
 /**
  * d_alloc	-	allocate a dcache entry
  * @parent: parent of entry to allocate
@@ -2115,7 +2120,7 @@ static void __init dcache_init(unsigned long mempages)
 	dentry_cache = KMEM_CACHE(dentry,
 		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
 
-	set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
+	register_shrinker(&dcache_shrinker);
 
 	/* Hash may have been set up in dcache_init_early */
 	if (!hashdist)
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -538,6 +538,11 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
 	return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker dqcache_shrinker = {
+	.shrink = shrink_dqcache_memory,
+	.seeks = DEFAULT_SEEKS,
+};
+
 /*
  * Put reference to dquot
  * NOTE: If you change this function please check whether dqput_blocks() works right...
@@ -1870,7 +1875,7 @@ static int __init dquot_init(void)
 	printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
 			nr_hash, order, (PAGE_SIZE << order));
 
-	set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);
+	register_shrinker(&dqcache_shrinker);
 
 	return 0;
 }
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -462,6 +462,11 @@ static int shrink_icache_memory(int nr, gfp_t gfp_mask)
 	return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker icache_shrinker = {
+	.shrink = shrink_icache_memory,
+	.seeks = DEFAULT_SEEKS,
+};
+
 static void __wait_on_freeing_inode(struct inode *inode);
 /*
  * Called with the inode lock held.
@@ -1385,7 +1390,7 @@ void __init inode_init(unsigned long mempages)
 					 SLAB_MEM_SPREAD),
 					 init_once,
 					 NULL);
-	set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+	register_shrinker(&icache_shrinker);
 
 	/* Hash may have been set up in inode_init_early */
 	if (!hashdist)
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -100,7 +100,6 @@ struct mb_cache {
 static LIST_HEAD(mb_cache_list);
 static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
-static struct shrinker *mb_shrinker;
 
 static inline int
 mb_cache_indexes(struct mb_cache *cache)
@@ -118,6 +117,10 @@ mb_cache_indexes(struct mb_cache *cache)
 
 static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
 
+static struct shrinker mb_cache_shrinker = {
+	.shrink = mb_cache_shrink_fn,
+	.seeks = DEFAULT_SEEKS,
+};
 
 static inline int
 __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
@@ -662,13 +665,13 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
 
 static int __init init_mbcache(void)
 {
-	mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
+	register_shrinker(&mb_cache_shrinker);
 	return 0;
 }
 
 static void __exit exit_mbcache(void)
 {
-	remove_shrinker(mb_shrinker);
+	unregister_shrinker(&mb_cache_shrinker);
 }
 
 module_init(init_mbcache)
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -300,7 +300,10 @@ static const struct super_operations nfs4_sops = {
 };
 #endif
 
-static struct shrinker *acl_shrinker;
+static struct shrinker acl_shrinker = {
+	.shrink = nfs_access_cache_shrinker,
+	.seeks = DEFAULT_SEEKS,
+};
 
 /*
  * Register the NFS filesystems
@@ -321,7 +324,7 @@ int __init register_nfs_fs(void)
 	if (ret < 0)
 		goto error_2;
 #endif
-	acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
+	register_shrinker(&acl_shrinker);
 	return 0;
 
 #ifdef CONFIG_NFS_V4
@@ -339,8 +342,7 @@ error_0:
  */
 void __exit unregister_nfs_fs(void)
 {
-	if (acl_shrinker != NULL)
-		remove_shrinker(acl_shrinker);
+	unregister_shrinker(&acl_shrinker);
 #ifdef CONFIG_NFS_V4
 	unregister_filesystem(&nfs4_fs_type);
 	nfs_unregister_sysctl();
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -35,10 +35,13 @@
 #include <linux/freezer.h>
 
 static kmem_zone_t *xfs_buf_zone;
-static struct shrinker *xfs_buf_shake;
 STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
+static struct shrinker xfs_buf_shake = {
+	.shrink = xfsbufd_wakeup,
+	.seeks = DEFAULT_SEEKS,
+};
 
 static struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
@@ -1832,14 +1835,9 @@ xfs_buf_init(void)
 	if (!xfsdatad_workqueue)
 		goto out_destroy_xfslogd_workqueue;
 
-	xfs_buf_shake = set_shrinker(DEFAULT_SEEKS, xfsbufd_wakeup);
-	if (!xfs_buf_shake)
-		goto out_destroy_xfsdatad_workqueue;
-
+	register_shrinker(&xfs_buf_shake);
 	return 0;
 
- out_destroy_xfsdatad_workqueue:
-	destroy_workqueue(xfsdatad_workqueue);
  out_destroy_xfslogd_workqueue:
 	destroy_workqueue(xfslogd_workqueue);
  out_free_buf_zone:
@@ -1854,7 +1852,7 @@ xfs_buf_init(void)
 void
 xfs_buf_terminate(void)
 {
-	remove_shrinker(xfs_buf_shake);
+	unregister_shrinker(&xfs_buf_shake);
 	destroy_workqueue(xfsdatad_workqueue);
 	destroy_workqueue(xfslogd_workqueue);
 	kmem_zone_destroy(xfs_buf_zone);
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -62,7 +62,6 @@ uint ndquot;
 
 kmem_zone_t	*qm_dqzone;
 kmem_zone_t	*qm_dqtrxzone;
-static struct shrinker *xfs_qm_shaker;
 
 static cred_t	xfs_zerocr;
 
@@ -78,6 +77,11 @@ STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
 STATIC int	xfs_qm_shake(int, gfp_t);
 
+static struct shrinker xfs_qm_shaker = {
+	.shrink = xfs_qm_shake,
+	.seeks = DEFAULT_SEEKS,
+};
+
 #ifdef DEBUG
 extern mutex_t	qcheck_lock;
 #endif
@@ -149,7 +153,7 @@ xfs_Gqm_init(void)
 	} else
 		xqm->qm_dqzone = qm_dqzone;
 
-	xfs_qm_shaker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);
+	register_shrinker(&xfs_qm_shaker);
 
 	/*
 	 * The t_dqinfo portion of transactions.
@@ -181,7 +185,7 @@ xfs_qm_destroy(
 
 	ASSERT(xqm != NULL);
 	ASSERT(xqm->qm_nrefs == 0);
-	remove_shrinker(xfs_qm_shaker);
+	unregister_shrinker(&xfs_qm_shaker);
 	hsize = xqm->qm_dqhashmask + 1;
 	for (i = 0; i < hsize; i++) {
 		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -810,27 +810,31 @@ extern unsigned long do_mremap(unsigned long addr,
 			       unsigned long flags, unsigned long new_addr);
 
 /*
- * Prototype to add a shrinker callback for ageable caches.
- *
- * These functions are passed a count `nr_to_scan' and a gfpmask.  They should
- * scan `nr_to_scan' objects, attempting to free them.
+ * A callback you can register to apply pressure to ageable caches.
  *
- * The callback must return the number of objects which remain in the cache.
+ * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
+ * look through the least-recently-used 'nr_to_scan' entries and
+ * attempt to free them up.  It should return the number of objects
+ * which remain in the cache.  If it returns -1, it means it cannot do
+ * any scanning at this time (eg. there is a risk of deadlock).
  *
- * The callback will be passed nr_to_scan == 0 when the VM is querying the
- * cache size, so a fastpath for that case is appropriate.
+ * The 'gfpmask' refers to the allocation we are currently trying to
+ * fulfil.
+ *
+ * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
+ * querying the cache size, so a fastpath for that case is appropriate.
 */
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
+struct shrinker {
+	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+	int seeks;	/* seeks to recreate an obj */
 
-/*
- * Add an aging callback.  The int is the number of 'seeks' it takes
- * to recreate one of the objects that these functions age.
- */
-
-#define DEFAULT_SEEKS 2
-struct shrinker;
-extern struct shrinker *set_shrinker(int, shrinker_t);
-extern void remove_shrinker(struct shrinker *shrinker);
+	/* These are for internal use */
+	struct list_head list;
+	long nr;	/* objs pending delete */
+};
+#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+extern void register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
 
 /*
  * Some shared mappigns will want the pages marked read-only
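To make the documented contract above concrete, a 'shrink' callback typically looks like the sketch below.  This is illustrative, not part of the patch: the __GFP_FS check mirrors what the VFS cache shrinkers do, and my_cache_prune()/my_cache_count() are hypothetical helpers.

    /* Honour the contract documented in the mm.h comment above. */
    static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
    {
        if (nr_to_scan) {
            /* This allocation context cannot recurse into the
             * filesystem, so tell the VM we cannot scan right now. */
            if (!(gfp_mask & __GFP_FS))
                return -1;
            my_cache_prune(nr_to_scan); /* drop least-recently-used entries */
        }
        /* nr_to_scan == 0 is just a cache-size query: keep it cheap. */
        return my_cache_count();
    }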
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,17 +70,6 @@ struct scan_control {
 	int order;
 };
 
-/*
- * The list of shrinker callbacks used by to apply pressure to
- * ageable caches.
- */
-struct shrinker {
-	shrinker_t		shrinker;
-	struct list_head	list;
-	int			seeks;	/* seeks to recreate an obj */
-	long			nr;	/* objs pending delete */
-};
-
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
 
 #ifdef ARCH_HAS_PREFETCH
@@ -123,34 +112,25 @@ static DECLARE_RWSEM(shrinker_rwsem);
 /*
  * Add a shrinker callback to be called from the vm
  */
-struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
+void register_shrinker(struct shrinker *shrinker)
 {
-	struct shrinker *shrinker;
-
-	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
-	if (shrinker) {
-	        shrinker->shrinker = theshrinker;
-	        shrinker->seeks = seeks;
-	        shrinker->nr = 0;
-	        down_write(&shrinker_rwsem);
-	        list_add_tail(&shrinker->list, &shrinker_list);
-	        up_write(&shrinker_rwsem);
-	}
-	return shrinker;
+	shrinker->nr = 0;
+	down_write(&shrinker_rwsem);
+	list_add_tail(&shrinker->list, &shrinker_list);
+	up_write(&shrinker_rwsem);
 }
-EXPORT_SYMBOL(set_shrinker);
+EXPORT_SYMBOL(register_shrinker);
 
 /*
  * Remove one
  */
-void remove_shrinker(struct shrinker *shrinker)
+void unregister_shrinker(struct shrinker *shrinker)
 {
 	down_write(&shrinker_rwsem);
 	list_del(&shrinker->list);
 	up_write(&shrinker_rwsem);
-	kfree(shrinker);
 }
-EXPORT_SYMBOL(remove_shrinker);
+EXPORT_SYMBOL(unregister_shrinker);
 
 #define SHRINK_BATCH 128
 /*
@@ -187,7 +167,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 	list_for_each_entry(shrinker, &shrinker_list, list) {
 		unsigned long long delta;
 		unsigned long total_scan;
-		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
+		unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
 
 		delta = (4 * scanned) / shrinker->seeks;
 		delta *= max_pass;
@@ -215,8 +195,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			int shrink_ret;
 			int nr_before;
 
-			nr_before = (*shrinker->shrinker)(0, gfp_mask);
-			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
+			nr_before = (*shrinker->shrink)(0, gfp_mask);
+			shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
 			if (shrink_ret == -1)
 				break;
 			if (shrink_ret < nr_before)
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -543,17 +543,18 @@ rpcauth_uptodatecred(struct rpc_task *task)
 		test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
 }
 
-
-static struct shrinker *rpc_cred_shrinker;
+static struct shrinker rpc_cred_shrinker = {
+	.shrink = rpcauth_cache_shrinker,
+	.seeks = DEFAULT_SEEKS,
+};
 
 void __init rpcauth_init_module(void)
 {
 	rpc_init_authunix();
-	rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker);
+	register_shrinker(&rpc_cred_shrinker);
 }
 
 void __exit rpcauth_remove_module(void)
 {
-	if (rpc_cred_shrinker != NULL)
-		remove_shrinker(rpc_cred_shrinker);
+	unregister_shrinker(&rpc_cred_shrinker);
 }