NFS: Add a global LRU list for the ACCESS cache
...in order to allow the addition of a memory shrinker.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
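For context on where this is headed: the new nfs_access_lru_lock, nfs_access_lru_list and nfs_access_nr_entries give a future shrinker everything it needs to find and reap the least-recently-used ACCESS entries. The sketch below is not part of this commit; the callback name nfs_access_shrink() and its registration are assumptions used only to illustrate how the LRU added by this patch could be consumed.

/*
 * Hypothetical sketch, not part of this commit: a memory shrinker that
 * walks the global LRU added below.  Assumes the context of fs/nfs/dir.c
 * after this patch; nfs_access_shrink() is an invented name.
 */
static int nfs_access_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	LIST_HEAD(head);
	struct nfs_inode *nfsi;
	struct nfs_access_entry *cache;

	spin_lock(&nfs_access_lru_lock);
	list_for_each_entry(nfsi, &nfs_access_lru_list, access_cache_inode_lru) {
		struct inode *inode = &nfsi->vfs_inode;

		if (nr_to_scan-- == 0)
			break;
		spin_lock(&inode->i_lock);
		if (list_empty(&nfsi->access_cache_entry_lru)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* The oldest entry for this inode sits at the head of its LRU */
		cache = list_entry(nfsi->access_cache_entry_lru.next,
				   struct nfs_access_entry, lru);
		rb_erase(&cache->rb_node, &nfsi->access_cache);
		/* Park it on a private list so it can be freed after unlocking */
		list_move(&cache->lru, &head);
		spin_unlock(&inode->i_lock);
	}
	spin_unlock(&nfs_access_lru_lock);

	/* nfs_access_free_entry() also decrements nfs_access_nr_entries */
	while (!list_empty(&head)) {
		cache = list_entry(head.next, struct nfs_access_entry, lru);
		list_del(&cache->lru);
		nfs_access_free_entry(cache);
	}
	/* Tell the VM roughly how many entries remain */
	return atomic_long_read(&nfs_access_nr_entries);
}

A real implementation would also retire inodes whose cache has gone empty from nfs_access_lru_list (clearing NFS_INO_ACL_LRU_SET), and would be registered from the NFS module init path; those details belong to a follow-up patch rather than this one.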
fs/nfs/dir.c
@@ -1638,10 +1638,17 @@ out:
 	return error;
 }
 
+static DEFINE_SPINLOCK(nfs_access_lru_lock);
+static LIST_HEAD(nfs_access_lru_list);
+static atomic_long_t nfs_access_nr_entries;
+
 static void nfs_access_free_entry(struct nfs_access_entry *entry)
 {
 	put_rpccred(entry->cred);
 	kfree(entry);
+	smp_mb__before_atomic_dec();
+	atomic_long_dec(&nfs_access_nr_entries);
+	smp_mb__after_atomic_dec();
 }
 
 static void __nfs_access_zap_cache(struct inode *inode)
@@ -1655,6 +1662,7 @@ static void __nfs_access_zap_cache(struct inode *inode)
 	while ((n = rb_first(root_node)) != NULL) {
 		entry = rb_entry(n, struct nfs_access_entry, rb_node);
 		rb_erase(n, root_node);
+		list_del(&entry->lru);
 		n->rb_left = dispose;
 		dispose = n;
 	}
@@ -1671,6 +1679,13 @@ static void __nfs_access_zap_cache(struct inode *inode)
 
 void nfs_access_zap_cache(struct inode *inode)
 {
+	/* Remove from global LRU init */
+	if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_FLAGS(inode))) {
+		spin_lock(&nfs_access_lru_lock);
+		list_del_init(&NFS_I(inode)->access_cache_inode_lru);
+		spin_unlock(&nfs_access_lru_lock);
+	}
+
 	spin_lock(&inode->i_lock);
 	/* This will release the spinlock */
 	__nfs_access_zap_cache(inode);
@@ -1711,12 +1726,14 @@ int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, struct nfs
 	res->jiffies = cache->jiffies;
 	res->cred = cache->cred;
 	res->mask = cache->mask;
+	list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru);
 	err = 0;
 out:
 	spin_unlock(&inode->i_lock);
 	return err;
 out_stale:
 	rb_erase(&cache->rb_node, &nfsi->access_cache);
+	list_del(&cache->lru);
 	spin_unlock(&inode->i_lock);
 	nfs_access_free_entry(cache);
 	return -ENOENT;
@@ -1728,7 +1745,8 @@ out_zap:
 
 static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set)
 {
-	struct rb_root *root_node = &NFS_I(inode)->access_cache;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct rb_root *root_node = &nfsi->access_cache;
 	struct rb_node **p = &root_node->rb_node;
 	struct rb_node *parent = NULL;
 	struct nfs_access_entry *entry;
@@ -1747,10 +1765,13 @@ static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *
 	}
 	rb_link_node(&set->rb_node, parent, p);
 	rb_insert_color(&set->rb_node, root_node);
+	list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
 	spin_unlock(&inode->i_lock);
 	return;
 found:
 	rb_replace_node(parent, &set->rb_node, root_node);
+	list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
+	list_del(&entry->lru);
 	spin_unlock(&inode->i_lock);
 	nfs_access_free_entry(entry);
 }
@@ -1766,6 +1787,18 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
 	cache->mask = set->mask;
 
 	nfs_access_add_rbtree(inode, cache);
+
+	/* Update accounting */
+	smp_mb__before_atomic_inc();
+	atomic_long_inc(&nfs_access_nr_entries);
+	smp_mb__after_atomic_inc();
+
+	/* Add inode to global LRU list */
+	if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_FLAGS(inode))) {
+		spin_lock(&nfs_access_lru_lock);
+		list_add_tail(&NFS_I(inode)->access_cache_inode_lru, &nfs_access_lru_list);
+		spin_unlock(&nfs_access_lru_lock);
+	}
 }
 
 static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
fs/nfs/inode.c
@@ -1104,6 +1104,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 	INIT_LIST_HEAD(&nfsi->dirty);
 	INIT_LIST_HEAD(&nfsi->commit);
 	INIT_LIST_HEAD(&nfsi->open_files);
+	INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
+	INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
 	INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
 	atomic_set(&nfsi->data_updates, 0);
 	nfsi->ndirty = 0;
include/linux/nfs_fs.h
@@ -71,6 +71,7 @@
  */
 struct nfs_access_entry {
 	struct rb_node		rb_node;
+	struct list_head	lru;
 	unsigned long		jiffies;
 	struct rpc_cred *	cred;
 	int			mask;
@@ -148,6 +149,8 @@ struct nfs_inode {
 	atomic_t		data_updates;
 
 	struct rb_root		access_cache;
+	struct list_head	access_cache_entry_lru;
+	struct list_head	access_cache_inode_lru;
 #ifdef CONFIG_NFS_V3_ACL
 	struct posix_acl	*acl_access;
 	struct posix_acl	*acl_default;
@@ -201,6 +204,7 @@ struct nfs_inode {
 #define NFS_INO_REVALIDATING	(0)		/* revalidating attrs */
 #define NFS_INO_ADVISE_RDPLUS	(1)		/* advise readdirplus */
 #define NFS_INO_STALE		(2)		/* possible stale inode */
+#define NFS_INO_ACL_LRU_SET	(3)		/* Inode is on the LRU list */
 
 static inline struct nfs_inode *NFS_I(struct inode *inode)
 {