[PATCH] ufs: ufs_trunc_indirect: infinite cycle
Currently, UFS write support has two sets of problems: working with files and working with directories. This series of patches addresses the first. This patch is similar to http://lkml.org/lkml/2006/1/17/61 and complements it. The situation is the same: in ufs_trunc_(not direct) we read a block and check whether its count of links equals one; if so we finish the cycle, otherwise we continue. Because the "count of links" is always >= 2, this loop never terminates and hangs the kernel.

Signed-off-by: Evgeniy Dushistov <dushistov@mail.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 2061df0f89
parent a9adb8dbcd
committed by Linus Torvalds
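To illustrate the failure mode described in the commit message, here is a minimal, self-contained sketch (not kernel code; the names buffer, max_refcount and truncate_indirect are invented for this example). The old logic frees the indirect block only when the buffer's reference count is 1 and otherwise asks the caller to retry; because the count in this path never drops below 2, the retry flag is set on every pass and the truncate loop spins forever. Removing the check, as the patch below does, lets the truncate finish.

#include <stdio.h>

struct buffer {
	int b_count;	/* reference count; always >= 2 in this path */
};

/* Stand-in for ubh_max_bcount(): report the buffer's reference count. */
static int max_refcount(const struct buffer *b)
{
	return b->b_count;
}

/* Returns nonzero ("retry") when the caller should run the truncate loop again. */
static int truncate_indirect(struct buffer *ind, int buggy)
{
	int retry = 0;

	if (buggy) {
		/* Old logic: only free the block once the count drops to 1.
		 * The count never drops below 2 here, so a retry is requested
		 * on every pass and the caller loops forever. */
		if (max_refcount(ind) != 1)
			retry = 1;
		/* else: free the block */
	}
	/* Fixed logic (buggy == 0): free the block unconditionally, never retry. */
	return retry;
}

int main(void)
{
	struct buffer ind = { .b_count = 2 };
	int passes = 0;

	/* Caller's pattern: keep truncating while a retry is requested. */
	while (truncate_indirect(&ind, 1)) {
		if (++passes > 5) {	/* cap the demo instead of hanging */
			printf("buggy variant: still retrying after %d passes\n", passes);
			break;
		}
	}

	if (!truncate_indirect(&ind, 0))
		printf("fixed variant: finished in one pass\n");
	return 0;
}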
@@ -238,10 +238,6 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
 		if (*ubh_get_addr32(ind_ubh,i))
 			break;
 	if (i >= uspi->s_apb) {
-		if (ubh_max_bcount(ind_ubh) != 1) {
-			retry = 1;
-		}
-		else {
 			tmp = fs32_to_cpu(sb, *p);
 			*p = 0;
 			inode->i_blocks -= uspi->s_nspb;
@@ -250,7 +246,6 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
 			ubh_bforget(ind_ubh);
 			ind_ubh = NULL;
 		}
-	}
 	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
 		ubh_ll_rw_block (SWRITE, 1, &ind_ubh);
 		ubh_wait_on_buffer (ind_ubh);
@@ -306,9 +301,6 @@ static int ufs_trunc_dindirect (struct inode *inode, unsigned offset, __fs32 *p)
 		if (*ubh_get_addr32 (dind_bh, i))
 			break;
 	if (i >= uspi->s_apb) {
-		if (ubh_max_bcount(dind_bh) != 1)
-			retry = 1;
-		else {
 			tmp = fs32_to_cpu(sb, *p);
 			*p = 0;
 			inode->i_blocks -= uspi->s_nspb;
@@ -317,7 +309,6 @@ static int ufs_trunc_dindirect (struct inode *inode, unsigned offset, __fs32 *p)
 			ubh_bforget(dind_bh);
 			dind_bh = NULL;
 		}
-	}
 	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
 		ubh_ll_rw_block (SWRITE, 1, &dind_bh);
 		ubh_wait_on_buffer (dind_bh);
@@ -370,9 +361,6 @@ static int ufs_trunc_tindirect (struct inode * inode)
 		if (*ubh_get_addr32 (tind_bh, i))
 			break;
 	if (i >= uspi->s_apb) {
-		if (ubh_max_bcount(tind_bh) != 1)
-			retry = 1;
-		else {
 			tmp = fs32_to_cpu(sb, *p);
 			*p = 0;
 			inode->i_blocks -= uspi->s_nspb;
@@ -381,7 +369,6 @@ static int ufs_trunc_tindirect (struct inode * inode)
 			ubh_bforget(tind_bh);
 			tind_bh = NULL;
 		}
-	}
 	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
 		ubh_ll_rw_block (SWRITE, 1, &tind_bh);
 		ubh_wait_on_buffer (tind_bh);

@@ -139,18 +139,6 @@ void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
 		wait_on_buffer (ubh->bh[i]);
 }
 
-unsigned ubh_max_bcount (struct ufs_buffer_head * ubh)
-{
-	unsigned i;
-	unsigned max = 0;
-	if (!ubh)
-		return 0;
-	for ( i = 0; i < ubh->count; i++ )
-		if ( atomic_read(&ubh->bh[i]->b_count) > max )
-			max = atomic_read(&ubh->bh[i]->b_count);
-	return max;
-}
-
 void ubh_bforget (struct ufs_buffer_head * ubh)
 {
 	unsigned i;

@@ -238,7 +238,6 @@ extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
 extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
 extern void ubh_ll_rw_block (int, unsigned, struct ufs_buffer_head **);
 extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
-extern unsigned ubh_max_bcount (struct ufs_buffer_head *);
 extern void ubh_bforget (struct ufs_buffer_head *);
 extern int ubh_buffer_dirty (struct ufs_buffer_head *);
 #define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)