udf: remove some ugly macros

Remove these macros and open code them:

- UDF_SB_PARTMAPS
- UDF_SB_PARTTYPE
- UDF_SB_PARTROOT
- UDF_SB_PARTLEN
- UDF_SB_PARTVSN
- UDF_SB_PARTNUM
- UDF_SB_TYPESPAR
- UDF_SB_TYPEVIRT
- UDF_SB_PARTFUNC
- UDF_SB_PARTFLAGS
- UDF_SB_VOLIDENT
- UDF_SB_NUMPARTS
- UDF_SB_PARTITION
- UDF_SB_SESSION
- UDF_SB_ANCHOR
- UDF_SB_LASTBLOCK
- UDF_SB_LVIDBH
- UDF_SB_LVID
- UDF_SB_UMASK
- UDF_SB_GID
- UDF_SB_UID
- UDF_SB_RECORDTIME
- UDF_SB_SERIALNUM
- UDF_SB_UDFREV
- UDF_SB_FLAGS
- UDF_SB_VAT
- UDF_UPDATE_UDFREV
- UDF_SB_FREE

Convert the UDF_SB_LVIDIU macro to a udf_sb_lvidiu function.

Rename some struct udf_sb_info fields:

- s_volident to s_volume_ident
- s_lastblock to s_last_block
- s_lvidbh to s_lvid_bh
- s_recordtime to s_record_time
- s_serialnum to s_serial_number
- s_vat to s_vat_inode

Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
Cc: Ben Fennema <bfennema@falcon.csc.calpoly.edu>
Cc: Jan Kara <jack@suse.cz>
Acked-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
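The one macro that becomes a real function is UDF_SB_LVIDIU. Its new body lives in fs/udf/super.c, whose diff is suppressed further down, so the following is only a minimal sketch of what udf_sb_lvidiu() amounts to, reconstructed from the removed UDF_SB_LVIDIU macro shown in the fs/udf/udf_sb.h hunk; the exact implementation in the patch may differ in detail.

/* Sketch only: derived from the removed UDF_SB_LVIDIU macro; the real
 * definition is in fs/udf/super.c (diff suppressed below). */
struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
{
	struct logicalVolIntegrityDesc *lvid =
		(struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	__u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions);
	/* The implementation-use area follows two per-partition __u32
	 * tables (free space and size) inside the LVID. */
	__u32 offset = number_of_partitions * 2 * sizeof(uint32_t) / sizeof(uint8_t);

	return (struct logicalVolIntegrityDescImpUse *)&lvid->impUse[offset];
}

Callers such as udf_free_inode() and udf_new_inode() in the ialloc hunks below then hold a local lvidiu pointer instead of expanding the macro at every use.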
commit 6c79e987d6
parent 3a71fc5de5
Committed by: Linus Torvalds

fs/udf/balloc.c | 136
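Most of the patch is mechanical: each UDF_SB_* accessor macro is replaced by a direct dereference of struct udf_sb_info, usually through a local sbi or map pointer. As an illustration only (not part of the patch; partition_length_of is a hypothetical helper used just to show the before/after shape of the conversion):

/* Illustration only: how a call site changes under this patch. */
static inline uint32_t partition_length_of(struct super_block *sb,
					   uint16_t partition)
{
	/* Old style, via the removed macro:
	 *	return UDF_SB_PARTLEN(sb, partition);
	 * New style, open coded through the superblock info: */
	struct udf_sb_info *sbi = UDF_SB(sb);

	return sbi->s_partmaps[partition].s_partition_len;
}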
@@ -88,7 +88,7 @@ static int read_block_bitmap(struct super_block *sb,
 	kernel_lb_addr loc;
 
 	loc.logicalBlockNum = bitmap->s_extPosition;
-	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
+	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
 
 	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
 	if (!bh) {
@@ -155,10 +155,10 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
 
 	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
-	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
+	    (bloc.logicalBlockNum + count) > sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
 		udf_debug("%d < %d || %d + %d > %d\n",
 			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
-			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
+			  sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len);
 		goto error_return;
 	}
 
@@ -188,9 +188,10 @@ do_more:
 	} else {
 		if (inode)
 			DQUOT_FREE_BLOCK(inode, 1);
-		if (UDF_SB_LVIDBH(sb)) {
-			UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
-				cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
+		if (sbi->s_lvid_bh) {
+			struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+			lvid->freeSpaceTable[sbi->s_partition] =
+				cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + 1);
 		}
 	}
 }
@@ -202,8 +203,8 @@ do_more:
 	}
 error_return:
 	sb->s_dirt = 1;
-	if (UDF_SB_LVIDBH(sb))
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (sbi->s_lvid_bh)
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	mutex_unlock(&sbi->s_alloc_mutex);
 	return;
 }
@@ -219,16 +220,18 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
 	int bit, block, block_group, group_start;
 	int nr_groups, bitmap_nr;
 	struct buffer_head *bh;
+	__u32 part_len;
 
 	mutex_lock(&sbi->s_alloc_mutex);
-	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
+	part_len = sbi->s_partmaps[partition].s_partition_len;
+	if (first_block < 0 || first_block >= part_len)
 		goto out;
 
-	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
-		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;
+	if (first_block + block_count > part_len)
+		block_count = part_len - first_block;
 
 repeat:
-	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
+	nr_groups = (sbi->s_partmaps[partition].s_partition_len +
 		     (sizeof(struct spaceBitmapDesc) << 3) +
 		     (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
 	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
@@ -261,10 +264,11 @@ repeat:
 	if (block_count > 0)
 		goto repeat;
 out:
-	if (UDF_SB_LVIDBH(sb)) {
-		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+		lvid->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	}
 	sb->s_dirt = 1;
 	mutex_unlock(&sbi->s_alloc_mutex);
@@ -287,7 +291,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
 	mutex_lock(&sbi->s_alloc_mutex);
 
 repeat:
-	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
+	if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
 		goal = 0;
 
 	nr_groups = bitmap->s_nr_groups;
@@ -389,10 +393,11 @@ got_block:
 
 	mark_buffer_dirty(bh);
 
-	if (UDF_SB_LVIDBH(sb)) {
-		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+		lvid->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	}
 	sb->s_dirt = 1;
 	mutex_unlock(&sbi->s_alloc_mutex);
@@ -421,10 +426,10 @@ static void udf_table_free_blocks(struct super_block *sb,
 
 	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
-	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
+	    (bloc.logicalBlockNum + count) > sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
 		udf_debug("%d < %d || %d + %d > %d\n",
 			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
-			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
+			  sbi->s_partmaps[bloc.partitionReferenceNum]->s_partition_len);
 		goto error_return;
 	}
 
@@ -432,10 +437,11 @@ static void udf_table_free_blocks(struct super_block *sb,
 	   but.. oh well */
 	if (inode)
 		DQUOT_FREE_BLOCK(inode, count);
-	if (UDF_SB_LVIDBH(sb)) {
-		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+		lvid->freeSpaceTable[sbi->s_partition] =
+			cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + count);
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	}
 
 	start = bloc.logicalBlockNum + offset;
@@ -559,7 +565,7 @@ static void udf_table_free_blocks(struct super_block *sb,
 			}
 			epos.offset = sizeof(struct allocExtDesc);
 		}
-		if (UDF_SB_UDFREV(sb) >= 0x0200)
+		if (sbi->s_udfrev >= 0x0200)
 			udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
 				    epos.block.logicalBlockNum, sizeof(tag));
 		else
@@ -627,7 +633,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
 	struct extent_position epos;
 	int8_t etype = -1;
 
-	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
+	if (first_block < 0 || first_block >= sbi->s_partmaps[partition].s_partition_len)
 		return 0;
 
 	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
@@ -670,10 +676,11 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
 
 	brelse(epos.bh);
 
-	if (alloc_count && UDF_SB_LVIDBH(sb)) {
-		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (alloc_count && sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+		lvid->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
+		mark_buffer_dirty(sbi->s_lvid_bh);
 		sb->s_dirt = 1;
 	}
 	mutex_unlock(&sbi->s_alloc_mutex);
@@ -703,7 +710,7 @@ static int udf_table_new_block(struct super_block *sb,
 		return newblock;
 
 	mutex_lock(&sbi->s_alloc_mutex);
-	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
+	if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
 		goal = 0;
 
 	/* We search for the closest matching block to goal. If we find a exact hit,
@@ -771,10 +778,11 @@ static int udf_table_new_block(struct super_block *sb,
 	udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
 	brelse(goal_epos.bh);
 
-	if (UDF_SB_LVIDBH(sb)) {
-		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+		lvid->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	}
 
 	sb->s_dirt = 1;
@@ -789,22 +797,23 @@ inline void udf_free_blocks(struct super_block *sb,
 			    uint32_t count)
 {
 	uint16_t partition = bloc.partitionReferenceNum;
+	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
 
-	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
+	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		return udf_bitmap_free_blocks(sb, inode,
-					      UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+					      map->s_uspace.s_bitmap,
 					      bloc, offset, count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_free_blocks(sb, inode,
-					     UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+					     map->s_uspace.s_table,
 					     bloc, offset, count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_free_blocks(sb, inode,
-					      UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+					      map->s_fspace.s_bitmap,
 					      bloc, offset, count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_free_blocks(sb, inode,
-					     UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+					     map->s_fspace.s_table,
 					     bloc, offset, count);
 	} else {
 		return;
@@ -816,21 +825,23 @@ inline int udf_prealloc_blocks(struct super_block *sb,
 			       uint16_t partition, uint32_t first_block,
 			       uint32_t block_count)
 {
-	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
+	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
+
+	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		return udf_bitmap_prealloc_blocks(sb, inode,
-						  UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+						  map->s_uspace.s_bitmap,
 						  partition, first_block, block_count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_prealloc_blocks(sb, inode,
-						 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+						 map->s_uspace.s_table,
 						 partition, first_block, block_count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_prealloc_blocks(sb, inode,
-						  UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+						  map->s_fspace.s_bitmap,
 						  partition, first_block, block_count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_prealloc_blocks(sb, inode,
-						 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+						 map->s_fspace.s_table,
 						 partition, first_block, block_count);
 	} else {
 		return 0;
@@ -842,23 +853,24 @@ inline int udf_new_block(struct super_block *sb,
 			 uint16_t partition, uint32_t goal, int *err)
 {
 	int ret;
+	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
 
-	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
+	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		ret = udf_bitmap_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+					   map->s_uspace.s_bitmap,
 					   partition, goal, err);
 		return ret;
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+					   map->s_uspace.s_table,
 					   partition, goal, err);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_new_block(sb, inode,
-					    UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+					    map->s_fspace.s_bitmap,
 					    partition, goal, err);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+					   map->s_fspace.s_table,
 					   partition, goal, err);
 	} else {
 		*err = -EIO;
@@ -192,7 +192,7 @@ int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
 	switch (cmd) {
 	case UDF_GETVOLIDENT:
 		return copy_to_user((char __user *)arg,
-				    UDF_SB_VOLIDENT(inode->i_sb), 32) ? -EFAULT : 0;
+				    UDF_SB(inode->i_sb)->s_volume_ident, 32) ? -EFAULT : 0;
 	case UDF_RELOCATE_BLOCKS:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
@@ -43,15 +43,17 @@ void udf_free_inode(struct inode *inode)
 	clear_inode(inode);
 
 	mutex_lock(&sbi->s_alloc_mutex);
-	if (sbi->s_lvidbh) {
+	if (sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDescImpUse *lvidiu =
+							udf_sb_lvidiu(sbi);
 		if (S_ISDIR(inode->i_mode))
-			UDF_SB_LVIDIU(sb)->numDirs =
-				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) - 1);
+			lvidiu->numDirs =
+				cpu_to_le32(le32_to_cpu(lvidiu->numDirs) - 1);
 		else
-			UDF_SB_LVIDIU(sb)->numFiles =
-				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) - 1);
+			lvidiu->numFiles =
+				cpu_to_le32(le32_to_cpu(lvidiu->numFiles) - 1);
 
-		mark_buffer_dirty(sbi->s_lvidbh);
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	}
 	mutex_unlock(&sbi->s_alloc_mutex);
 
@@ -88,21 +90,23 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
 	}
 
 	mutex_lock(&sbi->s_alloc_mutex);
-	if (UDF_SB_LVIDBH(sb)) {
+	if (sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+		struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sbi);
 		struct logicalVolHeaderDesc *lvhd;
 		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(sb)->logicalVolContentsUse);
+		lvhd = (struct logicalVolHeaderDesc *)(lvid->logicalVolContentsUse);
 		if (S_ISDIR(mode))
-			UDF_SB_LVIDIU(sb)->numDirs =
-				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) + 1);
+			lvidiu->numDirs =
+				cpu_to_le32(le32_to_cpu(lvidiu->numDirs) + 1);
 		else
-			UDF_SB_LVIDIU(sb)->numFiles =
-				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 1);
+			lvidiu->numFiles =
+				cpu_to_le32(le32_to_cpu(lvidiu->numFiles) + 1);
 		UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID);
 		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
 			uniqueID += 16;
 		lvhd->uniqueID = cpu_to_le64(uniqueID);
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	}
 	inode->i_mode = mode;
 	inode->i_uid = current->fsuid;
@@ -123,7 +127,8 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
 	UDF_I_USE(inode) = 0;
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
 		UDF_I_EFE(inode) = 1;
-		UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
+		if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev)
+			sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE;
 		UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
 	} else {
 		UDF_I_EFE(inode) = 0;
@@ -1081,6 +1081,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 	time_t convtime;
 	long convtime_usec;
 	int offset;
+	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
 
 	fe = (struct fileEntry *)bh->b_data;
 	efe = (struct extendedFileEntry *)bh->b_data;
@@ -1160,7 +1161,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 		inode->i_atime.tv_sec = convtime;
 		inode->i_atime.tv_nsec = convtime_usec * 1000;
 	} else {
-		inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
+		inode->i_atime = sbi->s_record_time;
 	}
 
 	if (udf_stamp_to_time(&convtime, &convtime_usec,
@@ -1168,7 +1169,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 		inode->i_mtime.tv_sec = convtime;
 		inode->i_mtime.tv_nsec = convtime_usec * 1000;
 	} else {
-		inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
+		inode->i_mtime = sbi->s_record_time;
 	}
 
 	if (udf_stamp_to_time(&convtime, &convtime_usec,
@@ -1176,7 +1177,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 		inode->i_ctime.tv_sec = convtime;
 		inode->i_ctime.tv_nsec = convtime_usec * 1000;
 	} else {
-		inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
+		inode->i_ctime = sbi->s_record_time;
 	}
 
 	UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
@@ -1192,7 +1193,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 		inode->i_atime.tv_sec = convtime;
 		inode->i_atime.tv_nsec = convtime_usec * 1000;
 	} else {
-		inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
+		inode->i_atime = sbi->s_record_time;
 	}
 
 	if (udf_stamp_to_time(&convtime, &convtime_usec,
@@ -1200,7 +1201,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 		inode->i_mtime.tv_sec = convtime;
 		inode->i_mtime.tv_nsec = convtime_usec * 1000;
 	} else {
-		inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
+		inode->i_mtime = sbi->s_record_time;
 	}
 
 	if (udf_stamp_to_time(&convtime, &convtime_usec,
@@ -1208,7 +1209,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 		UDF_I_CRTIME(inode).tv_sec = convtime;
 		UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
 	} else {
-		UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
+		UDF_I_CRTIME(inode) = sbi->s_record_time;
 	}
 
 	if (udf_stamp_to_time(&convtime, &convtime_usec,
@@ -1216,7 +1217,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 		inode->i_ctime.tv_sec = convtime;
 		inode->i_ctime.tv_nsec = convtime_usec * 1000;
 	} else {
-		inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
+		inode->i_ctime = sbi->s_record_time;
 	}
 
 	UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
@@ -1353,6 +1354,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
 	int i;
 	kernel_timestamp cpu_time;
 	int err = 0;
+	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
 
 	bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
 	if (!bh) {
@@ -1537,11 +1539,11 @@ static int udf_update_inode(struct inode *inode, int do_sync)
 			ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
 
 	fe->icbTag.flags = cpu_to_le16(icbflags);
-	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
+	if (sbi->s_udfrev >= 0x0200)
 		fe->descTag.descVersion = cpu_to_le16(3);
 	else
 		fe->descTag.descVersion = cpu_to_le16(2);
-	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
+	fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
 	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
 	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
 	fe->descTag.descCRCLength = cpu_to_le16(crclen);
@@ -1585,7 +1587,7 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
 	if (is_bad_inode(inode))
 		goto out_iput;
 
-	if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
+	if (ino.logicalBlockNum >= UDF_SB(sb)->s_partmaps[ino.partitionReferenceNum].s_partition_len) {
 		udf_debug("block=%d, partition=%d out of range\n",
 			  ino.logicalBlockNum, ino.partitionReferenceNum);
 		make_bad_inode(inode);
@@ -1667,7 +1669,7 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
 				mark_inode_dirty(inode);
 			}
 		}
-		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
+		if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200)
 			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
 				    epos->block.logicalBlockNum, sizeof(tag));
 		else
@@ -1690,7 +1692,7 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
 		}
 		if (epos->bh) {
 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
-			    UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
 				udf_update_tag(epos->bh->b_data, loffset);
 			else
 				udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
@@ -1711,7 +1713,7 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
 		aed = (struct allocExtDesc *)epos->bh->b_data;
 		aed->lengthAllocDescs =
 			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
-		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
 			udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize));
 		else
 			udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
@@ -1754,7 +1756,7 @@ int8_t udf_write_aext(struct inode * inode, struct extent_position * epos,
 
 	if (epos->bh) {
 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
-		    UDF_SB_UDFREV(inode->i_sb) >= 0x0201) {
+		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
 			struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data;
 			udf_update_tag(epos->bh->b_data,
 				       le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
@@ -1907,7 +1909,7 @@ int8_t udf_delete_aext(struct inode * inode, struct extent_position epos,
 			aed->lengthAllocDescs =
 				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2 * adsize));
 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
-			    UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
 				udf_update_tag(oepos.bh->b_data, oepos.offset - (2 * adsize));
 			else
 				udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
@@ -1923,7 +1925,7 @@ int8_t udf_delete_aext(struct inode * inode, struct extent_position epos,
 			aed->lengthAllocDescs =
 				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
-			    UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
 				udf_update_tag(oepos.bh->b_data, epos.offset - adsize);
 			else
 				udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
@@ -81,14 +81,16 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
 				return NULL;
 			}
 		} else {
+			struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
+
 			size -= sizeof(struct extendedAttrHeaderDesc);
 			UDF_I_LENEATTR(inode) += sizeof(struct extendedAttrHeaderDesc);
 			eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD);
-			if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
+			if (sbi->s_udfrev >= 0x0200)
 				eahd->descTag.descVersion = cpu_to_le16(3);
 			else
 				eahd->descTag.descVersion = cpu_to_le16(2);
-			eahd->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
+			eahd->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
 			eahd->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
 			eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF);
 			eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF);
@@ -192,15 +194,16 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
 	struct buffer_head *bh = NULL;
 	register uint8_t checksum;
 	register int i;
+	struct udf_sb_info *sbi = UDF_SB(sb);
 
 	/* Read the block */
 	if (block == 0xFFFFFFFF)
 		return NULL;
 
-	bh = udf_tread(sb, block + UDF_SB_SESSION(sb));
+	bh = udf_tread(sb, block + sbi->s_session);
 	if (!bh) {
 		udf_debug("block=%d, location=%d: read failed\n",
-			  block + UDF_SB_SESSION(sb), location);
+			  block + sbi->s_session, location);
 		return NULL;
 	}
 
@@ -210,7 +213,7 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
 
 	if (location != le32_to_cpu(tag_p->tagLocation)) {
 		udf_debug("location mismatch block %u, tag %u != %u\n",
-			  block + UDF_SB_SESSION(sb), le32_to_cpu(tag_p->tagLocation), location);
+			  block + sbi->s_session, le32_to_cpu(tag_p->tagLocation), location);
 		goto error_out;
 	}
 
@@ -240,7 +243,7 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
 			return bh;
 	}
 	udf_debug("Crc failure block %d: crc = %d, crclen = %d\n",
-		  block + UDF_SB_SESSION(sb), le16_to_cpu(tag_p->descCRC),
+		  block + sbi->s_session, le16_to_cpu(tag_p->descCRC),
		  le16_to_cpu(tag_p->descCRCLength));
 
 error_out:
@@ -325,7 +325,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
 					   struct udf_fileident_bh *fibh,
 					   struct fileIdentDesc *cfi, int *err)
 {
-	struct super_block *sb;
+	struct super_block *sb = dir->i_sb;
 	struct fileIdentDesc *fi = NULL;
 	char name[UDF_NAME_LEN], fname[UDF_NAME_LEN];
 	int namelen;
@@ -342,8 +342,6 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
 	sector_t offset;
 	struct extent_position epos = {};
 
-	sb = dir->i_sb;
-
 	if (dentry) {
 		if (!dentry->d_name.len) {
 			*err = -EINVAL;
@@ -535,7 +533,7 @@ add:
 	}
 
 	memset(cfi, 0, sizeof(struct fileIdentDesc));
-	if (UDF_SB_UDFREV(sb) >= 0x0200)
+	if (UDF_SB(sb)->s_udfrev >= 0x0200)
 		udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block, sizeof(tag));
 	else
 		udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block, sizeof(tag));
@@ -901,6 +899,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 	int block;
 	char name[UDF_NAME_LEN];
 	int namelen;
+	struct buffer_head *bh;
 
 	lock_kernel();
 	if (!(inode = udf_new_inode(dir, S_IFLNK, &err)))
@@ -1014,17 +1013,19 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 		goto out_no_entry;
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
-	if (UDF_SB_LVIDBH(inode->i_sb)) {
+	bh = UDF_SB(inode->i_sb)->s_lvid_bh;
+	if (bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
 		struct logicalVolHeaderDesc *lvhd;
 		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse);
+		lvhd = (struct logicalVolHeaderDesc *)(lvid->logicalVolContentsUse);
 		uniqueID = le64_to_cpu(lvhd->uniqueID);
 		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
 			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
 		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
 			uniqueID += 16;
 		lvhd->uniqueID = cpu_to_le64(uniqueID);
-		mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb));
+		mark_buffer_dirty(bh);
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
@@ -1053,6 +1054,7 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
 	struct udf_fileident_bh fibh;
 	struct fileIdentDesc cfi, *fi;
 	int err;
+	struct buffer_head *bh;
 
 	lock_kernel();
 	if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
@@ -1066,17 +1068,19 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
-	if (UDF_SB_LVIDBH(inode->i_sb)) {
+	bh = UDF_SB(inode->i_sb)->s_lvid_bh;
+	if (bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
 		struct logicalVolHeaderDesc *lvhd;
 		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse);
+		lvhd = (struct logicalVolHeaderDesc *)(lvid->logicalVolContentsUse);
 		uniqueID = le64_to_cpu(lvhd->uniqueID);
 		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
 			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
 		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
 			uniqueID += 16;
 		lvhd->uniqueID = cpu_to_le64(uniqueID);
-		mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb));
+		mark_buffer_dirty(bh);
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
@@ -31,15 +31,18 @@
 inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
 			       uint16_t partition, uint32_t offset)
 {
-	if (partition >= UDF_SB_NUMPARTS(sb)) {
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct udf_part_map *map;
+	if (partition >= sbi->s_partitions) {
 		udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
 			  block, partition, offset);
 		return 0xFFFFFFFF;
 	}
-	if (UDF_SB_PARTFUNC(sb, partition))
-		return UDF_SB_PARTFUNC(sb, partition)(sb, block, partition, offset);
+	map = &sbi->s_partmaps[partition];
+	if (map->s_partition_func)
+		return map->s_partition_func(sb, block, partition, offset);
 	else
-		return UDF_SB_PARTROOT(sb, partition) + block + offset;
+		return map->s_partition_root + block + offset;
 }
 
 uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
@@ -49,12 +52,15 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
 	uint32_t newblock;
 	uint32_t index;
 	uint32_t loc;
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct udf_part_map *map;
 
-	index = (sb->s_blocksize - UDF_SB_TYPEVIRT(sb,partition).s_start_offset) / sizeof(uint32_t);
+	map = &sbi->s_partmaps[partition];
+	index = (sb->s_blocksize - map->s_type_specific.s_virtual.s_start_offset) / sizeof(uint32_t);
 
-	if (block > UDF_SB_TYPEVIRT(sb,partition).s_num_entries) {
+	if (block > map->s_type_specific.s_virtual.s_num_entries) {
 		udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
-			  block, UDF_SB_TYPEVIRT(sb,partition).s_num_entries);
+			  block, map->s_type_specific.s_virtual.s_num_entries);
 		return 0xFFFFFFFF;
 	}
 
@@ -64,10 +70,10 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
 		index = block % (sb->s_blocksize / sizeof(uint32_t));
 	} else {
 		newblock = 0;
-		index = UDF_SB_TYPEVIRT(sb,partition).s_start_offset / sizeof(uint32_t) + block;
+		index = map->s_type_specific.s_virtual.s_start_offset / sizeof(uint32_t) + block;
 	}
 
-	loc = udf_block_map(UDF_SB_VAT(sb), newblock);
+	loc = udf_block_map(sbi->s_vat_inode, newblock);
 
 	if (!(bh = sb_bread(sb, loc))) {
 		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
@@ -79,13 +85,13 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
 
 	brelse(bh);
 
-	if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition) {
+	if (UDF_I_LOCATION(sbi->s_vat_inode).partitionReferenceNum == partition) {
 		udf_debug("recursive call to udf_get_pblock!\n");
 		return 0xFFFFFFFF;
 	}
 
 	return udf_get_pblock(sb, loc,
-			      UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum,
+			      UDF_I_LOCATION(sbi->s_vat_inode).partitionReferenceNum,
 			      offset);
 }
 
@@ -95,16 +101,21 @@ inline uint32_t udf_get_pblock_virt20(struct super_block * sb, uint32_t block,
 	return udf_get_pblock_virt15(sb, block, partition, offset);
 }
 
-uint32_t udf_get_pblock_spar15(struct super_block * sb, uint32_t block,
+uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
 			       uint16_t partition, uint32_t offset)
 {
 	int i;
 	struct sparingTable *st = NULL;
-	uint32_t packet = (block + offset) & ~(UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1);
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct udf_part_map *map;
+	uint32_t packet;
+
+	map = &sbi->s_partmaps[partition];
+	packet = (block + offset) & ~(map->s_type_specific.s_sparing.s_packet_len - 1);
 
 	for (i = 0; i < 4; i++) {
-		if (UDF_SB_TYPESPAR(sb,partition).s_spar_map[i] != NULL) {
-			st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,partition).s_spar_map[i]->b_data;
+		if (map->s_type_specific.s_sparing.s_spar_map[i] != NULL) {
+			st = (struct sparingTable *)map->s_type_specific.s_sparing.s_spar_map[i]->b_data;
 			break;
 		}
 	}
@@ -115,14 +126,14 @@ uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
 				break;
 			} else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet) {
 				return le32_to_cpu(st->mapEntry[i].mappedLocation) +
-					((block + offset) & (UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1));
+					((block + offset) & (map->s_type_specific.s_sparing.s_packet_len - 1));
 			} else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet) {
 				break;
 			}
 		}
 	}
 
-	return UDF_SB_PARTROOT(sb,partition) + block + offset;
+	return map->s_partition_root + block + offset;
 }
 
 int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
@@ -132,15 +143,17 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
 	struct sparingEntry mapEntry;
 	uint32_t packet;
 	int i, j, k, l;
+	struct udf_sb_info *sbi = UDF_SB(sb);
 
-	for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
-		if (old_block > UDF_SB_PARTROOT(sb,i) &&
-		    old_block < UDF_SB_PARTROOT(sb,i) + UDF_SB_PARTLEN(sb,i)) {
-			sdata = &UDF_SB_TYPESPAR(sb,i);
-			packet = (old_block - UDF_SB_PARTROOT(sb,i)) & ~(sdata->s_packet_len - 1);
+	for (i = 0; i < sbi->s_partitions; i++) {
+		struct udf_part_map *map = &sbi->s_partmaps[i];
+		if (old_block > map->s_partition_root &&
+		    old_block < map->s_partition_root + map->s_partition_len) {
+			sdata = &map->s_type_specific.s_sparing;
+			packet = (old_block - map->s_partition_root) & ~(sdata->s_packet_len - 1);
 
 			for (j = 0; j < 4; j++) {
-				if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL) {
+				if (map->s_type_specific.s_sparing.s_spar_map[j] != NULL) {
 					st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
 					break;
 				}
@@ -160,11 +173,11 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
 						}
 					}
 					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
-						((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
+						((old_block - map->s_partition_root) & (sdata->s_packet_len - 1));
 					return 0;
 				} else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet) {
 					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
-						((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
+						((old_block - map->s_partition_root) & (sdata->s_packet_len - 1));
 					return 0;
 				} else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet) {
 					break;
@@ -185,7 +198,7 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
 					}
 				}
 				*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
-					((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
+					((old_block - map->s_partition_root) & (sdata->s_packet_len - 1));
 				return 0;
 			}
 		}
@@ -194,7 +207,7 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
 		} /* if old_block */
 	}
 
-	if (i == UDF_SB_NUMPARTS(sb)) {
+	if (i == sbi->s_partitions) {
 		/* outside of partitions */
 		/* for now, fail =) */
 		return 1;
fs/udf/super.c | 542
(File diff suppressed because it is too large)
@@ -163,7 +163,7 @@ void udf_discard_prealloc(struct inode *inode)
 				cpu_to_le32(epos.offset -
 					    sizeof(struct allocExtDesc));
 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
-			    UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
 				udf_update_tag(epos.bh->b_data, epos.offset);
 			else
 				udf_update_tag(epos.bh->b_data,
@@ -184,6 +184,7 @@ void udf_truncate_extents(struct inode *inode)
 	uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc;
 	int8_t etype;
 	struct super_block *sb = inode->i_sb;
+	struct udf_sb_info *sbi = UDF_SB(sb);
 	sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset;
 	loff_t byte_offset;
 	int adsize;
@@ -232,7 +233,7 @@ void udf_truncate_extents(struct inode *inode)
 					aed->lengthAllocDescs =
 						cpu_to_le32(lenalloc);
 					if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) ||
-					    UDF_SB_UDFREV(sb) >= 0x0201)
+					    sbi->s_udfrev >= 0x0201)
 						udf_update_tag(epos.bh->b_data,
 							       lenalloc +
 							       sizeof(struct allocExtDesc));
@@ -271,7 +272,7 @@ void udf_truncate_extents(struct inode *inode)
 				(struct allocExtDesc *)(epos.bh->b_data);
 			aed->lengthAllocDescs = cpu_to_le32(lenalloc);
 			if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) ||
-			    UDF_SB_UDFREV(sb) >= 0x0201)
+			    sbi->s_udfrev >= 0x0201)
 				udf_update_tag(epos.bh->b_data,
 					       lenalloc + sizeof(struct allocExtDesc));
 			else
@@ -41,40 +41,36 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
 	return sb->s_fs_info;
 }
 
-#define UDF_SB_FREE(X)\
-{\
-	if (UDF_SB(X)) {\
-		kfree(UDF_SB_PARTMAPS(X));\
-		UDF_SB_PARTMAPS(X) = NULL;\
-	}\
-}
+struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi);
 
 #define UDF_SB_ALLOC_PARTMAPS(X,Y)\
 {\
-	UDF_SB_PARTMAPS(X) = kmalloc(sizeof(struct udf_part_map) * Y, GFP_KERNEL);\
-	if (UDF_SB_PARTMAPS(X) != NULL) {\
-		UDF_SB_NUMPARTS(X) = Y;\
-		memset(UDF_SB_PARTMAPS(X), 0x00, sizeof(struct udf_part_map) * Y);\
+	struct udf_sb_info *sbi = UDF_SB(X);\
+	sbi->s_partmaps = kmalloc(sizeof(struct udf_part_map) * Y, GFP_KERNEL);\
+	if (sbi->s_partmaps != NULL) {\
+		sbi->s_partitions = Y;\
+		memset(sbi->s_partmaps, 0x00, sizeof(struct udf_part_map) * Y);\
 	} else {\
-		UDF_SB_NUMPARTS(X) = 0;\
+		sbi->s_partitions = 0;\
 		udf_error(X, __FUNCTION__, "Unable to allocate space for %d partition maps", Y);\
 	}\
 }
 
 #define UDF_SB_ALLOC_BITMAP(X,Y,Z)\
 {\
-	int nr_groups = ((UDF_SB_PARTLEN((X),(Y)) + (sizeof(struct spaceBitmapDesc) << 3) +\
+	struct udf_sb_info *sbi = UDF_SB(X);\
+	int nr_groups = ((sbi->s_partmaps[(Y)].s_partition_len + (sizeof(struct spaceBitmapDesc) << 3) +\
 			  ((X)->s_blocksize * 8) - 1) / ((X)->s_blocksize * 8));\
 	int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups);\
 	if (size <= PAGE_SIZE)\
-		UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = kmalloc(size, GFP_KERNEL);\
+		sbi->s_partmaps[(Y)].Z.s_bitmap = kmalloc(size, GFP_KERNEL);\
 	else\
-		UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = vmalloc(size);\
-	if (UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap != NULL) {\
-		memset(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap, 0x00, size);\
-		UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_block_bitmap =\
-			(struct buffer_head **)(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap + 1);\
-		UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_nr_groups = nr_groups;\
+		sbi->s_partmaps[(Y)].Z.s_bitmap = vmalloc(size);\
+	if (sbi->s_partmaps[(Y)].Z.s_bitmap != NULL) {\
+		memset(sbi->s_partmaps[(Y)].Z.s_bitmap, 0x00, size);\
+		sbi->s_partmaps[(Y)].Z.s_bitmap->s_block_bitmap =\
+			(struct buffer_head **)(sbi->s_partmaps[(Y)].Z.s_bitmap + 1);\
+		sbi->s_partmaps[(Y)].Z.s_bitmap->s_nr_groups = nr_groups;\
 	} else {\
 		udf_error(X, __FUNCTION__, "Unable to allocate space for bitmap and %d buffer_head pointers", nr_groups);\
 	}\
@@ -90,47 +86,16 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
 		brelse(UDF_SB_BITMAP(X,Y,Z,i));\
 	}\
 	if (size <= PAGE_SIZE)\
-		kfree(UDF_SB_PARTMAPS(X)[Y].Z.s_bitmap);\
+		kfree(UDF_SB(X)->s_partmaps[Y].Z.s_bitmap);\
 	else\
-		vfree(UDF_SB_PARTMAPS(X)[Y].Z.s_bitmap);\
+		vfree(UDF_SB(X)->s_partmaps[Y].Z.s_bitmap);\
 }
 
 #define UDF_QUERY_FLAG(X,Y) ( UDF_SB(X)->s_flags & ( 1 << (Y) ) )
 #define UDF_SET_FLAG(X,Y) ( UDF_SB(X)->s_flags |= ( 1 << (Y) ) )
 #define UDF_CLEAR_FLAG(X,Y) ( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) )
 
-#define UDF_UPDATE_UDFREV(X,Y) ( ((Y) > UDF_SB_UDFREV(X)) ? UDF_SB_UDFREV(X) = (Y) : UDF_SB_UDFREV(X) )
-
-#define UDF_SB_PARTMAPS(X) ( UDF_SB(X)->s_partmaps )
-#define UDF_SB_PARTTYPE(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_type )
-#define UDF_SB_PARTROOT(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_root )
-#define UDF_SB_PARTLEN(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_len )
-#define UDF_SB_PARTVSN(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_volumeseqnum )
-#define UDF_SB_PARTNUM(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_num )
-#define UDF_SB_TYPESPAR(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_type_specific.s_sparing )
-#define UDF_SB_TYPEVIRT(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_type_specific.s_virtual )
-#define UDF_SB_PARTFUNC(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_func )
-#define UDF_SB_PARTFLAGS(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_flags )
-#define UDF_SB_BITMAP(X,Y,Z,I) ( UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_block_bitmap[I] )
-#define UDF_SB_BITMAP_NR_GROUPS(X,Y,Z) ( UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_nr_groups )
-
-#define UDF_SB_VOLIDENT(X) ( UDF_SB(X)->s_volident )
-#define UDF_SB_NUMPARTS(X) ( UDF_SB(X)->s_partitions )
-#define UDF_SB_PARTITION(X) ( UDF_SB(X)->s_partition )
-#define UDF_SB_SESSION(X) ( UDF_SB(X)->s_session )
-#define UDF_SB_ANCHOR(X) ( UDF_SB(X)->s_anchor )
-#define UDF_SB_LASTBLOCK(X) ( UDF_SB(X)->s_lastblock )
-#define UDF_SB_LVIDBH(X) ( UDF_SB(X)->s_lvidbh )
-#define UDF_SB_LVID(X) ( (struct logicalVolIntegrityDesc *)UDF_SB_LVIDBH(X)->b_data )
-#define UDF_SB_LVIDIU(X) ( (struct logicalVolIntegrityDescImpUse *)&(UDF_SB_LVID(X)->impUse[le32_to_cpu(UDF_SB_LVID(X)->numOfPartitions) * 2 * sizeof(uint32_t)/sizeof(uint8_t)]) )
-
-#define UDF_SB_UMASK(X) ( UDF_SB(X)->s_umask )
-#define UDF_SB_GID(X) ( UDF_SB(X)->s_gid )
-#define UDF_SB_UID(X) ( UDF_SB(X)->s_uid )
-#define UDF_SB_RECORDTIME(X) ( UDF_SB(X)->s_recordtime )
-#define UDF_SB_SERIALNUM(X) ( UDF_SB(X)->s_serialnum )
-#define UDF_SB_UDFREV(X) ( UDF_SB(X)->s_udfrev )
-#define UDF_SB_FLAGS(X) ( UDF_SB(X)->s_flags )
-#define UDF_SB_VAT(X) ( UDF_SB(X)->s_vat )
+#define UDF_SB_BITMAP(X,Y,Z,I) ( UDF_SB(X)->s_partmaps[(Y)].Z.s_bitmap->s_block_bitmap[I] )
+#define UDF_SB_BITMAP_NR_GROUPS(X,Y,Z) ( UDF_SB(X)->s_partmaps[(Y)].Z.s_bitmap->s_nr_groups )
 
 #endif /* __LINUX_UDF_SB_H */
@@ -75,7 +75,7 @@ struct udf_part_map
 struct udf_sb_info
 {
 	struct udf_part_map *s_partmaps;
-	__u8 s_volident[32];
+	__u8 s_volume_ident[32];
 
 	/* Overall info */
 	__u16 s_partitions;
@@ -84,9 +84,9 @@ struct udf_sb_info
 	/* Sector headers */
 	__s32 s_session;
 	__u32 s_anchor[4];
-	__u32 s_lastblock;
+	__u32 s_last_block;
 
-	struct buffer_head *s_lvidbh;
+	struct buffer_head *s_lvid_bh;
 
 	/* Default permissions */
 	mode_t s_umask;
@@ -94,10 +94,10 @@ struct udf_sb_info
 	uid_t s_uid;
 
 	/* Root Info */
-	struct timespec s_recordtime;
+	struct timespec s_record_time;
 
 	/* Fileset Info */
-	__u16 s_serialnum;
+	__u16 s_serial_number;
 
 	/* highest UDF revision we have recorded to this media */
 	__u16 s_udfrev;
@@ -109,7 +109,7 @@ struct udf_sb_info
 	struct nls_table *s_nls_map;
 
 	/* VAT inode */
-	struct inode *s_vat;
+	struct inode *s_vat_inode;
 
 	struct mutex s_alloc_mutex;
 };