Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
  dm crypt: wait for endio to complete before destruction
  dm crypt: fix kcryptd_async_done parameter
  dm io: respect BIO_MAX_PAGES limit
  dm table: rework reference counting fix
  dm ioctl: validate name length when renaming
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -60,6 +60,7 @@ struct dm_crypt_io {
 };
 
 struct dm_crypt_request {
+	struct convert_context *ctx;
 	struct scatterlist sg_in;
 	struct scatterlist sg_out;
 };
@@ -335,6 +336,18 @@ static void crypt_convert_init(struct crypt_config *cc,
 	init_completion(&ctx->restart);
 }
 
+static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
+					     struct ablkcipher_request *req)
+{
+	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+}
+
+static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+					       struct dm_crypt_request *dmreq)
+{
+	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+}
+
 static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
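These two helpers make explicit a layout the driver already depends on: each ablkcipher request is allocated from cc->req_pool with a struct dm_crypt_request living at a fixed offset (cc->dmreq_start, which in the driver also accounts for the tfm's request size and alignment) inside the same allocation. A minimal userspace sketch of that round trip, with stand-in types and a simplified offset:

#include <stdio.h>
#include <stdlib.h>

struct request { int opaque; };	/* stand-in for ablkcipher_request */
struct dmreq   { int ctx_id; };	/* stand-in for dm_crypt_request */

static const size_t dmreq_start = sizeof(struct request);

/* Mirror of dmreq_of_req(): private data lives right after the request. */
static struct dmreq *dmreq_of_req(struct request *req)
{
	return (struct dmreq *)((char *)req + dmreq_start);
}

/* Mirror of req_of_dmreq(): step back to the enclosing allocation. */
static struct request *req_of_dmreq(struct dmreq *dmreq)
{
	return (struct request *)((char *)dmreq - dmreq_start);
}

int main(void)
{
	/* One allocation holds the request and its private trailer. */
	struct request *req = malloc(dmreq_start + sizeof(struct dmreq));

	dmreq_of_req(req)->ctx_id = 1;
	printf("round trip ok: %d\n", req == req_of_dmreq(dmreq_of_req(req)));
	free(req);
	return 0;
}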
@@ -345,10 +358,11 @@ static int crypt_convert_block(struct crypt_config *cc,
 	u8 *iv;
 	int r = 0;
 
-	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+	dmreq = dmreq_of_req(cc, req);
 	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
 			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
 
+	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
 		    bv_in->bv_offset + ctx->offset_in);
@@ -395,8 +409,9 @@ static void crypt_alloc_req(struct crypt_config *cc,
 	cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 	ablkcipher_request_set_tfm(cc->req, cc->tfm);
 	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 					CRYPTO_TFM_REQ_MAY_SLEEP,
-					kcryptd_async_done, ctx);
+					kcryptd_async_done,
+					dmreq_of_req(cc, cc->req));
 }
 
 /*
@@ -553,19 +568,22 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
+	struct bio *base_bio = io->base_bio;
+	struct dm_crypt_io *base_io = io->base_io;
+	int error = io->error;
 
 	if (!atomic_dec_and_test(&io->pending))
 		return;
 
-	if (likely(!io->base_io))
-		bio_endio(io->base_bio, io->error);
-	else {
-		if (io->error && !io->base_io->error)
-			io->base_io->error = io->error;
-		crypt_dec_pending(io->base_io);
-	}
-
 	mempool_free(io, cc->io_pool);
+
+	if (likely(!base_io))
+		bio_endio(base_bio, error);
+	else {
+		if (error && !base_io->error)
+			base_io->error = error;
+		crypt_dec_pending(base_io);
+	}
 }
 
 /*
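This reordering is the "wait for endio to complete before destruction" fix: bio_endio() on the base bio can drop the last reference to the device and destroy the crypt target along with cc->io_pool, so the io must go back to the pool first, and anything bio_endio still needs is snapshotted into locals. The dec_pending() and clone_endio() hunks in dm.c below apply the same discipline. A compact, self-contained sketch of the pattern (stub types, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct bio { int id; };

struct crypt_io {
	struct bio *base_bio;
	int error;
};

/* Stand-in for mempool_free(); in the kernel the pool itself can be
 * destroyed once the base bio completes, so this must happen first. */
static void pool_free(struct crypt_io *io)
{
	free(io);
}

static void bio_endio(struct bio *bio, int error)
{
	printf("bio %d completed, error %d\n", bio->id, error);
}

static void crypt_dec_pending(struct crypt_io *io)
{
	struct bio *base_bio = io->base_bio;	/* snapshot before freeing */
	int error = io->error;

	pool_free(io);			/* io and its pool are off-limits now */
	bio_endio(base_bio, error);	/* uses only the local copies */
}

int main(void)
{
	struct crypt_io *io = malloc(sizeof(*io));
	static struct bio b = { 7 };

	io->base_bio = &b;
	io->error = 0;
	crypt_dec_pending(io);
	return 0;
}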
@@ -821,7 +839,8 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 static void kcryptd_async_done(struct crypto_async_request *async_req,
 			       int error)
 {
-	struct convert_context *ctx = async_req->data;
+	struct dm_crypt_request *dmreq = async_req->data;
+	struct convert_context *ctx = dmreq->ctx;
 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
 	struct crypt_config *cc = io->target->private;
 
@@ -830,7 +849,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 		return;
 	}
 
-	mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);
+	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
 	if (!atomic_dec_and_test(&ctx->pending))
 		return;
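Taken together with the crypt_alloc_req() and crypt_convert_block() hunks above, this is the kcryptd_async_done parameter fix: when a request takes the CRYPTO_TFM_REQ_MAY_BACKLOG path, the crypto_async_request handed to the callback may differ from the one submitted — only its data pointer is guaranteed to be preserved. Registering the dm_crypt_request as the callback data lets the handler reach the convert_context through dmreq->ctx and recover the correct request to free via req_of_dmreq(); the old ablkcipher_request_cast(async_req) could name the wrong object. Continuing the stand-in types from the earlier sketch:

/* Only the registered data pointer is guaranteed to reach the callback
 * unchanged, so the handler derives everything from it, never from the
 * request argument itself. */
static void kcryptd_async_done_sketch(void *data, int error)
{
	struct dmreq *dmreq = data;			/* always the right one */
	struct request *req = req_of_dmreq(dmreq);	/* enclosing request */

	(void)error;
	free(req);	/* stands in for mempool_free(..., cc->req_pool) */
}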
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -292,6 +292,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 				       (PAGE_SIZE >> SECTOR_SHIFT));
 		num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
 				      num_bvecs);
+		if (unlikely(num_bvecs > BIO_MAX_PAGES))
+			num_bvecs = BIO_MAX_PAGES;
 		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
 		bio->bi_sector = where->sector + (where->count - remaining);
 		bio->bi_bdev = where->bdev;
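bio_alloc_bioset() cannot provide more than BIO_MAX_PAGES vecs in one bio, so the vec count derived from the remaining region size must be clamped before the allocation rather than left to fail. A hedged userspace sketch of the computation, assuming 4K pages (where BIO_MAX_PAGES was 256):

#include <stdio.h>

#define BIO_MAX_PAGES 256	/* the allocator's hard per-bio limit */

/* Mirror of the do_region() change: size the vec count from the
 * remaining sectors and the queue limit, then clamp it. */
static int num_bvecs(long remaining_sectors, int queue_max_vecs)
{
	long from_io = (remaining_sectors + 7) / 8;	/* 8 sectors per 4K page */
	int num = 1 + (int)(from_io < queue_max_vecs ? from_io
						     : queue_max_vecs);

	if (num > BIO_MAX_PAGES)
		num = BIO_MAX_PAGES;
	return num;
}

int main(void)
{
	printf("%d\n", num_bvecs(1L << 20, 1024));	/* clamped to 256 */
	return 0;
}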
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -704,7 +704,8 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
 	char *new_name = (char *) param + param->data_start;
 
 	if (new_name < param->data ||
-	    invalid_str(new_name, (void *) param + param_size)) {
+	    invalid_str(new_name, (void *) param + param_size) ||
+	    strlen(new_name) > DM_NAME_LEN - 1) {
 		DMWARN("Invalid new logical volume name supplied.");
 		return -EINVAL;
 	}
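Device names are copied into fixed DM_NAME_LEN (128-byte) buffers elsewhere in the ioctl interface, so dev_rename() now rejects a new name that cannot fit with its terminating NUL instead of passing an oversized string downstream. The added check in isolation:

#include <stdio.h>
#include <string.h>

#define DM_NAME_LEN 128	/* fixed name buffer size in the dm ioctl ABI */

static int dev_rename_check(const char *new_name)
{
	if (strlen(new_name) > DM_NAME_LEN - 1) {
		fprintf(stderr, "Invalid new logical volume name supplied.\n");
		return -1;	/* -EINVAL in the driver */
	}
	return 0;
}

int main(void)
{
	char too_long[DM_NAME_LEN + 8];

	memset(too_long, 'x', sizeof(too_long) - 1);
	too_long[sizeof(too_long) - 1] = '\0';
	printf("short: %d, long: %d\n",
	       dev_rename_check("vg0-home"), dev_rename_check(too_long));
	return 0;
}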
@@ -1063,7 +1064,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 
 	r = populate_table(t, param, param_size);
 	if (r) {
-		dm_table_put(t);
+		dm_table_destroy(t);
 		goto out;
 	}
 
@@ -1071,7 +1072,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 	hc = dm_get_mdptr(md);
 	if (!hc || hc->md != md) {
 		DMWARN("device has been removed from the dev hash table.");
-		dm_table_put(t);
+		dm_table_destroy(t);
 		up_write(&_hash_lock);
 		r = -ENXIO;
 		goto out;
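Both table_load() error paths change for the same reason, following the reworked table reference counting: a table that failed before being published is still owned exclusively by its creator, so it is torn down directly with dm_table_destroy() rather than by dropping a reference with dm_table_put(). A sketch of that ownership rule with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct table { int refs; };

static struct table *table_create(void)
{
	struct table *t = calloc(1, sizeof(*t));

	if (t)
		t->refs = 1;	/* the creator's reference */
	return t;
}

static void table_destroy(struct table *t)
{
	free(t);
}

static int populate(struct table *t)
{
	(void)t;
	return -1;	/* simulate a load failure */
}

static int load_table(void)
{
	struct table *t = table_create();

	if (!t)
		return -1;
	if (populate(t)) {
		/* Not yet published: nobody else can hold a reference,
		 * so destroy outright instead of dropping a ref. */
		table_destroy(t);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", load_table());
	return 0;
}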
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -525,9 +525,12 @@ static int __noflush_suspending(struct mapped_device *md)
 static void dec_pending(struct dm_io *io, int error)
 {
 	unsigned long flags;
+	int io_error;
+	struct bio *bio;
+	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
-	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
+	if (error && !(io->error > 0 && __noflush_suspending(md)))
 		io->error = error;
 
 	if (atomic_dec_and_test(&io->io_count)) {
@@ -537,24 +540,27 @@ static void dec_pending(struct dm_io *io, int error)
 			 * This must be handled before the sleeper on
 			 * suspend queue merges the pushback list.
 			 */
-			spin_lock_irqsave(&io->md->pushback_lock, flags);
-			if (__noflush_suspending(io->md))
-				bio_list_add(&io->md->pushback, io->bio);
+			spin_lock_irqsave(&md->pushback_lock, flags);
+			if (__noflush_suspending(md))
+				bio_list_add(&md->pushback, io->bio);
 			else
 				/* noflush suspend was interrupted. */
 				io->error = -EIO;
-			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
+			spin_unlock_irqrestore(&md->pushback_lock, flags);
 		}
 
 		end_io_acct(io);
 
-		if (io->error != DM_ENDIO_REQUEUE) {
-			trace_block_bio_complete(io->md->queue, io->bio);
+		io_error = io->error;
+		bio = io->bio;
 
-			bio_endio(io->bio, io->error);
-		}
+		free_io(md, io);
 
-		free_io(io->md, io);
+		if (io_error != DM_ENDIO_REQUEUE) {
+			trace_block_bio_complete(md->queue, bio);
+
+			bio_endio(bio, io_error);
+		}
 	}
 }
 
@@ -562,6 +568,7 @@ static void clone_endio(struct bio *bio, int error)
 {
 	int r = 0;
 	struct dm_target_io *tio = bio->bi_private;
+	struct dm_io *io = tio->io;
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 
@@ -585,15 +592,14 @@ static void clone_endio(struct bio *bio, int error)
 		}
 	}
 
-	dec_pending(tio->io, error);
-
 	/*
 	 * Store md for cleanup instead of tio which is about to get freed.
 	 */
 	bio->bi_private = md->bs;
 
-	bio_put(bio);
 	free_tio(md, tio);
+	bio_put(bio);
+	dec_pending(io, error);
 }
 
 static sector_t max_io_len(struct mapped_device *md,
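The clone_endio() rework applies the crypt_dec_pending() lifetime rule one level up: dec_pending() now frees the dm_io and may, through bio_endio(), tear down the mapped device and its bioset, so it has to run last — hence tio->io is saved before free_tio() and all cleanup touching md or tio happens first. A self-contained sketch of the resulting order (stub types and bodies, not kernel code):

#include <stdlib.h>

struct bio            { void *bi_private; };
struct mapped_device  { void *bs; };
struct dm_io          { struct mapped_device *md; };
struct dm_target_io   { struct dm_io *io; };

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	(void)md;
	free(tio);
}

static void bio_put(struct bio *bio)
{
	free(bio);
}

static void dec_pending(struct dm_io *io, int error)
{
	(void)error;
	free(io);	/* in the kernel this can also tear down io->md */
}

static void clone_endio_sketch(struct bio *bio, struct dm_target_io *tio,
			       int error)
{
	struct dm_io *io = tio->io;		/* saved: tio dies below */
	struct mapped_device *md = io->md;

	bio->bi_private = md->bs;		/* md still valid here */
	free_tio(md, tio);			/* 1: per-target io gone */
	bio_put(bio);				/* 2: clone returned to md->bs */
	dec_pending(io, error);			/* 3: may free io and md; last */
}

int main(void)
{
	struct mapped_device *md = malloc(sizeof(*md));
	struct dm_io *io = malloc(sizeof(*io));
	struct dm_target_io *tio = malloc(sizeof(*tio));
	struct bio *bio = malloc(sizeof(*bio));

	io->md = md;
	tio->io = io;
	clone_endio_sketch(bio, tio, 0);
	free(md);
	return 0;
}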