Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx into for-linus

NeilBrown committed on 2009-09-23 18:31:11 +10:00
61 changed files with 9135 additions and 3307 deletions

crypto/async_tx/Kconfig

@@ -14,3 +14,12 @@ config ASYNC_MEMSET
tristate
select ASYNC_CORE
config ASYNC_PQ
tristate
select ASYNC_CORE
config ASYNC_RAID6_RECOV
tristate
select ASYNC_CORE
select ASYNC_PQ

crypto/async_tx/Makefile

@@ -2,3 +2,6 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx.o
obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o
obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o
obj-$(CONFIG_ASYNC_XOR) += async_xor.o
obj-$(CONFIG_ASYNC_PQ) += async_pq.o
obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o
obj-$(CONFIG_ASYNC_RAID6_TEST) += raid6test.o

crypto/async_tx/async_memcpy.c

@@ -33,28 +33,31 @@
* async_memcpy - attempt to copy memory with a dma engine.
* @dest: destination page
* @src: src page
* @offset: offset in pages to start transaction
* @dest_offset: offset into 'dest' to start transaction
* @src_offset: offset into 'src' to start transaction
* @len: length in bytes
* @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
* @depend_tx: memcpy depends on the result of this transaction
* @cb_fn: function to call when the memcpy completes
* @cb_param: parameter to pass to the callback routine
* @submit: submission / completion modifiers
*
* honored flags: ASYNC_TX_ACK
*/
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
unsigned int src_offset, size_t len, enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
unsigned int src_offset, size_t len,
struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
&dest, 1, &src, 1, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
if (device) {
if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
dma_addr_t dma_dest, dma_src;
unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
unsigned long dma_prep_flags = 0;
if (submit->cb_fn)
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
DMA_FROM_DEVICE);
@@ -67,13 +70,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
async_tx_submit(chan, tx, submit);
} else {
void *dest_buf, *src_buf;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
/* wait for any prerequisite operations */
async_tx_quiesce(&depend_tx);
async_tx_quiesce(&submit->depend_tx);
dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -83,26 +86,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
kunmap_atomic(dest_buf, KM_USER0);
kunmap_atomic(src_buf, KM_USER1);
async_tx_sync_epilog(cb_fn, cb_param);
async_tx_sync_epilog(submit);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_memcpy);
static int __init async_memcpy_init(void)
{
return 0;
}
static void __exit async_memcpy_exit(void)
{
do { } while (0);
}
module_init(async_memcpy_init);
module_exit(async_memcpy_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memcpy api");
MODULE_LICENSE("GPL");

crypto/async_tx/async_memset.c

@@ -35,26 +35,26 @@
* @val: fill value
* @offset: offset in pages to start transaction
* @len: length in bytes
* @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: memset depends on the result of this transaction
* @cb_fn: function to call when the memset completes
* @cb_param: parameter to pass to the callback routine
*
* honored flags: ASYNC_TX_ACK
*/
struct dma_async_tx_descriptor *
async_memset(struct page *dest, int val, unsigned int offset,
size_t len, enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
async_memset(struct page *dest, int val, unsigned int offset, size_t len,
struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET,
&dest, 1, NULL, 0, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
if (device) {
if (device && is_dma_fill_aligned(device, offset, 0, len)) {
dma_addr_t dma_dest;
unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
unsigned long dma_prep_flags = 0;
if (submit->cb_fn)
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
dma_dest = dma_map_page(device->dev, dest, offset, len,
DMA_FROM_DEVICE);
@@ -64,38 +64,25 @@ async_memset(struct page *dest, int val, unsigned int offset,
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
async_tx_submit(chan, tx, submit);
} else { /* run the memset synchronously */
void *dest_buf;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
dest_buf = (void *) (((char *) page_address(dest)) + offset);
dest_buf = page_address(dest) + offset;
/* wait for any prerequisite operations */
async_tx_quiesce(&depend_tx);
async_tx_quiesce(&submit->depend_tx);
memset(dest_buf, val, len);
async_tx_sync_epilog(cb_fn, cb_param);
async_tx_sync_epilog(submit);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_memset);
static int __init async_memset_init(void)
{
return 0;
}
static void __exit async_memset_exit(void)
{
do { } while (0);
}
module_init(async_memset_init);
module_exit(async_memset_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memset api");
MODULE_LICENSE("GPL");

crypto/async_tx/async_pq.c (new file, 395 lines)

@@ -0,0 +1,395 @@
/*
* Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
* Copyright(c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
/**
* scribble - space to hold throwaway P buffer for synchronous gen_syndrome
*/
static struct page *scribble;
static bool is_raid6_zero_block(struct page *p)
{
return p == (void *) raid6_empty_zero_page;
}
/* the struct page *blocks[] parameter passed to async_gen_syndrome()
* and async_syndrome_val() contains the 'P' destination address at
* blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
*
* note: these are macros as they are used as lvalues
*/
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
/**
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
const unsigned char *scfs, unsigned int offset, int disks,
size_t len, dma_addr_t *dma_src,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct dma_device *dma = chan->device;
enum dma_ctrl_flags dma_flags = 0;
enum async_tx_flags flags_orig = submit->flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
dma_async_tx_callback cb_param_orig = submit->cb_param;
int src_cnt = disks - 2;
unsigned char coefs[src_cnt];
unsigned short pq_src_cnt;
dma_addr_t dma_dest[2];
int src_off = 0;
int idx;
int i;
/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
if (P(blocks, disks))
dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
len, DMA_BIDIRECTIONAL);
else
dma_flags |= DMA_PREP_PQ_DISABLE_P;
if (Q(blocks, disks))
dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
len, DMA_BIDIRECTIONAL);
else
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
/* convert source addresses being careful to collapse 'empty'
* sources and update the coefficients accordingly
*/
for (i = 0, idx = 0; i < src_cnt; i++) {
if (is_raid6_zero_block(blocks[i]))
continue;
dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
DMA_TO_DEVICE);
coefs[idx] = scfs[i];
idx++;
}
src_cnt = idx;
while (src_cnt > 0) {
submit->flags = flags_orig;
pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
/* if we are submitting additional pqs, leave the chain open,
* clear the callback parameters, and leave the destination
* buffers mapped
*/
if (src_cnt > pq_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
/* Since we have clobbered the src_list we are committed
* to doing this asynchronously. Drivers force forward
* progress in case they can not provide a descriptor
*/
for (;;) {
tx = dma->device_prep_dma_pq(chan, dma_dest,
&dma_src[src_off],
pq_src_cnt,
&coefs[src_off], len,
dma_flags);
if (likely(tx))
break;
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
/* drop completed sources */
src_cnt -= pq_src_cnt;
src_off += pq_src_cnt;
dma_flags |= DMA_PREP_CONTINUE;
}
return tx;
}
/**
* do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
*/
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
size_t len, struct async_submit_ctl *submit)
{
void **srcs;
int i;
if (submit->scribble)
srcs = submit->scribble;
else
srcs = (void **) blocks;
for (i = 0; i < disks; i++) {
if (is_raid6_zero_block(blocks[i])) {
BUG_ON(i > disks - 3); /* P or Q can't be zero */
srcs[i] = blocks[i];
} else
srcs[i] = page_address(blocks[i]) + offset;
}
raid6_call.gen_syndrome(disks, len, srcs);
async_tx_sync_epilog(submit);
}
/**
* async_gen_syndrome - asynchronously calculate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
* @offset: common offset into each block (src and dest) to start transaction
* @disks: number of blocks (including missing P or Q, see below)
* @len: length of operation in bytes
* @submit: submission/completion modifiers
*
* General note: This routine assumes a field of GF(2^8) with a
* primitive polynomial of 0x11d and a generator of {02}.
*
* 'disks' note: callers can optionally omit either P or Q (but not
* both) from the calculation by setting blocks[disks-2] or
* blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
* PAGE_SIZE as a temporary buffer of this size is used in the
* synchronous path. 'disks' always accounts for both destination
* buffers.
*
* 'blocks' note: if submit->scribble is NULL then the contents of
* 'blocks' may be overwritten
*/
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
size_t len, struct async_submit_ctl *submit)
{
int src_cnt = disks - 2;
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&P(blocks, disks), 2,
blocks, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
dma_addr_t *dma_src = NULL;
BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
if (submit->scribble)
dma_src = submit->scribble;
else if (sizeof(dma_addr_t) <= sizeof(struct page *))
dma_src = (dma_addr_t *) blocks;
if (dma_src && device &&
(src_cnt <= dma_maxpq(device, 0) ||
dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
/* run the p+q asynchronously */
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
disks, len, dma_src, submit);
}
/* run the pq synchronously */
pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
if (!P(blocks, disks)) {
P(blocks, disks) = scribble;
BUG_ON(len + offset > PAGE_SIZE);
}
if (!Q(blocks, disks)) {
Q(blocks, disks) = scribble;
BUG_ON(len + offset > PAGE_SIZE);
}
do_sync_gen_syndrome(blocks, offset, disks, len, submit);
return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
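A usage sketch mirroring the self-test at the end of this commit (names are illustrative): for a 6-disk stripe, blocks[] holds four data pages followed by P and Q, and the scribble region is an addr_conv_t array sized to the disk count:

	/* blocks[] = { d0, d1, d2, d3, P, Q }, disks = 6 */
	struct async_submit_ctl submit;
	addr_conv_t addr_conv[6];
	struct dma_async_tx_descriptor *tx;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done_fn, done_arg, addr_conv);
	tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
	async_tx_issue_pending(tx);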
/**
* async_syndrome_val - asynchronously validate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
* @offset: common offset into each block (src and dest) to start transaction
* @disks: number of blocks (including missing P or Q, see below)
* @len: length of operation in bytes
* @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
* @spare: temporary result buffer for the synchronous case
* @submit: submission / completion modifiers
*
* The same notes from async_gen_syndrome apply to the 'blocks',
* and 'disks' parameters of this routine. The synchronous path
* requires a temporary result buffer and submit->scribble to be
* specified.
*/
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
size_t len, enum sum_check_flags *pqres, struct page *spare,
struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
NULL, 0, blocks, disks,
len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
dma_addr_t *dma_src = NULL;
BUG_ON(disks < 4);
if (submit->scribble)
dma_src = submit->scribble;
else if (sizeof(dma_addr_t) <= sizeof(struct page *))
dma_src = (dma_addr_t *) blocks;
if (dma_src && device && disks <= dma_maxpq(device, 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
struct device *dev = device->dev;
dma_addr_t *pq = &dma_src[disks-2];
int i;
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
if (!P(blocks, disks))
dma_flags |= DMA_PREP_PQ_DISABLE_P;
if (!Q(blocks, disks))
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
for (i = 0; i < disks; i++)
if (likely(blocks[i])) {
BUG_ON(is_raid6_zero_block(blocks[i]));
dma_src[i] = dma_map_page(dev, blocks[i],
offset, len,
DMA_TO_DEVICE);
}
for (;;) {
tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
disks - 2,
raid6_gfexp,
len, pqres,
dma_flags);
if (likely(tx))
break;
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
async_tx_submit(chan, tx, submit);
return tx;
} else {
struct page *p_src = P(blocks, disks);
struct page *q_src = Q(blocks, disks);
enum async_tx_flags flags_orig = submit->flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
void *scribble = submit->scribble;
void *cb_param_orig = submit->cb_param;
void *p, *q, *s;
pr_debug("%s: (sync) disks: %d len: %zu\n",
__func__, disks, len);
/* caller must provide a temporary result buffer and
* allow the input parameters to be preserved
*/
BUG_ON(!spare || !scribble);
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
/* recompute p and/or q into the temporary buffer and then
* check to see the result matches the current value
*/
tx = NULL;
*pqres = 0;
if (p_src) {
init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
NULL, NULL, scribble);
tx = async_xor(spare, blocks, offset, disks-2, len, submit);
async_tx_quiesce(&tx);
p = page_address(p_src) + offset;
s = page_address(spare) + offset;
*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
}
if (q_src) {
P(blocks, disks) = NULL;
Q(blocks, disks) = spare;
init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
tx = async_gen_syndrome(blocks, offset, disks, len, submit);
async_tx_quiesce(&tx);
q = page_address(q_src) + offset;
s = page_address(spare) + offset;
*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
}
/* restore P, Q and submit */
P(blocks, disks) = p_src;
Q(blocks, disks) = q_src;
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
submit->flags = flags_orig;
async_tx_sync_epilog(submit);
return NULL;
}
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
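A validation sketch in the same style (spare, blocks, and addr_conv are caller-provided; done_fn/done_arg are placeholders):

	enum sum_check_flags pqres = ~0;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done_fn, done_arg, addr_conv);
	tx = async_syndrome_val(blocks, 0, disks, PAGE_SIZE, &pqres, spare, &submit);
	async_tx_issue_pending(tx);
	/* on completion pqres == 0 if P and Q both check out, otherwise
	 * SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
	 */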
static int __init async_pq_init(void)
{
scribble = alloc_page(GFP_KERNEL);
if (scribble)
return 0;
pr_err("%s: failed to allocate required spare page\n", __func__);
return -ENOMEM;
}
static void __exit async_pq_exit(void)
{
put_page(scribble);
}
module_init(async_pq_init);
module_exit(async_pq_exit);
MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");

crypto/async_tx/async_raid6_recov.c

@@ -0,0 +1,468 @@
/*
* Asynchronous RAID-6 recovery calculations ASYNC_TX API.
* Copyright(c) 2009 Intel Corporation
*
* based on raid6recov.c:
* Copyright 2002 H. Peter Anvin
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
size_t len, struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, srcs, 2, len);
struct dma_device *dma = chan ? chan->device : NULL;
const u8 *amul, *bmul;
u8 ax, bx;
u8 *a, *b, *c;
if (dma) {
dma_addr_t dma_dest[2];
dma_addr_t dma_src[2];
struct device *dev = dma->dev;
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
len, dma_flags);
if (tx) {
async_tx_submit(chan, tx, submit);
return tx;
}
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
}
/* run the operation synchronously */
async_tx_quiesce(&submit->depend_tx);
amul = raid6_gfmul[coef[0]];
bmul = raid6_gfmul[coef[1]];
a = page_address(srcs[0]);
b = page_address(srcs[1]);
c = page_address(dest);
while (len--) {
ax = amul[*a++];
bx = bmul[*b++];
*c++ = ax ^ bx;
}
return NULL;
}
static struct dma_async_tx_descriptor *
async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, &src, 1, len);
struct dma_device *dma = chan ? chan->device : NULL;
const u8 *qmul; /* Q multiplier table */
u8 *d, *s;
if (dma) {
dma_addr_t dma_dest[2];
dma_addr_t dma_src[1];
struct device *dev = dma->dev;
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef,
len, dma_flags);
if (tx) {
async_tx_submit(chan, tx, submit);
return tx;
}
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
}
/* no channel available, or failed to allocate a descriptor, so
* perform the operation synchronously
*/
async_tx_quiesce(&submit->depend_tx);
qmul = raid6_gfmul[coef];
d = page_address(dest);
s = page_address(src);
while (len--)
*d++ = qmul[*s++];
return NULL;
}
static struct dma_async_tx_descriptor *
__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct page *p, *q, *a, *b;
struct page *srcs[2];
unsigned char coef[2];
enum async_tx_flags flags = submit->flags;
dma_async_tx_callback cb_fn = submit->cb_fn;
void *cb_param = submit->cb_param;
void *scribble = submit->scribble;
p = blocks[4-2];
q = blocks[4-1];
a = blocks[faila];
b = blocks[failb];
/* in the 4 disk case P + Pxy == P and Q + Qxy == Q */
/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
srcs[0] = p;
srcs[1] = q;
coef[0] = raid6_gfexi[failb-faila];
coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_sum_product(b, srcs, coef, bytes, submit);
/* Dy = P+Pxy+Dx */
srcs[0] = p;
srcs[1] = b;
init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
cb_param, scribble);
tx = async_xor(a, srcs, 0, 2, bytes, submit);
return tx;
}
static struct dma_async_tx_descriptor *
__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct page *p, *q, *g, *dp, *dq;
struct page *srcs[2];
unsigned char coef[2];
enum async_tx_flags flags = submit->flags;
dma_async_tx_callback cb_fn = submit->cb_fn;
void *cb_param = submit->cb_param;
void *scribble = submit->scribble;
int uninitialized_var(good);
int i;
for (i = 0; i < 3; i++) {
if (i == faila || i == failb)
continue;
else {
good = i;
break;
}
}
BUG_ON(i >= 3);
p = blocks[5-2];
q = blocks[5-1];
g = blocks[good];
/* Compute syndrome with zero for the missing data pages
* Use the dead data pages as temporary storage for delta p and
* delta q
*/
dp = blocks[faila];
dq = blocks[failb];
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_memcpy(dp, g, 0, 0, bytes, submit);
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
/* compute P + Pxy */
srcs[0] = dp;
srcs[1] = p;
init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
NULL, NULL, scribble);
tx = async_xor(dp, srcs, 0, 2, bytes, submit);
/* compute Q + Qxy */
srcs[0] = dq;
srcs[1] = q;
init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
NULL, NULL, scribble);
tx = async_xor(dq, srcs, 0, 2, bytes, submit);
/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
srcs[0] = dp;
srcs[1] = dq;
coef[0] = raid6_gfexi[failb-faila];
coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_sum_product(dq, srcs, coef, bytes, submit);
/* Dy = P+Pxy+Dx */
srcs[0] = dp;
srcs[1] = dq;
init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
cb_param, scribble);
tx = async_xor(dp, srcs, 0, 2, bytes, submit);
return tx;
}
static struct dma_async_tx_descriptor *
__2data_recov_n(int disks, size_t bytes, int faila, int failb,
struct page **blocks, struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct page *p, *q, *dp, *dq;
struct page *srcs[2];
unsigned char coef[2];
enum async_tx_flags flags = submit->flags;
dma_async_tx_callback cb_fn = submit->cb_fn;
void *cb_param = submit->cb_param;
void *scribble = submit->scribble;
p = blocks[disks-2];
q = blocks[disks-1];
/* Compute syndrome with zero for the missing data pages
* Use the dead data pages as temporary storage for
* delta p and delta q
*/
dp = blocks[faila];
blocks[faila] = (void *)raid6_empty_zero_page;
blocks[disks-2] = dp;
dq = blocks[failb];
blocks[failb] = (void *)raid6_empty_zero_page;
blocks[disks-1] = dq;
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
/* Restore pointer table */
blocks[faila] = dp;
blocks[failb] = dq;
blocks[disks-2] = p;
blocks[disks-1] = q;
/* compute P + Pxy */
srcs[0] = dp;
srcs[1] = p;
init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
NULL, NULL, scribble);
tx = async_xor(dp, srcs, 0, 2, bytes, submit);
/* compute Q + Qxy */
srcs[0] = dq;
srcs[1] = q;
init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
NULL, NULL, scribble);
tx = async_xor(dq, srcs, 0, 2, bytes, submit);
/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
srcs[0] = dp;
srcs[1] = dq;
coef[0] = raid6_gfexi[failb-faila];
coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_sum_product(dq, srcs, coef, bytes, submit);
/* Dy = P+Pxy+Dx */
srcs[0] = dp;
srcs[1] = dq;
init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
cb_param, scribble);
tx = async_xor(dp, srcs, 0, 2, bytes, submit);
return tx;
}
/**
* async_raid6_2data_recov - asynchronously calculate two missing data blocks
* @disks: number of disks in the RAID-6 array
* @bytes: block size
* @faila: first failed drive index
* @failb: second failed drive index
* @blocks: array of source pointers where the last two entries are p and q
* @submit: submission/completion modifiers
*/
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
struct page **blocks, struct async_submit_ctl *submit)
{
BUG_ON(faila == failb);
if (failb < faila)
swap(faila, failb);
pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
/* we need to preserve the contents of 'blocks' for the async
* case, so punt to synchronous if a scribble buffer is not available
*/
if (!submit->scribble) {
void **ptrs = (void **) blocks;
int i;
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
ptrs[i] = page_address(blocks[i]);
raid6_2data_recov(disks, bytes, faila, failb, ptrs);
async_tx_sync_epilog(submit);
return NULL;
}
switch (disks) {
case 4:
/* dma devices do not uniformly understand a zero source pq
* operation (in contrast to the synchronous case), so
* explicitly handle the 4 disk special case
*/
return __2data_recov_4(bytes, faila, failb, blocks, submit);
case 5:
/* dma devices do not uniformly understand a single
* source pq operation (in contrast to the synchronous
* case), so explicitly handle the 5 disk special case
*/
return __2data_recov_5(bytes, faila, failb, blocks, submit);
default:
return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
}
}
EXPORT_SYMBOL_GPL(async_raid6_2data_recov);
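A hedged caller sketch, following the raid6test usage later in this commit: the failed pages stay in the pointer table and are recomputed in place:

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done_fn, done_arg, addr_conv);
	tx = async_raid6_2data_recov(disks, PAGE_SIZE, faila, failb,
				     blocks, &submit);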
/**
* async_raid6_datap_recov - asynchronously calculate a data and the 'p' block
* @disks: number of disks in the RAID-6 array
* @bytes: block size
* @faila: failed drive index
* @blocks: array of source pointers where the last two entries are p and q
* @submit: submission/completion modifiers
*/
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int disks, size_t bytes, int faila,
struct page **blocks, struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct page *p, *q, *dq;
u8 coef;
enum async_tx_flags flags = submit->flags;
dma_async_tx_callback cb_fn = submit->cb_fn;
void *cb_param = submit->cb_param;
void *scribble = submit->scribble;
struct page *srcs[2];
pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
/* we need to preserve the contents of 'blocks' for the async
* case, so punt to synchronous if a scribble buffer is not available
*/
if (!scribble) {
void **ptrs = (void **) blocks;
int i;
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
ptrs[i] = page_address(blocks[i]);
raid6_datap_recov(disks, bytes, faila, ptrs);
async_tx_sync_epilog(submit);
return NULL;
}
p = blocks[disks-2];
q = blocks[disks-1];
/* Compute syndrome with zero for the missing data page
* Use the dead data page as temporary storage for delta q
*/
dq = blocks[faila];
blocks[faila] = (void *)raid6_empty_zero_page;
blocks[disks-1] = dq;
/* in the 4 disk case we only need to perform a single source
* multiplication
*/
if (disks == 4) {
int good = faila == 0 ? 1 : 0;
struct page *g = blocks[good];
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
scribble);
tx = async_memcpy(p, g, 0, 0, bytes, submit);
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
scribble);
tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
} else {
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
scribble);
tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
}
/* Restore pointer table */
blocks[faila] = dq;
blocks[disks-1] = q;
/* calculate g^{-faila} */
coef = raid6_gfinv[raid6_gfexp[faila]];
srcs[0] = dq;
srcs[1] = q;
init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
NULL, NULL, scribble);
tx = async_xor(dq, srcs, 0, 2, bytes, submit);
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_mult(dq, dq, coef, bytes, submit);
srcs[0] = p;
srcs[1] = dq;
init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
cb_param, scribble);
tx = async_xor(p, srcs, 0, 2, bytes, submit);
return tx;
}
EXPORT_SYMBOL_GPL(async_raid6_datap_recov);
MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery api");
MODULE_LICENSE("GPL");

crypto/async_tx/async_tx.c

@@ -42,16 +42,21 @@ static void __exit async_tx_exit(void)
async_dmaengine_put();
}
module_init(async_tx_init);
module_exit(async_tx_exit);
/**
* __async_tx_find_channel - find a channel to carry out the operation or let
* the transaction execute synchronously
* @depend_tx: transaction dependency
* @submit: transaction dependency and submission modifiers
* @tx_type: transaction type
*/
struct dma_chan *
__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
enum dma_transaction_type tx_type)
__async_tx_find_channel(struct async_submit_ctl *submit,
enum dma_transaction_type tx_type)
{
struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
/* see if we can keep the chain on one channel */
if (depend_tx &&
dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
@@ -59,17 +64,6 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#else
static int __init async_tx_init(void)
{
printk(KERN_INFO "async_tx: api initialized (sync-only)\n");
return 0;
}
static void __exit async_tx_exit(void)
{
do { } while (0);
}
#endif
@@ -83,10 +77,14 @@ static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
struct dma_async_tx_descriptor *tx)
{
struct dma_chan *chan;
struct dma_device *device;
struct dma_chan *chan = depend_tx->chan;
struct dma_device *device = chan->device;
struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
BUG();
#endif
/* first check to see if we can still append to depend_tx */
spin_lock_bh(&depend_tx->lock);
if (depend_tx->parent && depend_tx->chan == tx->chan) {
@@ -96,11 +94,11 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
}
spin_unlock_bh(&depend_tx->lock);
if (!intr_tx)
/* attached dependency, flush the parent channel */
if (!intr_tx) {
device->device_issue_pending(chan);
return;
chan = depend_tx->chan;
device = chan->device;
}
/* see if we can schedule an interrupt
* otherwise poll for completion
@@ -134,6 +132,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
intr_tx->tx_submit(intr_tx);
async_tx_ack(intr_tx);
}
device->device_issue_pending(chan);
} else {
if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for depend_tx\n",
@@ -144,13 +143,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
/**
* submit_disposition - while holding depend_tx->lock we must avoid submitting
* new operations to prevent a circular locking dependency with
* drivers that already hold a channel lock when calling
* async_tx_run_dependencies.
* submit_disposition - flags for routing an incoming operation
* @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
* @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
* @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
*
* while holding depend_tx->lock we must avoid submitting new operations
* to prevent a circular locking dependency with drivers that already
* hold a channel lock when calling async_tx_run_dependencies.
*/
enum submit_disposition {
ASYNC_TX_SUBMITTED,
@@ -160,11 +160,12 @@ enum submit_disposition {
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
struct async_submit_ctl *submit)
{
tx->callback = cb_fn;
tx->callback_param = cb_param;
struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
tx->callback = submit->cb_fn;
tx->callback_param = submit->cb_param;
if (depend_tx) {
enum submit_disposition s;
@@ -220,30 +221,29 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
tx->tx_submit(tx);
}
if (flags & ASYNC_TX_ACK)
if (submit->flags & ASYNC_TX_ACK)
async_tx_ack(tx);
if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
if (depend_tx)
async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
/**
* async_trigger_callback - schedules the callback function to be run after
* any dependent operations have been completed.
* @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: 'callback' requires the completion of this transaction
* @cb_fn: function to call after depend_tx completes
* @cb_param: parameter to pass to the callback routine
* async_trigger_callback - schedules the callback function to be run
* @submit: submission and completion parameters
*
* honored flags: ASYNC_TX_ACK
*
* The callback is run after any dependent operations have completed.
*/
struct dma_async_tx_descriptor *
async_trigger_callback(enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
async_trigger_callback(struct async_submit_ctl *submit)
{
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *tx;
struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
if (depend_tx) {
chan = depend_tx->chan;
@@ -262,14 +262,14 @@ async_trigger_callback(enum async_tx_flags flags,
if (tx) {
pr_debug("%s: (async)\n", __func__);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
async_tx_submit(chan, tx, submit);
} else {
pr_debug("%s: (sync)\n", __func__);
/* wait for any prerequisite operations */
async_tx_quiesce(&depend_tx);
async_tx_quiesce(&submit->depend_tx);
async_tx_sync_epilog(cb_fn, cb_param);
async_tx_sync_epilog(submit);
}
return tx;
@@ -295,9 +295,6 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
module_init(async_tx_init);
module_exit(async_tx_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");

crypto/async_tx/async_xor.c

@@ -33,19 +33,16 @@
/* do_async_xor - dma map the pages and perform the xor with an engine */
static __async_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
unsigned int offset, int src_cnt, size_t len,
enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
struct async_submit_ctl *submit)
{
struct dma_device *dma = chan->device;
dma_addr_t *dma_src = (dma_addr_t *) src_list;
struct dma_async_tx_descriptor *tx = NULL;
int src_off = 0;
int i;
dma_async_tx_callback _cb_fn;
void *_cb_param;
enum async_tx_flags async_flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
void *cb_param_orig = submit->cb_param;
enum async_tx_flags flags_orig = submit->flags;
enum dma_ctrl_flags dma_flags;
int xor_src_cnt;
dma_addr_t dma_dest;
@@ -63,25 +60,27 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
}
while (src_cnt) {
async_flags = flags;
submit->flags = flags_orig;
dma_flags = 0;
xor_src_cnt = min(src_cnt, dma->max_xor);
xor_src_cnt = min(src_cnt, (int)dma->max_xor);
/* if we are submitting additional xors, leave the chain open,
* clear the callback parameters, and leave the destination
* buffer mapped
*/
if (src_cnt > xor_src_cnt) {
async_flags &= ~ASYNC_TX_ACK;
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
_cb_fn = NULL;
_cb_param = NULL;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
_cb_fn = cb_fn;
_cb_param = cb_param;
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
}
if (_cb_fn)
if (submit->cb_fn)
dma_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
/* Since we have clobbered the src_list we are committed
* to doing this asynchronously. Drivers force forward progress
* in case they can not provide a descriptor
@@ -90,7 +89,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
xor_src_cnt, len, dma_flags);
if (unlikely(!tx))
async_tx_quiesce(&depend_tx);
async_tx_quiesce(&submit->depend_tx);
/* spin wait for the preceding transactions to complete */
while (unlikely(!tx)) {
@@ -101,11 +100,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
dma_flags);
}
async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
_cb_param);
depend_tx = tx;
flags |= ASYNC_TX_DEP_ACK;
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
if (src_cnt > xor_src_cnt) {
/* drop completed sources */
@@ -124,23 +120,27 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, enum async_tx_flags flags,
dma_async_tx_callback cb_fn, void *cb_param)
int src_cnt, size_t len, struct async_submit_ctl *submit)
{
int i;
int xor_src_cnt;
int src_off = 0;
void *dest_buf;
void **srcs = (void **) src_list;
void **srcs;
/* reuse the 'src_list' array to convert to buffer pointers */
if (submit->scribble)
srcs = submit->scribble;
else
srcs = (void **) src_list;
/* convert to buffer pointers */
for (i = 0; i < src_cnt; i++)
srcs[i] = page_address(src_list[i]) + offset;
/* set destination address */
dest_buf = page_address(dest) + offset;
if (flags & ASYNC_TX_XOR_ZERO_DST)
if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
memset(dest_buf, 0, len);
while (src_cnt > 0) {
@@ -153,61 +153,70 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
src_off += xor_src_cnt;
}
async_tx_sync_epilog(cb_fn, cb_param);
async_tx_sync_epilog(submit);
}
/**
* async_xor - attempt to xor a set of blocks with a dma engine.
* xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
* flag must be set to not include dest data in the calculation. The
* assumption with dma engines is that they only use the destination
* buffer as a source when it is explicitly specified in the source list.
* @dest: destination page
* @src_list: array of source pages (if the dest is also a source it must be
* at index zero). The contents of this array may be overwritten.
* @offset: offset in pages to start transaction
* @src_list: array of source pages
* @offset: common src/dst offset to start transaction
* @src_cnt: number of source pages
* @len: length in bytes
* @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST,
* ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: xor depends on the result of this transaction.
* @cb_fn: function to call when the xor completes
* @cb_param: parameter to pass to the callback routine
* @submit: submission / completion modifiers
*
* honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
*
* xor_blocks always uses the dest as a source so the
* ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
* the calculation. The assumption with dma engines is that they only
* use the destination buffer as a source when it is explicitly specified
* in the source list.
*
* src_list note: if the dest is also a source it must be at index zero.
* The contents of this array will be overwritten if a scribble region
* is not specified.
*/
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
int src_cnt, size_t len, struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
&dest, 1, src_list,
src_cnt, len);
dma_addr_t *dma_src = NULL;
BUG_ON(src_cnt <= 1);
if (chan) {
if (submit->scribble)
dma_src = submit->scribble;
else if (sizeof(dma_addr_t) <= sizeof(struct page *))
dma_src = (dma_addr_t *) src_list;
if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
/* run the xor asynchronously */
pr_debug("%s (async): len: %zu\n", __func__, len);
return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
flags, depend_tx, cb_fn, cb_param);
dma_src, submit);
} else {
/* run the xor synchronously */
pr_debug("%s (sync): len: %zu\n", __func__, len);
WARN_ONCE(chan, "%s: no space for dma address conversion\n",
__func__);
/* in the sync case the dest is an implied source
* (assumes the dest is the first source)
*/
if (flags & ASYNC_TX_XOR_DROP_DST) {
if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
src_cnt--;
src_list++;
}
/* wait for any prerequisite operations */
async_tx_quiesce(&depend_tx);
async_tx_quiesce(&submit->depend_tx);
do_sync_xor(dest, src_list, offset, src_cnt, len,
flags, cb_fn, cb_param);
do_sync_xor(dest, src_list, offset, src_cnt, len, submit);
return NULL;
}
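A sketch of the zero-destination xor pattern used throughout the recovery paths (pages and count are illustrative; the addr_conv scribble keeps src_list from being clobbered):

	struct page *srcs[2] = { d0, d1 };

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
			  NULL, done_fn, done_arg, addr_conv);
	tx = async_xor(dest, srcs, 0, 2, PAGE_SIZE, &submit);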
@@ -222,104 +231,94 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
}
/**
* async_xor_zero_sum - attempt a xor parity check with a dma engine.
* async_xor_val - attempt a xor parity check with a dma engine.
* @dest: destination page used if the xor is performed synchronously
* @src_list: array of source pages. The dest page must be listed as a source
* at index zero. The contents of this array may be overwritten.
* @src_list: array of source pages
* @offset: offset in pages to start transaction
* @src_cnt: number of source pages
* @len: length in bytes
* @result: 0 if sum == 0 else non-zero
* @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: xor depends on the result of this transaction.
* @cb_fn: function to call when the xor completes
* @cb_param: parameter to pass to the callback routine
* @submit: submission / completion modifiers
*
* honored flags: ASYNC_TX_ACK
*
* src_list note: if the dest is also a source it must be at index zero.
* The contents of this array will be overwritten if a scribble region
* is not specified.
*/
struct dma_async_tx_descriptor *
async_xor_zero_sum(struct page *dest, struct page **src_list,
unsigned int offset, int src_cnt, size_t len,
u32 *result, enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, enum sum_check_flags *result,
struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
&dest, 1, src_list,
src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t *dma_src = NULL;
BUG_ON(src_cnt <= 1);
if (device && src_cnt <= device->max_xor) {
dma_addr_t *dma_src = (dma_addr_t *) src_list;
unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
if (submit->scribble)
dma_src = submit->scribble;
else if (sizeof(dma_addr_t) <= sizeof(struct page *))
dma_src = (dma_addr_t *) src_list;
if (dma_src && device && src_cnt <= device->max_xor &&
is_dma_xor_aligned(device, offset, 0, len)) {
unsigned long dma_prep_flags = 0;
int i;
pr_debug("%s: (async) len: %zu\n", __func__, len);
if (submit->cb_fn)
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
for (i = 0; i < src_cnt; i++)
dma_src[i] = dma_map_page(device->dev, src_list[i],
offset, len, DMA_TO_DEVICE);
tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
len, result,
dma_prep_flags);
tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
len, result,
dma_prep_flags);
if (unlikely(!tx)) {
async_tx_quiesce(&depend_tx);
async_tx_quiesce(&submit->depend_tx);
while (!tx) {
dma_async_issue_pending(chan);
tx = device->device_prep_dma_zero_sum(chan,
tx = device->device_prep_dma_xor_val(chan,
dma_src, src_cnt, len, result,
dma_prep_flags);
}
}
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
async_tx_submit(chan, tx, submit);
} else {
unsigned long xor_flags = flags;
enum async_tx_flags flags_orig = submit->flags;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
WARN_ONCE(device && src_cnt <= device->max_xor,
"%s: no space for dma address conversion\n",
__func__);
xor_flags |= ASYNC_TX_XOR_DROP_DST;
xor_flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_XOR_DROP_DST;
submit->flags &= ~ASYNC_TX_ACK;
tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
depend_tx, NULL, NULL);
tx = async_xor(dest, src_list, offset, src_cnt, len, submit);
async_tx_quiesce(&tx);
*result = page_is_zero(dest, offset, len) ? 0 : 1;
*result = !page_is_zero(dest, offset, len) << SUM_CHECK_P;
async_tx_sync_epilog(cb_fn, cb_param);
async_tx_sync_epilog(submit);
submit->flags = flags_orig;
}
return tx;
}
EXPORT_SYMBOL_GPL(async_xor_zero_sum);
static int __init async_xor_init(void)
{
#ifdef CONFIG_ASYNC_TX_DMA
/* To conserve stack space the input src_list (array of page pointers)
* is reused to hold the array of dma addresses passed to the driver.
* This conversion is only possible when dma_addr_t is less than the
* the size of a pointer. HIGHMEM64G is known to violate this
* assumption.
*/
BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
#endif
return 0;
}
static void __exit async_xor_exit(void)
{
do { } while (0);
}
module_init(async_xor_init);
module_exit(async_xor_exit);
EXPORT_SYMBOL_GPL(async_xor_val);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");

crypto/async_tx/raid6test.c (new file, 240 lines)

@@ -0,0 +1,240 @@
/*
* asynchronous raid6 recovery self test
* Copyright (c) 2009, Intel Corporation.
*
* based on drivers/md/raid6test/test.c:
* Copyright 2002-2007 H. Peter Anvin
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/async_tx.h>
#include <linux/random.h>
#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
#define NDISKS 16 /* Including P and Q */
static struct page *dataptrs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
static struct page *data[NDISKS+3];
static struct page *spare;
static struct page *recovi;
static struct page *recovj;
static void callback(void *param)
{
struct completion *cmp = param;
complete(cmp);
}
static void makedata(int disks)
{
int i, j;
for (i = 0; i < disks; i++) {
for (j = 0; j < PAGE_SIZE/sizeof(u32); j += sizeof(u32)) {
u32 *p = page_address(data[i]) + j;
*p = random32();
}
dataptrs[i] = data[i];
}
}
static char disk_type(int d, int disks)
{
if (d == disks - 2)
return 'P';
else if (d == disks - 1)
return 'Q';
else
return 'D';
}
/* Recover two failed blocks. */
static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs)
{
struct async_submit_ctl submit;
struct completion cmp;
struct dma_async_tx_descriptor *tx = NULL;
enum sum_check_flags result = ~0;
if (faila > failb)
swap(faila, failb);
if (failb == disks-1) {
if (faila == disks-2) {
/* P+Q failure. Just rebuild the syndrome. */
init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
} else {
struct page *blocks[disks];
struct page *dest;
int count = 0;
int i;
/* data+Q failure. Reconstruct data from P,
* then rebuild syndrome
*/
for (i = disks; i-- ; ) {
if (i == faila || i == failb)
continue;
blocks[count++] = ptrs[i];
}
dest = ptrs[faila];
init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
NULL, NULL, addr_conv);
tx = async_xor(dest, blocks, 0, count, bytes, &submit);
init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
}
} else {
if (failb == disks-2) {
/* data+P failure. */
init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit);
} else {
/* data+data failure. */
init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit);
}
}
init_completion(&cmp);
init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit);
async_tx_issue_pending(tx);
if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0)
pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",
__func__, faila, failb, disks);
if (result != 0)
pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n",
__func__, faila, failb, result);
}
static int test_disks(int i, int j, int disks)
{
int erra, errb;
memset(page_address(recovi), 0xf0, PAGE_SIZE);
memset(page_address(recovj), 0xba, PAGE_SIZE);
dataptrs[i] = recovi;
dataptrs[j] = recovj;
raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs);
erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);
pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n",
__func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks),
(!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB");
dataptrs[i] = data[i];
dataptrs[j] = data[j];
return erra || errb;
}
static int test(int disks, int *tests)
{
struct dma_async_tx_descriptor *tx;
struct async_submit_ctl submit;
struct completion cmp;
int err = 0;
int i, j;
recovi = data[disks];
recovj = data[disks+1];
spare = data[disks+2];
makedata(disks);
/* Nuke syndromes */
memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);
memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);
/* Generate assumed good syndrome */
init_completion(&cmp);
init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit);
async_tx_issue_pending(tx);
if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) {
pr("error: initial gen_syndrome(%d) timed out\n", disks);
return 1;
}
pr("testing the %d-disk case...\n", disks);
for (i = 0; i < disks-1; i++)
for (j = i+1; j < disks; j++) {
(*tests)++;
err += test_disks(i, j, disks);
}
return err;
}
static int raid6_test(void)
{
int err = 0;
int tests = 0;
int i;
for (i = 0; i < NDISKS+3; i++) {
data[i] = alloc_page(GFP_KERNEL);
if (!data[i]) {
while (i--)
put_page(data[i]);
return -ENOMEM;
}
}
/* the 4-disk and 5-disk cases are special for the recovery code */
if (NDISKS > 4)
err += test(4, &tests);
if (NDISKS > 5)
err += test(5, &tests);
err += test(NDISKS, &tests);
pr("\n");
pr("complete (%d tests, %d failure%s)\n",
tests, err, err == 1 ? "" : "s");
for (i = 0; i < NDISKS+3; i++)
put_page(data[i]);
return 0;
}
static void raid6_test_exit(void)
{
}
/* when compiled-in wait for drivers to load first (assumes dma drivers
* are also compiled-in)
*/
late_initcall(raid6_test);
module_exit(raid6_test_exit);
MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests");
MODULE_LICENSE("GPL");