ide: call ide_build_sglist() prior to ->dma_setup (v2)
* Re-map sg table if needed in ide_build_sglist().
* Move ide_build_sglist() call from ->dma_setup to its users.
* Un-export ide_build_sglist().

v2:
* Build fix for CONFIG_BLK_DEV_IDEDMA=n (noticed by Randy Dunlap).

There should be no functional changes caused by this patch.

Cc: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
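For quick orientation before the hunks: after this patch the callers' DMA setup path looks roughly like the sketch below (illustrative only, condensed from the ide_issue_pc() hunks; not verbatim from any single driver). The caller builds and maps the scatter/gather table itself via ide_build_sglist() and only then invokes ->dma_setup(); ide_build_sglist() in turn falls back to ide_map_sg() when dma_map_sg() maps no entries, so a failed mapping just disables DMA for the request.

	/* Sketch of the new calling convention (illustrative only):
	 * map the sg table first, then hand it to ->dma_setup(). */
	if (drive->dma) {
		if (ide_build_sglist(drive, rq))
			drive->dma = !hwif->dma_ops->dma_setup(drive);
		else
			drive->dma = 0;	/* mapping failed, fall back to PIO */
	}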
@@ -211,21 +211,16 @@ static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
 static int auide_build_dmatable(ide_drive_t *drive)
 {
-	int i, iswrite, count = 0;
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq = hwif->rq;
 	_auide_hwif *ahwif = &auide_hwif;
 	struct scatterlist *sg;
+	int i = hwif->sg_nents, iswrite, count = 0;
 
 	iswrite = (rq_data_dir(rq) == WRITE);
 	/* Save for interrupt context */
 	ahwif->drive = drive;
 
-	hwif->sg_nents = i = ide_build_sglist(drive, rq);
-
-	if (!i)
-		return 0;
-
 	/* fill the descriptors */
 	sg = hwif->sg_table;
 	while (i && sg_dma_len(sg)) {
@@ -325,12 +325,6 @@ static int icside_dma_setup(ide_drive_t *drive)
 	 */
 	BUG_ON(dma_channel_active(ec->dma));
 
-	hwif->sg_nents = ide_build_sglist(drive, rq);
-	if (hwif->sg_nents == 0) {
-		ide_map_sg(drive, rq);
-		return 1;
-	}
-
 	/*
 	 * Ensure that we have the right interrupt routed.
 	 */
@@ -631,18 +631,23 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive)
 	struct ide_atapi_pc *pc;
 	ide_hwif_t *hwif = drive->hwif;
 	ide_expiry_t *expiry = NULL;
+	struct request *rq = hwif->rq;
 	unsigned int timeout;
 	u32 tf_flags;
 	u16 bcount;
 
 	if (dev_is_idecd(drive)) {
 		tf_flags = IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL;
-		bcount = ide_cd_get_xferlen(hwif->rq);
+		bcount = ide_cd_get_xferlen(rq);
 		expiry = ide_cd_expiry;
 		timeout = ATAPI_WAIT_PC;
 
-		if (drive->dma)
-			drive->dma = !hwif->dma_ops->dma_setup(drive);
+		if (drive->dma) {
+			if (ide_build_sglist(drive, rq))
+				drive->dma = !hwif->dma_ops->dma_setup(drive);
+			else
+				drive->dma = 0;
+		}
 	} else {
 		pc = drive->pc;
 
@@ -661,8 +666,12 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive)
 	}
 
 	if ((pc->flags & PC_FLAG_DMA_OK) &&
-	    (drive->dev_flags & IDE_DFLAG_USING_DMA))
-		drive->dma = !hwif->dma_ops->dma_setup(drive);
+	    (drive->dev_flags & IDE_DFLAG_USING_DMA)) {
+		if (ide_build_sglist(drive, rq))
+			drive->dma = !hwif->dma_ops->dma_setup(drive);
+		else
+			drive->dma = 0;
+	}
 
 	if (!drive->dma)
 		pc->flags &= ~PC_FLAG_DMA_OK;
@@ -120,10 +120,6 @@ int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 	struct scatterlist *sg;
 	u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);
 
-	hwif->sg_nents = ide_build_sglist(drive, rq);
-	if (hwif->sg_nents == 0)
-		return 0;
-
 	for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i) {
 		u32 cur_addr, cur_len, xcount, bcount;
 
@@ -138,14 +138,15 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 		hwif->sg_dma_direction = DMA_TO_DEVICE;
 
 	i = dma_map_sg(hwif->dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
-	if (i) {
+	if (i == 0)
+		ide_map_sg(drive, rq);
+	else {
 		hwif->orig_sg_nents = hwif->sg_nents;
 		hwif->sg_nents = i;
 	}
 
 	return i;
 }
-EXPORT_SYMBOL_GPL(ide_build_sglist);
 
 /**
  * ide_destroy_dmatable - clean up DMA mapping
@@ -103,6 +103,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
 		return ide_started;
 	default:
 		if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
+		    ide_build_sglist(drive, hwif->rq) == 0 ||
 		    dma_ops->dma_setup(drive))
 			return ide_stopped;
 		dma_ops->dma_exec_cmd(drive, tf->command);
@@ -1429,10 +1429,10 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 	pmac_ide_hwif_t *pmif =
 		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
 	struct dbdma_cmd *table;
-	int i, count = 0;
 	volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
 	struct scatterlist *sg;
 	int wr = (rq_data_dir(rq) == WRITE);
+	int i = hwif->sg_nents, count = 0;
 
 	/* DMA table is already aligned */
 	table = (struct dbdma_cmd *) pmif->dma_table_cpu;
@@ -1442,11 +1442,6 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 	while (readl(&dma->status) & RUN)
 		udelay(1);
 
-	hwif->sg_nents = i = ide_build_sglist(drive, rq);
-
-	if (!i)
-		return 0;
-
 	/* Build DBDMA commands list */
 	sg = hwif->sg_table;
 	while (i && sg_dma_len(sg)) {
@@ -429,15 +429,9 @@ sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	unsigned int *table = hwif->dmatable_cpu;
-	unsigned int count = 0, i = 1;
-	struct scatterlist *sg;
+	unsigned int count = 0, i = hwif->sg_nents;
+	struct scatterlist *sg = hwif->sg_table;
 
-	hwif->sg_nents = i = ide_build_sglist(drive, rq);
-
-	if (!i)
-		return 0;	/* sglist of length Zero */
-
-	sg = hwif->sg_table;
 	while (i && sg_dma_len(sg)) {
 		dma_addr_t cur_addr;
 		int cur_len;
@@ -240,10 +240,6 @@ static int tx4939ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 	int i;
 	struct scatterlist *sg;
 
-	hwif->sg_nents = ide_build_sglist(drive, rq);
-	if (hwif->sg_nents == 0)
-		return 0;
-
 	for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i) {
 		u32 cur_addr, cur_len, bcount;
 
@@ -1477,6 +1477,8 @@ static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
 static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
 static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
 static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
+static inline int ide_build_sglist(ide_drive_t *drive,
+				   struct request *rq) { return 0; }
 #endif /* CONFIG_BLK_DEV_IDEDMA */
 
 #ifdef CONFIG_BLK_DEV_IDEACPI