sata_mv fix mv_host_intr bug for hc_irq_cause
Remove the unwanted reads of hc_irq_cause from mv_host_intr(), thereby removing a bug whereby we were not always reading it when needed.

Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
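The shift arithmetic in the diff below depends on how the two cause registers lay out their per-port bits: main_irq_cause gives each port a DONE/ERR pair (two bits per port within its hc's field), while hc_irq_cause gives each port a DMA completion bit and a device-interrupt bit. For reference, a sketch of the assumed bit definitions, modelled on the sata_mv.c enums of this era (the values here are illustrative assumptions, not part of the patch):

	/* main_irq_cause: two bits per port p, at bit position (p * 2) within the hc field */
	#define DONE_IRQ	(1 << 0)	/* assumed: response/completion pending */
	#define ERR_IRQ		(1 << 1)	/* assumed: error pending */

	/* hc_irq_cause: one bit per port p in each group */
	#define DMA_IRQ		(1 << 0)	/* assumed: EDMA completion, shift by p */
	#define DEV_IRQ		(1 << 8)	/* assumed: device interrupt, shift by p */

	#define HC0_IRQ_PEND	0x1ff		/* assumed: mask for one hc's field in main_irq_cause */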
@@ -1818,48 +1818,61 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
 {
 	struct mv_host_priv *hpriv = host->private_data;
-	void __iomem *mmio = hpriv->base, *hc_mmio = NULL;
-	u32 hc_irq_cause = 0;
+	void __iomem *mmio = hpriv->base, *hc_mmio;
 	unsigned int handled = 0, port;
 
 	for (port = 0; port < hpriv->n_ports; port++) {
 		struct ata_port *ap = host->ports[port];
 		struct mv_port_priv *pp;
-		unsigned int shift, hardport, port_cause;
-		/*
-		 * When we move to the second hc, flag our cached
-		 * copies of hc_mmio (and hc_irq_cause) as invalid again.
-		 */
-		if (port == MV_PORTS_PER_HC)
-			hc_mmio = NULL;
-		/*
-		 * Do nothing if port is not interrupting or is disabled:
-		 */
+		unsigned int p, shift, hardport, port_cause;
+
 		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
-		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
-		if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
-			continue;
 		/*
-		 * Each hc within the host has its own hc_irq_cause register.
-		 * We defer reading it until we know we need it, right now:
-		 *
-		 * FIXME later: we don't really need to read this register
-		 * (some logic changes required below if we go that way),
-		 * because it doesn't tell us anything new. But we do need
-		 * to write to it, outside the top of this loop,
-		 * to reset the interrupt triggers for next time.
+		 * Each hc within the host has its own hc_irq_cause register,
+		 * where the interrupting ports bits get ack'd.
 		 */
-		if (!hc_mmio) {
+		if (hardport == 0) {	/* first port on this hc ? */
+			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
+			u32 port_mask, ack_irqs;
+			/*
+			 * Skip this entire hc if nothing pending for any ports
+			 */
+			if (!hc_cause) {
+				port += MV_PORTS_PER_HC - 1;
+				continue;
+			}
+			/*
+			 * We don't need/want to read the hc_irq_cause register,
+			 * because doing so hurts performance, and
+			 * main_irq_cause already gives us everything we need.
+			 *
+			 * But we do have to *write* to the hc_irq_cause to ack
+			 * the ports that we are handling this time through.
+			 *
+			 * This requires that we create a bitmap for those
+			 * ports which interrupted us, and use that bitmap
+			 * to ack (only) those ports via hc_irq_cause.
+			 */
+			ack_irqs = 0;
+			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
+				if ((port + p) >= hpriv->n_ports)
+					break;
+				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
+				if (hc_cause & port_mask)
+					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
+			}
 			hc_mmio = mv_hc_base_from_port(mmio, port);
-			hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-			writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
 			handled = 1;
 		}
+		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
+		if (!port_cause)
+			continue;
 		/*
 		 * Process completed CRPB response(s) before other events.
 		 */
 		pp = ap->private_data;
-		if (hc_irq_cause & (DMA_IRQ << hardport)) {
+		if (port_cause & DONE_IRQ) {
 			if (pp->pp_flags & MV_PP_FLAG_EDMA_EN)
 				mv_process_crpb_entries(ap, pp);
 		}
@@ -1868,15 +1881,15 @@ static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
 		 */
 		if (unlikely(port_cause & ERR_IRQ)) {
 			mv_err_intr(ap);
-		} else if (hc_irq_cause & (DEV_IRQ << hardport)) {
+		} else {
 			if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
 				struct ata_queued_cmd *qc = mv_get_active_qc(ap);
 				if (qc) {
 					ata_sff_host_intr(ap, qc);
 					continue;
 				}
+				mv_unexpected_intr(ap);
 			}
-			mv_unexpected_intr(ap);
 		}
 	}
 	return handled;
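For illustration, here is a minimal, self-contained user-space sketch of the ack-bitmap computation that the first hunk performs once per hc. mv_ack_bitmap() and the sample cause value are hypothetical, the constants repeat the assumptions above, and the in-kernel loop additionally stops once (port + p) reaches hpriv->n_ports:

	#include <stdio.h>

	#define MV_PORTS_PER_HC	4
	#define DONE_IRQ	(1 << 0)	/* main_irq_cause: shift by (p * 2) */
	#define ERR_IRQ		(1 << 1)	/* main_irq_cause: shift by (p * 2) */
	#define DMA_IRQ		(1 << 0)	/* hc_irq_cause: shift by p */
	#define DEV_IRQ		(1 << 8)	/* hc_irq_cause: shift by p */

	/*
	 * Hypothetical helper: translate one hc's DONE/ERR bits (as sampled
	 * from main_irq_cause) into the DMA/DEV ack bits of hc_irq_cause,
	 * so that only the interrupting ports get ack'd.
	 */
	static unsigned int mv_ack_bitmap(unsigned int hc_cause)
	{
		unsigned int p, port_mask, ack_irqs = 0;

		for (p = 0; p < MV_PORTS_PER_HC; ++p) {
			port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
			if (hc_cause & port_mask)
				ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
		}
		return ack_irqs;
	}

	int main(void)
	{
		/* Sample: ports 0 and 2 of this hc have completions pending. */
		unsigned int hc_cause = DONE_IRQ | (DONE_IRQ << (2 * 2));

		/* Prints ack_irqs = 0x505: DMA+DEV bits for ports 0 and 2 only. */
		printf("ack_irqs = 0x%x\n", mv_ack_bitmap(hc_cause));
		return 0;
	}

The driver then issues writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS). Writing the complement matches the write-zero-to-clear behaviour that the old writelfl(~hc_irq_cause, ...) already relied on: zeros land only on the bits being acknowledged, so a port that interrupts after main_irq_cause was sampled keeps its cause bits set for the next pass through the handler.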