Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (166 commits)
  [SCSI] ibmvscsi: convert to use the data buffer accessors
  [SCSI] dc395x: convert to use the data buffer accessors
  [SCSI] ncr53c8xx: convert to use the data buffer accessors
  [SCSI] sym53c8xx: convert to use the data buffer accessors
  [SCSI] ppa: coding police and printk levels
  [SCSI] aic7xxx_old: remove redundant GFP_ATOMIC from kmalloc
  [SCSI] i2o: remove redundant GFP_ATOMIC from kmalloc from device.c
  [SCSI] remove the dead CYBERSTORMIII_SCSI option
  [SCSI] don't build scsi_dma_{map,unmap} for !HAS_DMA
  [SCSI] Clean up scsi_add_lun a bit
  [SCSI] 53c700: Remove printk, which triggers because of low scsi clock on SNI RMs
  [SCSI] sni_53c710: Cleanup
  [SCSI] qla4xxx: Fix underrun/overrun conditions
  [SCSI] megaraid_mbox: use mutex instead of semaphore
  [SCSI] aacraid: add 51245, 51645 and 52245 adapters to documentation.
  [SCSI] qla2xxx: update version to 8.02.00-k1.
  [SCSI] qla2xxx: add support for NPIV
  [SCSI] stex: use resid for xfer len information
  [SCSI] Add Brownie 1200U3P to blacklist
  [SCSI] scsi.c: convert to use the data buffer accessors
  ...
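The "data buffer accessors" named throughout the shortlog are the scsi_cmnd helpers (scsi_sglist, scsi_sg_count, scsi_bufflen, scsi_dma_map/scsi_dma_unmap, scsi_for_each_sg) that replace direct use of request_buffer, request_bufflen and use_sg. A minimal sketch of the pattern, using the real accessor API but a hypothetical per-driver fill function (example_fill_desc and example_build_sg are not from this tree):

	#include <scsi/scsi_cmnd.h>

	/* Hypothetical hardware-specific work: program one descriptor. */
	static void example_fill_desc(int i, dma_addr_t addr, unsigned int len)
	{
		/* write addr/len into the controller's S/G list here */
	}

	/* Map a command's data and walk its scatter/gather list.
	 * scsi_dma_map() replaces the pci_map_sg()/pci_map_single() pair;
	 * it returns the mapped segment count, 0 for no data, or a
	 * negative value on failure. */
	static int example_build_sg(struct scsi_cmnd *SC)
	{
		struct scatterlist *sg;
		int i, nseg;

		nseg = scsi_dma_map(SC);
		if (nseg < 0)
			return -EIO;	/* could not map the buffer */

		scsi_for_each_sg(SC, sg, nseg, i)
			example_fill_desc(i, sg_dma_address(sg), sg_dma_len(sg));
		return 0;
	}

On completion the driver calls scsi_dma_unmap(SC); scsi_bufflen(SC) gives the total transfer length and scsi_set_resid() reports under/overruns (see the stex commit above). The ips.c hunks below are one full instance of this conversion.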
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -211,19 +211,6 @@ module_param(ips, charp, 0);
 #warning "This driver has only been tested on the x86/ia64/x86_64 platforms"
 #endif
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
-#include <linux/blk.h>
-#include "sd.h"
-#define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags)
-#define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags)
-#ifndef __devexit_p
-#define __devexit_p(x) x
-#endif
-#else
-#define IPS_LOCK_SAVE(lock,flags) do{spin_lock(lock);(void)flags;}while(0)
-#define IPS_UNLOCK_RESTORE(lock,flags) do{spin_unlock(lock);(void)flags;}while(0)
-#endif
-
 #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
                          DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
                          PCI_DMA_BIDIRECTIONAL : \
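A note on the deletion above: on post-2.5 kernels the removed #else branch shows that the lock wrappers were already plain spinlock operations with an ignored flags argument:

	#define IPS_LOCK_SAVE(lock,flags)       do{spin_lock(lock);(void)flags;}while(0)
	#define IPS_UNLOCK_RESTORE(lock,flags)  do{spin_unlock(lock);(void)flags;}while(0)

That is why the hunks below can replace IPS_LOCK_SAVE(host->host_lock, cpu_flags) with a bare spin_lock(host->host_lock) and drop the now-unused cpu_flags locals.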
@@ -381,24 +368,13 @@ static struct scsi_host_template ips_driver_template = {
 	.eh_abort_handler	= ips_eh_abort,
 	.eh_host_reset_handler	= ips_eh_reset,
 	.proc_name		= "ips",
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	.proc_info		= ips_proc_info,
 	.slave_configure	= ips_slave_configure,
-#else
-	.proc_info		= ips_proc24_info,
-	.select_queue_depths	= ips_select_queue_depth,
-#endif
 	.bios_param		= ips_biosparam,
 	.this_id		= -1,
 	.sg_tablesize		= IPS_MAX_SG,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-	.use_new_eh_code	= 1,
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-	.highmem_io		= 1,
-#endif
 };
 
 
@@ -731,7 +707,7 @@ ips_release(struct Scsi_Host *sh)
 	/* free IRQ */
 	free_irq(ha->irq, ha);
 
-	IPS_REMOVE_HOST(sh);
+	scsi_remove_host(sh);
 	scsi_host_put(sh);
 
 	ips_released_controllers++;
@@ -813,7 +789,6 @@ int ips_eh_abort(struct scsi_cmnd *SC)
 	ips_ha_t *ha;
 	ips_copp_wait_item_t *item;
 	int ret;
-	unsigned long cpu_flags;
 	struct Scsi_Host *host;
 
 	METHOD_TRACE("ips_eh_abort", 1);
@@ -830,7 +805,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
 	if (!ha->active)
 		return (FAILED);
 
-	IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+	spin_lock(host->host_lock);
 
 	/* See if the command is on the copp queue */
 	item = ha->copp_waitlist.head;
@@ -851,7 +826,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
 		ret = (FAILED);
 	}
 
-	IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+	spin_unlock(host->host_lock);
 	return ret;
 }
 
@@ -1129,7 +1104,7 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
 		/* A Reset IOCTL is only sent by the boot CD in extreme cases. */
 		/* There can never be any system activity ( network or disk ), but check */
 		/* anyway just as a good practice. */
-		pt = (ips_passthru_t *) SC->request_buffer;
+		pt = (ips_passthru_t *) scsi_sglist(SC);
 		if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
 		    (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
 			if (ha->scb_activelist.count != 0) {
@@ -1176,18 +1151,10 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
 /* Set bios geometry for the controller                                     */
 /*                                                                          */
 /****************************************************************************/
-static int
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-ips_biosparam(Disk * disk, kdev_t dev, int geom[])
-{
-	ips_ha_t *ha = (ips_ha_t *) disk->device->host->hostdata;
-	unsigned long capacity = disk->capacity;
-#else
-ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
-	      sector_t capacity, int geom[])
+static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+			 sector_t capacity, int geom[])
 {
 	ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
-#endif
 	int heads;
 	int sectors;
 	int cylinders;
@@ -1225,70 +1192,6 @@ ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
 	return (0);
 }
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-
-/* ips_proc24_info is a wrapper around ips_proc_info *
- * for compatibility with the 2.4 scsi parameters    */
-static int
-ips_proc24_info(char *buffer, char **start, off_t offset, int length,
-		int hostno, int func)
-{
-	int i;
-
-	for (i = 0; i < ips_next_controller; i++) {
-		if (ips_sh[i] && ips_sh[i]->host_no == hostno) {
-			return ips_proc_info(ips_sh[i], buffer, start,
-					     offset, length, func);
-		}
-	}
-	return -EINVAL;
-}
-
-/****************************************************************************/
-/*                                                                          */
-/* Routine Name: ips_select_queue_depth                                     */
-/*                                                                          */
-/* Routine Description:                                                     */
-/*                                                                          */
-/*   Select queue depths for the devices on the contoller                   */
-/*                                                                          */
-/****************************************************************************/
-static void
-ips_select_queue_depth(struct Scsi_Host *host, struct scsi_device * scsi_devs)
-{
-	struct scsi_device *device;
-	ips_ha_t *ha;
-	int count = 0;
-	int min;
-
-	ha = IPS_HA(host);
-	min = ha->max_cmds / 4;
-
-	for (device = scsi_devs; device; device = device->next) {
-		if (device->host == host) {
-			if ((device->channel == 0) && (device->type == 0))
-				count++;
-		}
-	}
-
-	for (device = scsi_devs; device; device = device->next) {
-		if (device->host == host) {
-			if ((device->channel == 0) && (device->type == 0)) {
-				device->queue_depth =
-				    (ha->max_cmds - 1) / count;
-				if (device->queue_depth < min)
-					device->queue_depth = min;
-			} else {
-				device->queue_depth = 2;
-			}
-
-			if (device->queue_depth < 2)
-				device->queue_depth = 2;
-		}
-	}
-}
-
-#else
 /****************************************************************************/
 /*                                                                          */
 /* Routine Name: ips_slave_configure                                        */
@@ -1316,7 +1219,6 @@ ips_slave_configure(struct scsi_device * SDptr)
 		SDptr->skip_ms_page_3f = 1;
 	return 0;
 }
-#endif
 
 /****************************************************************************/
 /*                                                                          */
@@ -1331,7 +1233,6 @@ static irqreturn_t
 do_ipsintr(int irq, void *dev_id)
 {
 	ips_ha_t *ha;
-	unsigned long cpu_flags;
 	struct Scsi_Host *host;
 	int irqstatus;
 
@@ -1347,16 +1248,16 @@ do_ipsintr(int irq, void *dev_id)
 		return IRQ_HANDLED;
 	}
 
-	IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+	spin_lock(host->host_lock);
 
 	if (!ha->active) {
-		IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+		spin_unlock(host->host_lock);
 		return IRQ_HANDLED;
 	}
 
 	irqstatus = (*ha->func.intr) (ha);
 
-	IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+	spin_unlock(host->host_lock);
 
 	/* start the next command */
 	ips_next(ha, IPS_INTR_ON);
@@ -1606,30 +1507,22 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
 	if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
 	    (SC->device->channel == 0) &&
 	    (SC->device->id == IPS_ADAPTER_ID) &&
-	    (SC->device->lun == 0) && SC->request_buffer) {
-		if ((!SC->use_sg) && SC->request_bufflen &&
-		    (((char *) SC->request_buffer)[0] == 'C') &&
-		    (((char *) SC->request_buffer)[1] == 'O') &&
-		    (((char *) SC->request_buffer)[2] == 'P') &&
-		    (((char *) SC->request_buffer)[3] == 'P'))
-			return 1;
-		else if (SC->use_sg) {
-			struct scatterlist *sg = SC->request_buffer;
-			char *buffer;
+	    (SC->device->lun == 0) && scsi_sglist(SC)) {
+		struct scatterlist *sg = scsi_sglist(SC);
+		char *buffer;
 
-			/* kmap_atomic() ensures addressability of the user buffer.*/
-			/* local_irq_save() protects the KM_IRQ0 address slot. */
-			local_irq_save(flags);
-			buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
-			if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
-			    buffer[2] == 'P' && buffer[3] == 'P') {
-				kunmap_atomic(buffer - sg->offset, KM_IRQ0);
-				local_irq_restore(flags);
-				return 1;
-			}
-			kunmap_atomic(buffer - sg->offset, KM_IRQ0);
-			local_irq_restore(flags);
-		}
+		/* kmap_atomic() ensures addressability of the user buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot. */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+		if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
+		    buffer[2] == 'P' && buffer[3] == 'P') {
+			kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+			local_irq_restore(flags);
+			return 1;
+		}
+		kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+		local_irq_restore(flags);
 	}
 	return 0;
 }
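The retained kmap branch above is the general pattern for peeking at a command's data from possibly-atomic context. A standalone sketch against the 2.6.23-era API (sg->page and the KM_IRQ0 slot predate sg_page() and the unslotted kmap_atomic()); example_sg_starts_with is a hypothetical helper, not from this tree, and it assumes the signature fits in the first segment:

	static int example_sg_starts_with(struct scsi_cmnd *SC,
					  const char *magic, int n)
	{
		struct scatterlist *sg = scsi_sglist(SC);
		unsigned long flags;
		char *buf;
		int rc;

		local_irq_save(flags);		/* protect the KM_IRQ0 slot */
		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
		rc = (memcmp(buf, magic, n) == 0);
		kunmap_atomic(buf - sg->offset, KM_IRQ0);
		local_irq_restore(flags);
		return rc;
	}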
@@ -1680,18 +1573,14 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
 {
 	ips_passthru_t *pt;
 	int length = 0;
-	int ret;
+	int i, ret;
+	struct scatterlist *sg = scsi_sglist(SC);
 
 	METHOD_TRACE("ips_make_passthru", 1);
 
-	if (!SC->use_sg) {
-		length = SC->request_bufflen;
-	} else {
-		struct scatterlist *sg = SC->request_buffer;
-		int i;
-		for (i = 0; i < SC->use_sg; i++)
-			length += sg[i].length;
-	}
+	scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
+		length += sg[i].length;
+
 	if (length < sizeof (ips_passthru_t)) {
 		/* wrong size */
 		DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
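Summing sg[i].length by hand is the long way of asking the midlayer for the transfer size; under the same accessors the total is also available directly, as the ips_rdcap() hunk further down uses:

	unsigned int length = scsi_bufflen(SC);	/* total data length set up by the midlayer */

The commit keeps the explicit walk, so treat the one-liner as an assumed equivalence for a fully set-up command, not as what the driver does here.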
@@ -2115,7 +2004,7 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
 
 	METHOD_TRACE("ips_cleanup_passthru", 1);
 
-	if ((!scb) || (!scb->scsi_cmd) || (!scb->scsi_cmd->request_buffer)) {
+	if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
 		DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
 			  ips_name, ha->host_num);
 
@@ -2730,7 +2619,6 @@ ips_next(ips_ha_t * ha, int intr)
 	struct scsi_cmnd *q;
 	ips_copp_wait_item_t *item;
 	int ret;
-	unsigned long cpu_flags = 0;
 	struct Scsi_Host *host;
 	METHOD_TRACE("ips_next", 1);
 
@@ -2742,7 +2630,7 @@ ips_next(ips_ha_t * ha, int intr)
 	 * this command won't time out
 	 */
 	if (intr == IPS_INTR_ON)
-		IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+		spin_lock(host->host_lock);
 
 	if ((ha->subsys->param[3] & 0x300000)
 	    && (ha->scb_activelist.count == 0)) {
@@ -2769,14 +2657,14 @@ ips_next(ips_ha_t * ha, int intr)
 		item = ips_removeq_copp_head(&ha->copp_waitlist);
 		ha->num_ioctl++;
 		if (intr == IPS_INTR_ON)
-			IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+			spin_unlock(host->host_lock);
 		scb->scsi_cmd = item->scsi_cmd;
 		kfree(item);
 
 		ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
 
 		if (intr == IPS_INTR_ON)
-			IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+			spin_lock(host->host_lock);
 		switch (ret) {
 		case IPS_FAILURE:
 			if (scb->scsi_cmd) {
@@ -2846,7 +2734,7 @@ ips_next(ips_ha_t * ha, int intr)
 		SC = ips_removeq_wait(&ha->scb_waitlist, q);
 
 		if (intr == IPS_INTR_ON)
-			IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);	/* Unlock HA after command is taken off queue */
+			spin_unlock(host->host_lock);	/* Unlock HA after command is taken off queue */
 
 		SC->result = DID_OK;
 		SC->host_scribble = NULL;
@@ -2866,41 +2754,26 @@ ips_next(ips_ha_t * ha, int intr)
 		/* copy in the CDB */
 		memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
 
-		/* Now handle the data buffer */
-		if (SC->use_sg) {
+		scb->sg_count = scsi_dma_map(SC);
+		BUG_ON(scb->sg_count < 0);
+		if (scb->sg_count) {
 			struct scatterlist *sg;
 			int i;
 
-			sg = SC->request_buffer;
-			scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg,
-						   SC->sc_data_direction);
 			scb->flags |= IPS_SCB_MAP_SG;
-			for (i = 0; i < scb->sg_count; i++) {
+
+			scsi_for_each_sg(SC, sg, scb->sg_count, i) {
 				if (ips_fill_scb_sg_single
-				    (ha, sg_dma_address(&sg[i]), scb, i,
-				     sg_dma_len(&sg[i])) < 0)
+				    (ha, sg_dma_address(sg), scb, i,
+				     sg_dma_len(sg)) < 0)
					break;
 			}
 			scb->dcdb.transfer_length = scb->data_len;
 		} else {
-			if (SC->request_bufflen) {
-				scb->data_busaddr =
-				    pci_map_single(ha->pcidev,
-						   SC->request_buffer,
-						   SC->request_bufflen,
-						   SC->sc_data_direction);
-				scb->flags |= IPS_SCB_MAP_SINGLE;
-				ips_fill_scb_sg_single(ha, scb->data_busaddr,
-						       scb, 0,
-						       SC->request_bufflen);
-				scb->dcdb.transfer_length = scb->data_len;
-			} else {
-				scb->data_busaddr = 0L;
-				scb->sg_len = 0;
-				scb->data_len = 0;
-				scb->dcdb.transfer_length = 0;
-			}
-
+			scb->data_busaddr = 0L;
+			scb->sg_len = 0;
+			scb->data_len = 0;
+			scb->dcdb.transfer_length = 0;
 		}
 
 		scb->dcdb.cmd_attribute =
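Every scsi_dma_map() needs a matching scsi_dma_unmap() on the teardown path; this commit keeps the IPS_SCB_MAP_SG flag so ips_freescb() (a later hunk) knows which commands were mapped. The pairing, condensed from the two hunks into one sketch using the driver's own names:

	scb->sg_count = scsi_dma_map(SC);	/* >0: mapped, 0: no data, <0: error */
	BUG_ON(scb->sg_count < 0);		/* the driver treats failure as a hard bug */
	if (scb->sg_count)
		scb->flags |= IPS_SCB_MAP_SG;

	/* ... later, when the scb is freed ... */
	if (scb->flags & IPS_SCB_MAP_SG)
		scsi_dma_unmap(scb->scsi_cmd);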
@@ -2919,7 +2792,7 @@ ips_next(ips_ha_t * ha, int intr)
 			scb->dcdb.transfer_length = 0;
 		}
 		if (intr == IPS_INTR_ON)
-			IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+			spin_lock(host->host_lock);
 
 		ret = ips_send_cmd(ha, scb);
 
@@ -2958,7 +2831,7 @@ ips_next(ips_ha_t * ha, int intr)
 	}			/* end while */
 
 	if (intr == IPS_INTR_ON)
-		IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+		spin_unlock(host->host_lock);
 }
 
 /****************************************************************************/
@@ -3377,52 +3250,32 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
 	 * the rest of the data and continue.
 	 */
 	if ((scb->breakup) || (scb->sg_break)) {
+		struct scatterlist *sg;
+		int sg_dma_index, ips_sg_index = 0;
+
 		/* we had a data breakup */
 		scb->data_len = 0;
 
-		if (scb->sg_count) {
-			/* S/G request */
-			struct scatterlist *sg;
-			int ips_sg_index = 0;
-			int sg_dma_index;
+		sg = scsi_sglist(scb->scsi_cmd);
 
-			sg = scb->scsi_cmd->request_buffer;
+		/* Spin forward to last dma chunk */
+		sg_dma_index = scb->breakup;
 
-			/* Spin forward to last dma chunk */
-			sg_dma_index = scb->breakup;
+		/* Take care of possible partial on last chunk */
+		ips_fill_scb_sg_single(ha,
+				       sg_dma_address(&sg[sg_dma_index]),
+				       scb, ips_sg_index++,
+				       sg_dma_len(&sg[sg_dma_index]));
 
-			/* Take care of possible partial on last chunk */
-			ips_fill_scb_sg_single(ha,
-					       sg_dma_address(&sg
							      [sg_dma_index]),
-					       scb, ips_sg_index++,
-					       sg_dma_len(&sg
							  [sg_dma_index]));
-
-			for (; sg_dma_index < scb->sg_count;
-			     sg_dma_index++) {
-				if (ips_fill_scb_sg_single
-				    (ha,
-				     sg_dma_address(&sg[sg_dma_index]),
-				     scb, ips_sg_index++,
-				     sg_dma_len(&sg[sg_dma_index])) < 0)
-					break;
-
-			}
-
-		} else {
-			/* Non S/G Request */
-			(void) ips_fill_scb_sg_single(ha,
-						      scb->
-						      data_busaddr +
-						      (scb->sg_break *
-						       ha->max_xfer),
-						      scb, 0,
-						      scb->scsi_cmd->
-						      request_bufflen -
-						      (scb->sg_break *
-						       ha->max_xfer));
-		}
+		for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
+		     sg_dma_index++) {
+			if (ips_fill_scb_sg_single
+			    (ha,
+			     sg_dma_address(&sg[sg_dma_index]),
+			     scb, ips_sg_index++,
+			     sg_dma_len(&sg[sg_dma_index])) < 0)
+				break;
+		}
 
 		scb->dcdb.transfer_length = scb->data_len;
 		scb->dcdb.cmd_attribute |=
@@ -3653,32 +3506,27 @@ ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
 static void
 ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
 {
-	if (scmd->use_sg) {
-		int i;
-		unsigned int min_cnt, xfer_cnt;
-		char *cdata = (char *) data;
-		unsigned char *buffer;
-		unsigned long flags;
-		struct scatterlist *sg = scmd->request_buffer;
-		for (i = 0, xfer_cnt = 0;
-		     (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-			min_cnt = min(count - xfer_cnt, sg[i].length);
+	int i;
+	unsigned int min_cnt, xfer_cnt;
+	char *cdata = (char *) data;
+	unsigned char *buffer;
+	unsigned long flags;
+	struct scatterlist *sg = scsi_sglist(scmd);
 
-			/* kmap_atomic() ensures addressability of the data buffer.*/
-			/* local_irq_save() protects the KM_IRQ0 address slot. */
-			local_irq_save(flags);
-			buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
-			memcpy(buffer, &cdata[xfer_cnt], min_cnt);
-			kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
-			local_irq_restore(flags);
+	for (i = 0, xfer_cnt = 0;
+	     (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+		min_cnt = min(count - xfer_cnt, sg[i].length);
 
-			xfer_cnt += min_cnt;
-		}
+		/* kmap_atomic() ensures addressability of the data buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot. */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		memcpy(buffer, &cdata[xfer_cnt], min_cnt);
+		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+		local_irq_restore(flags);
 
-	} else {
-		unsigned int min_cnt = min(count, scmd->request_bufflen);
-		memcpy(scmd->request_buffer, data, min_cnt);
-	}
+		xfer_cnt += min_cnt;
+	}
 }
 
 /****************************************************************************/
@@ -3691,32 +3539,27 @@ ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
 static void
 ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
 {
-	if (scmd->use_sg) {
-		int i;
-		unsigned int min_cnt, xfer_cnt;
-		char *cdata = (char *) data;
-		unsigned char *buffer;
-		unsigned long flags;
-		struct scatterlist *sg = scmd->request_buffer;
-		for (i = 0, xfer_cnt = 0;
-		     (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-			min_cnt = min(count - xfer_cnt, sg[i].length);
+	int i;
+	unsigned int min_cnt, xfer_cnt;
+	char *cdata = (char *) data;
+	unsigned char *buffer;
+	unsigned long flags;
+	struct scatterlist *sg = scsi_sglist(scmd);
 
-			/* kmap_atomic() ensures addressability of the data buffer.*/
-			/* local_irq_save() protects the KM_IRQ0 address slot. */
-			local_irq_save(flags);
-			buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
-			memcpy(&cdata[xfer_cnt], buffer, min_cnt);
-			kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
-			local_irq_restore(flags);
+	for (i = 0, xfer_cnt = 0;
+	     (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+		min_cnt = min(count - xfer_cnt, sg[i].length);
 
-			xfer_cnt += min_cnt;
-		}
+		/* kmap_atomic() ensures addressability of the data buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot. */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		memcpy(&cdata[xfer_cnt], buffer, min_cnt);
+		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+		local_irq_restore(flags);
 
-	} else {
-		unsigned int min_cnt = min(count, scmd->request_bufflen);
-		memcpy(data, scmd->request_buffer, min_cnt);
-	}
+		xfer_cnt += min_cnt;
+	}
 }
 
 /****************************************************************************/
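ips_scmd_buf_write() and ips_scmd_buf_read() are mirror images: both walk the scatterlist, bound each chunk with min(), and differ only in memcpy() direction. A direction-flag sketch of the shared shape (example_sg_copy is hypothetical and not in this commit; later kernels added scsi_sg_copy_from_buffer()/scsi_sg_copy_to_buffer() for this job):

	static void example_sg_copy(struct scsi_cmnd *scmd, void *data,
				    unsigned int count, int to_sg)
	{
		struct scatterlist *sg = scsi_sglist(scmd);
		char *cdata = data;
		unsigned int i, min_cnt, xfer_cnt = 0;
		unsigned long flags;
		char *buf;

		for (i = 0; i < scsi_sg_count(scmd) && xfer_cnt < count; i++) {
			min_cnt = min(count - xfer_cnt, sg[i].length);

			local_irq_save(flags);	/* protect the KM_IRQ0 slot */
			buf = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
			if (to_sg)
				memcpy(buf, cdata + xfer_cnt, min_cnt);
			else
				memcpy(cdata + xfer_cnt, buf, min_cnt);
			kunmap_atomic(buf - sg[i].offset, KM_IRQ0);
			local_irq_restore(flags);

			xfer_cnt += min_cnt;
		}
	}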
@@ -4350,7 +4193,7 @@ ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
 
 	METHOD_TRACE("ips_rdcap", 1);
 
-	if (scb->scsi_cmd->request_bufflen < 8)
+	if (scsi_bufflen(scb->scsi_cmd) < 8)
 		return (0);
 
 	cap.lba =
@@ -4735,8 +4578,7 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
 
 	METHOD_TRACE("ips_freescb", 1);
 	if (scb->flags & IPS_SCB_MAP_SG)
-		pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer,
-			     scb->scsi_cmd->use_sg, IPS_DMA_DIR(scb));
+		scsi_dma_unmap(scb->scsi_cmd);
 	else if (scb->flags & IPS_SCB_MAP_SINGLE)
 		pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
 				 IPS_DMA_DIR(scb));
@@ -7004,7 +6846,6 @@ ips_register_scsi(int index)
 		kfree(oldha);
 	ips_sh[index] = sh;
 	ips_ha[index] = ha;
-	IPS_SCSI_SET_DEVICE(sh, ha);
 
 	/* Store away needed values for later use */
 	sh->io_port = ha->io_addr;
@@ -7016,17 +6857,16 @@ ips_register_scsi(int index)
 	sh->cmd_per_lun = sh->hostt->cmd_per_lun;
 	sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma;
 	sh->use_clustering = sh->hostt->use_clustering;
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,7)
 	sh->max_sectors = 128;
-#endif
 
 	sh->max_id = ha->ntargets;
 	sh->max_lun = ha->nlun;
 	sh->max_channel = ha->nbus - 1;
 	sh->can_queue = ha->max_cmds - 1;
 
-	IPS_ADD_HOST(sh, NULL);
+	scsi_add_host(sh, NULL);
+	scsi_scan_host(sh);
 
 	return 0;
 }
@@ -7069,7 +6909,7 @@ ips_module_init(void)
 		return -ENODEV;
 	ips_driver_template.module = THIS_MODULE;
 	ips_order_controllers();
-	if (IPS_REGISTER_HOSTS(&ips_driver_template)) {
+	if (!ips_detect(&ips_driver_template)) {
 		pci_unregister_driver(&ips_pci_driver);
 		return -ENODEV;
 	}
@@ -7087,7 +6927,6 @@
 static void __exit
 ips_module_exit(void)
 {
-	IPS_UNREGISTER_HOSTS(&ips_driver_template);
 	pci_unregister_driver(&ips_pci_driver);
 	unregister_reboot_notifier(&ips_notifier);
 }
@@ -7436,15 +7275,9 @@ ips_init_phase2(int index)
 	return SUCCESS;
 }
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,9)
 MODULE_LICENSE("GPL");
-#endif
-
 MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
-
-#ifdef MODULE_VERSION
 MODULE_VERSION(IPS_VER_STRING);
-#endif
 
 
 /*