SCSI fixes on 20190720

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "This is the final round of mostly small fixes in our initial submit.

  It's mostly minor fixes and driver updates. The only change of note is
  adding a virt_boundary_mask to the SCSI host and host template to
  parametrise this for NVMe devices instead of having them do a call in
  slave_alloc. It's a fairly straightforward conversion except in the
  two NVMe-handling drivers that didn't set it, which now have a virtual
  infinity parameter added"
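
The conversion is visible in the hunks below: scsi_host_template and Scsi_Host grow a virt_boundary_mask field, scsi_host_alloc() copies it from the template, and __scsi_init_queue() applies it to every request queue. A minimal before/after sketch for a hypothetical driver "foo" (the names are illustrative, not from these patches):

    /* Before: every scsi_device set the mask itself in ->slave_alloc. */
    static int foo_slave_alloc(struct scsi_device *sdev)
    {
            blk_queue_virt_boundary(sdev->request_queue, PAGE_SIZE - 1);
            return 0;
    }

    static struct scsi_host_template foo_sht = {
            .name        = "foo",
            .slave_alloc = foo_slave_alloc,
    };

    /* After: declared once on the template; the SCSI core propagates it
     * to the Scsi_Host and on to each device's request queue. */
    static struct scsi_host_template foo_sht = {
            .name               = "foo",
            .virt_boundary_mask = PAGE_SIZE - 1,
    };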

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (24 commits)
  scsi: megaraid_sas: set an unlimited max_segment_size
  scsi: mpt3sas: set an unlimited max_segment_size for SAS 3.0 HBAs
  scsi: IB/srp: set virt_boundary_mask in the scsi host
  scsi: IB/iser: set virt_boundary_mask in the scsi host
  scsi: storvsc: set virt_boundary_mask in the scsi host template
  scsi: ufshcd: set max_segment_size in the scsi host template
  scsi: core: take the DMA max mapping size into account
  scsi: core: add a host / host template field for the virt boundary
  scsi: core: Fix race on creating sense cache
  scsi: sd_zbc: Fix compilation warning
  scsi: libfc: fix null pointer dereference on a null lport
  scsi: zfcp: fix GCC compiler warning emitted with -Wmaybe-uninitialized
  scsi: zfcp: fix request object use-after-free in send path causing wrong traces
  scsi: zfcp: fix request object use-after-free in send path causing seqno errors
  scsi: megaraid_sas: Update driver version to 07.710.50.00
  scsi: megaraid_sas: Add module parameter for FW Async event logging
  scsi: megaraid_sas: Enable msix_load_balance for Invader and later controllers
  scsi: megaraid_sas: Fix calculation of target ID
  scsi: lpfc: reduce stack size with CONFIG_GCC_PLUGIN_STRUCTLEAK_VERBOSE
  scsi: devinfo: BLIST_TRY_VPD_PAGES for SanDisk Cruzer Blade
  ...
Linus Torvalds 2019-07-20 10:04:58 -07:00
commit f65420df91
20 changed files with 123 additions and 74 deletions

View File

@@ -611,6 +611,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
struct ib_device *ib_dev;
u32 max_fr_sectors;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
@@ -641,16 +642,19 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
}
ib_conn = &iser_conn->ib_conn;
ib_dev = ib_conn->device->ib_device;
if (ib_conn->pi_support) {
u32 sig_caps = ib_conn->device->ib_device->attrs.sig_prot_cap;
u32 sig_caps = ib_dev->attrs.sig_prot_cap;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
}
if (iscsi_host_add(shost,
ib_conn->device->ib_device->dev.parent)) {
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
shost->virt_boundary_mask = ~MASK_4K;
if (iscsi_host_add(shost, ib_dev->dev.parent)) {
mutex_unlock(&iser_conn->state_mutex);
goto free_host;
}
@@ -956,30 +960,6 @@ static umode_t iser_attr_is_visible(int param_type, int param)
return 0;
}
static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
{
struct iscsi_session *session;
struct iser_conn *iser_conn;
struct ib_device *ib_dev;
mutex_lock(&unbind_iser_conn_mutex);
session = starget_to_session(scsi_target(sdev))->dd_data;
iser_conn = session->leadconn->dd_data;
if (!iser_conn) {
mutex_unlock(&unbind_iser_conn_mutex);
return -ENOTCONN;
}
ib_dev = iser_conn->ib_conn.device->ib_device;
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
mutex_unlock(&unbind_iser_conn_mutex);
return 0;
}
static struct scsi_host_template iscsi_iser_sht = {
.module = THIS_MODULE,
.name = "iSCSI Initiator over iSER",
@@ -992,7 +972,6 @@ static struct scsi_host_template iscsi_iser_sht = {
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
.slave_alloc = iscsi_iser_slave_alloc,
.proc_name = "iscsi_iser",
.this_id = -1,
.track_queue_depth = 1,

View File

@@ -3046,20 +3046,6 @@ static int srp_target_alloc(struct scsi_target *starget)
return 0;
}
static int srp_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct srp_target_port *target = host_to_target(shost);
struct srp_device *srp_dev = target->srp_host->srp_dev;
struct ib_device *ibdev = srp_dev->dev;
if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
blk_queue_virt_boundary(sdev->request_queue,
~srp_dev->mr_page_mask);
return 0;
}
static int srp_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
@@ -3262,7 +3248,6 @@ static struct scsi_host_template srp_template = {
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
.target_alloc = srp_target_alloc,
.slave_alloc = srp_slave_alloc,
.slave_configure = srp_slave_configure,
.info = srp_target_info,
.queuecommand = srp_queuecommand,
@@ -3806,6 +3791,9 @@ static ssize_t srp_create_target(struct device *dev,
target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
target = host_to_target(target_host);
target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
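
For the two RDMA initiators the mask depends on device capabilities, so it cannot be a static template field; both drivers instead set it on the freshly allocated Scsi_Host before registering it. Condensed from the two hunks above (IB_DEVICE_SG_GAPS_REG means the HCA can register scatterlists with gaps, so no boundary is needed in that case):

    shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
    /* ... */
    if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
            shost->virt_boundary_mask = ~MASK_4K;
    if (iscsi_host_add(shost, ib_dev->dev.parent))
            goto free_host;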

View File

@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kthread.h>
#include <linux/bug.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
@@ -217,6 +218,12 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(enum zfcp_erp_act_type need,
struct zfcp_erp_action *erp_action;
struct zfcp_scsi_dev *zfcp_sdev;
if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
need != ZFCP_ERP_ACTION_REOPEN_PORT &&
need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
return NULL;
switch (need) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(sdev);
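
The -Wmaybe-uninitialized fix works by constraining the value range: without the guard, GCC has to assume `need` might match none of the switch cases, leaving the pointers the cases initialize apparently uninitialized on later paths. The guard proves to the compiler that only the four handled values ever reach the switch (a sketch of the shape, not the full function):

    if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
                     need != ZFCP_ERP_ACTION_REOPEN_PORT &&
                     need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
                     need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
            return NULL;  /* every surviving value hits a case below */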

View File

@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/blktrace_api.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
@@ -741,6 +742,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
int req_id = req->req_id;
@@ -757,8 +759,20 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
return -EIO;
}
/*
* NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
* ONLY TOUCH SYNC req AGAIN ON req->completion.
*
* The request might complete and be freed concurrently at any point
* now. This is not protected by the QDIO-lock (req_q_lock). So any
uncontrolled access after this might result in a use-after-free bug.
* Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
* when it is completed via req->completion, is it safe to use req
* again.
*/
/* Don't increase for unsolicited status */
if (!zfcp_fsf_req_is_status_read_buffer(req))
if (!is_srb)
adapter->fsf_req_seq_no++;
adapter->req_no++;
@@ -805,6 +819,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
retval = zfcp_fsf_req_send(req);
if (retval)
goto failed_req_send;
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
@@ -914,8 +929,10 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
req->qtcb->bottom.support.req_handle = (u64) old_req_id;
zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
if (!zfcp_fsf_req_send(req)) {
/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
goto out;
}
out_error_free:
zfcp_fsf_req_free(req);
@@ -1098,6 +1115,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
ret = zfcp_fsf_req_send(req);
if (ret)
goto failed_send;
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
@@ -1198,6 +1216,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
ret = zfcp_fsf_req_send(req);
if (ret)
goto failed_send;
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
@@ -1243,6 +1262,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1279,8 +1299,10 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
}
zfcp_fsf_req_free(req);
return retval;
@@ -1330,6 +1352,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1372,8 +1395,10 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
}
zfcp_fsf_req_free(req);
@@ -1493,6 +1518,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
erp_action->fsf_req_id = 0;
put_device(&port->dev);
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1557,6 +1583,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1600,6 +1627,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
unsigned long req_id = 0;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1622,14 +1650,17 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
req->data = wka_port;
req_id = req->req_id;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
if (retval)
zfcp_fsf_req_free(req);
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
return retval;
}
@@ -1655,6 +1686,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
unsigned long req_id = 0;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1677,14 +1709,17 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
req->data = wka_port;
req->qtcb->header.port_handle = wka_port->handle;
req_id = req->req_id;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
if (retval)
zfcp_fsf_req_free(req);
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
return retval;
}
@@ -1776,6 +1811,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1899,6 +1935,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1987,6 +2024,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -2299,6 +2337,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
retval = zfcp_fsf_req_send(req);
if (unlikely(retval))
goto failed_scsi_cmnd;
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
@@ -2373,8 +2412,10 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
if (!zfcp_fsf_req_send(req)) {
/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
goto out;
}
zfcp_fsf_req_free(req);
req = NULL;
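
These hunks all enforce one rule: after zfcp_fsf_req_send() the request may already have completed and been freed, so anything needed afterwards (the status-read-buffer flag, the request ID for the trace) is snapshotted into locals first. The open/close-WKA-port paths condense to this pattern:

    unsigned long req_id = 0;
    /* ... allocate and set up req ... */
    req_id = req->req_id;   /* snapshot before sending */
    retval = zfcp_fsf_req_send(req);
    if (retval)
            zfcp_fsf_req_free(req);
    /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
    if (!retval)
            zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);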

View File

@@ -462,6 +462,9 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else
shost->dma_boundary = 0xffffffff;
if (sht->virt_boundary_mask)
shost->virt_boundary_mask = sht->virt_boundary_mask;
device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;

View File

@@ -2591,7 +2591,7 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
/* lport lock ? */
if (!lport || lport->state == LPORT_ST_DISABLED) {
FC_LPORT_DBG(lport, "Receiving frames for an lport that "
FC_LIBFC_DBG("Receiving frames for an lport that "
"has not been initialized correctly\n");
fc_frame_free(fp);
return;

View File

@@ -414,7 +414,6 @@ static void sas_wait_eh(struct domain_device *dev)
goto retry;
}
}
EXPORT_SYMBOL(sas_wait_eh);
static int sas_queue_reset(struct domain_device *dev, int reset_type,
u64 lun, int wait)

View File

@@ -330,7 +330,7 @@ enum {
* This function dumps an entry indexed by @idx from a queue specified by the
* queue descriptor @q.
**/
static inline void
static void
lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
{
char line_buf[LPFC_LBUF_SZ];

View File

@@ -21,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
#define MEGASAS_VERSION "07.710.06.00-rc1"
#define MEGASAS_RELDATE "June 18, 2019"
#define MEGASAS_VERSION "07.710.50.00-rc1"
#define MEGASAS_RELDATE "June 28, 2019"
/*
* Device IDs

View File

@@ -105,6 +105,10 @@ MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:
"default mode is 'balanced'"
);
int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -280,7 +284,7 @@ void megasas_set_dma_settings(struct megasas_instance *instance,
}
}
void
static void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
instance->instancet->fire_cmd(instance,
@@ -404,7 +408,13 @@ megasas_decode_evt(struct megasas_instance *instance)
union megasas_evt_class_locale class_locale;
class_locale.word = le32_to_cpu(evt_detail->cl.word);
if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
(event_log_level > MFI_EVT_CLASS_DEAD)) {
printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
event_log_level = MFI_EVT_CLASS_CRITICAL;
}
if (class_locale.members.class >= event_log_level)
dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
le32_to_cpu(evt_detail->seq_num),
format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
@@ -2237,7 +2247,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
static void
process_fw_state_change_wq(struct work_struct *work);
void megasas_do_ocr(struct megasas_instance *instance)
static void megasas_do_ocr(struct megasas_instance *instance)
{
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
@@ -3303,7 +3313,7 @@ static DEVICE_ATTR_RO(fw_cmds_outstanding);
static DEVICE_ATTR_RO(dump_system_regs);
static DEVICE_ATTR_RO(raid_map_id);
struct device_attribute *megaraid_host_attrs[] = {
static struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
&dev_attr_fw_crash_buffer,
&dev_attr_fw_crash_state,
@@ -3334,6 +3344,7 @@ static struct scsi_host_template megasas_template = {
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
.no_write_same = 1,
};
@@ -5933,7 +5944,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
1 : 0;
if (!instance->msix_combined) {
if (instance->adapter_type >= INVADER_SERIES &&
!instance->msix_combined) {
instance->msix_load_balance = true;
instance->smp_affinity_enable = false;
}
@@ -6546,7 +6558,8 @@ megasas_get_target_prop(struct megasas_instance *instance,
int ret;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
u16 targetId = (sdev->channel % 2) + sdev->id;
u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
sdev->id;
cmd = megasas_get_cmd(instance);
@@ -8748,6 +8761,12 @@ static int __init megasas_init(void)
goto err_pcidrv;
}
if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
(event_log_level > MFI_EVT_CLASS_DEAD)) {
printk(KERN_WARNING "megarid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
event_log_level = MFI_EVT_CLASS_CRITICAL;
}
rval = driver_create_file(&megasas_pci_driver.driver,
&driver_attr_version);
if (rval)
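
The target-ID fix replaces a plain addition with a per-channel stride. Assuming MEGASAS_MAX_DEV_PER_CHANNEL is 128 (its usual value in megaraid_sas.h; stated here as an assumption), a device at channel 1, id 5 previously collided with channel 0, id 6:

    /* old: (1 % 2) + 5        =   6   (same ID as channel 0, id 6)  */
    /* new: (1 % 2) * 128 + 5  = 133   (unique across both channels) */
    u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
                   sdev->id;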

View File

@@ -10238,6 +10238,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.this_id = -1,
.sg_tablesize = MPT3SAS_SG_DEPTH,
.max_sectors = 32767,
.max_segment_size = 0xffffffff,
.cmd_per_lun = 7,
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,

View File

@@ -888,6 +888,8 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
dev, 1, 0);
while (pm8001_dev->running_req)
msleep(20);
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
@@ -1256,8 +1258,10 @@ int pm8001_abort_task(struct sas_task *task)
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("Waiting for Port reset\n"));
wait_for_completion(&completion_reset);
if (phy->port_reset_status)
if (phy->port_reset_status) {
pm8001_dev_gone_notify(dev);
goto out;
}
/*
* 4. SATA Abort ALL

View File

@@ -604,7 +604,7 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &=
0x0000ffff;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
0x140000;
CHIP_8006_PORT_RECOVERY_TIMEOUT;
}
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);

View File

@@ -230,6 +230,8 @@
#define SAS_MAX_AIP 0x200000
#define IT_NEXUS_TIMEOUT 0x7D0
#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30)
/* Port recovery timeout, 10000 ms for PM8006 controller */
#define CHIP_8006_PORT_RECOVERY_TIMEOUT 0x640000
#ifdef __LITTLE_ENDIAN_BITFIELD
struct sas_identify_frame_local {
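
The replacement constant only makes sense with the register layout in mind. Assuming the recovery timer occupies the upper 16 bits of the word and counts in units of 100 ms (inferred from the "10000 ms" comment above, not stated in this header):

    /* 0x640000 >> 16 = 0x64 = 100  ->  100 * 100 ms = 10000 ms (PM8006) */
    /* 0x140000 >> 16 = 0x14 =  20  ->   20 * 100 ms =  2000 ms (old)    */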

View File

@@ -239,6 +239,8 @@ static struct {
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES |
BLIST_INQUIRY_36},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */

View File

@@ -84,11 +84,11 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
struct kmem_cache *cache;
int ret = 0;
mutex_lock(&scsi_sense_cache_mutex);
cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
if (cache)
return 0;
goto exit;
mutex_lock(&scsi_sense_cache_mutex);
if (shost->unchecked_isa_dma) {
scsi_sense_isadma_cache =
kmem_cache_create("scsi_sense_cache(DMA)",
@@ -104,7 +104,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
if (!scsi_sense_cache)
ret = -ENOMEM;
}
exit:
mutex_unlock(&scsi_sense_cache_mutex);
return ret;
}
@@ -1452,7 +1452,7 @@ static void scsi_softirq_done(struct request *rq)
disposition = scsi_decide_disposition(cmd);
if (disposition != SUCCESS &&
time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
sdev_printk(KERN_ERR, cmd->device,
scmd_printk(KERN_ERR, cmd,
"timing out command, waited %lus\n",
wait_for/HZ);
disposition = SUCCESS;
@@ -1784,6 +1784,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
shost->max_sectors = min_t(unsigned int, shost->max_sectors,
dma_max_mapping_size(dev) << SECTOR_SHIFT);
blk_queue_max_hw_sectors(q, shost->max_sectors);
if (shost->unchecked_isa_dma)
blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
@@ -1791,7 +1793,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
dma_set_seg_boundary(dev, shost->dma_boundary);
blk_queue_max_segment_size(q, shost->max_segment_size);
dma_set_max_seg_size(dev, shost->max_segment_size);
blk_queue_virt_boundary(q, shost->virt_boundary_mask);
dma_set_max_seg_size(dev, queue_max_segment_size(q));
/*
* Set a reasonable default alignment: The larger of 32-byte (dword),
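
The sense-cache hunks close a classic check-then-act race: two hosts probing concurrently could both see a missing cache outside the mutex and both call kmem_cache_create() for the same name. Moving the check under scsi_sense_cache_mutex serializes it, which is why the early "return 0" becomes "goto exit" (the lock is now held and must be dropped). A condensed sketch of the fixed flow:

    mutex_lock(&scsi_sense_cache_mutex);
    cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
    if (cache)
            goto exit;      /* someone else already created it */
    /* ... kmem_cache_create(...) for the DMA or normal cache ... */
exit:
    mutex_unlock(&scsi_sense_cache_mutex);
    return ret;

The __scsi_init_queue() hunks additionally cap shost->max_sectors by dma_max_mapping_size(dev), so a single request can never exceed what the DMA layer (for example swiotlb) can map, and apply the new shost->virt_boundary_mask to the queue.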

View File

@@ -461,7 +461,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
struct gendisk *disk = sdkp->disk;
unsigned int nr_zones;
u32 zone_blocks;
u32 zone_blocks = 0;
int ret;
if (!sd_is_zoned(sdkp))

View File

@@ -1423,9 +1423,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
{
blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
/* Ensure there are no gaps in presented sgls */
blk_queue_virt_boundary(sdevice->request_queue, PAGE_SIZE - 1);
sdevice->no_write_same = 1;
/*
@@ -1698,6 +1695,8 @@ static struct scsi_host_template scsi_driver = {
.this_id = -1,
/* Make sure we don't get an sg segment that crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
/* Ensure there are no gaps in presented sgls */
.virt_boundary_mask = PAGE_SIZE-1,
.no_write_same = 1,
.track_queue_depth = 1,
.change_queue_depth = storvsc_change_queue_depth,

View File

@@ -4587,8 +4587,6 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
return 0;
}
@@ -7022,6 +7020,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,

View File

@@ -369,6 +369,8 @@ struct scsi_host_template {
*/
unsigned long dma_boundary;
unsigned long virt_boundary_mask;
/*
* This specifies "machine infinity" for host templates which don't
* limit the transfer size. Note this limit represents an absolute
@@ -587,6 +589,7 @@ struct Scsi_Host {
unsigned int max_sectors;
unsigned int max_segment_size;
unsigned long dma_boundary;
unsigned long virt_boundary_mask;
/*
* In scsi-mq mode, the number of hardware queues supported by the LLD.
*