Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Add support for T10 PI pass-through between vhost-scsi +
     virtio-scsi (MST + Paolo + MKP + nab)
   - Add support for T10 PI in qla2xxx target mode (Quinn + MKP + hch +
     nab, merged through scsi.git)
   - Add support for percpu-ida pre-allocation in qla2xxx target code
     (Quinn + nab)
   - A number of iser-target fixes related to hardening the network
     portal shutdown path (Sagi + Slava)
   - Fix response length residual handling for a number of control CDBs
     (Roland + Christophe V.)
   - Various iscsi RFC conformance fixes in the CHAP authentication path
     (Tejas and Calsoft folks + nab)
   - Return TASK_SET_FULL status for tcm_fc(FCoE) DataIn + Response
     failures (Vasu + Jun + nab)
   - Fix long-standing ABORT_TASK + session reset hang (nab)
   - Convert iser-initiator + iser-target to include T10 bytes into EDTL
     (Sagi + Or + MKP + Mike Christie)
   - Fix NULL pointer dereference regression related to XCOPY introduced
     in v3.15 + CC'ed to v3.12.y (nab)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (34 commits)
  target: Fix NULL pointer dereference for XCOPY in target_put_sess_cmd
  vhost-scsi: Include prot_bytes into expected data transfer length
  TARGET/sbc,loopback: Adjust command data length in case pi exists on the wire
  libiscsi, iser: Adjust data_length to include protection information
  scsi_cmnd: Introduce scsi_transfer_length helper
  target: Report correct response length for some commands
  target/sbc: Check that the LBA and number of blocks are correct in VERIFY
  target/sbc: Remove sbc_check_valid_sectors()
  Target/iscsi: Fix sendtargets response pdu for iser transport
  Target/iser: Fix a wrong dereference in case discovery session is over iser
  iscsi-target: Fix ABORT_TASK + connection reset iscsi_queue_req memory leak
  target: Use complete_all for se_cmd->t_transport_stop_comp
  target: Set CMD_T_ACTIVE bit for Task Management Requests
  target: cleanup some boolean tests
  target/spc: Simplify INQUIRY EVPD=0x80
  tcm_fc: Generate TASK_SET_FULL status for response failures
  tcm_fc: Generate TASK_SET_FULL status for DataIN failures
  iscsi-target: Reject mutual authentication with reflected CHAP_C
  iscsi-target: Remove no-op from iscsit_tpg_del_portal_group
  iscsi-target: Fix CHAP_A parameter list handling
  ...
Linus Torvalds
2014-06-12 22:38:32 -07:00
30 changed files with 732 additions and 368 deletions

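The thread running through the PI-related changes below is that once protection information (PI) travels on the wire, every layer's idea of the expected data transfer length has to grow by one 8-byte DIF tuple per logical block. A minimal sketch of that calculation, in the spirit of the scsi_transfer_length() helper this series introduces (hypothetical helper name; simplified, not the exact mainline code):

#include <linux/log2.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* Sketch: expected transfer length of @scmd as seen on the wire. */
static inline unsigned int wire_transfer_length(struct scsi_cmnd *scmd)
{
	unsigned int xfer_len = scsi_bufflen(scmd);
	unsigned int sector_size = scmd->device->sector_size;

	switch (scsi_get_prot_op(scmd)) {
	case SCSI_PROT_NORMAL:		/* no protection information */
	case SCSI_PROT_READ_STRIP:	/* PI generated/stripped below this layer */
	case SCSI_PROT_WRITE_INSERT:
		return xfer_len;
	default:
		break;
	}

	/* PI rides along with the data: one 8-byte tuple per logical block. */
	return xfer_len + (xfer_len >> ilog2(sector_size)) * 8;
}
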
@@ -1773,6 +1773,7 @@ config SCSI_BFA_FC
 config SCSI_VIRTIO
 	tristate "virtio-scsi support"
 	depends on VIRTIO
+	select BLK_DEV_INTEGRITY
 	help
 	  This is the virtual HBA driver for virtio. If the kernel will
 	  be used in a virtual machine, say Y or M.

@@ -338,7 +338,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	struct iscsi_session *session = conn->session;
 	struct scsi_cmnd *sc = task->sc;
 	struct iscsi_scsi_req *hdr;
-	unsigned hdrlength, cmd_len;
+	unsigned hdrlength, cmd_len, transfer_length;
 	itt_t itt;
 	int rc;
@@ -391,11 +391,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
 		task->protected = true;
+	transfer_length = scsi_transfer_length(sc);
+	hdr->data_length = cpu_to_be32(transfer_length);
 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
-		unsigned out_len = scsi_out(sc)->length;
 		struct iscsi_r2t_info *r2t = &task->unsol_r2t;
-		hdr->data_length = cpu_to_be32(out_len);
 		hdr->flags |= ISCSI_FLAG_CMD_WRITE;
 		/*
 		 * Write counters:
@@ -414,18 +414,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 		memset(r2t, 0, sizeof(*r2t));
 		if (session->imm_data_en) {
-			if (out_len >= session->first_burst)
+			if (transfer_length >= session->first_burst)
 				task->imm_count = min(session->first_burst,
 						      conn->max_xmit_dlength);
 			else
-				task->imm_count = min(out_len,
-						      conn->max_xmit_dlength);
+				task->imm_count = min(transfer_length,
+						      conn->max_xmit_dlength);
 			hton24(hdr->dlength, task->imm_count);
 		} else
 			zero_data(hdr->dlength);
 		if (!session->initial_r2t_en) {
-			r2t->data_length = min(session->first_burst, out_len) -
+			r2t->data_length = min(session->first_burst,
+					       transfer_length) -
 				task->imm_count;
 			r2t->data_offset = task->imm_count;
 			r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
@@ -438,7 +439,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	} else {
 		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
 		zero_data(hdr->dlength);
-		hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
 		if (sc->sc_data_direction == DMA_FROM_DEVICE)
 			hdr->flags |= ISCSI_FLAG_CMD_READ;
@@ -466,7 +466,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 		  scsi_bidi_cmnd(sc) ? "bidirectional" :
 		  sc->sc_data_direction == DMA_TO_DEVICE ?
 		  "write" : "read", conn->id, sc, sc->cmnd[0],
-		  task->itt, scsi_bufflen(sc),
+		  task->itt, transfer_length,
 		  scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
 		  session->cmdsn,
 		  session->max_cmdsn - session->exp_cmdsn + 1);

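The effect of the libiscsi change above on the iSCSI expected data transfer length (EDTL) is easiest to see with concrete, made-up numbers:

	/* Hypothetical example: a 10-block WRITE to a LUN with 512-byte
	 * sectors and PI carried on the wire.  The EDTL used to cover the
	 * data bytes only; it now also covers the 8-byte DIF tuples.
	 */
	unsigned int blocks = 10, sector_size = 512;
	unsigned int old_edtl = blocks * sector_size;		/* 5120 bytes */
	unsigned int new_edtl = blocks * (sector_size + 8);	/* 5200 bytes */

The same transfer_length value also feeds the immediate-data and unsolicited R2T bookkeeping above, so the initiator and an iSER target agree on how many bytes are outstanding.
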
@@ -104,7 +104,6 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
 /*
  * Global Variables
  */
-static struct kmem_cache *qla_tgt_cmd_cachep;
 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
 static mempool_t *qla_tgt_mgmt_cmd_mempool;
 static struct workqueue_struct *qla_tgt_wq;
@@ -2705,6 +2704,8 @@ done:
 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 {
+	struct qla_tgt_sess *sess = cmd->sess;
 	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
 	    "%s: se_cmd[%p] ox_id %04x\n",
 	    __func__, &cmd->se_cmd,
@@ -2713,7 +2714,12 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 	BUG_ON(cmd->sg_mapped);
 	if (unlikely(cmd->free_sg))
 		kfree(cmd->sg);
-	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+	if (!sess || !sess->se_sess) {
+		WARN_ON(1);
+		return;
+	}
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
 }
 EXPORT_SYMBOL(qlt_free_cmd);
@@ -3075,13 +3081,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
 /*
  * Process context for I/O path into tcm_qla2xxx code
  */
-static void qlt_do_work(struct work_struct *work)
+static void __qlt_do_work(struct qla_tgt_cmd *cmd)
 {
-	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
 	scsi_qla_host_t *vha = cmd->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
-	struct qla_tgt_sess *sess = NULL;
+	struct qla_tgt_sess *sess = cmd->sess;
 	struct atio_from_isp *atio = &cmd->atio;
 	unsigned char *cdb;
 	unsigned long flags;
@@ -3091,41 +3096,6 @@ static void qlt_do_work(struct work_struct *work)
 	if (tgt->tgt_stop)
 		goto out_term;
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
-	    atio->u.isp24.fcp_hdr.s_id);
-	/* Do kref_get() before dropping qla_hw_data->hardware_lock. */
-	if (sess)
-		kref_get(&sess->se_sess->sess_kref);
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	if (unlikely(!sess)) {
-		uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
-		    "qla_target(%d): Unable to find wwn login"
-		    " (s_id %x:%x:%x), trying to create it manually\n",
-		    vha->vp_idx, s_id[0], s_id[1], s_id[2]);
-		if (atio->u.raw.entry_count > 1) {
-			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
-			    "Dropping multy entry cmd %p\n", cmd);
-			goto out_term;
-		}
-		mutex_lock(&vha->vha_tgt.tgt_mutex);
-		sess = qlt_make_local_sess(vha, s_id);
-		/* sess has an extra creation ref. */
-		mutex_unlock(&vha->vha_tgt.tgt_mutex);
-		if (!sess)
-			goto out_term;
-	}
-	cmd->sess = sess;
-	cmd->loop_id = sess->loop_id;
-	cmd->conf_compl_supported = sess->conf_compl_supported;
 	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
 	cmd->tag = atio->u.isp24.exchange_addr;
 	cmd->unpacked_lun = scsilun_to_int(
@@ -3153,8 +3123,8 @@ static void qlt_do_work(struct work_struct *work)
 	    cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
 	    cmd->atio.u.isp24.fcp_hdr.ox_id);
-	ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
-	    fcp_task_attr, data_dir, bidi);
+	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+	    fcp_task_attr, data_dir, bidi);
 	if (ret != 0)
 		goto out_term;
 	/*
@@ -3173,17 +3143,114 @@ out_term:
 	 */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
-	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
-	if (sess)
-		ha->tgt.tgt_ops->put_sess(sess);
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+	ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
+static void qlt_do_work(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+	__qlt_do_work(cmd);
+}
+static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
+	struct qla_tgt_sess *sess,
+	struct atio_from_isp *atio)
+{
+	struct se_session *se_sess = sess->se_sess;
+	struct qla_tgt_cmd *cmd;
+	int tag;
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0)
+		return NULL;
+	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+	memcpy(&cmd->atio, atio, sizeof(*atio));
+	cmd->state = QLA_TGT_STATE_NEW;
+	cmd->tgt = vha->vha_tgt.qla_tgt;
+	cmd->vha = vha;
+	cmd->se_cmd.map_tag = tag;
+	cmd->sess = sess;
+	cmd->loop_id = sess->loop_id;
+	cmd->conf_compl_supported = sess->conf_compl_supported;
+	return cmd;
+}
+static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
+	uint16_t);
+static void qlt_create_sess_from_atio(struct work_struct *work)
+{
+	struct qla_tgt_sess_op *op = container_of(work,
+					struct qla_tgt_sess_op, work);
+	scsi_qla_host_t *vha = op->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess;
+	struct qla_tgt_cmd *cmd;
+	unsigned long flags;
+	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+	    "qla_target(%d): Unable to find wwn login"
+	    " (s_id %x:%x:%x), trying to create it manually\n",
+	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+	if (op->atio.u.raw.entry_count > 1) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+		    "Dropping multy entry atio %p\n", &op->atio);
+		goto out_term;
+	}
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
+	sess = qlt_make_local_sess(vha, s_id);
+	/* sess has an extra creation ref. */
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
+	if (!sess)
+		goto out_term;
+	/*
+	 * Now obtain a pre-allocated session tag using the original op->atio
+	 * packet header, and dispatch into __qlt_do_work() using the existing
+	 * process context.
+	 */
+	cmd = qlt_get_tag(vha, sess, &op->atio);
+	if (!cmd) {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
+		ha->tgt.tgt_ops->put_sess(sess);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		kfree(op);
+		return;
+	}
+	/*
+	 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
+	 * the extra reference taken above by qlt_make_local_sess()
+	 */
+	__qlt_do_work(cmd);
+	kfree(op);
+	return;
+out_term:
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qlt_send_term_exchange(vha, NULL, &op->atio, 1);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	kfree(op);
+}
 /* ha->hardware_lock supposed to be held on entry */
 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio)
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct qla_tgt_sess *sess;
 	struct qla_tgt_cmd *cmd;
 	if (unlikely(tgt->tgt_stop)) {
@@ -3192,18 +3259,31 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 		return -EFAULT;
 	}
-	cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+	if (unlikely(!sess)) {
+		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
+						     GFP_ATOMIC);
+		if (!op)
+			return -ENOMEM;
+		memcpy(&op->atio, atio, sizeof(*atio));
+		INIT_WORK(&op->work, qlt_create_sess_from_atio);
+		queue_work(qla_tgt_wq, &op->work);
+		return 0;
+	}
+	/*
+	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
+	 */
+	kref_get(&sess->se_sess->sess_kref);
+	cmd = qlt_get_tag(vha, sess, atio);
 	if (!cmd) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
 		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+		ha->tgt.tgt_ops->put_sess(sess);
 		return -ENOMEM;
 	}
-	memcpy(&cmd->atio, atio, sizeof(*atio));
-	cmd->state = QLA_TGT_STATE_NEW;
-	cmd->tgt = vha->vha_tgt.qla_tgt;
-	cmd->vha = vha;
 	INIT_WORK(&cmd->work, qlt_do_work);
 	queue_work(qla_tgt_wq, &cmd->work);
 	return 0;
@@ -5501,23 +5581,13 @@ int __init qlt_init(void)
 	if (!QLA_TGT_MODE_ENABLED())
 		return 0;
-	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
-	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
-	    NULL);
-	if (!qla_tgt_cmd_cachep) {
-		ql_log(ql_log_fatal, NULL, 0xe06c,
-		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
-		return -ENOMEM;
-	}
 	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
 	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
 	    qla_tgt_mgmt_cmd), 0, NULL);
 	if (!qla_tgt_mgmt_cmd_cachep) {
 		ql_log(ql_log_fatal, NULL, 0xe06d,
 		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
-		ret = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
@@ -5545,8 +5615,6 @@ out_cmd_mempool:
 	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
 out_mgmt_cmd_cachep:
 	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
-out:
-	kmem_cache_destroy(qla_tgt_cmd_cachep);
 	return ret;
 }
@@ -5558,5 +5626,4 @@ void qlt_exit(void)
 	destroy_workqueue(qla_tgt_wq);
 	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
 	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
-	kmem_cache_destroy(qla_tgt_cmd_cachep);
 }

@@ -870,6 +870,12 @@ struct qla_tgt {
 	struct list_head tgt_list_entry;
 };
+struct qla_tgt_sess_op {
+	struct scsi_qla_host *vha;
+	struct atio_from_isp atio;
+	struct work_struct work;
+};
 /*
  * Equivilant to IT Nexus (Initiator-Target)
  */

@@ -1501,6 +1501,8 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	struct qla_tgt_sess *sess = qla_tgt_sess;
 	unsigned char port_name[36];
 	unsigned long flags;
+	int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
+		       TCM_QLA2XXX_DEFAULT_TAGS;
 	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
@@ -1518,7 +1520,9 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	}
 	se_tpg = &tpg->se_tpg;
-	se_sess = transport_init_session(TARGET_PROT_NORMAL);
+	se_sess = transport_init_session_tags(num_tags,
+					      sizeof(struct qla_tgt_cmd),
+					      TARGET_PROT_NORMAL);
 	if (IS_ERR(se_sess)) {
 		pr_err("Unable to initialize struct se_session\n");
 		return PTR_ERR(se_sess);

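For reference, the tag pre-allocation that transport_init_session_tags() performs on the driver's behalf looks roughly like the following (simplified sketch; the real target core adds error handling and allocation fallbacks). The per-command slots it creates are what qlt_get_tag() above indexes with a percpu_ida tag:

	/* Roughly: one descriptor slot of tag_size bytes per tag, plus a
	 * percpu_ida pool handing out the tag numbers.
	 */
	se_sess->sess_cmd_map = vzalloc(num_tags * tag_size);
	if (!se_sess->sess_cmd_map)
		return ERR_PTR(-ENOMEM);

	rc = percpu_ida_init(&se_sess->sess_tag_pool, num_tags);
	if (rc < 0) {
		vfree(se_sess->sess_cmd_map);
		return ERR_PTR(rc);
	}

With num_tags derived from ha->fw_xcb_count (falling back to TCM_QLA2XXX_DEFAULT_TAGS), the pool is sized to the worst-case number of outstanding exchanges the firmware will hand up.
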
@@ -4,6 +4,11 @@
 #define TCM_QLA2XXX_VERSION	"v0.1"
 /* length of ASCII WWPNs including pad */
 #define TCM_QLA2XXX_NAMELEN	32
+/*
+ * Number of pre-allocated per-session tags, based upon the worst-case
+ * per port number of iocbs
+ */
+#define TCM_QLA2XXX_DEFAULT_TAGS 2088
 #include "qla_target.h"

@@ -23,6 +23,7 @@
 #include <linux/virtio_config.h>
 #include <linux/virtio_scsi.h>
 #include <linux/cpu.h>
+#include <linux/blkdev.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
@@ -37,6 +38,7 @@ struct virtio_scsi_cmd {
 	struct completion *comp;
 	union {
 		struct virtio_scsi_cmd_req      cmd;
+		struct virtio_scsi_cmd_req_pi   cmd_pi;
 		struct virtio_scsi_ctrl_tmf_req tmf;
 		struct virtio_scsi_ctrl_an_req  an;
 	} req;
@@ -399,7 +401,7 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
 			    size_t req_size, size_t resp_size)
 {
 	struct scsi_cmnd *sc = cmd->sc;
-	struct scatterlist *sgs[4], req, resp;
+	struct scatterlist *sgs[6], req, resp;
 	struct sg_table *out, *in;
 	unsigned out_num = 0, in_num = 0;
@@ -417,16 +419,24 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
 	sgs[out_num++] = &req;
 	/* Data-out buffer. */
-	if (out)
+	if (out) {
+		/* Place WRITE protection SGLs before Data OUT payload */
+		if (scsi_prot_sg_count(sc))
+			sgs[out_num++] = scsi_prot_sglist(sc);
 		sgs[out_num++] = out->sgl;
+	}
 	/* Response header. */
 	sg_init_one(&resp, &cmd->resp, resp_size);
 	sgs[out_num + in_num++] = &resp;
 	/* Data-in buffer */
-	if (in)
+	if (in) {
+		/* Place READ protection SGLs before Data IN payload */
+		if (scsi_prot_sg_count(sc))
+			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
 		sgs[out_num + in_num++] = in->sgl;
+	}
 	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
 }
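
To make the ordering above concrete, the scatterlist array handed to virtqueue_add_sgs() for a protected command ends up laid out as follows (illustration derived from the code above, not additional driver code):

	/*
	 * sgs[] layout for a command carrying protection information:
	 *
	 *   driver -> device: [0] request header (cmd or cmd_pi)
	 *                     [1] WRITE protection SGLs  (DMA_TO_DEVICE only)
	 *                     [2] Data-OUT payload       (DMA_TO_DEVICE only)
	 *   device -> driver: [.] response header
	 *                     [.] READ protection SGLs   (DMA_FROM_DEVICE only)
	 *                     [.] Data-IN payload        (DMA_FROM_DEVICE only)
	 */
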
@@ -451,12 +461,45 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
 	return err;
 }
+static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd,
+				 struct scsi_cmnd *sc)
+{
+	cmd->lun[0] = 1;
+	cmd->lun[1] = sc->device->id;
+	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
+	cmd->lun[3] = sc->device->lun & 0xff;
+	cmd->tag = (unsigned long)sc;
+	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
+	cmd->prio = 0;
+	cmd->crn = 0;
+}
+static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
+				    struct scsi_cmnd *sc)
+{
+	struct request *rq = sc->request;
+	struct blk_integrity *bi;
+	virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc);
+	if (!rq || !scsi_prot_sg_count(sc))
+		return;
+	bi = blk_get_integrity(rq->rq_disk);
+	if (sc->sc_data_direction == DMA_TO_DEVICE)
+		cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size;
+	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
+		cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size;
+}
 static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 				 struct virtio_scsi_vq *req_vq,
 				 struct scsi_cmnd *sc)
 {
 	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
 	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+	int req_size;
 	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
@@ -468,22 +511,20 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->sc = sc;
-	cmd->req.cmd = (struct virtio_scsi_cmd_req){
-		.lun[0] = 1,
-		.lun[1] = sc->device->id,
-		.lun[2] = (sc->device->lun >> 8) | 0x40,
-		.lun[3] = sc->device->lun & 0xff,
-		.tag = (unsigned long)sc,
-		.task_attr = VIRTIO_SCSI_S_SIMPLE,
-		.prio = 0,
-		.crn = 0,
-	};
 	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
-	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
-	if (virtscsi_kick_cmd(req_vq, cmd,
-			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd) != 0)
+	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
+		virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc);
+		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
+		req_size = sizeof(cmd->req.cmd_pi);
+	} else {
+		virtio_scsi_init_hdr(&cmd->req.cmd, sc);
+		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
+		req_size = sizeof(cmd->req.cmd);
+	}
+	if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
 		return SCSI_MLQUEUE_HOST_BUSY;
 	return 0;
 }
@@ -820,7 +861,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 {
 	struct Scsi_Host *shost;
 	struct virtio_scsi *vscsi;
-	int err;
+	int err, host_prot;
 	u32 sg_elems, num_targets;
 	u32 cmd_per_lun;
 	u32 num_queues;
@@ -870,6 +911,16 @@ static int virtscsi_probe(struct virtio_device *vdev)
 	shost->max_id = num_targets;
 	shost->max_channel = 0;
 	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
+	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+		scsi_host_set_prot(shost, host_prot);
+		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+	}
 	err = scsi_add_host(shost, &vdev->dev);
 	if (err)
 		goto scsi_add_host_failed;
@@ -939,6 +990,7 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
 	VIRTIO_SCSI_F_HOTPLUG,
 	VIRTIO_SCSI_F_CHANGE,
+	VIRTIO_SCSI_F_T10_PI,
 };
 static struct virtio_driver virtio_scsi_driver = {