cxgb3 sparse warning fixes
Fix sparse warnings about shadowed variables and about routines that should be declared static.

Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
commit 9265fabf0d
parent 33a85aa1c9
Committed by: David S. Miller
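For context, sparse emits two kinds of diagnostics that this patch addresses: "symbol 'foo' was not declared. Should it be static?" for file-local functions left with external linkage, and "symbol 'bar' shadows an earlier one" for redeclared locals. The stand-alone sketch below uses illustrative names only and is not code from the cxgb3 driver; in a kernel tree sparse is usually run over the directory with something like make C=2 drivers/net/cxgb3/.

/*
 * Illustrative sketch, not part of this commit.  Running sparse on this
 * file produces the two warning classes fixed in cxgb3.
 */

/* Used only in this file but has external linkage: sparse suggests
 * making it static, which is what the patch does for update_tpsram(),
 * t3_free_qset(), t3_reset_adapter(), t3b2_mac_reset() and friends.
 */
int local_helper(int x)                 /* fix: static int local_helper(int x) */
{
        return x * 2;
}

/* The inner 'val' shadows the parameter: sparse warns about the shadow.
 * The patch fixes the analogous cases by dropping the duplicate
 * 'struct port_info *pi' declarations and renaming 'req' to 'rdma'.
 */
int shadow_example(int val)
{
        if (val > 0) {
                int val = 3;            /* shadows the outer 'val' */
                return val;
        }
        return val;
}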
@@ -740,7 +740,7 @@ static inline char t3rev2char(struct adapter *adapter)
         return rev;
 }
 
-int update_tpsram(struct adapter *adap)
+static int update_tpsram(struct adapter *adap)
 {
         const struct firmware *tpsram;
         char buf[64];
@@ -1769,7 +1769,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
         }
         case CHELSIO_SET_QSET_NUM:{
                 struct ch_reg edata;
-                struct port_info *pi = netdev_priv(dev);
                 unsigned int i, first_qset = 0, other_qsets = 0;
 
                 if (!capable(CAP_NET_ADMIN))
@@ -1801,7 +1800,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
         }
         case CHELSIO_GET_QSET_NUM:{
                 struct ch_reg edata;
-                struct port_info *pi = netdev_priv(dev);
 
                 edata.cmd = CHELSIO_GET_QSET_NUM;
                 edata.val = pi->nqsets;
@@ -222,32 +222,32 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
         int ret = 0;
 
         switch (req) {
-        case RDMA_GET_PARAMS:{
-                struct rdma_info *req = data;
+        case RDMA_GET_PARAMS: {
+                struct rdma_info *rdma = data;
                 struct pci_dev *pdev = adapter->pdev;
 
-                req->udbell_physbase = pci_resource_start(pdev, 2);
-                req->udbell_len = pci_resource_len(pdev, 2);
-                req->tpt_base =
+                rdma->udbell_physbase = pci_resource_start(pdev, 2);
+                rdma->udbell_len = pci_resource_len(pdev, 2);
+                rdma->tpt_base =
                         t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
-                req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
-                req->pbl_base =
+                rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
+                rdma->pbl_base =
                         t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
-                req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
-                req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
-                req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
-                req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
-                req->pdev = pdev;
+                rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
+                rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
+                rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
+                rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
+                rdma->pdev = pdev;
                 break;
         }
         case RDMA_CQ_OP:{
                 unsigned long flags;
-                struct rdma_cq_op *req = data;
+                struct rdma_cq_op *rdma = data;
 
                 /* may be called in any context */
                 spin_lock_irqsave(&adapter->sge.reg_lock, flags);
-                ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
-                                        req->credits);
+                ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
+                                        rdma->credits);
                 spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
                 break;
         }
@@ -274,15 +274,15 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
                 break;
         }
         case RDMA_CQ_SETUP:{
-                struct rdma_cq_setup *req = data;
+                struct rdma_cq_setup *rdma = data;
 
                 spin_lock_irq(&adapter->sge.reg_lock);
                 ret =
-                        t3_sge_init_cqcntxt(adapter, req->id,
-                                            req->base_addr, req->size,
+                        t3_sge_init_cqcntxt(adapter, rdma->id,
+                                            rdma->base_addr, rdma->size,
                                             ASYNC_NOTIF_RSPQ,
-                                            req->ovfl_mode, req->credits,
-                                            req->credit_thres);
+                                            rdma->ovfl_mode, rdma->credits,
+                                            rdma->credit_thres);
                 spin_unlock_irq(&adapter->sge.reg_lock);
                 break;
         }
@@ -292,13 +292,13 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
                 spin_unlock_irq(&adapter->sge.reg_lock);
                 break;
         case RDMA_CTRL_QP_SETUP:{
-                struct rdma_ctrlqp_setup *req = data;
+                struct rdma_ctrlqp_setup *rdma = data;
 
                 spin_lock_irq(&adapter->sge.reg_lock);
                 ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
                                          SGE_CNTXT_RDMA,
                                          ASYNC_NOTIF_RSPQ,
-                                         req->base_addr, req->size,
+                                         rdma->base_addr, rdma->size,
                                          FW_RI_TID_START, 1, 0);
                 spin_unlock_irq(&adapter->sge.reg_lock);
                 break;
@@ -544,7 +544,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  * as HW contexts, packet buffers, and descriptor rings. Traffic to the
  * queue set must be quiesced prior to calling this.
  */
-void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
 {
         int i;
         struct pci_dev *pdev = adapter->pdev;
@@ -2215,7 +2215,7 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  * The MSI-X interrupt handler for an SGE response queue for the NAPI case
  * (i.e., response queue serviced by NAPI polling).
  */
-irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
+static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
 {
         struct sge_qset *qs = cookie;
         struct sge_rspq *q = &qs->rspq;
@@ -2284,7 +2284,7 @@ static int rspq_check_napi(struct sge_qset *qs)
  * one SGE response queue per port in this mode and protect all response
  * queues with queue 0's lock.
  */
-irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
+static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
 {
         int new_packets;
         struct adapter *adap = cookie;
@@ -119,9 +119,9 @@ void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
  * Reads registers that are accessed indirectly through an address/data
  * register pair.
  */
-void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
-                      unsigned int data_reg, u32 *vals, unsigned int nregs,
-                      unsigned int start_idx)
+static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
+                             unsigned int data_reg, u32 *vals,
+                             unsigned int nregs, unsigned int start_idx)
 {
         while (nregs--) {
                 t3_write_reg(adap, addr_reg, start_idx);
@@ -3407,7 +3407,7 @@ void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
  * Older PCIe cards lose their config space during reset, PCI-X
  * ones don't.
  */
-int t3_reset_adapter(struct adapter *adapter)
+static int t3_reset_adapter(struct adapter *adapter)
 {
         int i, save_and_restore_pcie =
                 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
@@ -142,7 +142,7 @@ int t3_mac_reset(struct cmac *mac)
         return 0;
 }
 
-int t3b2_mac_reset(struct cmac *mac)
+static int t3b2_mac_reset(struct cmac *mac)
 {
         struct adapter *adap = mac->adapter;
         unsigned int oft = mac->offset;