sfc: Cleanup RX queue information
Rename efx_nic::rss_queues to the more obvious n_rx_queues.
Remove efx_rx_queue::used and other stuff that's redundant with it.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
commit 8831da7b6c
parent 64ee3120f7
committed by Jeff Garzik
@@ -859,20 +859,20 @@ static void efx_probe_interrupts(struct efx_nic *efx)
                 * We will need one channel per interrupt.
                 */
                wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
-               efx->rss_queues = min(wanted_ints, max_channels);
+               efx->n_rx_queues = min(wanted_ints, max_channels);
 
-               for (i = 0; i < efx->rss_queues; i++)
+               for (i = 0; i < efx->n_rx_queues; i++)
                        xentries[i].entry = i;
-               rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
+               rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
                if (rc > 0) {
-                       EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
-                       efx->rss_queues = rc;
+                       EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
+                       efx->n_rx_queues = rc;
                        rc = pci_enable_msix(efx->pci_dev, xentries,
-                                            efx->rss_queues);
+                                            efx->n_rx_queues);
                }
 
                if (rc == 0) {
-                       for (i = 0; i < efx->rss_queues; i++)
+                       for (i = 0; i < efx->n_rx_queues; i++)
                                efx->channel[i].irq = xentries[i].vector;
                } else {
                        /* Fall back to single channel MSI */
@@ -883,7 +883,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 
        /* Try single interrupt MSI */
        if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
-               efx->rss_queues = 1;
+               efx->n_rx_queues = 1;
                rc = pci_enable_msi(efx->pci_dev);
                if (rc == 0) {
                        efx->channel[0].irq = efx->pci_dev->irq;
@@ -895,7 +895,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 
        /* Assume legacy interrupts */
        if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
-               efx->rss_queues = 1;
+               efx->legacy_irq = efx->pci_dev->irq;
        }
 }
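For readers who do not have the pci_enable_msix() return convention in mind (0 means success, a positive value means only that many vectors are available), the following stand-alone sketch mirrors the negotiate-then-fall-back flow that the hunks above rename but do not otherwise change. fake_enable_msix(), AVAILABLE_VECTORS and the queue counts are illustrative stand-ins, not driver code.

/*
 * Minimal user-space sketch of the fallback pattern in efx_probe_interrupts():
 * ask for n_rx_queues vectors, retry with the number actually offered, and
 * fall back to a single queue (MSI/legacy) if that also fails.
 */
#include <stdio.h>

#define AVAILABLE_VECTORS 3     /* pretend the "hardware" only offers 3 vectors */

/* Mimics pci_enable_msix(): 0 on success, >0 = number of vectors available */
static int fake_enable_msix(int requested)
{
        return (requested <= AVAILABLE_VECTORS) ? 0 : AVAILABLE_VECTORS;
}

int main(void)
{
        int n_rx_queues = 8;    /* what we would like */
        int rc;

        rc = fake_enable_msix(n_rx_queues);
        if (rc > 0) {
                /* Fewer vectors than requested: shrink and retry once */
                n_rx_queues = rc;
                rc = fake_enable_msix(n_rx_queues);
        }
        if (rc != 0)
                n_rx_queues = 1;        /* fall back to single-queue MSI/legacy */

        printf("using %d RX queue(s)\n", n_rx_queues);
        return 0;
}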
@@ -914,14 +914,10 @@ static void efx_remove_interrupts(struct efx_nic *efx)
        efx->legacy_irq = 0;
 }
 
-/* Select number of used resources
- * Should be called after probe_interrupts()
- */
-static void efx_select_used(struct efx_nic *efx)
+static void efx_set_channels(struct efx_nic *efx)
 {
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
-       int i;
 
        efx_for_each_tx_queue(tx_queue, efx) {
                if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
@@ -931,19 +927,9 @@ static void efx_select_used(struct efx_nic *efx)
                        tx_queue->channel->used_flags |= EFX_USED_BY_TX;
        }
 
-       /* RX queues. Each has a dedicated channel. */
-       for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
-               rx_queue = &efx->rx_queue[i];
-
-               if (i < efx->rss_queues) {
-                       rx_queue->used = true;
-                       /* If we allow multiple RX queues per channel
-                        * we need to decide that here
-                        */
-                       rx_queue->channel = &efx->channel[rx_queue->queue];
-                       rx_queue->channel->used_flags |= EFX_USED_BY_RX;
-                       rx_queue++;
-               }
+       efx_for_each_rx_queue(rx_queue, efx) {
+               rx_queue->channel = &efx->channel[rx_queue->queue];
+               rx_queue->channel->used_flags |= EFX_USED_BY_RX;
        }
 }
 
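The hunk above is the heart of the cleanup: once only the first n_rx_queues entries of the rx_queue[] array are ever populated, a per-queue "used" flag adds nothing, and the iterator can simply walk the array prefix. A minimal user-space sketch of that invariant follows; the types, sizes and the one-channel-per-queue mapping are illustrative, not the driver's own definitions.

/* Sketch: live RX queues are exactly rx_queue[0..n_rx_queues-1]. */
#include <stdio.h>

#define MAX_RX_QUEUES 4

struct rx_queue {
        int queue;      /* DMA queue number */
        int channel;    /* channel this queue is mapped to */
};

struct nic {
        struct rx_queue rx_queue[MAX_RX_QUEUES];
        int n_rx_queues;
};

int main(void)
{
        struct nic efx = { .n_rx_queues = 2 };
        int i;

        for (i = 0; i < MAX_RX_QUEUES; i++)
                efx.rx_queue[i].queue = i;

        /* Equivalent of the new efx_set_channels(): one channel per RX queue */
        for (i = 0; i < efx.n_rx_queues; i++)
                efx.rx_queue[i].channel = efx.rx_queue[i].queue;

        for (i = 0; i < efx.n_rx_queues; i++)
                printf("rx queue %d -> channel %d\n", i, efx.rx_queue[i].channel);
        return 0;
}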
@@ -962,8 +948,7 @@ static int efx_probe_nic(struct efx_nic *efx)
         * in MSI-X interrupts. */
        efx_probe_interrupts(efx);
 
-       /* Determine number of RX queues and TX queues */
-       efx_select_used(efx);
+       efx_set_channels(efx);
 
        /* Initialise the interrupt moderation settings */
        efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
@@ -1535,7 +1535,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
             offset < RX_RSS_INDIR_TBL_B0 + 0x800;
             offset += 0x10) {
                EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
-                                    i % efx->rss_queues);
+                                    i % efx->n_rx_queues);
                falcon_writel(efx, &dword, offset);
                i++;
        }
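The loop above walks the 0x800-byte RSS indirection table in 0x10-byte steps (128 entries) and programs each entry with i % n_rx_queues, spreading hash buckets round-robin across the RX queues. Below is a small stand-alone sketch of that arithmetic; the entry count follows from the constants in the hunk, everything else is illustrative.

/* Sketch: round-robin fill of an RSS indirection table. */
#include <stdio.h>

#define RSS_INDIR_ENTRIES (0x800 / 0x10)        /* 128 entries */

int main(void)
{
        unsigned int table[RSS_INDIR_ENTRIES];
        unsigned int n_rx_queues = 3;
        unsigned int i, counts[8] = { 0 };

        for (i = 0; i < RSS_INDIR_ENTRIES; i++) {
                table[i] = i % n_rx_queues;     /* bucket i -> queue i mod n */
                counts[table[i]]++;
        }

        for (i = 0; i < n_rx_queues; i++)
                printf("queue %u gets %u of %u buckets\n",
                       i, counts[i], RSS_INDIR_ENTRIES);
        return 0;
}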
@@ -2785,7 +2785,7 @@ int falcon_init_nic(struct efx_nic *efx)
        if (falcon_rev(efx) >= FALCON_REV_B0)
                EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
        else
-               EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
+               EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->n_rx_queues - 1);
        if (EFX_WORKAROUND_7244(efx)) {
                EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
                EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
@@ -231,7 +231,6 @@ struct efx_rx_buffer {
  * struct efx_rx_queue - An Efx RX queue
  * @efx: The associated Efx NIC
  * @queue: DMA queue number
- * @used: Queue is used by net driver
  * @channel: The associated channel
  * @buffer: The software buffer ring
  * @rxd: The hardware descriptor ring
@@ -265,7 +264,6 @@ struct efx_rx_buffer {
 struct efx_rx_queue {
        struct efx_nic *efx;
        int queue;
-       bool used;
        struct efx_channel *channel;
        struct efx_rx_buffer *buffer;
        struct efx_special_buffer rxd;
@@ -628,7 +626,7 @@ union efx_multicast_hash {
  * @tx_queue: TX DMA queues
  * @rx_queue: RX DMA queues
  * @channel: Channels
- * @rss_queues: Number of RSS queues
+ * @n_rx_queues: Number of RX queues
  * @rx_buffer_len: RX buffer length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
  * @irq_status: Interrupt status buffer
@@ -704,7 +702,7 @@ struct efx_nic {
        struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
        struct efx_channel channel[EFX_MAX_CHANNELS];
 
-       int rss_queues;
+       int n_rx_queues;
        unsigned int rx_buffer_len;
        unsigned int rx_buffer_order;
 
@@ -850,19 +848,15 @@ struct efx_nic_type {
 /* Iterate over all used RX queues */
 #define efx_for_each_rx_queue(_rx_queue, _efx)                         \
        for (_rx_queue = &_efx->rx_queue[0];                            \
-            _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES];            \
-            _rx_queue++)                                               \
-               if (!_rx_queue->used)                                   \
-                       continue;                                       \
-               else
+            _rx_queue < &_efx->rx_queue[_efx->n_rx_queues];            \
+            _rx_queue++)
 
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)             \
        for (_rx_queue = &_channel->efx->rx_queue[0];                   \
             _rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES];   \
             _rx_queue++)                                               \
-               if ((!_rx_queue->used) ||                               \
-                   (_rx_queue->channel != _channel))                   \
+               if (_rx_queue->channel != _channel)                     \
                        continue;                                       \
                else
 
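Both macros above rely on the for/if/continue/else idiom: the caller's statement binds to the trailing "else", so non-matching entries are skipped by the "continue" without any braces in the macro itself. A self-contained sketch of that idiom follows; the structures, the queue/channel layout and the macro name are illustrative stand-ins for the driver's definitions.

/* Sketch: the for/if/continue/else iterator idiom. */
#include <stdio.h>

#define MAX_RX_QUEUES 4

struct rx_queue { int queue; int channel; };

/* One RX queue per channel, as efx_set_channels() now arranges */
static struct rx_queue rx_queue[MAX_RX_QUEUES] = {
        { 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 },
};

#define for_each_channel_rx_queue(_rxq, _channel)                       \
        for (_rxq = &rx_queue[0];                                       \
             _rxq < &rx_queue[MAX_RX_QUEUES];                           \
             _rxq++)                                                    \
                if ((_rxq)->channel != (_channel))                      \
                        continue;                                       \
                else

int main(void)
{
        struct rx_queue *rxq;

        /* The printf() becomes the body of the "else" for matching entries */
        for_each_channel_rx_queue(rxq, 2)
                printf("channel 2 owns rx queue %d\n", rxq->queue);
        return 0;
}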
@@ -789,23 +789,14 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
        /* Allocate RX buffers */
        rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
        rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
-       if (!rx_queue->buffer) {
-               rc = -ENOMEM;
-               goto fail1;
-       }
+       if (!rx_queue->buffer)
+               return -ENOMEM;
 
        rc = falcon_probe_rx(rx_queue);
-       if (rc)
-               goto fail2;
-       return 0;
-
- fail2:
-       kfree(rx_queue->buffer);
-       rx_queue->buffer = NULL;
- fail1:
-       rx_queue->used = 0;
-
+       if (rc) {
+               kfree(rx_queue->buffer);
+               rx_queue->buffer = NULL;
+       }
        return rc;
 }
 
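With the used flag gone, the hunk above can drop the fail1/fail2 labels: the allocation failure returns early, and a failed hardware probe only has the one earlier allocation to undo. A minimal user-space sketch of that shape; probe_hw_ring() stands in for falcon_probe_rx() and calloc() for kzalloc(), neither is driver code.

/* Sketch: early return plus in-place cleanup instead of goto labels. */
#include <stdio.h>
#include <stdlib.h>

struct rx_queue {
        void *buffer;
};

static int probe_hw_ring(struct rx_queue *rxq)
{
        (void)rxq;
        return 0;               /* pretend the hardware probe succeeds */
}

static int probe_rx_queue(struct rx_queue *rxq, size_t rxq_size)
{
        int rc;

        rxq->buffer = calloc(1, rxq_size);
        if (!rxq->buffer)
                return -1;      /* nothing else to unwind yet */

        rc = probe_hw_ring(rxq);
        if (rc) {
                free(rxq->buffer);      /* undo the only earlier step */
                rxq->buffer = NULL;
        }
        return rc;
}

int main(void)
{
        struct rx_queue rxq = { 0 };

        printf("probe_rx_queue: %d\n", probe_rx_queue(&rxq, 4096));
        free(rxq.buffer);
        return 0;
}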
@@ -872,7 +863,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 
        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
-       rx_queue->used = 0;
 }
 
 void efx_flush_lro(struct efx_channel *channel)