sfc: Implement message level control

Replace EFX_ERR() with netif_err(), EFX_INFO() with netif_info(),
EFX_LOG() with netif_dbg(), and EFX_TRACE() and EFX_REGDUMP() with
netif_vdbg().  Replace EFX_ERR_RL(), EFX_INFO_RL() and EFX_LOG_RL()
using explicit calls to net_ratelimit().

Implement the ethtool operations to get and set message level flags,
and add a 'debug' module parameter for the initial value.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 62776d034c
parent 0c605a2061
committed by David S. Miller
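The hunks below show only the RX-path logging conversions; the ethtool
message-level operations and the 'debug' module parameter mentioned
above live elsewhere in the driver and are not part of this excerpt.
As a minimal sketch of what those additions plausibly look like (the
msg_enable field name follows the netif_*() convention; the default
mask is an assumption for illustration, not taken from this diff):

	/* Sketch: ethtool get/set hooks for the NETIF_MSG_* bitmask
	 * consulted by the netif_*() macros used throughout this diff. */
	static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
	{
		struct efx_nic *efx = netdev_priv(net_dev);
		return efx->msg_enable;
	}

	static void efx_ethtool_set_msglevel(struct net_device *net_dev,
					     u32 msg_enable)
	{
		struct efx_nic *efx = netdev_priv(net_dev);
		efx->msg_enable = msg_enable;
	}

	/* Module parameter supplying the initial msg_enable value;
	 * this default bit mask is assumed, not quoted. */
	static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				     NETIF_MSG_LINK | NETIF_MSG_IFUP |
				     NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				     NETIF_MSG_TX_ERR | NETIF_MSG_HW);
	module_param(debug, uint, 0);
	MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

Once hooked up as .get_msglevel/.set_msglevel in the driver's
ethtool_ops, the mask can be inspected and changed at runtime with
"ethtool <dev>" and "ethtool -s <dev> msglvl <value>".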
@@ -348,10 +348,11 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 	if (space < EFX_RX_BATCH)
 		goto out;
 
-	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
-		  " level %d to level %d using %s allocation\n",
-		  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
-		  channel->rx_alloc_push_pages ? "page" : "skb");
+	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+		   "RX queue %d fast-filling descriptor ring from"
+		   " level %d to level %d using %s allocation\n",
+		   rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
+		   channel->rx_alloc_push_pages ? "page" : "skb");
 
 	do {
 		if (channel->rx_alloc_push_pages)
@@ -366,9 +367,10 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 		}
 	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
 
-	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
-		  "to level %d\n", rx_queue->queue,
-		  rx_queue->added_count - rx_queue->removed_count);
+	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+		   "RX queue %d fast-filled descriptor ring "
+		   "to level %d\n", rx_queue->queue,
+		   rx_queue->added_count - rx_queue->removed_count);
 
  out:
 	if (rx_queue->notified_count != rx_queue->added_count)
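For reference, the netif_*() helpers used above gate each message on a
per-type bit in the device's msg_enable mask; simplified from the
<linux/netdevice.h> of this era (approximate, trimmed):

	#define netif_printk(priv, type, level, dev, fmt, args...)	\
	do {								\
		if (netif_msg_##type(priv))				\
			netdev_printk(level, (dev), fmt, ##args);	\
	} while (0)

	#define netif_err(priv, type, dev, fmt, args...)		\
		netif_printk(priv, type, KERN_ERR, dev, fmt, ##args)

netif_vdbg() additionally compiles away entirely unless VERBOSE_DEBUG
is defined, so the EFX_TRACE()/EFX_REGDUMP() replacements cost nothing
in normal builds.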
@@ -402,10 +404,12 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 	*discard = true;
 
 	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
-		EFX_ERR_RL(efx, " RX queue %d seriously overlength "
-			   "RX event (0x%x > 0x%x+0x%x). Leaking\n",
-			   rx_queue->queue, len, max_len,
-			   efx->type->rx_buffer_padding);
+		if (net_ratelimit())
+			netif_err(efx, rx_err, efx->net_dev,
+				  " RX queue %d seriously overlength "
+				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
+				  rx_queue->queue, len, max_len,
+				  efx->type->rx_buffer_padding);
 		/* If this buffer was skb-allocated, then the meta
 		 * data at the end of the skb will be trashed. So
 		 * we have no choice but to leak the fragment.
@@ -413,8 +417,11 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 		*leak_packet = (rx_buf->skb != NULL);
 		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
 	} else {
-		EFX_ERR_RL(efx, " RX queue %d overlength RX event "
-			   "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
+		if (net_ratelimit())
+			netif_err(efx, rx_err, efx->net_dev,
+				  " RX queue %d overlength RX event "
+				  "(0x%x > 0x%x)\n",
+				  rx_queue->queue, len, max_len);
 	}
 
 	rx_queue->channel->n_rx_overlength++;
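The two hunks above also show why the rate-limited variants need
explicit net_ratelimit() calls after the conversion: there is no
rate-limited form of netif_err(). The old driver-private macro
presumably folded the limit in itself, along these lines (a
reconstruction for illustration, not the verbatim original):

	#define EFX_ERR_RL(efx, fmt, args...)			\
	do {							\
		if (net_ratelimit())				\
			EFX_ERR(efx, fmt, ##args);		\
	} while (0)

Hoisting the net_ratelimit() test out of the macro makes the limiting
explicit at each call site, at the cost of a little repetition.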
@@ -502,11 +509,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	efx_rx_packet__check_len(rx_queue, rx_buf, len,
 				 &discard, &leak_packet);
 
-	EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
-		  rx_queue->queue, index,
-		  (unsigned long long)rx_buf->dma_addr, len,
-		  (checksummed ? " [SUMMED]" : ""),
-		  (discard ? " [DISCARD]" : ""));
+	netif_vdbg(efx, rx_status, efx->net_dev,
+		   "RX queue %d received id %x at %llx+%x %s%s\n",
+		   rx_queue->queue, index,
+		   (unsigned long long)rx_buf->dma_addr, len,
+		   (checksummed ? " [SUMMED]" : ""),
+		   (discard ? " [DISCARD]" : ""));
 
 	/* Discard packet, if instructed to do so */
 	if (unlikely(discard)) {
@@ -621,7 +629,8 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 	unsigned int rxq_size;
 	int rc;
 
-	EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
+	netif_dbg(efx, probe, efx->net_dev,
+		  "creating RX queue %d\n", rx_queue->queue);
 
 	/* Allocate RX buffers */
 	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
@@ -641,7 +650,8 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	unsigned int max_fill, trigger, limit;
 
-	EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
+	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+		  "initialising RX queue %d\n", rx_queue->queue);
 
 	/* Initialise ptr fields */
 	rx_queue->added_count = 0;
@@ -668,7 +678,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 	int i;
 	struct efx_rx_buffer *rx_buf;
 
-	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
+	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+		  "shutting down RX queue %d\n", rx_queue->queue);
 
 	del_timer_sync(&rx_queue->slow_fill);
 	efx_nic_fini_rx(rx_queue);
@@ -684,7 +695,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 {
-	EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
+	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+		  "destroying RX queue %d\n", rx_queue->queue);
 
 	efx_nic_remove_rx(rx_queue);
 