We hit a situation where a tasklet ended up calling usleep_range() in the
PTT acquire logic, which triggers the "scheduling while atomic" BUG().
BUG: scheduling while atomic: swapper/24/0/0x00000100
  [<ffffffffb41c6199>] schedule+0x29/0x70
  [<ffffffffb41c5512>] schedule_hrtimeout_range_clock+0xb2/0x150
  [<ffffffffb41c55c3>] schedule_hrtimeout_range+0x13/0x20
  [<ffffffffb41c3bcf>] usleep_range+0x4f/0x70
  [<ffffffffc08d3e58>] qed_ptt_acquire+0x38/0x100 [qed]
  [<ffffffffc08eac48>] _qed_get_vport_stats+0x458/0x580 [qed]
  [<ffffffffc08ead8c>] qed_get_vport_stats+0x1c/0xd0 [qed]
  [<ffffffffc08dffd3>] qed_get_protocol_stats+0x93/0x100 [qed]
	qed_mcp_send_protocol_stats
	  case MFW_DRV_MSG_GET_LAN_STATS:
	  case MFW_DRV_MSG_GET_FCOE_STATS:
	  case MFW_DRV_MSG_GET_ISCSI_STATS:
	  case MFW_DRV_MSG_GET_RDMA_STATS:
  [<ffffffffc08e36d8>] qed_mcp_handle_events+0x2d8/0x890 [qed]
	qed_int_assertion
	  qed_int_attentions
  [<ffffffffc08d9490>] qed_int_sp_dpc+0xa50/0xdc0 [qed]
  [<ffffffffb3aa7623>] tasklet_action+0x83/0x140
  [<ffffffffb41d9125>] __do_softirq+0x125/0x2bb
  [<ffffffffb41d560c>] call_softirq+0x1c/0x30
  [<ffffffffb3a30645>] do_softirq+0x65/0xa0
  [<ffffffffb3aa78d5>] irq_exit+0x105/0x110
  [<ffffffffb41d8996>] do_IRQ+0x56/0xf0
Fix this by making the caller provide the context - whether the flow may
be atomic or not - when requesting stats from the QED driver. Based on
the provided context, the QED driver decides whether it may schedule out
while acquiring the PTT BAR window.

We hit the bug while getting vport stats, but according to the code the
same issue can occur for the FCoE and iSCSI statistics as well, so fix
those paths too.
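
To illustrate the intended calling convention, here is a minimal,
hypothetical caller; the two helpers are the ones touched by the patch
below, the surrounding function is only a sketch:

	/* Hypothetical example - not part of the patch itself. */
	static void example_collect_stats(struct qed_hwfn *p_hwfn)
	{
		struct qed_ptt *p_ptt;

		/* Pass is_atomic == true when running in atomic context
		 * (e.g. the stats path reached from the sp tasklet), so the
		 * acquire loop polls with udelay() instead of sleeping.
		 */
		p_ptt = qed_ptt_acquire_context(p_hwfn, true);
		if (!p_ptt)
			return;

		/* ... read statistics through the PTT window ... */

		qed_ptt_release(p_hwfn, p_ptt);
	}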
Fixes: 6c75424612a7 ("qed: Add support for NCSI statistics.")
Fixes: 1e128c81290a ("qed: Add support for hardware offloaded FCoE.")
Fixes: 2f2b2614e893 ("qed: Provide iSCSI statistics to management")
Cc: Sudarsana Kalluru <skalluru@marvell.com>
Cc: David Miller <davem@davemloft.net>
Cc: Manish Chopra <manishc@marvell.com>
Signed-off-by: Konstantin Khorenko <khorenko@virtuozzo.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
*/
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+/**
+ * qed_ptt_acquire_context(): Allocate a PTT window honoring the context
+ * atomicity.
+ *
+ * @p_hwfn: HW device data.
+ * @is_atomic: Hint from the caller - if the func can sleep or not.
+ *
+ * Context: The function should not sleep in case is_atomic == true.
+ * Return: struct qed_ptt.
+ *
+ * Should be called at the entry point to the driver
+ * (at the beginning of an exported function).
+ */
+struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn,
+ bool is_atomic);
+
/**
* qed_ptt_release(): Release PTT Window.
*
}
static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
- struct qed_fcoe_stats *p_stats)
+ struct qed_fcoe_stats *p_stats,
+ bool is_atomic)
{
struct qed_ptt *p_ptt;
memset(p_stats, 0, sizeof(*p_stats));
- p_ptt = qed_ptt_acquire(p_hwfn);
+ p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
if (!p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
QED_SPQ_MODE_EBLOCK, NULL);
}
+static int qed_fcoe_stats_context(struct qed_dev *cdev,
+ struct qed_fcoe_stats *stats,
+ bool is_atomic)
+{
+ return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
+}
+
static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
- return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats);
+ return qed_fcoe_stats_context(cdev, stats, false);
}
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
- struct qed_mcp_fcoe_stats *stats)
+ struct qed_mcp_fcoe_stats *stats,
+ bool is_atomic)
{
struct qed_fcoe_stats proto_stats;
/* Retrieve FW statistics */
memset(&proto_stats, 0, sizeof(proto_stats));
- if (qed_fcoe_stats(cdev, &proto_stats)) {
+ if (qed_fcoe_stats_context(cdev, &proto_stats, is_atomic)) {
DP_VERBOSE(cdev, QED_MSG_STORAGE,
"Failed to collect FCoE statistics\n");
return;
void qed_fcoe_setup(struct qed_hwfn *p_hwfn);
void qed_fcoe_free(struct qed_hwfn *p_hwfn);
+/**
+ * qed_get_protocol_stats_fcoe(): Fills provided statistics
+ * struct with statistics.
+ *
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ * @is_atomic: Hint from the caller - if the func can sleep or not.
+ *
+ * Context: The function should not sleep in case is_atomic == true.
+ * Return: Void.
+ */
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
- struct qed_mcp_fcoe_stats *stats);
+ struct qed_mcp_fcoe_stats *stats,
+ bool is_atomic);
#else /* CONFIG_QED_FCOE */
static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {}
static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
- struct qed_mcp_fcoe_stats *stats)
+ struct qed_mcp_fcoe_stats *stats,
+ bool is_atomic)
{
}
#endif /* CONFIG_QED_FCOE */
#include "qed_reg_addr.h"
#include "qed_sriov.h"
-#define QED_BAR_ACQUIRE_TIMEOUT 1000
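+/* Keep the total polling budget comparable in both modes:
+ * 1000 iterations of usleep_range(1000, 2000) (~1-2 s) when sleeping is
+ * allowed, 100000 iterations of udelay(10) (~1 s) in atomic context.
+ */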
+#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT 1000
+#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP 1000
+#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT 100000
+#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY 10
/* Invalid values */
#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1))
}
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+{
+ return qed_ptt_acquire_context(p_hwfn, false);
+}
+
+struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic)
{
struct qed_ptt *p_ptt;
- unsigned int i;
+ unsigned int i, count;
+
+ if (is_atomic)
+ count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT;
+ else
+ count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT;
/* Take the free PTT from the list */
- for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
+ for (i = 0; i < count; i++) {
spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
}
spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
- usleep_range(1000, 2000);
+
+ if (is_atomic)
+ udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY);
+ else
+ usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP,
+ QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2);
}
DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
}
static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_stats *stats)
+ struct qed_iscsi_stats *stats,
+ bool is_atomic)
{
struct qed_ptt *p_ptt;
memset(stats, 0, sizeof(*stats));
- p_ptt = qed_ptt_acquire(p_hwfn);
+ p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
if (!p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
return -EAGAIN;
QED_SPQ_MODE_EBLOCK, NULL);
}
+static int qed_iscsi_stats_context(struct qed_dev *cdev,
+ struct qed_iscsi_stats *stats,
+ bool is_atomic)
+{
+ return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
+}
+
static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
{
- return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats);
+ return qed_iscsi_stats_context(cdev, stats, false);
}
static int qed_iscsi_change_mac(struct qed_dev *cdev,
}
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
- struct qed_mcp_iscsi_stats *stats)
+ struct qed_mcp_iscsi_stats *stats,
+ bool is_atomic)
{
struct qed_iscsi_stats proto_stats;
/* Retrieve FW statistics */
memset(&proto_stats, 0, sizeof(proto_stats));
- if (qed_iscsi_stats(cdev, &proto_stats)) {
+ if (qed_iscsi_stats_context(cdev, &proto_stats, is_atomic)) {
DP_VERBOSE(cdev, QED_MSG_STORAGE,
"Failed to collect ISCSI statistics\n");
return;
*
* @cdev: Qed dev pointer.
* @stats: Points to struct that will be filled with statistics.
+ * @is_atomic: Hint from the caller - if the func can sleep or not.
*
+ * Context: The function should not sleep in case is_atomic == true.
* Return: Void.
*/
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
- struct qed_mcp_iscsi_stats *stats);
+ struct qed_mcp_iscsi_stats *stats,
+ bool is_atomic);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
{
static inline void
qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
- struct qed_mcp_iscsi_stats *stats) {}
+ struct qed_mcp_iscsi_stats *stats,
+ bool is_atomic) {}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
#endif
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
+ struct qed_eth_stats *stats,
+ bool is_atomic)
{
u8 fw_vport = 0;
int i;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
- : NULL;
+ struct qed_ptt *p_ptt;
bool b_get_port_stats;
+ p_ptt = IS_PF(cdev) ? qed_ptt_acquire_context(p_hwfn, is_atomic)
+ : NULL;
if (IS_PF(cdev)) {
/* The main vport index is relative first */
if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
}
void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
+{
+ qed_get_vport_stats_context(cdev, stats, false);
+}
+
+void qed_get_vport_stats_context(struct qed_dev *cdev,
+ struct qed_eth_stats *stats,
+ bool is_atomic)
{
u32 i;
return;
}
- _qed_get_vport_stats(cdev, stats);
+ _qed_get_vport_stats(cdev, stats, is_atomic);
if (!cdev->reset_stats)
return;
if (!cdev->reset_stats) {
DP_INFO(cdev, "Reset stats not allocated\n");
} else {
- _qed_get_vport_stats(cdev, cdev->reset_stats);
+ _qed_get_vport_stats(cdev, cdev->reset_stats, false);
cdev->reset_stats->common.link_change_count = 0;
}
}
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
+/**
+ * qed_get_vport_stats(): Fills provided statistics
+ * struct with statistics.
+ *
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ *
+ * Return: Void.
+ */
void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
+/**
+ * qed_get_vport_stats_context(): Fills provided statistics
+ * struct with statistics.
+ *
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ * @is_atomic: Hint from the caller - if the func can sleep or not.
+ *
+ * Context: The function should not sleep in case is_atomic == true.
+ * Return: Void.
+ */
+void qed_get_vport_stats_context(struct qed_dev *cdev,
+ struct qed_eth_stats *stats,
+ bool is_atomic);
+
void qed_reset_vport_stats(struct qed_dev *cdev);
/**
switch (type) {
case QED_MCP_LAN_STATS:
- qed_get_vport_stats(cdev, ð_stats);
+ qed_get_vport_stats_context(cdev, ð_stats, true);
stats->lan_stats.ucast_rx_pkts =
eth_stats.common.rx_ucast_pkts;
stats->lan_stats.ucast_tx_pkts =
stats->lan_stats.fcs_err = -1;
break;
case QED_MCP_FCOE_STATS:
- qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
+ qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true);
break;
case QED_MCP_ISCSI_STATS:
- qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
+ qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true);
break;
default:
DP_VERBOSE(cdev, QED_MSG_SP,