From: Arie Gershberg <agershb...@marvell.com>

In this patch, we implement controller-level error handling and recovery.
Upon an error detected by the ULP or a controller reset initiated by
nvme-core (via the reset_ctrl workqueue), the ULP initiates controller
recovery, which includes teardown and re-connect of all queues.
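
For illustration, a vendor offload driver would kick off this recovery
from its own error path roughly as follows (a minimal sketch; the
example_* naming is hypothetical, and the queue->ctrl back-pointer is
assumed to be valid in the callback context; neither is mandated by
this patch):

	/* Hypothetical vendor error path: hand the controller over to
	 * the offload library, which moves it to RESETTING and queues
	 * err_work on nvme_reset_wq.
	 */
	static void example_handle_transport_error(struct nvme_tcp_ofld_queue *queue)
	{
		struct nvme_ctrl *nctrl = &queue->ctrl->nctrl;

		dev_warn(nctrl->device, "transport error, starting recovery\n");
		nvme_tcp_ofld_error_recovery(nctrl);
	}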

Signed-off-by: Arie Gershberg <agershb...@marvell.com>
Signed-off-by: Michal Kalderon <mkalde...@marvell.com>
Signed-off-by: Ariel Elior <ael...@marvell.com>
Signed-off-by: Shai Malin <sma...@marvell.com>
---
 drivers/nvme/host/tcp-offload.c | 121 +++++++++++++++++++++++++++++++-
 drivers/nvme/host/tcp-offload.h |  10 +++
 2 files changed, 130 insertions(+), 1 deletion(-)
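
Note for testers: with nctrl->reset_work wired to
nvme_tcp_ofld_reset_ctrl_work() below, the reset flow can also be
exercised through the standard nvme-core sysfs attribute (nvme0 is a
placeholder for an offloaded controller instance):

	echo 1 > /sys/class/nvme/nvme0/reset_controller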

diff --git a/drivers/nvme/host/tcp-offload.c b/drivers/nvme/host/tcp-offload.c
index da934c6fed9e..e3759c281927 100644
--- a/drivers/nvme/host/tcp-offload.c
+++ b/drivers/nvme/host/tcp-offload.c
@@ -73,6 +73,23 @@ void nvme_tcp_ofld_unregister_dev(struct nvme_tcp_ofld_dev *dev)
 }
 EXPORT_SYMBOL_GPL(nvme_tcp_ofld_unregister_dev);
 
+/**
+ * nvme_tcp_ofld_error_recovery() - NVMeTCP Offload Library error recovery
+ * function.
+ * @nctrl:     NVMe controller instance to change to resetting.
+ *
+ * API function that changes the controller state to resetting.
+ * Part of the overall controller reset sequence.
+ */
+void nvme_tcp_ofld_error_recovery(struct nvme_ctrl *nctrl)
+{
+       if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_RESETTING))
+               return;
+
+       queue_work(nvme_reset_wq, &to_tcp_ofld_ctrl(nctrl)->err_work);
+}
+EXPORT_SYMBOL_GPL(nvme_tcp_ofld_error_recovery);
+
 /**
  * nvme_tcp_ofld_report_queue_err() - NVMeTCP Offload report error event
  * callback function. Pointed to by nvme_tcp_ofld_queue->report_err.
@@ -291,6 +308,27 @@ nvme_tcp_ofld_configure_io_queues(struct nvme_ctrl *nctrl, bool new)
        return rc;
 }
 
+static void nvme_tcp_ofld_reconnect_or_remove(struct nvme_ctrl *nctrl)
+{
+       /* If we are resetting/deleting then do nothing */
+       if (nctrl->state != NVME_CTRL_CONNECTING) {
+               WARN_ON_ONCE(nctrl->state == NVME_CTRL_NEW ||
+                            nctrl->state == NVME_CTRL_LIVE);
+               return;
+       }
+
+       if (nvmf_should_reconnect(nctrl)) {
+               dev_info(nctrl->device, "Reconnecting in %d seconds...\n",
+                        nctrl->opts->reconnect_delay);
+               queue_delayed_work(nvme_wq,
+                                  &to_tcp_ofld_ctrl(nctrl)->connect_work,
+                                  nctrl->opts->reconnect_delay * HZ);
+       } else {
+               dev_info(nctrl->device, "Removing controller...\n");
+               nvme_delete_ctrl(nctrl);
+       }
+}
+
 static int nvme_tcp_ofld_setup_ctrl(struct nvme_ctrl *nctrl, bool new)
 {
        struct nvmf_ctrl_options *opts = nctrl->opts;
@@ -399,10 +437,62 @@ nvme_tcp_ofld_teardown_io_queues(struct nvme_ctrl *nctrl, bool remove)
        /* Placeholder - teardown_io_queues */
 }
 
+static void nvme_tcp_ofld_reconnect_ctrl_work(struct work_struct *work)
+{
+       struct nvme_tcp_ofld_ctrl *ctrl =
+                               container_of(to_delayed_work(work),
+                                            struct nvme_tcp_ofld_ctrl,
+                                            connect_work);
+       struct nvme_ctrl *nctrl = &ctrl->nctrl;
+
+       ++nctrl->nr_reconnects;
+
+       if (nvme_tcp_ofld_setup_ctrl(nctrl, false))
+               goto requeue;
+
+       dev_info(nctrl->device, "Successfully reconnected (%d attempt)\n",
+                nctrl->nr_reconnects);
+
+       nctrl->nr_reconnects = 0;
+
+       return;
+
+requeue:
+       dev_info(nctrl->device, "Failed reconnect attempt %d\n",
+                nctrl->nr_reconnects);
+       nvme_tcp_ofld_reconnect_or_remove(nctrl);
+}
+
+static void nvme_tcp_ofld_error_recovery_work(struct work_struct *work)
+{
+       struct nvme_tcp_ofld_ctrl *ctrl =
+               container_of(work, struct nvme_tcp_ofld_ctrl, err_work);
+       struct nvme_ctrl *nctrl = &ctrl->nctrl;
+
+       nvme_stop_keep_alive(nctrl);
+       nvme_tcp_ofld_teardown_io_queues(nctrl, false);
+       /* unquiesce to fail fast pending requests */
+       nvme_start_queues(nctrl);
+       nvme_tcp_ofld_teardown_admin_queue(nctrl, false);
+       blk_mq_unquiesce_queue(nctrl->admin_q);
+
+       if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_CONNECTING)) {
+               /* state change failure is ok if we started nctrl delete */
+               WARN_ON_ONCE(nctrl->state != NVME_CTRL_DELETING &&
+                            nctrl->state != NVME_CTRL_DELETING_NOIO);
+               return;
+       }
+
+       nvme_tcp_ofld_reconnect_or_remove(nctrl);
+}
+
 static void
 nvme_tcp_ofld_teardown_ctrl(struct nvme_ctrl *nctrl, bool shutdown)
 {
-       /* Placeholder - err_work and connect_work */
+       struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl);
+
+       cancel_work_sync(&ctrl->err_work);
+       cancel_delayed_work_sync(&ctrl->connect_work);
        nvme_tcp_ofld_teardown_io_queues(nctrl, shutdown);
        blk_mq_quiesce_queue(nctrl->admin_q);
        if (shutdown)
@@ -417,6 +507,31 @@ static void nvme_tcp_ofld_delete_ctrl(struct nvme_ctrl *nctrl)
        nvme_tcp_ofld_teardown_ctrl(nctrl, true);
 }
 
+static void nvme_tcp_ofld_reset_ctrl_work(struct work_struct *work)
+{
+       struct nvme_ctrl *nctrl =
+               container_of(work, struct nvme_ctrl, reset_work);
+
+       nvme_stop_ctrl(nctrl);
+       nvme_tcp_ofld_teardown_ctrl(nctrl, false);
+
+       if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_CONNECTING)) {
+               /* state change failure is ok if we started ctrl delete */
+               WARN_ON_ONCE(nctrl->state != NVME_CTRL_DELETING &&
+                            nctrl->state != NVME_CTRL_DELETING_NOIO);
+               return;
+       }
+
+       if (nvme_tcp_ofld_setup_ctrl(nctrl, false))
+               goto out_fail;
+
+       return;
+
+out_fail:
+       ++nctrl->nr_reconnects;
+       nvme_tcp_ofld_reconnect_or_remove(nctrl);
+}
+
 static int
 nvme_tcp_ofld_init_request(struct blk_mq_tag_set *set,
                           struct request *rq,
@@ -513,6 +628,10 @@ nvme_tcp_ofld_create_ctrl(struct device *ndev, struct nvmf_ctrl_options *opts)
                             opts->nr_poll_queues + 1;
        nctrl->sqsize = opts->queue_size - 1;
        nctrl->kato = opts->kato;
+       INIT_DELAYED_WORK(&ctrl->connect_work,
+                         nvme_tcp_ofld_reconnect_ctrl_work);
+       INIT_WORK(&ctrl->err_work, nvme_tcp_ofld_error_recovery_work);
+       INIT_WORK(&nctrl->reset_work, nvme_tcp_ofld_reset_ctrl_work);
        if (!(opts->mask & NVMF_OPT_TRSVCID)) {
                opts->trsvcid =
                        kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
diff --git a/drivers/nvme/host/tcp-offload.h b/drivers/nvme/host/tcp-offload.h
index a9bcd4f7d150..0fb212f9193a 100644
--- a/drivers/nvme/host/tcp-offload.h
+++ b/drivers/nvme/host/tcp-offload.h
@@ -84,6 +84,15 @@ struct nvme_tcp_ofld_ctrl {
        struct blk_mq_tag_set admin_tag_set;
        struct nvme_tcp_ofld_queue *queues;
 
+       struct work_struct err_work;
+       struct delayed_work connect_work;
+
+       /*
+        * Each entry in the array indicates the number of queues of
+        * the corresponding type.
+        */
+       u32 queue_type_mapping[HCTX_MAX_TYPES];
+
        /* Connectivity params */
        struct nvme_tcp_ofld_ctrl_con_params conn_params;
 
@@ -166,3 +175,4 @@ struct nvme_tcp_ofld_ops {
 /* Exported functions for lower vendor specific offload drivers */
 int nvme_tcp_ofld_register_dev(struct nvme_tcp_ofld_dev *dev);
 void nvme_tcp_ofld_unregister_dev(struct nvme_tcp_ofld_dev *dev);
+void nvme_tcp_ofld_error_recovery(struct nvme_ctrl *nctrl);
-- 
2.22.0
