bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
+void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
  const struct tcp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops __read_mostly = {
        .resync_request         = nvme_tcp_resync_request,
+       .ddp_teardown_done      = nvme_tcp_ddp_teardown_done,
  };
+static
+int nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
+                         uint16_t command_id,
+                         struct request *rq)
+{
+       struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+       struct net_device *netdev = queue->ctrl->offloading_netdev;
+       int ret;
+
+       if (unlikely(!netdev)) {
+               pr_info_ratelimited("%s: netdev not found\n", __func__);

Use dev_info_ratelimited() here (and for the same message in nvme_tcp_setup_ddp() below) instead of pr_info_ratelimited().
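
Something like this, assuming queue->ctrl->ctrl.device is the right device to
report against:

        dev_info_ratelimited(queue->ctrl->ctrl.device,
                             "netdev not found\n");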

+               return -EINVAL;
+       }
+
+       ret = netdev->tcp_ddp_ops->tcp_ddp_teardown(netdev, queue->sock->sk,
+                                                   &req->ddp, rq);
+       sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
+       req->offloaded = false;
+       return ret;
+}
+
+void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
+{
+       struct request *rq = ddp_ctx;
+       struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+       if (!nvme_try_complete_req(rq, cpu_to_le16(req->status << 1), req->result))
+               nvme_complete_rq(rq);
+}
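
Note that tcp_ddp_teardown() completes asynchronously through this callback,
which is why the completion paths below stash status/result in the request
first. One problem, though: nvme_tcp_process_nvme_cqe() saves cqe->status
(already the full le16 status field) and nvme_tcp_end_request() saves
cpu_to_le16(status << 1), so shifting again here double-encodes the status.
Passing the saved value straight through looks right:

        if (!nvme_try_complete_req(rq, req->status, req->result))
                nvme_complete_rq(rq);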
+
+static
+int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
+                      uint16_t command_id,
+                      struct request *rq)
+{
+       struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+       struct net_device *netdev = queue->ctrl->offloading_netdev;
+       int ret;
+
+       req->offloaded = false;
+
+       if (unlikely(!netdev)) {
+               pr_info_ratelimited("%s: netdev not found\n", __func__);
+               return -EINVAL;
+       }
+
+       req->ddp.command_id = command_id;
+       req->ddp.sg_table.sgl = req->ddp.first_sgl;
+       ret = sg_alloc_table_chained(&req->ddp.sg_table,
+               blk_rq_nr_phys_segments(rq), req->ddp.sg_table.sgl,
+               SG_CHUNK_SIZE);
+       if (ret)
+               return -ENOMEM;

Nit: blank line here, between the error check and the mapping.

+       req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);
+
+       ret = netdev->tcp_ddp_ops->tcp_ddp_setup(netdev,
+                                                queue->sock->sk,
+                                                &req->ddp);
+       if (!ret)
+               req->offloaded = true;
+       return ret;
+}
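
If tcp_ddp_setup() fails, the chained sg_table allocated above is leaked,
since the teardown path only runs once req->offloaded is set. Freeing it on
the error path would cover that, e.g.:

        if (ret) {
                sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
                return ret;
        }
        req->offloaded = true;
        return 0;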
+
  static
  int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue,
                            struct nvme_tcp_config *config)
@@ -351,6 +422,25 @@ bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags)
  #else
+static
+int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
+                      uint16_t command_id,
+                      struct request *rq)
+{
+       return -EINVAL;
+}
+
+static
+int nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
+                         uint16_t command_id,
+                         struct request *rq)
+{
+       return -EINVAL;
+}
+
+void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
+{}
+
  static
  int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue,
                            struct nvme_tcp_config *config)
@@ -630,6 +720,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
  static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
                struct nvme_completion *cqe)
  {
+       struct nvme_tcp_request *req;
        struct request *rq;

        rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
@@ -641,8 +732,15 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
                return -EINVAL;
        }

-       if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
-               nvme_complete_rq(rq);
+       req = blk_mq_rq_to_pdu(rq);
+       if (req->offloaded) {
+               req->status = cqe->status;
+               req->result = cqe->result;
+               nvme_tcp_teardown_ddp(queue, cqe->command_id, rq);
+       } else {
+               if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
+                       nvme_complete_rq(rq);
+       }
        queue->nr_cqe++;

        return 0;
@@ -836,9 +934,18 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
  static inline void nvme_tcp_end_request(struct request *rq, u16 status)
  {
        union nvme_result res = {};
+       struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+       struct nvme_tcp_queue *queue = req->queue;
+       struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;

-       if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
-               nvme_complete_rq(rq);
+       if (req->offloaded) {
+               req->status = cpu_to_le16(status << 1);
+               req->result = res;
+               nvme_tcp_teardown_ddp(queue, pdu->command_id, rq);
+       } else {
+               if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
+                       nvme_complete_rq(rq);
+       }
  }
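
The offloaded-vs-inline completion logic is now duplicated between
nvme_tcp_process_nvme_cqe() and nvme_tcp_end_request(). A small helper (name
hypothetical) could keep the two paths in sync:

        static void nvme_tcp_complete_request(struct nvme_tcp_queue *queue,
                                              struct request *rq, u16 command_id,
                                              __le16 status, union nvme_result result)
        {
                struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

                if (req->offloaded) {
                        /* defer completion to nvme_tcp_ddp_teardown_done() */
                        req->status = status;
                        req->result = result;
                        nvme_tcp_teardown_ddp(queue, command_id, rq);
                        return;
                }

                if (!nvme_try_complete_req(rq, status, result))
                        nvme_complete_rq(rq);
        }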

  static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
@@ -1115,6 +1222,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
        bool inline_data = nvme_tcp_has_inline_data(req);
        u8 hdgst = nvme_tcp_hdgst_len(queue);
        int len = sizeof(*pdu) + hdgst - req->offset;
+       struct request *rq = blk_mq_rq_from_pdu(req);
        int flags = MSG_DONTWAIT;
        int ret;
@@ -1123,6 +1231,10 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
        else
                flags |= MSG_EOR;

+       if (test_bit(NVME_TCP_Q_OFFLOADS, &queue->flags) &&
+           blk_rq_nr_phys_segments(rq) && rq_data_dir(rq) == READ)
+               nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id, rq);

I'd assume that this is something we want to setup in
nvme_tcp_setup_cmd_pdu. Why do it here?
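
If it does move, presumably the same check would go at the end of
nvme_tcp_setup_cmd_pdu(), once the command id is filled in, roughly (untested
sketch; queue and pdu are the locals already available there):

        /* tail of nvme_tcp_setup_cmd_pdu(), before returning success */
        if (test_bit(NVME_TCP_Q_OFFLOADS, &queue->flags) &&
            blk_rq_nr_phys_segments(rq) && rq_data_dir(rq) == READ)
                nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id, rq);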
