From: Geetha sowjanya <gak...@marvell.com>

Upon receiving an FLR IRQ for a RVU PF, tear down or clean up
resources held by that PF_FUNC. This patch cleans up:
NIX LF
 - Stop ingress/egress traffic
 - Disable NPC MCAM entries being used.
 - Free Tx scheduler queues
 - Disable RQ/SQ/CQ HW contexts
NPA LF
 - Disable Pool/Aura HW contexts
In the future, teardown of SSO/SSOW/TIM/CPT will be added.

Also added a mailbox message for a RVU PF to request
AF to perform FLR for a RVU VF under it.

Signed-off-by: Geetha sowjanya <gak...@marvell.com>
Signed-off-by: Stanislaw Kardach <skard...@marvell.com>
Signed-off-by: Sunil Goutham <sgout...@marvell.com>
---
 drivers/net/ethernet/marvell/octeontx2/af/mbox.h   | 10 ++-
 drivers/net/ethernet/marvell/octeontx2/af/rvu.c    | 82 +++++++++++++++++++++-
 drivers/net/ethernet/marvell/octeontx2/af/rvu.h    |  2 +
 .../net/ethernet/marvell/octeontx2/af/rvu_nix.c    | 48 +++++++++++++
 .../net/ethernet/marvell/octeontx2/af/rvu_npa.c    | 17 +++++
 5 files changed, 157 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h 
b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 96dd59e..4ea7efd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -123,7 +123,8 @@ static inline struct mbox_msghdr 
*otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
 M(READY,               0x001, msg_req, ready_msg_rsp)                  \
 M(ATTACH_RESOURCES,    0x002, rsrc_attach, msg_rsp)                    \
 M(DETACH_RESOURCES,    0x003, rsrc_detach, msg_rsp)                    \
-M(MSIX_OFFSET,         0x004, msg_req, msix_offset_rsp)                \
+M(MSIX_OFFSET,         0x005, msg_req, msix_offset_rsp)                \
+M(VF_FLR,              0x006, msg_req, msg_rsp)                        \
 /* CGX mbox IDs (range 0x200 - 0x3FF) */                               \
 M(CGX_START_RXTX,      0x200, msg_req, msg_rsp)                        \
 M(CGX_STOP_RXTX,       0x201, msg_req, msg_rsp)                        \
@@ -213,6 +214,13 @@ struct msg_rsp {
        struct mbox_msghdr hdr;
 };
 
+/* RVU mailbox error codes
+ * Range 256 - 300.
+ */
+enum rvu_af_status {
+       RVU_INVALID_VF_ID           = -256,
+};
+
 struct ready_msg_rsp {
        struct mbox_msghdr hdr;
        u16    sclk_feq;        /* SCLK frequency */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 0f5923a..60340dc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -29,6 +29,7 @@ static void rvu_set_msix_offset(struct rvu *rvu, struct 
rvu_pfvf *pfvf,
                                struct rvu_block *block, int lf);
 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                  struct rvu_block *block, int lf);
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
 
 /* Supported devices */
 static const struct pci_device_id rvu_id_table[] = {
@@ -1320,6 +1321,26 @@ static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, 
struct msg_req *req,
        return 0;
 }
 
+static int rvu_mbox_handler_VF_FLR(struct rvu *rvu, struct msg_req *req,
+                                  struct msg_rsp *rsp)
+{
+       u16 pcifunc = req->hdr.pcifunc;
+       u16 vf, numvfs;
+       u64 cfg;
+
+       vf = pcifunc & RVU_PFVF_FUNC_MASK;
+       cfg = rvu_read64(rvu, BLKADDR_RVUM,
+                        RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+       numvfs = (cfg >> 12) & 0xFF;
+
+       if (vf && vf <= numvfs)
+               __rvu_flr_handler(rvu, pcifunc);
+       else
+               return RVU_INVALID_VF_ID;
+
+       return 0;
+}
+
 static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
                                struct mbox_msghdr *req)
 {
@@ -1601,14 +1622,73 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
                    INTR_MASK(hw->total_pfs) & ~1ULL);
 }
 
+static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
+{
+       struct rvu_block *block;
+       int slot, lf, num_lfs;
+       int err;
+
+       block = &rvu->hw->block[blkaddr];
+       num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+                                       block->type);
+       if (!num_lfs)
+               return;
+       for (slot = 0; slot < num_lfs; slot++) {
+               lf = rvu_get_lf(rvu, block, pcifunc, slot);
+               if (lf < 0)
+                       continue;
+
+               /* Cleanup LF and reset it */
+               if (block->addr == BLKADDR_NIX0)
+                       rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
+               else if (block->addr == BLKADDR_NPA)
+                       rvu_npa_lf_teardown(rvu, pcifunc, lf);
+
+               err = rvu_lf_reset(rvu, block, lf);
+               if (err) {
+                       dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+                               block->addr, lf);
+               }
+       }
+}
+
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+       mutex_lock(&rvu->flr_lock);
+       /* Reset order should reflect inter-block dependencies:
+        * 1. Reset any packet/work sources (NIX, CPT, TIM)
+        * 2. Flush and reset SSO/SSOW
+        * 3. Cleanup pools (NPA)
+        */
+       rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+       rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+       rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
+       rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
+       rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
+       rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+       rvu_detach_rsrcs(rvu, NULL, pcifunc);
+       mutex_unlock(&rvu->flr_lock);
+}
+
 static void rvu_flr_handler(struct work_struct *work)
 {
        struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
        struct rvu *rvu = flrwork->rvu;
-       u16 pf;
+       u16 pcifunc, numvfs, vf;
+       u64 cfg;
+       int pf;
 
        pf = flrwork - rvu->flr_wrk;
 
+       cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+       numvfs = (cfg >> 12) & 0xFF;
+       pcifunc  = pf << RVU_PFVF_PF_SHIFT;
+
+       for (vf = 0; vf < numvfs; vf++)
+               __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
+
+       __rvu_flr_handler(rvu, pcifunc);
+
        /* Signal FLR finish */
        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 510ae33..9e3843e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -325,6 +325,7 @@ int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, 
struct msg_req *req,
 /* NPA APIs */
 int rvu_npa_init(struct rvu *rvu);
 void rvu_npa_freemem(struct rvu *rvu);
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
 int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
                                struct npa_aq_enq_req *req,
                                struct npa_aq_enq_rsp *rsp);
@@ -342,6 +343,7 @@ bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
 int rvu_nix_init(struct rvu *rvu);
 void rvu_nix_freemem(struct rvu *rvu);
 int rvu_get_nixlf_count(struct rvu *rvu);
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
 int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
                                  struct nix_lf_alloc_req *req,
                                  struct nix_lf_alloc_rsp *rsp);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d8d8947..ad99cf7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -105,6 +105,17 @@ static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo 
*hw, int blkaddr)
        return NULL;
 }
 
+static void nix_rx_sync(struct rvu *rvu, int blkaddr)
+{
+       int err;
+
+       /*Sync all in flight RX packets to LLC/DRAM */
+       rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+       err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+       if (err)
+               dev_err(rvu->dev, "NIX RX software sync failed\n");
+}
+
 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
                            int lvl, u16 pcifunc, u16 schq)
 {
@@ -2281,3 +2292,40 @@ int rvu_mbox_handler_NIX_LF_STOP_RX(struct rvu *rvu, 
struct msg_req *req,
        rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
        return 0;
 }
+
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
+{
+       struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+       struct hwctx_disable_req ctx_req;
+       int err;
+
+       ctx_req.hdr.pcifunc = pcifunc;
+
+       /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+       nix_interface_deinit(rvu, pcifunc, nixlf);
+       nix_rx_sync(rvu, blkaddr);
+       nix_txschq_free(rvu, pcifunc);
+
+       if (pfvf->sq_ctx) {
+               ctx_req.ctype = NIX_AQ_CTYPE_SQ;
+               err = nix_lf_hwctx_disable(rvu, &ctx_req);
+               if (err)
+                       dev_err(rvu->dev, "SQ ctx disable failed\n");
+       }
+
+       if (pfvf->rq_ctx) {
+               ctx_req.ctype = NIX_AQ_CTYPE_RQ;
+               err = nix_lf_hwctx_disable(rvu, &ctx_req);
+               if (err)
+                       dev_err(rvu->dev, "RQ ctx disable failed\n");
+       }
+
+       if (pfvf->cq_ctx) {
+               ctx_req.ctype = NIX_AQ_CTYPE_CQ;
+               err = nix_lf_hwctx_disable(rvu, &ctx_req);
+               if (err)
+                       dev_err(rvu->dev, "CQ ctx disable failed\n");
+       }
+
+       nix_ctx_free(rvu, pfvf);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 7531fdc..887daaa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -470,3 +470,20 @@ void rvu_npa_freemem(struct rvu *rvu)
        block = &hw->block[blkaddr];
        rvu_aq_free(rvu, block->aq);
 }
+
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
+{
+       struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+       struct hwctx_disable_req ctx_req;
+
+       /* Disable all pools */
+       ctx_req.hdr.pcifunc = pcifunc;
+       ctx_req.ctype = NPA_AQ_CTYPE_POOL;
+       npa_lf_hwctx_disable(rvu, &ctx_req);
+
+       /* Disable all auras */
+       ctx_req.ctype = NPA_AQ_CTYPE_AURA;
+       npa_lf_hwctx_disable(rvu, &ctx_req);
+
+       npa_ctx_free(rvu, pfvf);
+}
-- 
2.7.4

Reply via email to