On Sat, Dec 09, 2023 at 12:28:10AM +0100, Alexander Bluhm wrote:
> On Sat, Dec 09, 2023 at 02:07:06AM +0300, Vitaliy Makkoveev wrote:
> > > > SLIST_ENTRY(pflow_softc) sc_next;
> > >
> > > This list is protected by net lock. Can you add an [N] here?
> > >
> >
> > This is not true. The netlock is not taken while export_pflow() is called
> > from pf_purge_states(). I privately shared the diff to fix this, but not
> > committed it yet. I will update it and share after committing this diff.
>
> Why not hold the shared net lock while in pf?
>
Because the export_pflow() should be called only if the PFSTATE_PFLOW
flag is set? The netlock should be taken before PF_LOCK, so there is no
reason to always take it in this path only to make optional
export_pflow() call:
pf_purge_expired_states(const unsigned int limit, const unsigned int collect)
{
/* skip */
rw_enter_write(&pf_state_list.pfs_rwl);
PF_LOCK();
PF_STATE_ENTER_WRITE();
SLIST_FOREACH(st, &gcl, gc_list) {
if (st->timeout != PFTM_UNLINKED)
pf_remove_state(st);
pf_free_state(st);
}
pf_remove_state(struct pf_state *st)
{
PF_ASSERT_LOCKED();
/* skip */
RBT_REMOVE(pf_state_tree_id, &tree_id, st);
#if NPFLOW > 0
if (st->state_flags & PFSTATE_PFLOW)
export_pflow(st);
However, I'm not the pf hacker, it's better to ask sashan@ and dlg@.
> My strategy is to convert exclusive net lock to shared net lock step
> by step. When this is complete, we can remove net lock completely.
> Until that happens, we hold exclusive net lock in corner cases and
> know that nothing can go wrong.
>
> Seems to be easier than to be aware of pflowif_list locking when
> working on pf.
>
I already made the diff which turns `pflowif_list' into SMR list. The
diff was tested by Hrvoje and ok'ed by sashan@.
Please note, the `pflowif_list' modifications rely on kernel lock
because it is always taken in the clone create and destroy paths. We
never marked SMR lists locking in the locks description, so I left this
empty. Do you have any idea how to mark it?
I want to commit this diff unless you have objections. After that only
`pflowstats' will have inconsistent protection.
Index: sys/net/if_pflow.c
===================================================================
RCS file: /cvs/src/sys/net/if_pflow.c,v
retrieving revision 1.102
diff -u -p -r1.102 if_pflow.c
--- sys/net/if_pflow.c 8 Dec 2023 23:15:44 -0000 1.102
+++ sys/net/if_pflow.c 9 Dec 2023 00:02:48 -0000
@@ -62,7 +62,7 @@
#define DPRINTF(x)
#endif
-SLIST_HEAD(, pflow_softc) pflowif_list;
+SMR_SLIST_HEAD(, pflow_softc) pflowif_list;
struct pflowstats pflowstats;
void pflowattach(int);
@@ -113,7 +113,7 @@ struct if_clone pflow_cloner =
void
pflowattach(int npflow)
{
- SLIST_INIT(&pflowif_list);
+ SMR_SLIST_INIT(&pflowif_list);
if_clone_attach(&pflow_cloner);
}
@@ -268,9 +268,8 @@ pflow_clone_create(struct if_clone *ifc,
task_set(&pflowif->sc_outputtask, pflow_output_process, pflowif);
/* Insert into list of pflows */
- NET_LOCK();
- SLIST_INSERT_HEAD(&pflowif_list, pflowif, sc_next);
- NET_UNLOCK();
+ KERNEL_ASSERT_LOCKED();
+ SMR_SLIST_INSERT_HEAD_LOCKED(&pflowif_list, pflowif, sc_next);
return (0);
}
@@ -284,9 +283,12 @@ pflow_clone_destroy(struct ifnet *ifp)
NET_LOCK();
sc->sc_dying = 1;
- SLIST_REMOVE(&pflowif_list, sc, pflow_softc, sc_next);
NET_UNLOCK();
+ KERNEL_ASSERT_LOCKED();
+ SMR_SLIST_REMOVE_LOCKED(&pflowif_list, sc, pflow_softc, sc_next);
+ smr_barrier();
+
timeout_del(&sc->sc_tmo);
timeout_del(&sc->sc_tmo6);
timeout_del(&sc->sc_tmo_tmpl);
@@ -812,7 +814,7 @@ export_pflow(struct pf_state *st)
sk = st->key[st->direction == PF_IN ? PF_SK_WIRE : PF_SK_STACK];
- SLIST_FOREACH(sc, &pflowif_list, sc_next) {
+ SMR_SLIST_FOREACH(sc, &pflowif_list, sc_next) {
mtx_enter(&sc->sc_mtx);
switch (sc->sc_version) {
case PFLOW_PROTO_5:
Index: sys/net/if_pflow.h
===================================================================
RCS file: /cvs/src/sys/net/if_pflow.h,v
retrieving revision 1.20
diff -u -p -r1.20 if_pflow.h
--- sys/net/if_pflow.h 8 Dec 2023 23:13:40 -0000 1.20
+++ sys/net/if_pflow.h 9 Dec 2023 00:02:48 -0000
@@ -169,6 +169,8 @@ struct pflow_ipfix_flow6 {
#ifdef _KERNEL
+#include <sys/smr.h>
+
/*
* Locks used to protect struct members and global data
* I immutable after creation
@@ -207,7 +209,7 @@ struct pflow_softc {
mbuf */
struct mbuf *sc_mbuf6; /* [m] current cumulative
mbuf */
- SLIST_ENTRY(pflow_softc) sc_next;
+ SMR_SLIST_ENTRY(pflow_softc) sc_next;
};
extern struct pflow_softc *pflowif;