diff -Nru qtbase-opensource-src-5.15.15+dfsg/debian/changelog qtbase-opensource-src-5.15.15+dfsg/debian/changelog
--- qtbase-opensource-src-5.15.15+dfsg/debian/changelog	2025-06-29 19:50:45.000000000 +0000
+++ qtbase-opensource-src-5.15.15+dfsg/debian/changelog	2025-12-05 22:31:05.000000000 +0000
@@ -1,3 +1,11 @@
+qtbase-opensource-src (5.15.15+dfsg-6+deb13u1) trixie; urgency=medium
+
+  * Non-maintainer upload.
+  * Backport two upstream patches to fix data races in QReadWriteLock
+    (closes: #1122641).
+
+ -- Miao Wang <shankerwangmiao@gmail.com>  Sat, 06 Dec 2025 06:31:05 +0800
+
 qtbase-opensource-src (5.15.15+dfsg-6) unstable; urgency=medium
 
   * Backport upstream patch to fix assertion errors in data: URL parsing
diff -Nru qtbase-opensource-src-5.15.15+dfsg/debian/patches/qreadwritelock_data_race.diff qtbase-opensource-src-5.15.15+dfsg/debian/patches/qreadwritelock_data_race.diff
--- qtbase-opensource-src-5.15.15+dfsg/debian/patches/qreadwritelock_data_race.diff	1970-01-01 00:00:00.000000000 +0000
+++ qtbase-opensource-src-5.15.15+dfsg/debian/patches/qreadwritelock_data_race.diff	2025-12-05 22:31:05.000000000 +0000
@@ -0,0 +1,33 @@
+Description: QReadWriteLock: fix data race on the d_ptr members
+ The loadRelaxed() at the beginning of tryLockForRead/tryLockForWrite
+ isn't enough to make the non-atomic write of the recursive bool visible.
+ Same issue with the std::mutex itself.
+Origin: upstream, https://code.qt.io/cgit/qt/qtbase.git/commit?id=80d01c4ccb697b9d
+Last-Update: 2025-12-05
+
+--- a/src/corelib/thread/qreadwritelock.cpp
++++ b/src/corelib/thread/qreadwritelock.cpp
+@@ -258,7 +258,10 @@ bool QReadWriteLock::tryLockForRead(int
+             d = val;
+         }
+         Q_ASSERT(!isUncontendedLocked(d));
+-        // d is an actual pointer;
++        // d is an actual pointer; acquire its contents
++        d = d_ptr.loadAcquire();
++        if (!d || isUncontendedLocked(d))
++            continue;
+ 
+         if (d->recursive)
+             return d->recursiveLockForRead(timeout);
+@@ -365,7 +368,10 @@ bool QReadWriteLock::tryLockForWrite(int
+             d = val;
+         }
+         Q_ASSERT(!isUncontendedLocked(d));
+-        // d is an actual pointer;
++        // d is an actual pointer; acquire its contents
++        d = d_ptr.loadAcquire();
++        if (!d || isUncontendedLocked(d))
++            continue;
+ 
+         if (d->recursive)
+             return d->recursiveLockForWrite(timeout);
diff -Nru qtbase-opensource-src-5.15.15+dfsg/debian/patches/qreadwritelock_data_race_2.diff qtbase-opensource-src-5.15.15+dfsg/debian/patches/qreadwritelock_data_race_2.diff
--- qtbase-opensource-src-5.15.15+dfsg/debian/patches/qreadwritelock_data_race_2.diff	1970-01-01 00:00:00.000000000 +0000
+++ qtbase-opensource-src-5.15.15+dfsg/debian/patches/qreadwritelock_data_race_2.diff	2025-12-05 22:31:05.000000000 +0000
@@ -0,0 +1,163 @@
+Description: QReadWriteLock: fix data race on weakly-ordered memory architectures
+ The fix changes the relaxed load of d_ptr in lockFor{Read,Write} after
+ the acquire of the mutex to an acquire load, to establish
+ synchronization with the release store of d_ptr when converting from an
+ uncontended lock to a contended lock.
+Origin: upstream, https://code.qt.io/cgit/qt/qtbase.git/commit?id=4fd88011fa7975ce
+Last-Update: 2025-12-05
+
+--- a/src/corelib/thread/qreadwritelock.cpp
++++ b/src/corelib/thread/qreadwritelock.cpp
+@@ -267,14 +267,14 @@ bool QReadWriteLock::tryLockForRead(int
+             return d->recursiveLockForRead(timeout);
+ 
+         auto lock = qt_unique_lock(d->mutex);
+-        if (d != d_ptr.loadRelaxed()) {
++        if (QReadWriteLockPrivate *dd = d_ptr.loadAcquire(); d != dd) {
+             // d_ptr has changed: this QReadWriteLock was unlocked before we had
+             // time to lock d->mutex.
+             // We are holding a lock to a mutex within a QReadWriteLockPrivate
+             // that is already released (or even is already re-used). That's ok
+             // because the QFreeList never frees them.
+             // Just unlock d->mutex (at the end of the scope) and retry.
+-            d = d_ptr.loadAcquire();
++            d = dd;
+             continue;
+         }
+         return d->lockForRead(timeout);
+@@ -377,11 +377,11 @@ bool QReadWriteLock::tryLockForWrite(int
+             return d->recursiveLockForWrite(timeout);
+ 
+         auto lock = qt_unique_lock(d->mutex);
+-        if (d != d_ptr.loadRelaxed()) {
++        if (QReadWriteLockPrivate *dd = d_ptr.loadAcquire(); d != dd) {
+             // The mutex was unlocked before we had time to lock the mutex.
+             // We are holding to a mutex within a QReadWriteLockPrivate that is already released
+             // (or even is already re-used) but that's ok because the QFreeList never frees them.
+-            d = d_ptr.loadAcquire();
++            d = dd;
+             continue;
+         }
+         return d->lockForWrite(timeout);
+--- a/tests/auto/corelib/thread/qreadwritelock/tst_qreadwritelock.cpp
++++ b/tests/auto/corelib/thread/qreadwritelock/tst_qreadwritelock.cpp
+@@ -85,6 +85,7 @@ private slots:
+     void multipleReadersLoop();
+     void multipleWritersLoop();
+     void multipleReadersWritersLoop();
++    void heavyLoadLocks();
+     void countingTest();
+     void limitedReaders();
+     void deleteOnUnlock();
+@@ -635,6 +636,111 @@ public:
+     }
+ };
+ 
++class HeavyLoadLockThread : public QThread
++{
++public:
++    QReadWriteLock &testRwlock;
++    const qsizetype iterations;
++    const int numThreads;
++    inline HeavyLoadLockThread(QReadWriteLock &l, qsizetype iters, int numThreads, QVector<QAtomicInt *> &counters):
++        testRwlock(l),
++        iterations(iters),
++        numThreads(numThreads),
++        counters(counters)
++    { }
++
++private:
++    QVector<QAtomicInt *> &counters;
++    QAtomicInt *getCounter(qsizetype index)
++    {
++        QReadLocker locker(&testRwlock);
++        /*
++          The index is increased monotonically, so the index
++          being requested should be always within or at the end of the
++          counters vector.
++        */
++        Q_ASSERT(index <= counters.size());
++        if (counters.size() <= index || counters[index] == nullptr) {
++            locker.unlock();
++            QWriteLocker wlocker(&testRwlock);
++            if (counters.size() <= index)
++                counters.resize(index + 1, nullptr);
++            if (counters[index] == nullptr)
++                counters[index] = new QAtomicInt(0);
++            return counters[index];
++        }
++        return counters[index];
++    }
++    void releaseCounter(qsizetype index)
++    {
++        QWriteLocker locker(&testRwlock);
++        delete counters[index];
++        counters[index] = nullptr;
++    }
++
++public:
++    void run() override
++    {
++        for (qsizetype i = 0; i < iterations; ++i) {
++            QAtomicInt *counter = getCounter(i);
++            /*
++                Here each counter is accessed by each thread
++                and increased only once. As a result, when the
++                counter reaches numThreads, i.e. the fetched
++                value before the increment is numThreads-1,
++                we know all threads have accessed this counter
++                and we can delete it safely.
++            */
++            int prev = counter->fetchAndAddRelaxed(1);
++            if (prev == numThreads - 1) {
++#ifdef QT_BUILDING_UNDER_TSAN
++            /*
++                Under TSAN, deleting and freeing an object
++                will trigger a write operation on the memory
++                of the object. Since we used fetchAndAddRelaxed
++                to update the counter, TSAN will report a data
++                race when deleting the counter here. To avoid
++                the false positive, we simply reset the counter
++                to 0 here, with ordered semantics to establish
++                the sequence to ensure that the freeing operation
++                happens after all fetchAndAddRelaxed operations
++                in other threads.
++
++                When not building under TSAN, deleting the counter
++                will not result in any data read or written to the
++                memory region of the counter, so no data race will
++                happen.
++            */
++                counter->fetchAndStoreOrdered(0);
++#endif
++                releaseCounter(i);
++            }
++        }
++    }
++};
++
++/*
++    Multiple threads racing acquiring and releasing
++    locks on the same indices.
++*/
++
++void tst_QReadWriteLock::heavyLoadLocks()
++{
++    constexpr qsizetype iterations = 65536 * 4;
++    constexpr int numThreads = 8;
++    QVector<QAtomicInt *> counters;
++    QReadWriteLock testLock;
++    std::array<std::unique_ptr<HeavyLoadLockThread>, numThreads> threads;
++    for (auto &thread : threads)
++        thread = std::make_unique<HeavyLoadLockThread>(testLock, iterations, numThreads, counters);
++    for (auto &thread : threads)
++        thread->start();
++    for (auto &thread : threads)
++        thread->wait();
++    QVERIFY(counters.size() == iterations);
++    for (qsizetype i = 0; i < iterations; ++i)
++        QVERIFY(counters[i] == nullptr);
++}
+ 
+ /*
+     A writer acquires a read-lock, a reader locks
diff -Nru qtbase-opensource-src-5.15.15+dfsg/debian/patches/series qtbase-opensource-src-5.15.15+dfsg/debian/patches/series
--- qtbase-opensource-src-5.15.15+dfsg/debian/patches/series	2025-06-29 19:50:45.000000000 +0000
+++ qtbase-opensource-src-5.15.15+dfsg/debian/patches/series	2025-12-05 22:31:05.000000000 +0000
@@ -20,6 +20,8 @@
 check_dbus_tray_availability_every_time.diff
 a11y_null_checks.diff
 CVE-2025-5455.diff
+qreadwritelock_data_race.diff
+qreadwritelock_data_race_2.diff
 
 # Debian specific.
 no_htmlinfo_example.diff
