This is an automated email from the ASF dual-hosted git repository.

paulo pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 73063437b6c0ec8535f02488dca7afafc271961c
Merge: d2bdb2ffa8 9500eb129b
Author: Paulo Motta <[email protected]>
AuthorDate: Wed Apr 8 11:14:13 2026 -0400

    Merge branch 'cassandra-5.0' into trunk

 CHANGES.txt                                        |   1 +
 conf/cassandra.yaml                                |   6 +-
 conf/cassandra_latest.yaml                         |   2 +-
 .../cassandra/repair/autorepair/AutoRepair.java    |   1 +
 .../repair/autorepair/AutoRepairUtils.java         |  70 ++++++
 .../cassandra/distributed/impl/Instance.java       |   3 +
 .../repair/AutoRepairSchedulerStatsHelper.java     |   4 -
 .../test/repair/AutoRepairSchedulerTest.java       |  17 --
 .../distributed/upgrade/AutoRepairUpgradeTest.java | 248 +++++++++++++++++++++
 9 files changed, 325 insertions(+), 27 deletions(-)

diff --cc CHANGES.txt
index dbeb6c80f1,9a575f7f02..83b3bb7d67
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,327 -1,28 +1,328 @@@
 -5.0.8
 +6.0-alpha2
 + * Fix a removed TTLed row re-appearance in a materialized view after a 
cursor compaction (CASSANDRA-21152)
 + * Rework ZSTD dictionary compression logic to create a trainer per training 
(CASSANDRA-21209)
 +Merged from 5.0:
+  * Backport Automated Repair Inside Cassandra for CEP-37 (CASSANDRA-21138)
 - * Update cassandra-stress to support TLS 1.3 by default by auto-negotiation 
(CASSANDRA-21007)
 - * Ensure schema created before 2.1 without tableId in folder name can be 
loaded in SnapshotLoader (CASSANDRA-21173)
  Merged from 4.1:
  Merged from 4.0:
 -Backported from 6.0:
 + * Rate limit password changes (CASSANDRA-21202)
 +
 +
 +6.0-alpha1
 + * Improve performance when calculating settled placements during range 
movements (CASSANDRA-21144)
 + * Make shadow gossip round parameters configurable for testing 
(CASSANDRA-21149)
 + * Avoid potential gossip thread deadlock during decommission 
(CASSANDRA-21143)
 + * Improve construction of consensus groups for range movements 
(CASSANDRA-21142)
 + * Support compaction_read_disk_access_mode for cursor-based compaction 
(CASSANDRA-21147)
 + * Allow value/element indexing on frozen collections in SAI (CASSANDRA-18492)
 + * Add tool to offline dump cluster metadata and the log (CASSANDRA-21129)
 + * Send client warnings when writing to a large partition (CASSANDRA-17258)
 + * Harden the possible range of values for max dictionary size and max total 
sample size for dictionary training (CASSANDRA-21194)
 + * Implement a guardrail ensuring that minimum training frequency parameter 
is provided in ZstdDictionaryCompressor (CASSANDRA-21192)
 + * Replace manual referencing with ColumnFamilyStore.selectAndReference when 
training a dictionary (CASSANDRA-21188)
 + * Forbid nodes upgrading to a version which cannot read existing log entries 
(CASSANDRA-21174)
 + * Introduce a check for minimum time to pass to train or import a 
compression dictionary from the last one (CASSANDRA-21179)
 + * Allow overriding compaction strategy parameters during startup 
(CASSANDRA-21169)
 + * Introduce created_at column to system_distributed.compression_dictionaries 
(CASSANDRA-21178)
 + * Be able to detect and remove orphaned compression dictionaries 
(CASSANDRA-21157)
 + * Fix BigTableVerifier to only read a data file during extended verification 
(CASSANDRA-21150) 
 + * Reduce memory allocation during transformation of BatchStatement to 
Mutation (CASSANDRA-21141)
 + * Direct I/O support for compaction reads (CASSANDRA-19987)
 + * Support custom StartupCheck implementations via SPI (CASSANDRA-21093)
 + * Make sstableexpiredblockers support human-readable output with SSTable 
sizes (CASSANDRA-20448)
 + * Enhance nodetool compactionhistory to report more compaction properties 
(CASSANDRA-20081)
 + * Fix initial auto-repairs skipped by too soon check (CASSANDRA-21115)
 + * Add configuration to disk usage guardrails to stop writes across all 
replicas of a keyspace when any node replicating that keyspace exceeds the disk 
usage failure threshold. (CASSANDRA-21024)
 + * BETWEEN where token(Y) > token(Z) returns wrong answer (CASSANDRA-20154)
 + * Optimize memtable flush logic (CASSANDRA-21083)
 + * No need to evict already prepared statements, as it creates a race 
condition between multiple threads (CASSANDRA-17401)
 + * Include Level information for UnifiedCompactionStrategy in nodetool 
tablestats output (CASSANDRA-20820)
 + * Support low-overhead async profiling (CASSANDRA-20854)
 + * Minor perf optimizations around memtable put logic (CASSANDRA-21088)
 + * When level compaction validates its table properties, it used the wrong 
default value for sstable_size_in_mb which allowed properties that would later 
be rejected at runtime (CASSANDRA-20570)
 + * Fix off-by-one bug in exponential backoff for repair retry config 
(CASSANDRA-21102)
 + * Move training parameters for Zstd dictionary compression to CQL 
(CASSANDRA-21078)
 + * Add configuration for sorted imports in source files (CASSANDRA-17925)
 + * Change the eager reference counting of compression dictionaries to lazy 
(CASSANDRA-21074)
 + * Add cursor based optimized compaction path (CASSANDRA-20918)
 + * Ensure peers with LEFT status are expired from gossip state 
(CASSANDRA-21035)
 + * Optimize UTF8Validator.validate for ASCII prefixed Strings 
(CASSANDRA-21075)
 + * Switch LatencyMetrics to use ThreadLocalTimer/ThreadLocalCounter 
(CASSANDRA-21080)
 + * Accord: write rejections would be returned to users as server errors 
rather than INVALID and TxnReferenceOperation didn't handle all collections 
properly (CASSANDRA-21061)
 + * Use byte[] directly in QueryOptions instead of ByteBuffer and convert them 
to ArrayCell instead of BufferCell to reduce allocations (CASSANDRA-20166)
 + * Log queries scanning too many SSTables per read (CASSANDRA-21048)
 + * Extend nodetool verify to (optionally) validate SAI files (CASSANDRA-20949)
 + * Fix CompressionDictionary being closed while still in use (CASSANDRA-21047)
 + * When updating a multi cell collection element, if the update is rejected 
then the shared Row.Builder is not freed causing all future mutations to be 
rejected (CASSANDRA-21055)
 + * Schema annotations escape validation on CREATE and ALTER DDL statements 
(CASSANDRA-21046)
 + * Calculate once and cache the result of ModificationStatement#requiresRead 
as a perf optimization (CASSANDRA-21040)
 + * Update system schema tables with new distributed keyspace on upgrade 
(CASSANDRA-20872)
 + * Fix issue when running cms reconfiguration with paxos repair disabled 
(CASSANDRA-20869)
 + * Added additional parameter to JVM shutdown to allow for logs to be 
properly shutdown (CASSANDRA-20978)
 + * Improve isGossipOnlyMember and location lookup performance 
(CASSANDRA-21039)
 + * Refactor the way we check if a transformation is allowed to be committed 
during upgrades (CASSANDRA-21043)
 + * Improve debug around paused and disabled compaction 
(CASSANDRA-20131,CASSANDRA-19728)
 + * DiskUsageBroadcaster does not update usageInfo on node replacement 
(CASSANDRA-21033)
 + * Reject PrepareJoin if tokens are already assigned (CASSANDRA-21006)
 + * Don't update registration status if node state for decommissioned peer is 
found with the same address (CASSANDRA-21005)
 + * Avoid NPE when meta keyspace placements are empty before CMS is 
initialized (CASSANDRA-21004)
 + * Gossip entries for hibernating non-members don't block truncate 
(CASSANDRA-21003)
 + * Retry without time limit calculates wait time incorrectly (CASSANDRA-21002)
 + * Don't submit AlterSchemaStatements which produce no effect locally to the 
CMS (CASSANDRA-21001)
 + * Avoid iterating all prepared statements when getting 
PreparedStatementsCacheSize metric (CASSANDRA-21038)
 + * Reduce performance impact of TableMetadataRef.get and 
KeyspaceMetadataRef.get (CASSANDRA-20465)
 + * Improve CMS initialization (CASSANDRA-21036)
 + * Introducing comments and security labels for schema elements 
(CASSANDRA-20943)
 + * Extend nodetool tablestats for dictionary memory usage (CASSANDRA-20940)
 + * Introduce separate GCInspector thresholds for concurrent GC events 
(CASSANDRA-20980)
 + * Reduce contention in MemtableAllocator.allocate (CASSANDRA-20226)
 + * Add export, list, import sub-commands for nodetool compressiondictionary 
(CASSANDRA-20941)
 + * Add support in the binary protocol to allow transactions to have multiple 
conditions (CASSANDRA-20883)
 + * Enable CQLSSTableWriter to create SSTables compressed with a dictionary 
(CASSANDRA-20938)
 + * Support ZSTD dictionary compression (CASSANDRA-17021)
 + * Fix ExceptionsTable when stacktrace has zero elements (CASSANDRA-20992)
 + * Replace blocking wait with non-blocking delay in paxos repair 
(CASSANDRA-20983)
 + * Implementation of CEP-55 - Generation of role names (CASSANDRA-20897)
 + * Add cqlsh autocompletion for the identity mapping feature (CASSANDRA-20021)
 + * Add DDL Guardrail enabling administrators to disallow 
creation/modification of keyspaces with durable_writes = false (CASSANDRA-20913)
 + * Optimize Counter, Meter and Histogram metrics using thread local counters 
(CASSANDRA-20250)
 + * Update snakeyaml to 2.4 (CASSANDRA-20928)
 + * Update Netty to 4.1.125.Final (CASSANDRA-20925)
 + * Expose uncaught exceptions in system_views.uncaught_exceptions table 
(CASSANDRA-20858)
   * Improved observability in AutoRepair to report both expected vs. actual 
repair bytes and expected vs. actual keyspaces (CASSANDRA-20581)
 + * Execution of CreateTriggerStatement should not rely on external state 
(CASSANDRA-20287)
 + * Support LIKE expressions in filtering queries (CASSANDRA-17198)
 + * Make legacy index rebuilds safe on Gossip -> TCM upgrades (CASSANDRA-20887)
 + * Minor improvements and hardening for IndexHints (CASSANDRA-20888)
   * Stop repair scheduler if two major versions are detected (CASSANDRA-20048)
 - * AutoRepair: Safeguard Full repair against disk protection (CASSANDRA-20045)
 + * Optimize audit logic for batch operations especially when audit is not 
enabled for DML (CASSANDRA-20885)
 + * Implement nodetool history (CASSANDRA-20851)
 + * Expose StorageService.dropPreparedStatements via JMX (CASSANDRA-20870)
 + * Expose Metric for Prepared Statement Cache Size (in bytes) 
(CASSANDRA-20864)
 + * Add support for BEGIN TRANSACTION to allow mutations that touch multiple 
partitions (CASSANDRA-20857)
 + * AutoRepair: Safeguard Full repair against disk protection (CASSANDRA-20045)
 + * BEGIN TRANSACTION crashes if a mutation touches multiple rows 
(CASSANDRA-20844)
 + * Fix version range check in MessagingService.getVersionOrdinal 
(CASSANDRA-20842)
 + * Allow custom constraints to be loaded via SPI (CASSANDRA-20824)
 + * Optimize DataPlacement lookup by ReplicationParams (CASSANDRA-20804)
 + * Fix ShortPaxosSimulationTest and AccordSimulationRunner do not execute 
from the cli (CASSANDRA-20805)
 + * Allow overriding arbitrary settings via environment variables 
(CASSANDRA-20749)
 + * Optimize MessagingService.getVersionOrdinal (CASSANDRA-20816)
 + * Optimize TrieMemtable#getFlushSet (CASSANDRA-20760)
 + * Support manual secondary index selection at the CQL level (CASSANDRA-18112)
 + * When regulars CQL mutations run on Accord use the txn timestamp rather 
than server timestamp (CASSANDRA-20744)
 + * When using BEGIN TRANSACTION if a complex mutation exists in the same 
statement as one that uses a reference, then the complex delete is dropped 
(CASSANDRA-20788)
 + * Migrate all nodetool commands from airline to picocli (CASSANDRA-17445)
 + * Journal.TopologyUpdate should not store the local topology as it can be 
inferred from the global on (CASSANDRA-20785)
 + * Accord: Topology serializer has a lot of repeated data, can dedup to 
shrink the cost (CASSANDRA-20715)
 + * Stream individual files in their own transactions and hand over ownership 
to a parent transaction on completion (CASSANDRA-20728)
 + * Limit the number of held heap dumps to not consume disk space excessively 
(CASSANDRA-20457)
 + * Accord: BEGIN TRANSACTIONs IF condition logic does not properly support 
meaningless emptiness and null values (CASSANDRA-20667)
 + * Accord: startup race condition where accord journal tries to access the 2i 
index before its ready (CASSANDRA-20686)
 + * Adopt Unsafe::invokeCleaner for Direct ByteBuffer cleaning 
(CASSANDRA-20677)
 + * Add additional metrics around hints (CASSANDRA-20499)
 + * Support for add and replace in IntervalTree (CASSANDRA-20513)
 + * Enable single_sstable_uplevel by default for LCS (CASSANDRA-18509)
 + * Introduce NativeAccessor to avoid new ByteBuffer allocation on flush for 
each NativeCell (CASSANDRA-20173)
 + * Migrate sstableloader code to its own tools directory and artifact 
(CASSANDRA-20328)
   * Stop AutoRepair monitoring thread upon Cassandra shutdown (CASSANDRA-20623)
 - * Fix race condition in auto-repair scheduler (CASSANDRA-20265)
 - * Implement minimum repair task duration setting for auto-repair scheduler 
(CASSANDRA-20160)
 - * Implement preview_repaired auto-repair type (CASSANDRA-20046)
 - * Automated Repair Inside Cassandra for CEP-37 (CASSANDRA-19918)
 -
 -
 -5.0.7
 + * Avoid duplicate hardlink error upon forceful taking of ephemeral snapshots 
during repair (CASSANDRA-20490)
 + * When a custom disk error handler fails to initiate, fail the startup of a 
node instead of using the no-op handler (CASSANDRA-20614)
 + * Rewrite constraint framework to remove column specification from 
constraint definition, introduce SQL-like NOT NULL (CASSANDRA-20563)
 + * Fix a bug in AutoRepair duration metric calculation if schedule finishes 
quickly (CASSANDRA-20622)
 + * Fix AutoRepair flaky InJvm dtest (CASSANDRA-20620)
 + * Increasing default for auto_repair.sstable_upper_threshold considering 
large Cassandra tables & revert three lines removed from CHANGES.txt due to a 
merge mistake (CASSANDRA-20586)
 + * Fix token restrictions with MIN_TOKEN (CASSANDRA-20557)
 + * Upgrade logback version to 1.5.18 and slf4j dependencies to 2.0.17 
(CASSANDRA-20429)
 + * Switch memtable-related off-heap objects to Native Endian and Memory to 
Little Endian (CASSANDRA-20190)
 + * Change SSTableSimpleScanner to use SSTableReader#openDataReaderForScan 
(CASSANDRA-20538)
 + * Automated Repair Inside Cassandra [CEP-37] (CASSANDRA-19918)
 + * Implement appender of slow queries to system_views.slow_queries table 
(CASSANDRA-13001)
 + * Add autocompletion in CQLSH for built-in functions (CASSANDRA-19631)
 + * Grant permission on keyspaces system_views and system_virtual_schema not 
possible (CASSANDRA-20171)
 + * General Purpose Transactions (Accord) [CEP-15] (CASSANDRA-17092)
 + * Improve performance when getting writePlacementsAllSettled from 
ClusterMetadata (CASSANDRA-20526)
 + * Add nodetool command to dump the contents of the 
system_views.{cluster_metadata_log, cluster_metadata_directory} tables 
(CASSANDRA-20525)
 + * Fix TreeMap race in CollectionVirtualTableAdapter causing us to lose rows 
in the virtual table (CASSANDRA-20524)
 + * Improve metadata log catch up with inter-DC mutation forwarding 
(CASSANDRA-20523)
 + * Support topology-safe changes to Datacenter & Rack for live nodes 
(CASSANDRA-20528)
 + * Add SSTableIntervalTree latency metric (CASSANDRA-20502)
 + * Ignore repetitions of semicolon in CQLSH (CASSANDRA-19956)
 + * Avoid NPE during cms initialization abort (CASSANDRA-20527)
 + * Avoid failing queries when epoch changes and replica goes up/down 
(CASSANDRA-20489)
 + * Split out truncation record lock (CASSANDRA-20480)
 + * Throw new IndexBuildInProgressException when queries fail during index 
build, instead of IndexNotAvailableException (CASSANDRA-20402)
 + * Fix Paxos repair interrupts running transactions (CASSANDRA-20469)
 + * Various fixes in constraint framework (CASSANDRA-20481)
 + * Add support in CAS for -= on numeric types, and fixed improper handling of 
empty bytes which lead to NPE (CASSANDRA-20477)
 + * Do not fail to start a node with materialized views after they are turned 
off in config (CASSANDRA-20452)
 + * Fix nodetool gcstats output, support human-readable units and more output 
formats (CASSANDRA-19022)
 + * Various gossip to TCM upgrade fixes (CASSANDRA-20483)
 + * Add nodetool command to abort failed nodetool cms initialize 
(CASSANDRA-20482)
 + * Repair Paxos for the distributed metadata log when CMS membership changes 
(CASSANDRA-20467)
 + * Reintroduce CASSANDRA-17411 in trunk (CASSANDRA-19346)
 + * Add min/max/mean/percentiles to timer metrics vtable (CASSANDRA-20466)
 + * Add support for time, date, timestamp types in scalar constraint 
(CASSANDRA-20274)
 + * Add regular expression constraint (CASSANDRA-20275)
 + * Improve constraints autocompletion (CASSANDRA-20341)
 + * Add JVM version and Cassandra build date to nodetool version -v 
(CASSANDRA-19721)
 + * Move all disk error logic to DiskErrorsHandler to enable pluggability 
(CASSANDRA-20363)
 + * Fix marking an SSTable as suspected and BufferPool leakage in case of a 
corrupted SSTable read during a compaction (CASSANDRA-20396)
 + * Add missed documentation for CREATE TABLE LIKE (CASSANDRA-20401)
 + * Add OCTET_LENGTH constraint (CASSANDRA-20340)
 + * Reduce memory allocations in miscellaneous places along the hot write path 
(CASSANDRA-20167)
 + * Provide keystore_password_file and truststore_password_file options to 
read credentials from a file (CASSANDRA-13428)
 + * Unregistering a node should also remove it from tokenMap if it is there 
and recalculate the placements (CASSANDRA-20346)
 + * Fix PartitionUpdate.isEmpty deserialization issue to avoid potential 
EOFException (CASSANDRA-20345)
 + * Avoid adding LEFT nodes to tokenMap on upgrade from gossip 
(CASSANDRA-20344)
 + * Allow empty placements when deserializing cluster metadata 
(CASSANDRA-20343)
 + * Reduce heap pressure when initializing CMS (CASSANDRA-20267)
 + * Paxos Repair: NoSuchElementException on 
DistributedSchema.getKeyspaceMetadata (CASSANDRA-20320)
 + * Improve performance of DistributedSchema.validate for large schemas 
(CASSANDRA-20360)
 + * Add JSON constraint (CASSANDRA-20273)
 + * Prevent invalid constraint combinations (CASSANDRA-20330)
 + * Support CREATE TABLE LIKE WITH INDEXES (CASSANDRA-19965)
 + * Invalidate relevant prepared statements on every change to TableMetadata 
(CASSANDRA-20318)
 + * Add per type max size guardrails (CASSANDRA-19677)
 + * Make it possible to abort all kinds of multi step operations 
(CASSANDRA-20217)
 + * Do not leak non-Java exceptions when calling snapshot operations via JMX 
(CASSANDRA-20335)
 + * Implement NOT_NULL constraint (CASSANDRA-20276)
 + * Improve error messages for constraints (CASSANDRA-20266)
 + * Add system_views.partition_key_statistics for querying SSTable metadata 
(CASSANDRA-20161)
 + * CEP-42 - Add Constraints Framework (CASSANDRA-19947)
 + * Add table metric PurgeableTombstoneScannedHistogram and a tracing event 
for scanned purgeable tombstones (CASSANDRA-20132)
 + * Make sure we can parse the expanded CQL before writing it to the log or 
sending it to replicas (CASSANDRA-20218)
 + * Add format_bytes and format_time functions (CASSANDRA-19546)
 + * Fix error when trying to assign a tuple to target type not being a tuple 
(CASSANDRA-20237)
 + * Fail CREATE TABLE LIKE statement if UDTs in target keyspace do not exist 
or they have different structure from ones in source keyspace (CASSANDRA-19966)
 + * Support octet_length and length functions (CASSANDRA-20102)
 + * Make JsonUtils serialize Instant always with the same format 
(CASSANDRA-20209)
 + * Port Harry v2 to trunk (CASSANDRA-20200)
 + * Enable filtering of snapshots on keyspace, table and snapshot name in 
nodetool listsnapshots (CASSANDRA-20151)
 + * Create manifest upon loading where it does not exist or enrich it 
(CASSANDRA-20150)
 + * Propagate true size of snapshot in SnapshotDetailsTabularData to not call 
JMX twice in nodetool listsnapshots (CASSANDRA-20149)
 + * Implementation of CEP-43 - copying a table via CQL by CREATE TABLE LIKE 
(CASSANDRA-19964)
 + * Periodically disconnect roles that are revoked or have LOGIN=FALSE set 
(CASSANDRA-19385)
 + * AST library for CQL-based fuzz tests (CASSANDRA-20198)
 + * Support audit logging for JMX operations (CASSANDRA-20128)
 + * Enable sorting of nodetool status output (CASSANDRA-20104)
 + * Support downgrading after CMS is initialized (CASSANDRA-20145)
 + * Deprecate IEndpointSnitch (CASSANDRA-19488)
 + * Check presence of a snapshot in a case-insensitive manner on macOS 
platform to prevent hardlinking failures (CASSANDRA-20146)
 + * Enable JMX server configuration to be in cassandra.yaml (CASSANDRA-11695)
 + * Parallelized UCS compactions (CASSANDRA-18802)
 + * Avoid prepared statement invalidation race when committing schema changes 
(CASSANDRA-20116)
 + * Restore optimization in MultiCBuilder around building one clustering 
(CASSANDRA-20129)
 + * Consolidate all snapshot management to SnapshotManager and introduce 
SnapshotManagerMBean (CASSANDRA-18111)
 + * Fix RequestFailureReason constants codes (CASSANDRA-20126)
 + * Introduce SSTableSimpleScanner for compaction (CASSANDRA-20092)
 + * Include column drop timestamp in alter table transformation 
(CASSANDRA-18961)
 + * Make JMX SSL configurable in cassandra.yaml (CASSANDRA-18508)
 + * Fix cqlsh CAPTURE command to save query results without trace details when 
TRACING is ON (CASSANDRA-19105)
 + * Optionally prevent tombstone purging during repair (CASSANDRA-20071)
 + * Add post-filtering support for the IN operator in SAI queries 
(CASSANDRA-20025)
 + * Don’t finish ongoing decommission and move operations during startup 
(CASSANDRA-20040)
 + * Nodetool reconfigure cms has correct return code when streaming fails 
(CASSANDRA-19972)
 + * Reintroduce RestrictionSet#iterator() optimization around multi-column 
restrictions (CASSANDRA-20034)
 + * Explicitly localize strings to Locale.US for internal implementation 
(CASSANDRA-19953)
 + * Add -H option for human-friendly output in nodetool compactionhistory 
(CASSANDRA-20015)
 + * Fix type check for referenced duration type for nested types 
(CASSANDRA-19890)
 + * In simulation tests, correctly set the tokens of replacement nodes 
(CASSANDRA-19997)
 + * During TCM upgrade, retain all properties of existing system tables 
(CASSANDRA-19992)
 + * Properly cancel in-flight futures and reject requests in 
EpochAwareDebounce during shutdown (CASSANDRA-19848)
 + * Provide clearer exception message on failing commitlog_disk_access_mode 
combinations (CASSANDRA-19812)
 + * Add total space used for a keyspace to nodetool tablestats 
(CASSANDRA-19671)
 + * Ensure Relation#toRestriction() handles ReversedType properly 
(CASSANDRA-19950)
 + * Add JSON and YAML output option to nodetool gcstats (CASSANDRA-19771)
 + * Introduce metadata serialization version V4 (CASSANDRA-19970)
 + * Allow CMS reconfiguration to work around DOWN nodes (CASSANDRA-19943)
 + * Make TableParams.Serializer set allowAutoSnapshots and incrementalBackups 
(CASSANDRA-19954)
 + * Make sstabledump possible to show tombstones only (CASSANDRA-19939)
 + * Ensure that RFP queries potentially stale replicas even with only key 
columns in the row filter (CASSANDRA-19938)
 + * Allow nodes to change IP address while upgrading to TCM (CASSANDRA-19921)
 + * Retain existing keyspace params on system tables after upgrade 
(CASSANDRA-19916)
 + * Deprecate use of gossip state for paxos electorate verification 
(CASSANDRA-19904)
 + * Update dtest-api to 0.0.17 to fix jvm17 crash in jvm-dtests 
(CASSANDRA-19239)
 + * Add resource leak test and Update Netty to 4.1.113.Final to fix leak 
(CASSANDRA-19783)
 + * Fix incorrect nodetool suggestion when gossip mode is running 
(CASSANDRA-19905)
 + * SAI support for BETWEEN operator (CASSANDRA-19688)
 + * Fix BETWEEN filtering for reversed clustering columns (CASSANDRA-19878)
 + * Retry if node leaves CMS while committing a transformation 
(CASSANDRA-19872)
 + * Add support for NOT operators in WHERE clauses. Fixed Three Valued Logic 
(CASSANDRA-18584)
 + * Allow getendpoints for system tables and make sure getNaturalReplicas work 
for MetaStrategy (CASSANDRA-19846)
 + * On upgrade, handle pre-existing tables with unexpected table ids 
(CASSANDRA-19845)
 + * Reconfigure CMS before assassinate (CASSANDRA-19768)
 + * Warn about unqualified prepared statement only if it is select or 
modification statement (CASSANDRA-18322)
 + * Update legacy peers tables during node replacement (CASSANDRA-19782)
 + * Refactor ColumnCondition (CASSANDRA-19620)
 + * Allow configuring log format for Audit Logs (CASSANDRA-19792)
 + * Support for noboolean rpm (centos7 compatible) packages removed 
(CASSANDRA-19787)
 + * Allow threads waiting for the metadata log follower to be interrupted 
(CASSANDRA-19761)
 + * Support dictionary lookup for CassandraPasswordValidator (CASSANDRA-19762)
 + * Disallow denylisting keys in system_cluster_metadata (CASSANDRA-19713)
 + * Fix gossip status after replacement (CASSANDRA-19712)
 + * Ignore repair requests for system_cluster_metadata (CASSANDRA-19711)
 + * Avoid ClassCastException when verifying tables with reversed partitioner 
(CASSANDRA-19710)
 + * Always repair the full range when repairing system_cluster_metadata 
(CASSANDRA-19709)
 + * Use table-specific partitioners during Paxos repair (CASSANDRA-19714)
 + * Expose current compaction throughput in nodetool (CASSANDRA-13890)
 + * CEP-24 Password validation / generation (CASSANDRA-17457)
 + * Reconfigure CMS after replacement, bootstrap and move operations 
(CASSANDRA-19705)
 + * Support querying LocalStrategy tables with any partitioner 
(CASSANDRA-19692)
 + * Relax slow_query_log_timeout for MultiNodeSAITest (CASSANDRA-19693)
 + * Audit Log entries are missing identity for mTLS connections 
(CASSANDRA-19669)
 + * Add support for the BETWEEN operator in WHERE clauses (CASSANDRA-19604)
 + * Replace Stream iteration with for-loop for 
SimpleRestriction::bindAndGetClusteringElements (CASSANDRA-19679)
 + * Consolidate logging on trace level (CASSANDRA-19632)
 + * Expand DDL statements on coordinator before submission to the CMS 
(CASSANDRA-19592)
 + * Fix number of arguments of String.format() in various classes 
(CASSANDRA-19645)
 + * Remove unused fields from config (CASSANDRA-19599)
 + * Refactor Relation and Restriction hierarchies (CASSANDRA-19341)
 + * Raise priority of TCM internode messages during critical operations 
(CASSANDRA-19517)
 + * Add nodetool command to unregister LEFT nodes (CASSANDRA-19581)
 + * Add cluster metadata id to gossip syn messages (CASSANDRA-19613)
 + * Reduce heap usage occupied by the metrics (CASSANDRA-19567)
 + * Improve handling of transient replicas during range movements 
(CASSANDRA-19344)
 + * Enable debounced internode log requests to be cancelled at shutdown 
(CASSANDRA-19514)
 + * Correctly set last modified epoch when combining multistep operations into 
a single step (CASSANDRA-19538)
 + * Add new TriggersPolicy configuration to allow operators to disable 
triggers (CASSANDRA-19532)
 + * Use Transformation.Kind.id in local and distributed log tables 
(CASSANDRA-19516)
 + * Remove period field from ClusterMetadata and metadata log tables 
(CASSANDRA-19482)
 + * Enrich system_views.pending_hints vtable with hints sizes (CASSANDRA-19486)
 + * Expose all dropwizard metrics in virtual tables (CASSANDRA-14572)
 + * Ensured that PropertyFileSnitchTest does not overwrite 
cassandra-topology.properties (CASSANDRA-19502)
 + * Add option for MutualTlsAuthenticator to restrict the certificate validity 
period (CASSANDRA-18951)
 + * Fix StorageService::constructRangeToEndpointMap for non-distributed 
keyspaces (CASSANDRA-19255)
 + * Group nodetool cms commands into single command group (CASSANDRA-19393)
 + * Register the measurements of the bootstrap process as Dropwizard metrics 
(CASSANDRA-19447)
 + * Add LIST SUPERUSERS CQL statement (CASSANDRA-19417)
 + * Modernize CQLSH datetime conversions (CASSANDRA-18879)
 + * Harry model and in-JVM tests for partition-restricted 2i queries 
(CASSANDRA-18275)
 + * Refactor cqlshmain global constants (CASSANDRA-19201)
 + * Remove native_transport_port_ssl (CASSANDRA-19397)
 + * Make nodetool reconfigurecms sync by default and add --cancel to be able 
to cancel ongoing reconfigurations (CASSANDRA-19216)
 + * Expose auth mode in system_views.clients, nodetool clientstats, metrics 
(CASSANDRA-19366)
 + * Remove sealed_periods and last_sealed_period tables (CASSANDRA-19189)
 + * Improve setup and initialisation of LocalLog/LogSpec (CASSANDRA-19271)
 + * Refactor structure of caching metrics and expose auth cache metrics via 
JMX (CASSANDRA-17062)
 + * Allow CQL client certificate authentication to work without sending an 
AUTHENTICATE request (CASSANDRA-18857)
 + * Extend nodetool tpstats and system_views.thread_pools with detailed pool 
parameters (CASSANDRA-19289)
 + * Remove dependency on Sigar in favor of OSHI (CASSANDRA-16565)
 + * Simplify the bind marker and Term logic (CASSANDRA-18813)
 + * Limit cassandra startup to supported JDKs, allow higher JDKs by setting 
CASSANDRA_JDK_UNSUPPORTED (CASSANDRA-18688)
 + * Standardize nodetool tablestats formatting of data units (CASSANDRA-19104)
 + * Make nodetool tablestats use number of significant digits for time and 
average values consistently (CASSANDRA-19015)
 + * Upgrade jackson to 2.15.3 and snakeyaml to 2.1 (CASSANDRA-18875)
 + * Transactional Cluster Metadata [CEP-21] (CASSANDRA-18330)
 + * Add ELAPSED command to cqlsh (CASSANDRA-18861)
 + * Add the ability to disable bulk loading of SSTables (CASSANDRA-18781)
 + * Clean up obsolete functions and simplify cql_version handling in cqlsh 
(CASSANDRA-18787)
 +Merged from 5.0:
 + * Update cassandra-stress to support TLS 1.3 by default by auto-negotiation 
(CASSANDRA-21007)
   * Refactor SAI ANN query execution to use score ordered iterators for 
correctness and speed (CASSANDRA-20086)
 - * Disallow binding an identity to a superuser when the user is a regular 
user (CASSANDRA-21219)
   * Fix ConcurrentModificationException in compaction garbagecollect 
(CASSANDRA-21065)
   * Dynamically skip sharding L0 when SAI Vector index present 
(CASSANDRA-19661)
 - * Optionally force IndexStatusManager to use the optimized index status 
format (CASSANDRA-21132) 
 - * No need to evict already prepared statements, as it creates a race 
condition between multiple threads (CASSANDRA-17401)
 - * Upgrade logback version to 1.5.18 and slf4j dependencies to 2.0.17 
(CASSANDRA-21137)
 + * Optionally force IndexStatusManager to use the optimized index status 
format (CASSANDRA-21132)
   * Automatically disable zero-copy streaming for legacy sstables with old 
bloom filter format (CASSANDRA-21092)
   * Fix CQLSSTableWriter serialization of vector of date and time 
(CASSANDRA-20979)
   * Correctly calculate default for FailureDetector max interval 
(CASSANDRA-21025)
diff --cc conf/cassandra.yaml
index 42935df4d6,e3472fc227..fdfb98403d
--- a/conf/cassandra.yaml
+++ b/conf/cassandra.yaml
@@@ -2757,15 -2295,11 +2757,11 @@@ storage_compatibility_mode: NON
  # the given value.  Defaults to disabled.
  # reject_repair_compaction_threshold: 1024
  
 -# Ratio of disk that must be unused to run repair. It is useful to avoid 
disks filling up during
 +# At least 20% of disk must be unused to run repair. It is useful to avoid 
disks filling up during
  # repair as anti-compaction during repair may contribute to additional space 
temporarily.
 -# For example, setting this to 0.2 means at least 20% of disk must be unused.
 -# Set to 0.0 to disable this check. Defaults to 0.0 (disabled) on 5.0 for 
backward-compatibility.
 -# repair_disk_headroom_reject_ratio: 0.0
 +# if you want to disable this feature (the recommendation is not to, but if 
you want to disable it for whatever reason)
 +# then set the ratio to 0.0
- # repair_disk_headroom_reject_ratio: 0.2;
- 
- # This is the deprecated config which was used to safeguard incremental 
repairs. Use repair_disk_headroom_reject_ratio
- # instead as it safeguards against all repairs.
- # incremental_repair_disk_headroom_reject_ratio: 0.2;
++# repair_disk_headroom_reject_ratio: 0.2
  
  # Configuration for Auto Repair Scheduler.
  #
diff --cc conf/cassandra_latest.yaml
index 13b54aa53f,956e1e803b..382bef78d9
--- a/conf/cassandra_latest.yaml
+++ b/conf/cassandra_latest.yaml
@@@ -2502,11 -2253,11 +2502,11 @@@ storage_compatibility_mode: NON
  # the given value.  Defaults to disabled.
  # reject_repair_compaction_threshold: 1024
  
 -# Ratio of disk that must be unused to run repair. It is useful to avoid 
disks filling up during
 +# Minimum fraction of disk space that must be unused to run repair (the default
 +# of 0.2 requires at least 20% free). It is useful to avoid disks filling up during
  # repair as anti-compaction during repair may contribute to additional space 
temporarily.
 -# For example, setting this to 0.2 means at least 20% of disk must be unused.
 -# Set to 0.0 to disable this check. Defaults to 0.0 (disabled) on 5.0 for 
backward-compatibility.
 -# repair_disk_headroom_reject_ratio: 0.0
 +# To disable this check (not recommended), set the ratio to 0.0.
- # repair_disk_headroom_reject_ratio: 0.2;
++# repair_disk_headroom_reject_ratio: 0.2
  
  # Configuration for Auto Repair Scheduler.
  #
diff --cc src/java/org/apache/cassandra/repair/autorepair/AutoRepair.java
index 967fe540eb,692ef6641e..f0e31c4e59
--- a/src/java/org/apache/cassandra/repair/autorepair/AutoRepair.java
+++ b/src/java/org/apache/cassandra/repair/autorepair/AutoRepair.java
@@@ -132,6 -133,6 +132,7 @@@ public class AutoRepai
              }
  
              AutoRepairUtils.setup();
++            AutoRepairUtils.migrateAutoRepairHistoryForUpgrade();
  
              for (AutoRepairConfig.RepairType repairType : 
AutoRepairConfig.RepairType.values())
              {
diff --cc src/java/org/apache/cassandra/repair/autorepair/AutoRepairUtils.java
index d1a10f9d7e,82dd78caa1..e25da51157
--- a/src/java/org/apache/cassandra/repair/autorepair/AutoRepairUtils.java
+++ b/src/java/org/apache/cassandra/repair/autorepair/AutoRepairUtils.java
@@@ -237,6 -227,6 +237,76 @@@ public class AutoRepairUtil
                            ConsistencyLevel.LOCAL_QUORUM : 
ConsistencyLevel.ONE;
      }
  
++    /**
++     * Migrates auto_repair_history and auto_repair_priority entries from the 
pre-upgrade
++     * host ID to the post-upgrade host ID (NodeId-derived UUID).
++     * No-op if the node was not upgraded or migration already happened.
++     * Called once during AutoRepair.setup(), before repair scheduling begins.
++     */
++    public static void migrateAutoRepairHistoryForUpgrade()
++    {
++        try
++        {
++            Directory directory = ClusterMetadata.current().directory;
++            NodeId myNodeId = 
directory.peerId(FBUtilities.getBroadcastAddressAndPort());
++            if (myNodeId == null)
++                return;
++
++            UUID oldHostId = directory.hostId(myNodeId);
++            UUID newHostId = myNodeId.toUUID();
++
++            if (oldHostId.equals(newHostId))
++            {
++                logger.debug("No host ID migration needed — old and new IDs 
are identical ({})", newHostId);
++                return;
++            }
++
++            logger.info("Migrating auto-repair history from pre-upgrade host 
ID {} to new host ID {}", oldHostId, newHostId);
++
++            for (RepairType repairType : RepairType.values())
++            {
++                // Migrate auto_repair_history using the same distributed 
read/write path as AutoRepair
++                List<AutoRepairHistory> histories = 
getAutoRepairHistory(repairType);
++                if (histories != null)
++                {
++                    for (AutoRepairHistory entry : histories)
++                    {
++                        if (entry.hostId.equals(oldHostId))
++                        {
++                            // Insert new entry with the post-upgrade host 
ID, preserving timestamps
++                            insertNewRepairHistory(repairType, newHostId, 
entry.lastRepairStartTime, entry.lastRepairFinishTime);
++                            // Update start timestamp and repair turn to 
match the original entry
++                            if (entry.repairTurn != null)
++                                updateStartAutoRepairHistory(repairType, 
newHostId, entry.lastRepairStartTime, RepairTurn.valueOf(entry.repairTurn));
++                            // Delete the old entry
++                            deleteAutoRepairHistory(repairType, oldHostId);
++                            logger.info("Migrated auto_repair_history for 
repair type {} from {} to {}", repairType, oldHostId, newHostId);
++                            break;
++                        }
++                    }
++                }
++
++                // Migrate auto_repair_priority
++                Set<UUID> priorityIds = getPriorityHostIds(repairType);
++                if (priorityIds.contains(oldHostId))
++                {
++                    removePriorityStatus(repairType, oldHostId);
++                    SetSerializer<UUID> serializer = 
SetSerializer.getInstance(UUIDSerializer.instance, 
UTF8Type.instance.comparatorSet);
++                    addPriorityHost.execute(QueryState.forInternalCalls(),
++                                           
QueryOptions.forInternalCalls(internalQueryCL,
++                                                                         
Lists.newArrayList(serializer.serialize(Collections.singleton(newHostId)),
++                                                                              
              ByteBufferUtil.bytes(repairType.toString()))),
++                                           
Dispatcher.RequestTime.forImmediateExecution());
++                    logger.info("Migrated auto_repair_priority for repair 
type {} from {} to {}", repairType, oldHostId, newHostId);
++                }
++            }
++        }
++        catch (Exception e)
++        {
++            logger.error("Failed to migrate auto-repair history for upgrade", 
e);
++        }
++    }
++
      public static class AutoRepairHistory
      {
          UUID hostId;
diff --cc test/distributed/org/apache/cassandra/distributed/impl/Instance.java
index 791465eb60,b1d05a8a22..6f18007c67
--- a/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
@@@ -125,8 -112,10 +125,9 @@@ import org.apache.cassandra.net.Message
  import org.apache.cassandra.net.MessagingService;
  import org.apache.cassandra.net.NoPayload;
  import org.apache.cassandra.net.Verb;
 -import org.apache.cassandra.schema.MigrationCoordinator;
++import org.apache.cassandra.repair.autorepair.AutoRepair;
  import org.apache.cassandra.schema.Schema;
  import org.apache.cassandra.schema.SchemaConstants;
 -import org.apache.cassandra.repair.autorepair.AutoRepair;
  import org.apache.cassandra.service.ActiveRepairService;
  import org.apache.cassandra.service.CassandraDaemon;
  import org.apache.cassandra.service.ClientState;
@@@ -758,177 -815,6 +759,178 @@@ public class Instance extends IsolatedE
          });
      }
  
 +    protected void partialStartup(ICluster<?> cluster) throws IOException, 
NoSuchFieldException, IllegalAccessException, ExecutionException, 
InterruptedException, StartupException
 +    {
 +        if (config.has(GOSSIP))
 +        {
 +            // TODO: hacky
 +            RING_DELAY.setLong(15000);
 +            GOSSIP_SETTLE_MIN_WAIT_MS.setLong(1000);
 +            GOSSIP_SETTLE_POLL_INTERVAL_MS.setLong(300);
 +            CONSISTENT_RANGE_MOVEMENT.setBoolean(false);
 +            CONSISTENT_SIMULTANEOUS_MOVES_ALLOW.setBoolean(true);
 +        }
 +
 +        assert config.networkTopology().contains(config.broadcastAddress()) : 
String.format("Network topology %s doesn't contain the address %s",
 +                                                                              
              config.networkTopology(), config.broadcastAddress());
 +        
DistributedTestInitialLocationProvider.assign(config.networkTopology());
 +        if (config.has(JMX))
 +            setupMbeanWrapper();
 +        DatabaseDescriptor.daemonInitialization();
 +        if (config.has(JMX))
 +            startJmx();
 +        LoggingSupportFactory.getLoggingSupport().onStartup();
 +        logSystemInfo(inInstancelogger);
 +        Config.log(DatabaseDescriptor.getRawConfig());
 +
 +        DiskErrorsHandlerService.configure();
 +        CassandraDaemon.getInstanceForTesting().migrateSystemDataIfNeeded();
 +
 +        CommitLog.instance.start();
 +
 +        SnapshotManager.instance.start(false);
 +        SnapshotManager.instance.clearExpiredSnapshots();
 +        SnapshotManager.instance.clearEphemeralSnapshots();
 +        SnapshotManager.instance.resumeSnapshotCleanup();
 +
 +        CassandraDaemon.getInstanceForTesting().runStartupChecks();
 +
 +        Keyspace.setInitialized(); // TODO: this seems to be superfluous by 
now
 +        if (!config.has(NETWORK))
 +        {
 +            propagateMessagingVersions(cluster); // fake messaging needs to 
know messaging version for filters
 +        }
 +
 +        
CassandraDaemon.disableAutoCompaction(Schema.instance.localKeyspaces().names());
 +        Startup.initialize(DatabaseDescriptor.getSeeds(),
 +                           TestProcessor::new,
 +                           () -> {
 +                                if (config.has(NETWORK))
 +                                {
 +                                    
MessagingService.instance().waitUntilListeningUnchecked();
 +                                }
 +                                else
 +                                {
 +                                    // Even though we don't use 
MessagingService, access the static SocketFactory
 +                                    // instance here so that we start the 
static event loop state
 +                                     registerMockMessaging(cluster);
 +                                }
 +                                internodeMessagingStarted = true;
 +                                registerInboundFilter(cluster);
 +                                registerOutboundFilter(cluster);
 +        });
 +        
CassandraDaemon.disableAutoCompaction(Schema.instance.distributedKeyspaces().names());
 +        QueryProcessor.registerStatementInvalidatingListener();
 +        TestChangeListener.register();
 +
 +        // We need to persist this as soon as possible after startup checks.
 +        // This should be the first write to SystemKeyspace (CASSANDRA-11742)
 +        SystemKeyspace.persistLocalMetadata();
 +
 +        // Start up virtual table support
 +        CassandraDaemon.getInstanceForTesting().setupVirtualKeyspaces();
 +
 +        // Replay any CommitLogSegments found on disk
 +        try
 +        {
 +            CommitLog.instance.recoverSegmentsOnDisk();
 +            NodeId self = ClusterMetadata.current().myNodeId();
 +            if (self != null)
 +                AccordService.localStartup(self);
 +        }
 +        catch (IOException e)
 +        {
 +            throw new RuntimeException(e);
 +        }
 +
 +        // Re-populate token metadata after commit log recover (new peers 
might be loaded onto system keyspace #10293)
 +        //StorageService.instance.populateTokenMetadata();
 +
 +        try
 +        {
 +            PaxosState.maybeRebuildUncommittedState();
 +        }
 +        catch (IOException e)
 +        {
 +            throw new RuntimeException(e);
 +        }
 +
 +        Verb.HINT_REQ.unsafeSetSerializer(DTestSerializer::new);
 +
 +        JVMStabilityInspector.replaceKiller(new 
InstanceKiller(Instance.this::shutdown));
 +
 +        
StorageService.instance.registerDaemon(CassandraDaemon.getInstanceForTesting());
 +        if (config.has(GOSSIP))
 +        {
 +            try
 +            {
 +                StorageService.instance.initServer();
 +            }
 +            catch (Exception e)
 +            {
 +                // I am tired of looking up my notes for how to fix this... 
so why not tell the user?
 +                Throwable cause = 
com.google.common.base.Throwables.getRootCause(e);
 +                if (cause instanceof BindException && "Can't assign requested 
address".equals(cause.getMessage()))
 +                    throw new RuntimeException("Unable to bind, run the 
following in a termanl and try again:\nfor subnet in $(seq 0 5); do for id in 
$(seq 0 5); do sudo ifconfig lo0 alias \"127.0.$subnet.$id\"; done; done;", e);
 +                throw e;
 +            }
 +            StorageService.instance.removeShutdownHook();
 +        }
 +        else
 +        {
 +            Stream<?> peers = cluster.stream().filter(IInstance::isValid);
 +            Schema.instance.saveSystemKeyspace();
 +            ClusterMetadataService.instance().processor().fetchLogAndWait();
 +            NodeId self = Register.maybeRegister();
 +            RegistrationStatus.instance.onRegistration();
 +            if (!AccordService.isSetupOrStarting())
 +                AccordService.localStartup(self);
 +            AccordService.distributedStartup();
 +
 +            boolean joinRing = config.get(Constants.KEY_DTEST_JOIN_RING) == 
null || (boolean) config.get(Constants.KEY_DTEST_JOIN_RING);
 +            if (ClusterMetadata.current().directory.peerState(self) != 
NodeState.JOINED && joinRing)
 +            {
 +                ClusterMetadataService.instance().commit(new UnsafeJoin(self,
 +                                                                        new 
HashSet<>(BootStrapper.getBootstrapTokens(ClusterMetadata.current(), 
FBUtilities.getBroadcastAddressAndPort())),
 +                                                                        
ClusterMetadataService.instance().placementProvider()));
 +
 +                
SystemKeyspace.setBootstrapState(SystemKeyspace.BootstrapState.COMPLETED);
 +                if (config.has(BLANK_GOSSIP))
 +                    peers.forEach(peer -> 
GossipHelper.statusToBlank((IInvokableInstance) peer).accept(this));
 +                else if (cluster instanceof Cluster)
 +                    peers.forEach(peer -> 
GossipHelper.statusToNormal((IInvokableInstance) peer).accept(this));
 +                else
 +                    peers.forEach(peer -> 
GossipHelper.unsafeStatusToNormal(this, (IInstance) peer));
 +            }
 +            Gossiper.instance.register(StorageService.instance);
 +            StorageService.instance.unsafeSetInitialized();
 +        }
 +
 +        CassandraDaemon.getInstanceForTesting().completeSetup();
 +        CassandraDaemon.enableAutoCompaction(Schema.instance.getKeyspaces());
 +
 +        AuditLogManager.instance.initialize();
 +
 +        if (config.has(NATIVE_PROTOCOL))
 +        {
 +            
CassandraDaemon.getInstanceForTesting().initializeClientTransports();
 +            CassandraDaemon.getInstanceForTesting().start();
 +        }
 +
 +        if 
(!FBUtilities.getBroadcastAddressAndPort().getAddress().equals(broadcastAddress().getAddress())
 ||
 +            FBUtilities.getBroadcastAddressAndPort().getPort() != 
broadcastAddress().getPort())
 +            throw new IllegalStateException(String.format("%s != %s", 
FBUtilities.getBroadcastAddressAndPort(), broadcastAddress()));
 +
 +        ClusterMetadataService.instance().processor().fetchLogAndWait();
 +
 +
 +        ActiveRepairService.instance().start();
 +        StreamManager.instance.start();
 +        PaxosState.startAutoRepairs();
++        StorageService.instance.doAutoRepairSetup();
 +        CassandraDaemon.getInstanceForTesting().completeSetup();
 +    }
 +
      @Override
      public void postStartup()
      {
@@@ -1024,11 -903,8 +1026,12 @@@
                                  () -> SSTableReader.shutdownBlocking(1L, 
MINUTES),
                                  () -> 
shutdownAndWait(Collections.singletonList(ActiveRepairService.repairCommandExecutor())),
                                  () -> 
ActiveRepairService.instance().shutdownNowAndWait(1L, MINUTES),
+                                 () -> AutoRepair.instance.shutdownBlocking(),
 -                                () -> SnapshotManager.shutdownAndWait(1L, 
MINUTES)
 +                                () -> EpochAwareDebounce.instance.close(),
 +                                SnapshotManager.instance::close,
 +                                () -> 
IndexStatusManager.instance.shutdownAndWait(1L, MINUTES),
 +                                DiskErrorsHandlerService::close,
 +                                () -> ThreadLocalMetrics.shutdownCleaner(1L, 
MINUTES)
              );
  
              internodeMessagingStarted = false;
diff --cc 
test/distributed/org/apache/cassandra/distributed/test/repair/AutoRepairSchedulerStatsHelper.java
index e8d5ddd564,4e1d64e6dd..2dea0dbabe
--- 
a/test/distributed/org/apache/cassandra/distributed/test/repair/AutoRepairSchedulerStatsHelper.java
+++ 
b/test/distributed/org/apache/cassandra/distributed/test/repair/AutoRepairSchedulerStatsHelper.java
@@@ -119,14 -127,14 +119,10 @@@ public class AutoRepairSchedulerStatsHe
  
      public static void testSchedulerStats() throws ParseException
      {
--        // ensure there was no history of previous repair runs through the 
scheduler
--        Object[][] rows = 
cluster.coordinator(1).execute(String.format("SELECT repair_type, host_id, 
repair_start_ts, repair_finish_ts, repair_turn FROM %s.%s", 
DISTRIBUTED_KEYSPACE_NAME, SystemDistributedKeyspace.AUTO_REPAIR_HISTORY), 
ConsistencyLevel.QUORUM);
--        assertEquals(0, rows.length);
--
          // disabling AutoRepair for system_distributed and system_auth tables 
to avoid
          // interfering with the repaired bytes/plans calculation
 -        disableAutoRepair(SystemDistributedKeyspace.NAME, new 
HashSet<>(RepairTokenRangeSplitterTest.SYSTEM_DISTRIBUTED_TABLE_NAMES));
 -        disableAutoRepair(SchemaConstants.AUTH_KEYSPACE_NAME, new 
HashSet<>(RepairTokenRangeSplitterTest.AUTH_TABLE_NAMES));
 +        disableAutoRepair(SystemDistributedKeyspace.NAME, 
SystemDistributedKeyspace.TABLE_NAMES);
 +        disableAutoRepair(SchemaConstants.AUTH_KEYSPACE_NAME, 
AuthKeyspace.TABLE_NAMES);
  
          insertData();
  
diff --cc 
test/distributed/org/apache/cassandra/distributed/test/repair/AutoRepairSchedulerTest.java
index 9e111e2bb6,8b09ad1231..ac5a764d6d
--- 
a/test/distributed/org/apache/cassandra/distributed/test/repair/AutoRepairSchedulerTest.java
+++ 
b/test/distributed/org/apache/cassandra/distributed/test/repair/AutoRepairSchedulerTest.java
@@@ -31,18 -39,12 +31,17 @@@ import org.junit.Assert
  import org.junit.BeforeClass;
  import org.junit.Test;
  
 +import org.apache.cassandra.Util;
 +import org.apache.cassandra.config.DurationSpec;
  import org.apache.cassandra.distributed.Cluster;
  import org.apache.cassandra.distributed.api.ConsistencyLevel;
 +import org.apache.cassandra.distributed.api.TokenSupplier;
  import org.apache.cassandra.distributed.test.TestBaseImpl;
 +import org.apache.cassandra.metrics.AutoRepairMetrics;
 +import org.apache.cassandra.metrics.AutoRepairMetricsManager;
  import org.apache.cassandra.repair.autorepair.AutoRepair;
  import org.apache.cassandra.repair.autorepair.AutoRepairConfig;
 -import org.apache.cassandra.service.AutoRepairService;
 +import org.apache.cassandra.schema.SystemDistributedKeyspace;
- import org.apache.cassandra.service.AutoRepairService;
  import org.apache.cassandra.utils.FBUtilities;
  
  import static 
org.apache.cassandra.schema.SchemaConstants.DISTRIBUTED_KEYSPACE_NAME;
@@@ -120,22 -124,22 +119,6 @@@ public class AutoRepairSchedulerTest ex
      @Test
      public void testScheduler() throws ParseException
      {
--        // ensure there was no history of previous repair runs through the 
scheduler
--        Object[][] rows = 
cluster.coordinator(1).execute(String.format("SELECT repair_type, host_id, 
repair_start_ts, repair_finish_ts, repair_turn FROM %s.%s", 
DISTRIBUTED_KEYSPACE_NAME, SystemDistributedKeyspace.AUTO_REPAIR_HISTORY), 
ConsistencyLevel.QUORUM);
--        assertEquals(0, rows.length);
--
--        cluster.forEach(i -> i.runOnInstance(() -> {
--            try
--            {
--                AutoRepairService.setup();
--                AutoRepair.instance.setup();
--            }
--            catch (Exception e)
--            {
--                throw new RuntimeException(e);
--            }
--        }));
--
          // validate that the repair ran on all nodes
          cluster.forEach(i -> i.runOnInstance(() -> {
              String broadcastAddress  = 
FBUtilities.getJustBroadcastAddress().toString();
diff --cc 
test/distributed/org/apache/cassandra/distributed/upgrade/AutoRepairUpgradeTest.java
index 0000000000,0000000000..4986156756
new file mode 100644
--- /dev/null
+++ 
b/test/distributed/org/apache/cassandra/distributed/upgrade/AutoRepairUpgradeTest.java
@@@ -1,0 -1,0 +1,248 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.cassandra.distributed.upgrade;
++
++import java.util.Date;
++import java.util.HashMap;
++import java.util.Map;
++import java.util.Set;
++import java.util.UUID;
++import java.util.concurrent.ConcurrentHashMap;
++import java.util.concurrent.TimeUnit;
++import java.util.stream.Collectors;
++
++import com.google.common.collect.ImmutableMap;
++
++import org.junit.Test;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++import org.apache.cassandra.distributed.UpgradeableCluster;
++import org.apache.cassandra.distributed.api.ConsistencyLevel;
++import org.apache.cassandra.distributed.api.Feature;
++import org.apache.cassandra.repair.autorepair.AutoRepairConfig;
++
++import static org.awaitility.Awaitility.await;
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertFalse;
++import static org.junit.Assert.assertTrue;
++
++/**
++ * Upgrade test for auto-repair verifying that it runs successfully before 
and after
++ * upgrading from 5.0 to current. The first repair round executes on 5.0 nodes
++ * (automatically during startup) and the second round after all nodes are 
upgraded.
++ *
++ * Host IDs change across the upgrade (from random UUIDs to NodeId-derived 
UUIDs).
++ * The migration in {@code 
AutoRepairUtils.migrateAutoRepairHistoryForUpgrade()} re-keys
++ * entries under the new host IDs, preserving repair timestamps. The test 
verifies that:
++ * <ol>
++ *   <li>3 repair history entries exist before the upgrade (on 5.0)</li>
++ *   <li>3 entries exist after upgrade, keyed by new host IDs, retaining 
per-node pre-upgrade timestamps</li>
++ *   <li>After repair runs, each entry's timestamp exceeds its own 
pre-upgrade value</li>
++ * </ol>
++ *
++ * Auto-repair is started automatically during node startup via
++ * {@code StorageService.doAutoRepairSetup()} when the config is enabled.
++ * In 5.0, the JVM property {@code cassandra.autorepair.enable=true} is also 
required.
++ */
++public class AutoRepairUpgradeTest extends UpgradeTestBase
++{
++    private static final Logger logger = 
LoggerFactory.getLogger(AutoRepairUpgradeTest.class);
++
++    @Test
++    public void testAutoRepairAcrossUpgrade() throws Throwable
++    {
++        // 5.0 requires this JVM property to enable auto-repair (schema 
tables, JMX, scheduler).
++        // Trunk does not use this property.
++        System.setProperty("cassandra.autorepair.enable", "true"); // 
checkstyle: suppress nearby 'blockSystemPropertyUsage'
++
++        // Maps pre-upgrade host ID -> finish timestamp, captured right 
before upgrade
++        Map<String, Long> preUpgradeTimestamps = new ConcurrentHashMap<>();
++
++        new TestCase()
++        .nodes(3)
++        .singleUpgradeToCurrentFrom(v50)
++        .withConfig(cfg -> cfg.with(Feature.NETWORK, Feature.GOSSIP)
++                              .set("auto_repair",
++                                   ImmutableMap.of(
++                                   "repair_type_overrides",
++                                   
ImmutableMap.of(AutoRepairConfig.RepairType.FULL.getConfigName(),
++                                                   ImmutableMap.of(
++                                                   "initial_scheduler_delay", 
"60s",
++                                                   "enabled", "true",
++                                                   "parallel_repair_count", 
"3",
++                                                   
"allow_parallel_replica_repair", "true",
++                                                   "min_repair_interval", 
"60s"))))
++                              .set("auto_repair.enabled", "true")
++                              
.set("auto_repair.global_settings.repair_by_keyspace", "true")
++                              
.set("auto_repair.global_settings.repair_retry_backoff", "5s")
++                              .set("auto_repair.repair_task_min_duration", 
"0s")
++                              .set("auto_repair.repair_check_interval", 
"60s"))
++        .setup(cluster -> {
++            cluster.schemaChange("CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE +
++                                 " WITH replication = {'class': 
'SimpleStrategy', 'replication_factor': 3};");
++            cluster.schemaChange("CREATE TABLE IF NOT EXISTS " + KEYSPACE +
++                                 ".tbl (pk int, ck text, v1 int, v2 int, 
PRIMARY KEY (pk, ck))");
++
++            // Wait for auto-repair to complete on all 5.0 nodes.
++            waitForNEntries(cluster, 3);
++
++            assertEquals("Expected repair history for all 3 nodes on 5.0",
++                         3, captureFinishTimestamps(cluster).size());
++        })
++        .runBeforeClusterUpgrade(cluster -> {
++            // Wait for any in-flight repair to complete before capturing 
timestamps.
++            waitForNoInFlightRepairs(cluster);
++            preUpgradeTimestamps.putAll(captureFinishTimestamps(cluster));
++            logger.info("Pre-upgrade timestamps: {}", preUpgradeTimestamps);
++
++            // Seed auto_repair_priority with pre-upgrade host IDs to test 
priority migration.
++            String hostIdSet = preUpgradeTimestamps.keySet().stream()
++                                                   .map(id -> id.toString())
++                                                   
.collect(Collectors.joining(", "));
++            cluster.coordinator(1).execute(
++                String.format("INSERT INTO 
system_distributed.auto_repair_priority (repair_type, repair_priority) VALUES 
('%s', {%s})",
++                              AutoRepairConfig.RepairType.FULL.toString(), 
hostIdSet),
++                ConsistencyLevel.QUORUM);
++            logger.info("Seeded auto_repair_priority with pre-upgrade host 
IDs: {}", preUpgradeTimestamps.keySet());
++        })
++        .runAfterClusterUpgrade(cluster -> {
++            // Phase 1: Verify migration — old entries replaced by new 
entries with
++            // different host IDs but preserved per-node timestamps
++            Map<String, Long> migratedTimestamps = 
captureFinishTimestamps(cluster);
++            logger.info("Pre-upgrade entries: {}, post-migration entries: 
{}", preUpgradeTimestamps, migratedTimestamps);
++            assertEquals("Expected exactly 3 migrated entries", 3, 
migratedTimestamps.size());
++
++            // Host IDs must have changed — new entries should not use 
pre-upgrade IDs
++            for (String id : migratedTimestamps.keySet())
++                assertFalse("Migrated entry should use new host ID, not 
pre-upgrade ID " + id,
++                            preUpgradeTimestamps.containsKey(id));
++
++            // Each migrated entry should retain its exact original per-node 
timestamp.
++            // Since host IDs changed, we compare by value: every migrated 
timestamp must
++            // exist in the pre-upgrade set (values preserved exactly during 
migration).
++            for (Long ts : migratedTimestamps.values())
++                assertTrue("Migrated timestamp " + ts + " should match a 
pre-upgrade timestamp",
++                           preUpgradeTimestamps.containsValue(ts));
++
++            // Verify auto_repair_priority migration: old host IDs should be 
replaced by new ones,
++            // and the total count should remain the same (3 entries seeded 
before upgrade).
++            Set<String> priorityIds = capturePriorityHostIds(cluster);
++            logger.info("Post-migration priority IDs: {}", priorityIds);
++            assertEquals("Priority set should have same number of entries 
after migration",
++                         preUpgradeTimestamps.size(), priorityIds.size());
++            for (String id : priorityIds)
++                assertFalse("Priority should not contain pre-upgrade host ID 
" + id,
++                            preUpgradeTimestamps.containsKey(id));
++
++            // Phase 2: Wait for repair to run (after initial_scheduler_delay 
expires),
++            // then verify each entry's timestamp exceeds its own migrated 
value.
++            Map<String, Long> migratedSnapshot = new 
HashMap<>(migratedTimestamps);
++            waitForAllTimestampsExceeded(cluster, migratedSnapshot);
++
++            Map<String, Long> postRepairTimestamps = 
captureFinishTimestamps(cluster);
++            assertEquals("Expected 3 entries after repair", 3, 
postRepairTimestamps.size());
++            assertEquals("Post-repair entries should use same host IDs as 
migrated",
++                         migratedSnapshot.keySet(), 
postRepairTimestamps.keySet());
++            for (Map.Entry<String, Long> entry : 
postRepairTimestamps.entrySet())
++                assertTrue("Post-repair timestamp for " + entry.getKey() + " 
should exceed migrated timestamp",
++                           entry.getValue() > 
migratedSnapshot.get(entry.getKey()));
++
++            // Priority table should be cleared after repair completes
++            Set<String> postRepairPriorityIds = 
capturePriorityHostIds(cluster);
++            assertTrue("Priority set should be empty after post-upgrade 
repair completes, but was: " + postRepairPriorityIds,
++                       postRepairPriorityIds.isEmpty());
++        })
++        .run();
++    }
++
++    private void waitForNEntries(UpgradeableCluster cluster, int expected)
++    {
++        await().atMost(5, TimeUnit.MINUTES)
++               .pollInterval(2, TimeUnit.SECONDS)
++               .until(() -> captureFinishTimestamps(cluster).size() >= 
expected);
++    }
++
++    private void waitForAllTimestampsExceeded(UpgradeableCluster cluster, 
Map<String, Long> baseline)
++    {
++        await().atMost(5, TimeUnit.MINUTES)
++               .pollInterval(2, TimeUnit.SECONDS)
++               .until(() -> {
++                   Map<String, Long> current = 
captureFinishTimestamps(cluster);
++                   if (current.size() < baseline.size())
++                       return false;
++                   for (Map.Entry<String, Long> entry : baseline.entrySet())
++                   {
++                       Long currentTs = current.get(entry.getKey());
++                       if (currentTs == null || currentTs <= entry.getValue())
++                           return false;
++                   }
++                   return true;
++               });
++    }
++
++    private Set<String> capturePriorityHostIds(UpgradeableCluster cluster)
++    {
++        Object[][] rows = cluster.coordinator(1).execute(
++            String.format("SELECT repair_priority FROM 
system_distributed.auto_repair_priority WHERE repair_type='%s'",
++                          AutoRepairConfig.RepairType.FULL.toString()),
++            ConsistencyLevel.QUORUM);
++        if (rows.length == 0 || rows[0][0] == null)
++            return Set.of();
++        @SuppressWarnings("unchecked")
++        Set<UUID> uuids = (Set<UUID>) rows[0][0];
++        return uuids.stream().map(UUID::toString).collect(Collectors.toSet());
++    }
++
++    private void waitForNoInFlightRepairs(UpgradeableCluster cluster)
++    {
++        await().atMost(2, TimeUnit.MINUTES)
++               .pollInterval(1, TimeUnit.SECONDS)
++               .until(() -> {
++                   Object[][] rows = cluster.coordinator(1).execute(
++                       String.format("SELECT host_id, repair_start_ts, 
repair_finish_ts FROM system_distributed.auto_repair_history WHERE 
repair_type='%s'",
++                                     
AutoRepairConfig.RepairType.FULL.toString()),
++                       ConsistencyLevel.QUORUM);
++                   for (Object[] row : rows)
++                   {
++                       long startTs = ((Date) row[1]).getTime();
++                       long finishTs = ((Date) row[2]).getTime();
++                       if (startTs > finishTs)
++                           return false; // repair still in flight
++                   }
++                   return true;
++               });
++    }
++
++    private Map<String, Long> captureFinishTimestamps(UpgradeableCluster 
cluster)
++    {
++        Object[][] rows = cluster.coordinator(1).execute(
++            String.format("SELECT host_id, repair_finish_ts FROM 
system_distributed.auto_repair_history WHERE repair_type='%s'",
++                          AutoRepairConfig.RepairType.FULL.toString()),
++            ConsistencyLevel.QUORUM);
++        Map<String, Long> timestamps = new HashMap<>();
++        for (Object[] row : rows)
++        {
++            String hostId = row[0].toString();
++            long finishTs = ((Date) row[1]).getTime();
++            timestamps.put(hostId, finishTs);
++        }
++        return timestamps;
++    }
++}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to