This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new bef03c9f9f9 branch-3.0: [feature](inverted index) introduce config enable_new_partition_inverted_index_v2_format to set new partition inverted index format v2 #55069 (#55277)
bef03c9f9f9 is described below

commit bef03c9f9f9fe0ae02397b7f4b84695a23b465d1
Author: Jack <[email protected]>
AuthorDate: Fri Sep 5 09:49:51 2025 +0800

    branch-3.0: [feature](inverted index) introduce config enable_new_partition_inverted_index_v2_format to set new partition inverted index format v2 #55069 (#55277)
    
    cherry pick from #55069
---
 .../main/java/org/apache/doris/common/Config.java  |   7 +
 .../cloud/datasource/CloudInternalCatalog.java     |   8 +-
 .../apache/doris/datasource/InternalCatalog.java   |   8 +-
 .../cloud/datasource/CloudInternalCatalogTest.java | 775 +++++++++++++++++++++
 .../doris/datasource/InternalCatalogTest.java      | 584 ++++++++++++++++
 .../test_inverted_index_storage_format_upgrade.out | Bin 0 -> 1241 bytes
 ...st_inverted_index_storage_format_upgrade.groovy | 409 +++++++++++
 7 files changed, 1789 insertions(+), 2 deletions(-)
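
The Config.java change below adds a master-only switch, and the two catalog changes apply the same resolution rule when building tablet metadata for a new partition. As a minimal illustrative sketch (the helper class is hypothetical and not part of this patch; only Config.enable_new_partition_inverted_index_v2_format and TInvertedIndexFileStorageFormat come from the actual change), the rule is:

    import org.apache.doris.common.Config;
    import org.apache.doris.thrift.TInvertedIndexFileStorageFormat;

    // Hypothetical helper, for illustration only: a table declared with the V1
    // inverted index format gets V2 for newly created partitions when the config
    // is enabled; a table already on V2 is returned unchanged, and nothing is
    // ever downgraded.
    final class InvertedIndexFormatResolver {
        static TInvertedIndexFileStorageFormat resolveForNewPartition(TInvertedIndexFileStorageFormat tableFormat) {
            if (Config.enable_new_partition_inverted_index_v2_format
                    && tableFormat == TInvertedIndexFileStorageFormat.V1) {
                return TInvertedIndexFileStorageFormat.V2;
            }
            return tableFormat;
        }
    }

Because the config is mutable and master-only, it can be flipped on the master FE at runtime (for example with ADMIN SET FRONTEND CONFIG); only partitions created afterwards are affected, existing partitions keep the format they were created with, which is what the new FE unit tests and the regression test below verify.
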

diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
index c09f3f31510..2476d927fe0 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
@@ -2905,6 +2905,13 @@ public class Config extends ConfigBase {
     })
     public static String inverted_index_storage_format = "V2";
 
+    @ConfField(mutable = true, masterOnly = true, description = {
+            "是否为新分区启用倒排索引 V2 存储格式。启用后,新创建的分区将使用 V2 格式,而不管表的原始格式如何。",
+            "Enable V2 storage format for inverted indexes in new partitions. When enabled, newly created partitions "
+                    + "will use V2 format regardless of the table's original format."
+    })
+    public static boolean enable_new_partition_inverted_index_v2_format = false;
+
     @ConfField(mutable = true, masterOnly = true, description = {
             "是否在unique表mow上开启delete语句写delete predicate。若开启,会提升delete语句的性能,"
                     + "但delete后进行部分列更新可能会出现部分数据错误的情况。若关闭,会降低delete语句的性能来保证正确性。",
diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java
index ad7596c18fa..98c6bed2ecb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java
@@ -160,7 +160,13 @@ public class CloudInternalCatalog extends InternalCatalog {
             Cloud.CreateTabletsRequest.Builder requestBuilder = Cloud.CreateTabletsRequest.newBuilder();
             List<String> rowStoreColumns =
                     tbl.getTableProperty().getCopiedRowStoreColumns();
+            TInvertedIndexFileStorageFormat effectiveIndexStorageFormat =
+                        (Config.enable_new_partition_inverted_index_v2_format
+                                && tbl.getInvertedIndexFileStorageFormat() == TInvertedIndexFileStorageFormat.V1)
+                                ? TInvertedIndexFileStorageFormat.V2
+                                : tbl.getInvertedIndexFileStorageFormat();
             for (Tablet tablet : index.getTablets()) {
+                // Use resolved format that considers global override for new partitions
                 OlapFile.TabletMetaCloudPB.Builder builder = createTabletMetaBuilder(tbl.getId(), indexId,
                         partitionId, tablet, tabletType, schemaHash, keysType, shortKeyColumnCount,
                         bfColumns, tbl.getBfFpp(), indexes, columns, tbl.getDataSortInfo(),
@@ -174,7 +180,7 @@ public class CloudInternalCatalog extends InternalCatalog {
                         tbl.disableAutoCompaction(),
                         tbl.getRowStoreColumnsUniqueIds(rowStoreColumns),
                         tbl.getEnableMowLightDelete(),
-                        tbl.getInvertedIndexFileStorageFormat(),
+                        effectiveIndexStorageFormat,
                         tbl.rowStorePageSize(),
                         tbl.variantEnableFlattenNested(),
                         tbl.storagePageSize());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
index da1b141bc8c..8cfcac11202 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
@@ -2199,7 +2199,13 @@ public class InternalCatalog implements CatalogIf<Database> {
                             tbl.storagePageSize());
 
                     task.setStorageFormat(tbl.getStorageFormat());
-                    task.setInvertedIndexFileStorageFormat(tbl.getInvertedIndexFileStorageFormat());
+                    // Use resolved format that considers global override for new partitions
+                    TInvertedIndexFileStorageFormat effectiveFormat =
+                            (Config.enable_new_partition_inverted_index_v2_format
+                                    && tbl.getInvertedIndexFileStorageFormat() == TInvertedIndexFileStorageFormat.V1)
+                                    ? TInvertedIndexFileStorageFormat.V2
+                                    : tbl.getInvertedIndexFileStorageFormat();
+                    task.setInvertedIndexFileStorageFormat(effectiveFormat);
                     if (!CollectionUtils.isEmpty(clusterKeyIndexes)) {
                         task.setClusterKeyIndexes(clusterKeyIndexes);
                         LOG.info("table: {}, partition: {}, index: {}, tablet: {}, cluster key indexes: {}",
diff --git a/fe/fe-core/src/test/java/org/apache/doris/cloud/datasource/CloudInternalCatalogTest.java b/fe/fe-core/src/test/java/org/apache/doris/cloud/datasource/CloudInternalCatalogTest.java
new file mode 100644
index 00000000000..a7f3ddd6c15
--- /dev/null
+++ b/fe/fe-core/src/test/java/org/apache/doris/cloud/datasource/CloudInternalCatalogTest.java
@@ -0,0 +1,775 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.cloud.datasource;
+
+import org.apache.doris.analysis.DataSortInfo;
+import org.apache.doris.analysis.UserIdentity;
+import org.apache.doris.catalog.CatalogTestUtil;
+import org.apache.doris.catalog.Column;
+import org.apache.doris.catalog.DataProperty;
+import org.apache.doris.catalog.Database;
+import org.apache.doris.catalog.Env;
+import org.apache.doris.catalog.EnvFactory;
+import org.apache.doris.catalog.FakeEditLog;
+import org.apache.doris.catalog.FakeEnv;
+import org.apache.doris.catalog.HashDistributionInfo;
+import org.apache.doris.catalog.Index;
+import org.apache.doris.catalog.KeysType;
+import org.apache.doris.catalog.MaterializedIndexMeta;
+import org.apache.doris.catalog.OlapTable;
+import org.apache.doris.catalog.PrimitiveType;
+import org.apache.doris.catalog.ReplicaAllocation;
+import org.apache.doris.catalog.TableProperty;
+import org.apache.doris.catalog.Tablet;
+import org.apache.doris.cloud.catalog.CloudEnv;
+import org.apache.doris.cloud.catalog.CloudEnvFactory;
+import org.apache.doris.cloud.catalog.CloudPartition;
+import org.apache.doris.cloud.catalog.ComputeGroup;
+import org.apache.doris.cloud.proto.Cloud;
+import org.apache.doris.cloud.rpc.MetaServiceProxy;
+import org.apache.doris.cloud.rpc.VersionHelper;
+import org.apache.doris.cloud.system.CloudSystemInfoService;
+import org.apache.doris.common.Config;
+import org.apache.doris.common.DdlException;
+import org.apache.doris.common.FeConstants;
+import org.apache.doris.common.UserException;
+import org.apache.doris.mysql.privilege.AccessControllerManager;
+import org.apache.doris.mysql.privilege.Auth;
+import org.apache.doris.mysql.privilege.PrivPredicate;
+import org.apache.doris.persist.EditLog;
+import org.apache.doris.proto.OlapFile;
+import org.apache.doris.qe.ConnectContext;
+import org.apache.doris.qe.OriginStatement;
+import org.apache.doris.resource.Tag;
+import org.apache.doris.system.Backend;
+import org.apache.doris.system.SystemInfoService;
+import org.apache.doris.thrift.TCompressionType;
+import org.apache.doris.thrift.TInvertedIndexFileStorageFormat;
+import org.apache.doris.thrift.TStorageMedium;
+import org.apache.doris.thrift.TStorageType;
+import org.apache.doris.thrift.TTabletType;
+import org.apache.doris.utframe.MockedMetaServerFactory;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import mockit.Mock;
+import mockit.MockUp;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.lang.reflect.InvocationTargetException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class CloudInternalCatalogTest {
+
+    private static final String TEST_DB_NAME = "test_cloud_db";
+    private static final String TEST_TABLE_NAME = "test_cloud_table";
+
+    private static FakeEditLog fakeEditLog;
+    private static FakeEnv fakeEnv;
+    private static Env masterEnv;
+    private static EditLog testEditLog;
+    private static Database db;
+    private ConnectContext ctx;
+
+    @Before
+    public void setUp() throws InstantiationException, IllegalAccessException, 
IllegalArgumentException,
+            InvocationTargetException, NoSuchMethodException, 
SecurityException, UserException {
+        FeConstants.runningUnitTest = true;
+        Config.enable_new_partition_inverted_index_v2_format = false;
+
+        // Mock VersionHelper globally to avoid all meta service calls
+        new MockUp<VersionHelper>() {
+            @Mock
+            public long getVersionFromMeta(long dbId, long tableId, long 
partitionId) {
+                return 1L;
+            }
+
+            @Mock
+            public long getVisibleVersion(long dbId, long tableId, long 
partitionId) {
+                return 1L;
+            }
+        };
+
+        // Mock CloudPartition globally to avoid meta service calls
+        new MockUp<CloudPartition>() {
+            @Mock
+            public long getVisibleVersion() {
+                return 1L;
+            }
+        };
+
+        // Setup for MetaServiceProxy mock
+        new MockUp<MetaServiceProxy>(MetaServiceProxy.class) {
+            @Mock
+            public Cloud.BeginTxnResponse beginTxn(Cloud.BeginTxnRequest 
request) {
+                Cloud.BeginTxnResponse.Builder beginTxnResponseBuilder = 
Cloud.BeginTxnResponse.newBuilder();
+                beginTxnResponseBuilder.setTxnId(1000)
+                        .setStatus(Cloud.MetaServiceResponseStatus.newBuilder()
+                                
.setCode(Cloud.MetaServiceCode.OK).setMsg("OK"));
+                return beginTxnResponseBuilder.build();
+            }
+
+            @Mock
+            public Cloud.CommitTxnResponse commitTxn(Cloud.CommitTxnRequest 
request) {
+                Cloud.TxnInfoPB.Builder txnInfoBuilder = 
Cloud.TxnInfoPB.newBuilder();
+                txnInfoBuilder.setDbId(CatalogTestUtil.testDbId1);
+                
txnInfoBuilder.addAllTableIds(Lists.newArrayList(CatalogTestUtil.testTableId1));
+                txnInfoBuilder.setLabel("test_label");
+                txnInfoBuilder.setListenerId(-1);
+                Cloud.CommitTxnResponse.Builder commitTxnResponseBuilder = 
Cloud.CommitTxnResponse.newBuilder();
+                
commitTxnResponseBuilder.setStatus(Cloud.MetaServiceResponseStatus.newBuilder()
+                                
.setCode(Cloud.MetaServiceCode.OK).setMsg("OK"))
+                        .setTxnInfo(txnInfoBuilder.build());
+                return commitTxnResponseBuilder.build();
+            }
+
+            @Mock
+            public Cloud.CheckTxnConflictResponse 
checkTxnConflict(Cloud.CheckTxnConflictRequest request) {
+                Cloud.CheckTxnConflictResponse.Builder 
checkTxnConflictResponseBuilder =
+                        Cloud.CheckTxnConflictResponse.newBuilder();
+                
checkTxnConflictResponseBuilder.setStatus(Cloud.MetaServiceResponseStatus.newBuilder()
+                                
.setCode(Cloud.MetaServiceCode.OK).setMsg("OK"))
+                        .setFinished(true);
+                return checkTxnConflictResponseBuilder.build();
+            }
+
+            @Mock
+            public Cloud.GetClusterResponse getCluster(Cloud.GetClusterRequest 
request) {
+                Cloud.GetClusterResponse.Builder getClusterResponseBuilder = 
Cloud.GetClusterResponse.newBuilder();
+                Cloud.ClusterPB.Builder clusterBuilder = 
Cloud.ClusterPB.newBuilder();
+                
clusterBuilder.setClusterId("test_id").setClusterName("test_group");
+
+                Cloud.NodeInfoPB.Builder node1 = Cloud.NodeInfoPB.newBuilder();
+                node1.setCloudUniqueId("test_cloud")
+                        .setName("host1")
+                        .setIp("host1")
+                        .setHost("host1")
+                        .setHeartbeatPort(123)
+                        .setEditLogPort(125)
+                        .setStatus(Cloud.NodeStatusPB.NODE_STATUS_RUNNING);
+                clusterBuilder.addNodes(node1.build());
+                
getClusterResponseBuilder.setStatus(Cloud.MetaServiceResponseStatus.newBuilder()
+                                
.setCode(Cloud.MetaServiceCode.OK).setMsg("OK"))
+                        .addCluster(clusterBuilder.build());
+                return getClusterResponseBuilder.build();
+            }
+
+            @Mock
+            public Cloud.CreateTabletsResponse 
createTablets(Cloud.CreateTabletsRequest request) {
+                Cloud.CreateTabletsResponse.Builder responseBuilder = 
Cloud.CreateTabletsResponse.newBuilder();
+                
responseBuilder.setStatus(Cloud.MetaServiceResponseStatus.newBuilder()
+                        .setCode(Cloud.MetaServiceCode.OK).setMsg("OK"));
+                return responseBuilder.build();
+            }
+
+            @Mock
+            public Cloud.FinishTabletJobResponse 
finishTabletJob(Cloud.FinishTabletJobRequest request) {
+                Cloud.FinishTabletJobResponse.Builder responseBuilder = 
Cloud.FinishTabletJobResponse.newBuilder();
+                
responseBuilder.setStatus(Cloud.MetaServiceResponseStatus.newBuilder()
+                        .setCode(Cloud.MetaServiceCode.OK).setMsg("OK"));
+                return responseBuilder.build();
+            }
+
+            @Mock
+            public Cloud.GetCurrentMaxTxnResponse 
getCurrentMaxTxnId(Cloud.GetCurrentMaxTxnRequest request) {
+                Cloud.GetCurrentMaxTxnResponse.Builder builder = 
Cloud.GetCurrentMaxTxnResponse.newBuilder();
+                builder.setStatus(Cloud.MetaServiceResponseStatus.newBuilder()
+                                
.setCode(Cloud.MetaServiceCode.OK).setMsg("OK"))
+                        .setCurrentMaxTxnId(1000);
+                return builder.build();
+            }
+        };
+
+        Config.cloud_unique_id = "test_cloud";
+        Config.meta_service_endpoint = 
MockedMetaServerFactory.METASERVER_DEFAULT_IP + ":" + 20121;
+
+        EnvFactory envFactory = EnvFactory.getInstance();
+        masterEnv = envFactory.createEnv(false);
+        SystemInfoService cloudSystemInfo = Env.getCurrentSystemInfo();
+        fakeEnv = new FakeEnv();
+        FakeEnv.setSystemInfo(cloudSystemInfo);
+
+        fakeEditLog = new FakeEditLog();
+        testEditLog = null; // Will be set by MockUp
+        FakeEnv.setEnv(masterEnv);
+
+        ctx = new ConnectContext();
+        ctx.setEnv(masterEnv);
+        UserIdentity rootUser = new UserIdentity("root", "%");
+        rootUser.setIsAnalyzed();
+        ctx.setCurrentUserIdentity(rootUser);
+        ctx.setThreadLocalInfo();
+        ctx.setCloudCluster("test_group");
+
+        Assert.assertTrue(envFactory instanceof CloudEnvFactory);
+        Assert.assertTrue(masterEnv instanceof CloudEnv);
+
+        new MockUp<Env>() {
+            @Mock
+            public Env getCurrentEnv() {
+                return masterEnv;
+            }
+
+            @Mock
+            public EditLog getEditLog() {
+                if (testEditLog == null) {
+                    testEditLog = new EditLog("test") {
+                        // Override to avoid initialization issues
+                    };
+                }
+                return testEditLog;
+            }
+
+            @Mock
+            public AccessControllerManager getAccessManager() {
+                return new AccessControllerManager(masterEnv.getAuth()) {
+                    @Override
+                    public boolean checkTblPriv(ConnectContext ctx, String 
ctl, String db, String tbl,
+                            PrivPredicate wanted) {
+                        return true; // Allow all access for test
+                    }
+                };
+            }
+        };
+
+        new MockUp<Auth>() {
+            @Mock
+            public String getDefaultCloudCluster(String user) {
+                return "test_group"; // Return default cluster for test
+            }
+
+            @Mock
+            public ComputeGroup getComputeGroup(String user) {
+                // Return a test compute group for the mock
+                return new ComputeGroup("test_id", "test_group", 
ComputeGroup.ComputeTypeEnum.SQL);
+            }
+        };
+
+        // Mock cloud environment permissions
+        new MockUp<CloudEnv>() {
+            @Mock
+            public void checkCloudClusterPriv(String cluster) throws Exception 
{
+                // Always allow for tests
+            }
+        };
+
+        // Mock ConnectContext to avoid compute group permission check
+        new MockUp<ConnectContext>() {
+            @Mock
+            public String getCloudCluster() {
+                return "test_group";
+            }
+
+            @Mock
+            public UserIdentity getCurrentUserIdentity() {
+                UserIdentity rootUser = new UserIdentity("root", "%");
+                rootUser.setIsAnalyzed();
+                return rootUser;
+            }
+        };
+
+        // Setup CloudSystemInfoService directly like CloudIndexTest
+        Assert.assertTrue(Env.getCurrentSystemInfo() instanceof 
CloudSystemInfoService);
+        CloudSystemInfoService systemInfo = (CloudSystemInfoService) 
Env.getCurrentSystemInfo();
+        Backend backend = new Backend(10001L, "host1", 123);
+        backend.setAlive(true);
+        backend.setBePort(456);
+        backend.setHttpPort(789);
+        backend.setBrpcPort(321);
+        Map<String, String> newTagMap = Tag.DEFAULT_BACKEND_TAG.toMap();
+        newTagMap.put(Tag.CLOUD_CLUSTER_STATUS, "NORMAL");
+        newTagMap.put(Tag.CLOUD_CLUSTER_NAME, "test_group");
+        newTagMap.put(Tag.CLOUD_CLUSTER_ID, "test_id");
+        newTagMap.put(Tag.CLOUD_CLUSTER_PUBLIC_ENDPOINT, "");
+        newTagMap.put(Tag.CLOUD_CLUSTER_PRIVATE_ENDPOINT, "");
+        newTagMap.put(Tag.CLOUD_UNIQUE_ID, "test_cloud");
+        backend.setTagMap(newTagMap);
+        List<Backend> backends = Lists.newArrayList(backend);
+        systemInfo.updateCloudClusterMapNoLock(backends, new ArrayList<>());
+
+        db = new Database(CatalogTestUtil.testDbId1, TEST_DB_NAME);
+        masterEnv.unprotectCreateDb(db);
+    }
+
+    @Test
+    public void testCloudMixedFormatPartitions() throws Exception {
+        // Test: Old partitions keep V1, new partitions use V2 when config is 
enabled
+
+        // Step 1: Create initial partition with V1 format (config disabled)
+        Config.enable_new_partition_inverted_index_v2_format = false;
+        Map<Long, TInvertedIndexFileStorageFormat> partitionFormats = 
Maps.newHashMap();
+
+        // Mock sendCreateTabletsRpc to avoid actual meta service calls
+        new MockUp<CloudInternalCatalog>() {
+            @Mock
+            public Cloud.CreateTabletsResponse 
sendCreateTabletsRpc(Cloud.CreateTabletsRequest.Builder requestBuilder)
+                    throws DdlException {
+                return Cloud.CreateTabletsResponse.newBuilder()
+                        .setStatus(Cloud.MetaServiceResponseStatus.newBuilder()
+                                .setCode(Cloud.MetaServiceCode.OK)
+                                .setMsg("OK"))
+                        .build();
+            }
+        };
+
+        // Mock createTabletMetaBuilder to capture formats for each partition
+        new MockUp<CloudInternalCatalog>() {
+            @Mock
+            public OlapFile.TabletMetaCloudPB.Builder 
createTabletMetaBuilder(long tableId, long indexId,
+                    long partitionId, Tablet tablet, TTabletType tabletType, 
int schemaHash, KeysType keysType,
+                    short shortKeyColumnCount, Set<String> bfColumns, double 
bfFpp, List<Index> indexes,
+                    List<Column> schemaColumns, DataSortInfo dataSortInfo, 
TCompressionType compressionType,
+                    String storagePolicy, boolean isInMemory, boolean isShadow,
+                    String tableName, long ttlSeconds, boolean 
enableUniqueKeyMergeOnWrite,
+                    boolean storeRowColumn, int schemaVersion, String 
compactionPolicy,
+                    Long timeSeriesCompactionGoalSizeMbytes, Long 
timeSeriesCompactionFileCountThreshold,
+                    Long timeSeriesCompactionTimeThresholdSeconds, Long 
timeSeriesCompactionEmptyRowsetsThreshold,
+                    Long timeSeriesCompactionLevelThreshold, boolean 
disableAutoCompaction,
+                    List<Integer> rowStoreColumnUniqueIds, boolean 
enableMowLightDelete,
+                    TInvertedIndexFileStorageFormat 
invertedIndexFileStorageFormat, long pageSize,
+                    boolean variantEnableFlattenNested, long storagePageSize) 
throws DdlException {
+
+                // Track format for each partition
+                partitionFormats.put(partitionId, 
invertedIndexFileStorageFormat);
+                return OlapFile.TabletMetaCloudPB.newBuilder();
+            }
+        };
+
+        CloudInternalCatalog cloudCatalog = (CloudInternalCatalog) 
masterEnv.getInternalCatalog();
+
+        // Create MaterializedIndexMeta for base index
+        long baseIndexId = 2000L;
+        MaterializedIndexMeta indexMeta =
+                new MaterializedIndexMeta(
+                        baseIndexId,
+                        Lists.newArrayList(new Column("col1",
+                                PrimitiveType.INT)),
+                        0, // schema version
+                        100, // schema hash
+                        (short) 1, // short key column count
+                        TStorageType.COLUMN,
+                        KeysType.DUP_KEYS,
+                        new OriginStatement("CREATE TABLE test", 0) // origin 
stmt
+                );
+        Map<Long, MaterializedIndexMeta> indexIdToMeta = Maps.newHashMap();
+        indexIdToMeta.put(baseIndexId, indexMeta);
+
+        // Mock OlapTable with V1 format
+        new MockUp<OlapTable>() {
+            @Mock
+            public TInvertedIndexFileStorageFormat 
getInvertedIndexFileStorageFormat() {
+                return TInvertedIndexFileStorageFormat.V1; // Table has V1 
format
+            }
+
+            @Mock
+            public long getId() {
+                return 1000L;
+            }
+
+            @Mock
+            public long getBaseIndexId() {
+                return baseIndexId;
+            }
+
+            @Mock
+            public String getStorageVaultId() {
+                return "vault_id";
+            }
+
+            @Mock
+            public String getStorageVaultName() {
+                return "vault_name";
+            }
+
+            @Mock
+            public String getName() {
+                return "test_table";
+            }
+
+            @Mock
+            public java.util.List<Index> getIndexes() {
+                return Lists.newArrayList();
+            }
+
+            @Mock
+            public TableProperty getTableProperty() {
+                return new TableProperty(Maps.newHashMap());
+            }
+
+            @Mock
+            public double getBfFpp() {
+                return 0.05;
+            }
+
+            @Mock
+            public DataSortInfo getDataSortInfo() {
+                return null;
+            }
+        };
+
+        // Create initial partition
+        long partition1Id = 3000L;
+        try {
+            OlapTable table = new OlapTable();
+            cloudCatalog.createPartitionWithIndices(
+                    db.getId(), table, partition1Id, "p1",
+                    indexIdToMeta, // Pass proper index metadata
+                    new HashDistributionInfo(1, Lists.newArrayList()),
+                    new DataProperty(TStorageMedium.HDD),
+                    new ReplicaAllocation((short) 1),
+                    1L, Sets.newHashSet(), Sets.newHashSet(),
+                    false,
+                    TTabletType.TABLET_TYPE_DISK,
+                    "", null, null, false);
+        } catch (Exception e) {
+            // Expected in test environment
+        }
+
+        // Verify partition1 uses V1 format (config was disabled)
+        Assert.assertEquals("First partition should use V1 format when config 
is disabled",
+                TInvertedIndexFileStorageFormat.V1, 
partitionFormats.get(partition1Id));
+
+        // Step 2: Enable config and create new partition
+        Config.enable_new_partition_inverted_index_v2_format = true;
+
+        long partition2Id = 3001L;
+        try {
+            OlapTable table = new OlapTable();
+            cloudCatalog.createPartitionWithIndices(
+                    db.getId(), table, partition2Id, "p2",
+                    indexIdToMeta, // Pass proper index metadata
+                    new HashDistributionInfo(1, Lists.newArrayList()),
+                    new DataProperty(TStorageMedium.HDD),
+                    new ReplicaAllocation((short) 1),
+                    1L, Sets.newHashSet(), Sets.newHashSet(),
+                    false,
+                    TTabletType.TABLET_TYPE_DISK,
+                    "", null, null, false);
+        } catch (Exception e) {
+            // Expected in test environment
+        }
+
+        // Step 3: Verify mixed formats
+        Assert.assertEquals("First partition should still be V1",
+                TInvertedIndexFileStorageFormat.V1, 
partitionFormats.get(partition1Id));
+        Assert.assertEquals("Second partition should be upgraded to V2",
+                TInvertedIndexFileStorageFormat.V2, 
partitionFormats.get(partition2Id));
+    }
+
+    @Test
+    public void testCloudV1FormatRemainsWhenConfigDisabled() throws Exception {
+        // Test: V1 table format should remain V1 when config is disabled
+        Config.enable_new_partition_inverted_index_v2_format = false;
+
+        AtomicReference<TInvertedIndexFileStorageFormat> capturedFormat = new 
AtomicReference<>();
+
+        // Mock sendCreateTabletsRpc to avoid actual meta service calls
+        new MockUp<CloudInternalCatalog>() {
+            @Mock
+            public Cloud.CreateTabletsResponse 
sendCreateTabletsRpc(Cloud.CreateTabletsRequest.Builder requestBuilder)
+                    throws DdlException {
+                return Cloud.CreateTabletsResponse.newBuilder()
+                        .setStatus(Cloud.MetaServiceResponseStatus.newBuilder()
+                                .setCode(Cloud.MetaServiceCode.OK)
+                                .setMsg("OK"))
+                        .build();
+            }
+        };
+
+        // Mock createTabletMetaBuilder to capture the actual format used 
during partition creation
+        new MockUp<CloudInternalCatalog>() {
+            @Mock
+            public OlapFile.TabletMetaCloudPB.Builder 
createTabletMetaBuilder(long tableId, long indexId,
+                    long partitionId, Tablet tablet, TTabletType tabletType, 
int schemaHash, KeysType keysType,
+                    short shortKeyColumnCount, Set<String> bfColumns, double 
bfFpp, List<Index> indexes,
+                    List<Column> schemaColumns, DataSortInfo dataSortInfo, 
TCompressionType compressionType,
+                    String storagePolicy, boolean isInMemory, boolean isShadow,
+                    String tableName, long ttlSeconds, boolean 
enableUniqueKeyMergeOnWrite,
+                    boolean storeRowColumn, int schemaVersion, String 
compactionPolicy,
+                    Long timeSeriesCompactionGoalSizeMbytes, Long 
timeSeriesCompactionFileCountThreshold,
+                    Long timeSeriesCompactionTimeThresholdSeconds, Long 
timeSeriesCompactionEmptyRowsetsThreshold,
+                    Long timeSeriesCompactionLevelThreshold, boolean 
disableAutoCompaction,
+                    List<Integer> rowStoreColumnUniqueIds, boolean 
enableMowLightDelete,
+                    TInvertedIndexFileStorageFormat 
invertedIndexFileStorageFormat, long pageSize,
+                    boolean variantEnableFlattenNested, long storagePageSize) 
throws DdlException {
+
+                // Capture the actual format passed to createTabletMetaBuilder
+                capturedFormat.set(invertedIndexFileStorageFormat);
+                return OlapFile.TabletMetaCloudPB.newBuilder();
+            }
+        };
+
+        CloudInternalCatalog cloudCatalog = (CloudInternalCatalog) 
masterEnv.getInternalCatalog();
+
+        // Create MaterializedIndexMeta for base index
+        long baseIndexId = 2000L;
+        MaterializedIndexMeta indexMeta =
+                new MaterializedIndexMeta(
+                        baseIndexId,
+                        Lists.newArrayList(new Column("col1",
+                                PrimitiveType.INT)),
+                        0, // schema version
+                        100, // schema hash
+                        (short) 1, // short key column count
+                        TStorageType.COLUMN,
+                        KeysType.DUP_KEYS,
+                        new OriginStatement("CREATE TABLE test", 0) // origin 
stmt
+                );
+        Map<Long, MaterializedIndexMeta> indexIdToMeta = Maps.newHashMap();
+        indexIdToMeta.put(baseIndexId, indexMeta);
+
+        // Create a mock OlapTable with V1 format
+        new MockUp<OlapTable>() {
+            @Mock
+            public TInvertedIndexFileStorageFormat 
getInvertedIndexFileStorageFormat() {
+                return TInvertedIndexFileStorageFormat.V1; // Table originally 
has V1 format
+            }
+
+            @Mock
+            public long getId() {
+                return 1000L;
+            }
+
+            @Mock
+            public long getBaseIndexId() {
+                return baseIndexId;
+            }
+
+            @Mock
+            public String getStorageVaultId() {
+                return "vault_id";
+            }
+
+            @Mock
+            public String getStorageVaultName() {
+                return "vault_name";
+            }
+
+            @Mock
+            public String getName() {
+                return "test_table";
+            }
+
+            @Mock
+            public java.util.List<Index> getIndexes() {
+                return Lists.newArrayList();
+            }
+
+            @Mock
+            public TableProperty getTableProperty() {
+                return new TableProperty(Maps.newHashMap());
+            }
+
+            @Mock
+            public double getBfFpp() {
+                return 0.05;
+            }
+
+            @Mock
+            public DataSortInfo getDataSortInfo() {
+                return null;
+            }
+        };
+
+        try {
+            OlapTable table = new OlapTable();
+
+            // Call the actual createPartitionWithIndices method to test no 
upgrade when config disabled
+            cloudCatalog.createPartitionWithIndices(
+                    db.getId(), table, 3000L, "test_partition",
+                    indexIdToMeta, // Pass proper index metadata
+                    new HashDistributionInfo(1, Lists.newArrayList()),
+                    new DataProperty(TStorageMedium.HDD),
+                    new ReplicaAllocation((short) 1),
+                    1L, Sets.newHashSet(), Sets.newHashSet(),
+                    false,
+                    TTabletType.TABLET_TYPE_DISK,
+                    "", null, null, false);
+        } catch (Exception e) {
+            // It's expected to fail in test environment, we only care about 
the format capture
+        }
+
+        // Verify that V1 table format remains V1 when config is disabled
+        Assert.assertEquals("V1 table format should remain V1 when config is 
disabled",
+                TInvertedIndexFileStorageFormat.V1, capturedFormat.get());
+    }
+
+    @Test
+    public void testCloudV2TableFormatBehavior() throws Exception {
+        // Test V2 table format behavior in cloud mode - should remain V2 
regardless of config
+        Config.enable_new_partition_inverted_index_v2_format = true;
+        AtomicReference<TInvertedIndexFileStorageFormat> capturedFormat = new 
AtomicReference<>();
+        // Mock sendCreateTabletsRpc to avoid actual meta service calls
+        new MockUp<CloudInternalCatalog>() {
+            @Mock
+            public Cloud.CreateTabletsResponse 
sendCreateTabletsRpc(Cloud.CreateTabletsRequest.Builder requestBuilder)
+                    throws DdlException {
+                return Cloud.CreateTabletsResponse.newBuilder()
+                        .setStatus(Cloud.MetaServiceResponseStatus.newBuilder()
+                                .setCode(Cloud.MetaServiceCode.OK)
+                                .setMsg("OK"))
+                        .build();
+            }
+        };
+
+        // Mock createTabletMetaBuilder to capture the actual format used 
during partition creation
+        new MockUp<CloudInternalCatalog>() {
+            @Mock
+            public OlapFile.TabletMetaCloudPB.Builder 
createTabletMetaBuilder(long tableId, long indexId,
+                    long partitionId, Tablet tablet, TTabletType tabletType, 
int schemaHash, KeysType keysType,
+                    short shortKeyColumnCount, Set<String> bfColumns, double 
bfFpp, List<Index> indexes,
+                    List<Column> schemaColumns, DataSortInfo dataSortInfo, 
TCompressionType compressionType,
+                    String storagePolicy, boolean isInMemory, boolean isShadow,
+                    String tableName, long ttlSeconds, boolean 
enableUniqueKeyMergeOnWrite,
+                    boolean storeRowColumn, int schemaVersion, String 
compactionPolicy,
+                    Long timeSeriesCompactionGoalSizeMbytes, Long 
timeSeriesCompactionFileCountThreshold,
+                    Long timeSeriesCompactionTimeThresholdSeconds, Long 
timeSeriesCompactionEmptyRowsetsThreshold,
+                    Long timeSeriesCompactionLevelThreshold, boolean 
disableAutoCompaction,
+                    List<Integer> rowStoreColumnUniqueIds, boolean 
enableMowLightDelete,
+                    TInvertedIndexFileStorageFormat 
invertedIndexFileStorageFormat, long pageSize,
+                    boolean variantEnableFlattenNested, long storagePageSize) 
throws DdlException {
+                // Capture the actual format passed to createTabletMetaBuilder
+                capturedFormat.set(invertedIndexFileStorageFormat);
+                return OlapFile.TabletMetaCloudPB.newBuilder();
+            }
+        };
+        CloudInternalCatalog cloudCatalog = (CloudInternalCatalog) 
masterEnv.getInternalCatalog();
+        // Create MaterializedIndexMeta for base index
+        long baseIndexId = 2000L;
+        MaterializedIndexMeta indexMeta =
+                new MaterializedIndexMeta(
+                        baseIndexId,
+                        Lists.newArrayList(new Column("col1",
+                                PrimitiveType.INT)),
+                        0, // schema version
+                        100, // schema hash
+                        (short) 1, // short key column count
+                        TStorageType.COLUMN,
+                        KeysType.DUP_KEYS,
+                        new OriginStatement("CREATE TABLE test", 0) // origin 
stmt
+                );
+        Map<Long, MaterializedIndexMeta> indexIdToMeta = Maps.newHashMap();
+        indexIdToMeta.put(baseIndexId, indexMeta);
+
+        // Create a mock OlapTable with V2 format
+        new MockUp<OlapTable>() {
+            @Mock
+            public TInvertedIndexFileStorageFormat 
getInvertedIndexFileStorageFormat() {
+                return TInvertedIndexFileStorageFormat.V2; // Table originally 
has V2 format
+            }
+
+            @Mock
+            public long getId() {
+                return 1000L;
+            }
+
+            @Mock
+            public long getBaseIndexId() {
+                return baseIndexId;
+            }
+
+            @Mock
+            public String getStorageVaultId() {
+                return "vault_id";
+            }
+
+            @Mock
+            public String getStorageVaultName() {
+                return "vault_name";
+            }
+
+            @Mock
+            public String getName() {
+                return "test_table";
+            }
+
+            @Mock
+            public java.util.List<Index> getIndexes() {
+                return Lists.newArrayList();
+            }
+
+            @Mock
+            public TableProperty getTableProperty() {
+                return new TableProperty(Maps.newHashMap());
+            }
+
+            @Mock
+            public double getBfFpp() {
+                return 0.05;
+            }
+
+            @Mock
+            public DataSortInfo getDataSortInfo() {
+                return null;
+            }
+        };
+
+        try {
+            OlapTable table = new OlapTable();
+
+            // Call the actual createPartitionWithIndices method to test V2 
format behavior
+            cloudCatalog.createPartitionWithIndices(
+                    db.getId(), table, 3000L, "test_partition",
+                    indexIdToMeta, // Pass proper index metadata
+                    new HashDistributionInfo(1, Lists.newArrayList()),
+                    new DataProperty(TStorageMedium.HDD),
+                    new ReplicaAllocation((short) 1),
+                    1L, Sets.newHashSet(), Sets.newHashSet(),
+                    false,
+                    TTabletType.TABLET_TYPE_DISK,
+                    "", null, null, false);
+        } catch (Exception e) {
+            // It's expected to fail in test environment, we only care about 
the format capture
+        }
+
+        // Verify that V2 table format remains V2 when config is enabled
+        Assert.assertEquals("V2 table format should remain V2 when config is 
enabled",
+                TInvertedIndexFileStorageFormat.V2, capturedFormat.get());
+
+        // Test with config disabled - V2 should still remain V2
+        capturedFormat.set(null); // Reset
+        Config.enable_new_partition_inverted_index_v2_format = false;
+
+        try {
+            OlapTable table = new OlapTable();
+            cloudCatalog.createPartitionWithIndices(
+                    db.getId(), table, 3001L, "test_partition2",
+                    indexIdToMeta, // Pass proper index metadata
+                    new HashDistributionInfo(1, Lists.newArrayList()),
+                    new DataProperty(TStorageMedium.HDD),
+                    new ReplicaAllocation((short) 1),
+                    1L, Sets.newHashSet(), Sets.newHashSet(),
+                    false,
+                    TTabletType.TABLET_TYPE_DISK,
+                    "", null, null, false);
+        } catch (Exception e) {
+            // It's expected to fail in test environment, we only care about 
the format capture
+        }
+
+        // Verify that V2 table format remains V2 even when config is disabled
+        Assert.assertEquals("V2 table format should remain V2 when config is 
disabled",
+                TInvertedIndexFileStorageFormat.V2, capturedFormat.get());
+    }
+}
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/InternalCatalogTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/InternalCatalogTest.java
new file mode 100644
index 00000000000..4b310b1dcf1
--- /dev/null
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/InternalCatalogTest.java
@@ -0,0 +1,584 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource;
+
+import org.apache.doris.analysis.DataSortInfo;
+import org.apache.doris.analysis.UserIdentity;
+import org.apache.doris.catalog.CatalogTestUtil;
+import org.apache.doris.catalog.Column;
+import org.apache.doris.catalog.DataProperty;
+import org.apache.doris.catalog.Database;
+import org.apache.doris.catalog.DistributionInfo;
+import org.apache.doris.catalog.Env;
+import org.apache.doris.catalog.EnvFactory;
+import org.apache.doris.catalog.FakeEditLog;
+import org.apache.doris.catalog.FakeEnv;
+import org.apache.doris.catalog.HashDistributionInfo;
+import org.apache.doris.catalog.Index;
+import org.apache.doris.catalog.KeysType;
+import org.apache.doris.catalog.MaterializedIndex;
+import org.apache.doris.catalog.MaterializedIndexMeta;
+import org.apache.doris.catalog.MetaIdGenerator.IdGeneratorBuffer;
+import org.apache.doris.catalog.OlapTable;
+import org.apache.doris.catalog.PrimitiveType;
+import org.apache.doris.catalog.Replica;
+import org.apache.doris.catalog.Replica.ReplicaState;
+import org.apache.doris.catalog.ReplicaAllocation;
+import org.apache.doris.catalog.TableProperty;
+import org.apache.doris.catalog.Tablet;
+import org.apache.doris.catalog.TabletMeta;
+import org.apache.doris.common.Config;
+import org.apache.doris.common.DdlException;
+import org.apache.doris.common.FeConstants;
+import org.apache.doris.common.MarkedCountDownLatch;
+import org.apache.doris.common.Status;
+import org.apache.doris.persist.EditLog;
+import org.apache.doris.qe.ConnectContext;
+import org.apache.doris.qe.OriginStatement;
+import org.apache.doris.system.Backend;
+import org.apache.doris.system.SystemInfoService;
+import org.apache.doris.task.CreateReplicaTask;
+import org.apache.doris.thrift.TInvertedIndexFileStorageFormat;
+import org.apache.doris.thrift.TStorageMedium;
+import org.apache.doris.thrift.TStorageType;
+import org.apache.doris.thrift.TTabletType;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import mockit.Invocation;
+import mockit.Mock;
+import mockit.MockUp;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.lang.reflect.InvocationTargetException;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class InternalCatalogTest {
+
+    private static final String TEST_DB_NAME = "test_db";
+    private static final String TEST_TABLE_NAME = "test_table";
+
+    private static FakeEditLog fakeEditLog;
+    private static FakeEnv fakeEnv;
+    private static Env masterEnv;
+    private static EditLog testEditLog;
+    private static Database db;
+    private ConnectContext ctx;
+
+    @Before
+    public void setUp() throws InstantiationException, IllegalAccessException, 
IllegalArgumentException,
+            InvocationTargetException, NoSuchMethodException, 
SecurityException {
+        FeConstants.runningUnitTest = true;
+        Config.enable_new_partition_inverted_index_v2_format = false;
+
+        EnvFactory envFactory = EnvFactory.getInstance();
+        masterEnv = envFactory.createEnv(false);
+        fakeEnv = new FakeEnv();
+
+        // Create SystemInfoService with a live backend
+        SystemInfoService systemInfoService = new SystemInfoService();
+        Backend backend = new Backend(0, "127.0.0.1", 9050);
+        backend.updateOnce(9060, 8040, 9070); // bePort, httpPort, beRpcPort
+        systemInfoService.addBackend(backend);
+
+        FakeEnv.setSystemInfo(systemInfoService);
+
+        fakeEditLog = new FakeEditLog();
+        testEditLog = null;
+        FakeEnv.setEnv(masterEnv);
+
+        ctx = new ConnectContext();
+        ctx.setEnv(masterEnv);
+        UserIdentity rootUser = new UserIdentity("root", "%");
+        rootUser.setIsAnalyzed();
+        ctx.setCurrentUserIdentity(rootUser);
+        ctx.setThreadLocalInfo();
+
+        new MockUp<Env>() {
+            @Mock
+            public Env getCurrentEnv() {
+                return masterEnv;
+            }
+
+            @Mock
+            public EditLog getEditLog() {
+                if (testEditLog == null) {
+                    testEditLog = new EditLog("test") {
+                    };
+                }
+                return testEditLog;
+            }
+        };
+
+        db = new Database(CatalogTestUtil.testDbId1, TEST_DB_NAME);
+        masterEnv.unprotectCreateDb(db);
+    }
+
+    @Test
+    public void testMixedFormatPartitions() throws Exception {
+        // Test: Old partitions keep V1, new partitions use V2 when config is 
enabled
+
+        // Step 1: Create initial partition with V1 format (config disabled)
+        Config.enable_new_partition_inverted_index_v2_format = false;
+        Map<Long, TInvertedIndexFileStorageFormat> partitionFormats = 
Maps.newHashMap();
+
+        // Mock MarkedCountDownLatch to immediately return success
+        new MockUp<MarkedCountDownLatch>() {
+            @Mock
+            public boolean await(long time, java.util.concurrent.TimeUnit 
unit) {
+                return true; // Immediately return success
+            }
+
+            @Mock
+            public Status getStatus() {
+                return Status.OK;
+            }
+        };
+
+        new MockUp<InternalCatalog>() {
+            @Mock
+            public TStorageMedium createTablets(MaterializedIndex index, 
ReplicaState replicaState,
+                    DistributionInfo distributionInfo, long version, 
ReplicaAllocation replicaAlloc,
+                    TabletMeta tabletMeta, Set<Long> tabletIdSet, 
IdGeneratorBuffer idGeneratorBuffer,
+                    boolean isStorageMediumSpecified) throws DdlException {
+                Tablet tablet = new org.apache.doris.catalog.Tablet(10001);
+                Replica replica = new Replica(10031, 0, 0, replicaState);
+                tablet.addReplica(replica, true);
+                index.addTablet(tablet, tabletMeta);
+                tabletIdSet.add(tablet.getId());
+                return TStorageMedium.HDD;
+            }
+        };
+        // Mock CreateReplicaTask to capture the format set for each partition
+        new MockUp<CreateReplicaTask>() {
+            @Mock
+            public void setInvertedIndexFileStorageFormat(Invocation inv, 
TInvertedIndexFileStorageFormat format) {
+                // Capture the format for this partition
+                // We'll use a simple approach to capture the format without 
calling the real method
+                // since we're in a mock context
+                CreateReplicaTask self = inv.getInvokedInstance();
+                long pid = self.getPartitionId();
+                partitionFormats.put(pid, format); // Use a default key for now
+            }
+        };
+
+        InternalCatalog internalCatalog = (InternalCatalog) 
masterEnv.getInternalCatalog();
+
+        // Create MaterializedIndexMeta for base index
+        long baseIndexId = 2000L;
+        MaterializedIndexMeta indexMeta =
+                new MaterializedIndexMeta(
+                        baseIndexId,
+                        Lists.newArrayList(new Column("col1",
+                                PrimitiveType.INT)),
+                        0,
+                        100,
+                        (short) 1,
+                        TStorageType.COLUMN,
+                        KeysType.DUP_KEYS,
+                        new OriginStatement("CREATE TABLE test", 0)
+                );
+        Map<Long, MaterializedIndexMeta> indexIdToMeta = Maps.newHashMap();
+        indexIdToMeta.put(baseIndexId, indexMeta);
+
+        // Mock OlapTable with V1 format
+        new MockUp<OlapTable>() {
+            @Mock
+            public TInvertedIndexFileStorageFormat 
getInvertedIndexFileStorageFormat() {
+                return TInvertedIndexFileStorageFormat.V1;
+            }
+
+            @Mock
+            public long getId() {
+                return 1000L;
+            }
+
+            @Mock
+            public long getBaseIndexId() {
+                return baseIndexId;
+            }
+
+            @Mock
+            public String getName() {
+                return "test_table";
+            }
+
+            @Mock
+            public java.util.List<Index> getIndexes() {
+                return Lists.newArrayList();
+            }
+
+            @Mock
+            public TableProperty getTableProperty() {
+                return new TableProperty(Maps.newHashMap());
+            }
+
+            @Mock
+            public double getBfFpp() {
+                return 0.05;
+            }
+
+            @Mock
+            public DataSortInfo getDataSortInfo() {
+                return null;
+            }
+        };
+
+        // Create initial partition
+        long partition1Id = 3000L;
+        try {
+            OlapTable table = new OlapTable();
+            internalCatalog.createPartitionWithIndices(
+                    db.getId(), table, partition1Id, "p1",
+                    indexIdToMeta,
+                    new HashDistributionInfo(1, Lists.newArrayList()),
+                    new DataProperty(TStorageMedium.HDD),
+                    new ReplicaAllocation((short) 1),
+                    1L, Sets.newHashSet(), Sets.newHashSet(),
+                    false,
+                    TTabletType.TABLET_TYPE_DISK,
+                    "", null, null, false);
+        } catch (Exception e) {
+            e.printStackTrace();
+            // Expected in test environment
+        }
+
+        // Verify partition1 uses V1 format (config was disabled)
+        Assert.assertEquals("First partition should use V1 format when config 
is disabled",
+                TInvertedIndexFileStorageFormat.V1, 
partitionFormats.get(partition1Id));
+
+        // Step 2: Enable config and create new partition
+        Config.enable_new_partition_inverted_index_v2_format = true;
+
+        long partition2Id = 3001L;
+        try {
+            OlapTable table = new OlapTable();
+            internalCatalog.createPartitionWithIndices(
+                    db.getId(), table, partition2Id, "p2",
+                    indexIdToMeta,
+                    new HashDistributionInfo(1, Lists.newArrayList()),
+                    new DataProperty(TStorageMedium.HDD),
+                    new ReplicaAllocation((short) 1),
+                    1L, Sets.newHashSet(), Sets.newHashSet(),
+                    false,
+                    TTabletType.TABLET_TYPE_DISK,
+                    "", null, null, false);
+        } catch (Exception e) {
+            e.printStackTrace();
+            // Expected in test environment
+        }
+
+        // Step 3: Verify mixed formats
+        Assert.assertEquals("First partition should still be V1",
+                TInvertedIndexFileStorageFormat.V1, 
partitionFormats.get(partition1Id));
+        Assert.assertEquals("Second partition should be upgraded to V2",
+                TInvertedIndexFileStorageFormat.V2, 
partitionFormats.get(partition2Id));
+    }
+
+    @Test
+    public void testV1FormatRemainsWhenConfigDisabled() throws Exception {
+        // Test: V1 table format should remain V1 when config is disabled
+        Config.enable_new_partition_inverted_index_v2_format = false;
+
+        AtomicReference<TInvertedIndexFileStorageFormat> capturedFormat = new 
AtomicReference<>();
+
+        // Mock MarkedCountDownLatch to immediately return success
+        new MockUp<MarkedCountDownLatch>() {
+            @Mock
+            public boolean await(long time, java.util.concurrent.TimeUnit 
unit) {
+                return true; // Immediately return success
+            }
+
+            @Mock
+            public Status getStatus() {
+                return Status.OK;
+            }
+        };
+
+        new MockUp<InternalCatalog>() {
+            @Mock
+            public TStorageMedium createTablets(MaterializedIndex index, 
ReplicaState replicaState,
+                    DistributionInfo distributionInfo, long version, 
ReplicaAllocation replicaAlloc,
+                    TabletMeta tabletMeta, Set<Long> tabletIdSet, 
IdGeneratorBuffer idGeneratorBuffer,
+                    boolean isStorageMediumSpecified) throws DdlException {
+                Tablet tablet = new org.apache.doris.catalog.Tablet(10001);
+                Replica replica = new Replica(10031, 0, 0, replicaState);
+                tablet.addReplica(replica, true);
+                index.addTablet(tablet, tabletMeta);
+                tabletIdSet.add(tablet.getId());
+                return TStorageMedium.HDD;
+            }
+        };
+        // Mock CreateReplicaTask to capture the format set for each partition
+        new MockUp<CreateReplicaTask>() {
+            @Mock
+            public void setInvertedIndexFileStorageFormat(Invocation inv, 
TInvertedIndexFileStorageFormat format) {
+                // Capture the format for this partition
+                // We'll use a simple approach to capture the format without 
calling the real method
+                // since we're in a mock context
+                capturedFormat.set(format); // Use a default key for now
+            }
+        };
+
+        InternalCatalog internalCatalog = (InternalCatalog) 
masterEnv.getInternalCatalog();
+
+        // Create MaterializedIndexMeta for base index
+        long baseIndexId = 2000L;
+        MaterializedIndexMeta indexMeta =
+                new MaterializedIndexMeta(
+                        baseIndexId,
+                        Lists.newArrayList(new Column("col1",
+                                PrimitiveType.INT)),
+                        0,
+                        100,
+                        (short) 1,
+                        TStorageType.COLUMN,
+                        KeysType.DUP_KEYS,
+                        new OriginStatement("CREATE TABLE test", 0)
+                );
+        Map<Long, MaterializedIndexMeta> indexIdToMeta = Maps.newHashMap();
+        indexIdToMeta.put(baseIndexId, indexMeta);
+
+        // Create a mock OlapTable with V1 format
+        new MockUp<OlapTable>() {
+            @Mock
+            public TInvertedIndexFileStorageFormat 
getInvertedIndexFileStorageFormat() {
+                return TInvertedIndexFileStorageFormat.V1;
+            }
+
+            @Mock
+            public long getId() {
+                return 1000L;
+            }
+
+            @Mock
+            public long getBaseIndexId() {
+                return baseIndexId;
+            }
+
+            @Mock
+            public String getName() {
+                return "test_table";
+            }
+
+            @Mock
+            public java.util.List<Index> getIndexes() {
+                return Lists.newArrayList();
+            }
+
+            @Mock
+            public TableProperty getTableProperty() {
+                return new TableProperty(Maps.newHashMap());
+            }
+
+            @Mock
+            public double getBfFpp() {
+                return 0.05;
+            }
+
+            @Mock
+            public DataSortInfo getDataSortInfo() {
+                return null;
+            }
+        };
+
+        try {
+            OlapTable table = new OlapTable();
+
+            // Call the actual createPartitionWithIndices method to test no upgrade when config disabled
+            internalCatalog.createPartitionWithIndices(
+                    db.getId(), table, 3000L, "test_partition",
+                    indexIdToMeta,
+                    new HashDistributionInfo(1, Lists.newArrayList()),
+                    new DataProperty(TStorageMedium.HDD),
+                    new ReplicaAllocation((short) 1),
+                    1L, Sets.newHashSet(), Sets.newHashSet(),
+                    false,
+                    TTabletType.TABLET_TYPE_DISK,
+                    "", null, null, false);
+        } catch (Exception e) {
+            e.printStackTrace();
+            // This is expected to fail in the test environment; we only care about the captured format
+        }
+
+        // Verify that V1 table format remains V1 when config is disabled
+        Assert.assertEquals("V1 table format should remain V1 when config is 
disabled",
+                TInvertedIndexFileStorageFormat.V1, capturedFormat.get());
+    }
+
+    @Test
+    public void testV2TableFormatBehavior() throws Exception {
+        // Test V2 table format behavior - should remain V2 regardless of config
+        Config.enable_new_partition_inverted_index_v2_format = true;
+        AtomicReference<TInvertedIndexFileStorageFormat> capturedFormat = new AtomicReference<>();
+
+        // Mock MarkedCountDownLatch to immediately return success
+        new MockUp<MarkedCountDownLatch>() {
+            @Mock
+            public boolean await(long time, java.util.concurrent.TimeUnit unit) {
+                return true; // Immediately return success
+            }
+
+            @Mock
+            public Status getStatus() {
+                return Status.OK;
+            }
+        };
+
+        new MockUp<InternalCatalog>() {
+            @Mock
+            public TStorageMedium createTablets(MaterializedIndex index, ReplicaState replicaState,
+                    DistributionInfo distributionInfo, long version, ReplicaAllocation replicaAlloc,
+                    TabletMeta tabletMeta, Set<Long> tabletIdSet, IdGeneratorBuffer idGeneratorBuffer,
+                    boolean isStorageMediumSpecified) throws DdlException {
+                Tablet tablet = new org.apache.doris.catalog.Tablet(10001);
+                Replica replica = new Replica(10031, 0, 0, replicaState);
+                tablet.addReplica(replica, true);
+                index.addTablet(tablet, tabletMeta);
+                tabletIdSet.add(tablet.getId());
+                return TStorageMedium.HDD;
+            }
+        };
+        // Mock CreateReplicaTask to capture the inverted index storage format set for the new partition
+        new MockUp<CreateReplicaTask>() {
+            @Mock
+            public void setInvertedIndexFileStorageFormat(Invocation inv, TInvertedIndexFileStorageFormat format) {
+                // Record the format without calling the real method, since we are in a mock context
+                capturedFormat.set(format);
+            }
+        };
+
+        InternalCatalog internalCatalog = (InternalCatalog) masterEnv.getInternalCatalog();
+
+        // Create MaterializedIndexMeta for base index
+        long baseIndexId = 2000L;
+        MaterializedIndexMeta indexMeta =
+                new MaterializedIndexMeta(
+                        baseIndexId,
+                        Lists.newArrayList(new Column("col1",
+                                PrimitiveType.INT)),
+                        0,
+                        100,
+                        (short) 1,
+                        TStorageType.COLUMN,
+                        KeysType.DUP_KEYS,
+                        new OriginStatement("CREATE TABLE test", 0)
+                );
+        Map<Long, MaterializedIndexMeta> indexIdToMeta = Maps.newHashMap();
+        indexIdToMeta.put(baseIndexId, indexMeta);
+
+        // Create a mock OlapTable with V2 format
+        new MockUp<OlapTable>() {
+            @Mock
+            public TInvertedIndexFileStorageFormat getInvertedIndexFileStorageFormat() {
+                return TInvertedIndexFileStorageFormat.V2;
+            }
+
+            @Mock
+            public long getId() {
+                return 1000L;
+            }
+
+            @Mock
+            public long getBaseIndexId() {
+                return baseIndexId;
+            }
+
+            @Mock
+            public String getName() {
+                return "test_table";
+            }
+
+            @Mock
+            public java.util.List<Index> getIndexes() {
+                return Lists.newArrayList();
+            }
+
+            @Mock
+            public TableProperty getTableProperty() {
+                return new TableProperty(Maps.newHashMap());
+            }
+
+            @Mock
+            public double getBfFpp() {
+                return 0.05;
+            }
+
+            @Mock
+            public DataSortInfo getDataSortInfo() {
+                return null;
+            }
+        };
+
+        try {
+            OlapTable table = new OlapTable();
+
+            // Call the actual createPartitionWithIndices method to test V2 format behavior
+            internalCatalog.createPartitionWithIndices(
+                    db.getId(), table, 3000L, "test_partition",
+                    indexIdToMeta,
+                    new HashDistributionInfo(1, Lists.newArrayList()),
+                    new DataProperty(TStorageMedium.HDD),
+                    new ReplicaAllocation((short) 1),
+                    1L, Sets.newHashSet(), Sets.newHashSet(),
+                    false,
+                    TTabletType.TABLET_TYPE_DISK,
+                    "", null, null, false);
+        } catch (Exception e) {
+            e.printStackTrace();
+            // This is expected to fail in the test environment; we only care about the captured format
+        }
+
+        // Verify that V2 table format remains V2 when config is enabled
+        Assert.assertEquals("V2 table format should remain V2 when config is 
enabled",
+                TInvertedIndexFileStorageFormat.V2, capturedFormat.get());
+
+        // Test with config disabled - V2 should still remain V2
+        capturedFormat.set(null); // Reset
+        Config.enable_new_partition_inverted_index_v2_format = false;
+
+        try {
+            OlapTable table = new OlapTable();
+            internalCatalog.createPartitionWithIndices(
+                    db.getId(), table, 3001L, "test_partition2",
+                    indexIdToMeta,
+                    new HashDistributionInfo(1, Lists.newArrayList()),
+                    new DataProperty(TStorageMedium.HDD),
+                    new ReplicaAllocation((short) 1),
+                    1L, Sets.newHashSet(), Sets.newHashSet(),
+                    false,
+                    TTabletType.TABLET_TYPE_DISK,
+                    "", null, null, false);
+        } catch (Exception e) {
+            e.printStackTrace();
+            // This is expected to fail in the test environment; we only care about the captured format
+        }
+
+        // Verify that V2 table format remains V2 even when config is disabled
+        Assert.assertEquals("V2 table format should remain V2 when config is 
disabled",
+                TInvertedIndexFileStorageFormat.V2, capturedFormat.get());
+    }
+
+}
diff --git a/regression-test/data/inverted_index_p0/test_inverted_index_storage_format_upgrade.out b/regression-test/data/inverted_index_p0/test_inverted_index_storage_format_upgrade.out
new file mode 100644
index 00000000000..b812262f0e1
Binary files /dev/null and b/regression-test/data/inverted_index_p0/test_inverted_index_storage_format_upgrade.out differ
diff --git a/regression-test/suites/inverted_index_p0/test_inverted_index_storage_format_upgrade.groovy b/regression-test/suites/inverted_index_p0/test_inverted_index_storage_format_upgrade.groovy
new file mode 100644
index 00000000000..230ef1e078d
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/test_inverted_index_storage_format_upgrade.groovy
@@ -0,0 +1,409 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_inverted_index_storage_format_upgrade", "nonConcurrent") {
+    def tableName = "test_inverted_index_format_upgrade"
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort)
+    
+    def show_nested_index_file_on_tablet = { ip, port, tablet ->
+        return http_client("GET", String.format("http://%s:%s/api/show_nested_index_file?tablet_id=%s", ip, port, tablet))
+    }
+    
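+    // Resolve one tablet of the given table (optionally restricted to a partition) and
+    // read its inverted index storage format from the BE show_nested_index_file HTTP API,
+    // using the format reported for the first rowset; returns null if unavailable.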
+    def get_tablet_inverted_index_format = { tblName, partitionName = null ->
+        def targetTablet = null
+        if (partitionName != null) {
+            def tablets = sql_return_maparray """ show tablets from ${tblName} partition ${partitionName}; """
+            logger.info("tablets: ${tablets}")
+            if (tablets.size() > 0) {
+                targetTablet = tablets[0]
+            }
+        } else {
+            def tablets = sql_return_maparray """ show tablets from ${tblName}; """
+            logger.info("tablets: ${tablets}")
+            if (tablets.size() > 0) {
+                targetTablet = tablets[0]
+            }
+        }
+        
+        if (targetTablet == null) {
+            logger.error("No tablet found for table: ${tblName}, partition: 
${partitionName}")
+            return null
+        }
+        
+        String tablet_id = targetTablet.TabletId
+        String backend_id = targetTablet.BackendId
+        String ip = backendId_to_backendIP.get(backend_id)
+        String port = backendId_to_backendHttpPort.get(backend_id)
+        
+        def (code, out, err) = show_nested_index_file_on_tablet(ip, port, tablet_id)
+        logger.info("Get tablet inverted index format: tablet_id=${tablet_id}, partition=${partitionName}, code=" + code)
+        
+        if (code == 0 && out != null) {
+            def jsonResponse = parseJson(out.trim())
+            if (jsonResponse.rowsets != null && jsonResponse.rowsets.size() > 0) {
+                // Return the format from the first rowset
+                def format = jsonResponse.rowsets[0].index_storage_format
+                logger.info("Tablet ${tablet_id} in partition ${partitionName} has format: ${format}")
+                return format
+            }
+        }
+        
+        logger.warn("Could not determine format for tablet ${tablet_id}")
+        return null
+    }
+    
+    def get_fe_config = { key ->
+        def result = sql "SHOW FRONTEND CONFIG LIKE '${key}'"
+        if (result.size() > 0) {
+            return result[0][1]
+        }
+        return null
+    }
+
+    def set_fe_config = { key, value ->
+        sql "ADMIN SET FRONTEND CONFIG ('${key}' = '${value}')"
+        // Wait a bit for config to take effect
+        sleep(2000)
+    }
+
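+    // Helpers to read and wait on the latest ALTER TABLE COLUMN job state for a table.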
+    def getJobState = { tblName ->
+        def jobStateResult = sql """  SHOW ALTER TABLE COLUMN WHERE TableName='${tblName}' ORDER BY createtime DESC LIMIT 1 """
+        return jobStateResult.size() > 0 ? jobStateResult[0][9] : "FINISHED"
+    }
+
+    def waitForJob = { tblName ->
+        int max_try_secs = 60
+        while (max_try_secs--) {
+            String res = getJobState(tblName)
+            if (res == "FINISHED" || res == "CANCELLED") {
+                assertEquals("FINISHED", res)
+                sleep(3000)
+                break
+            } else {
+                Thread.sleep(1000)
+                if (max_try_secs < 1) {
+                    println "test timeout," + "state:" + res
+                    assertEquals("FINISHED", res)
+                }
+            }
+        }
+    }
+
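+    // Capture the current FE config value up front so the finally block can restore it
+    // even if an assertion fails part-way through.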
+    def originalConfigValue
+    try {
+        // Get original config value to restore later
+        originalConfigValue = get_fe_config("enable_new_partition_inverted_index_v2_format")
+        logger.info("Original enable_new_partition_inverted_index_v2_format value: ${originalConfigValue}")
+
+        // Test 1: Mixed format partitions - V1 table with config disabled/enabled
+        logger.info("=== Test 1: Mixed Format Partitions ===")
+        
+        // Step 1: Disable config, create table with V1 format
+        set_fe_config("enable_new_partition_inverted_index_v2_format", "false")
+        sql "DROP TABLE IF EXISTS ${tableName}"
+        
+        sql """
+            CREATE TABLE ${tableName} (
+                id int(11) NOT NULL,
+                name varchar(255) NOT NULL,
+                description text,
+                score int(11),
+                INDEX idx_name (name) USING INVERTED,
+                INDEX idx_description (description) USING INVERTED PROPERTIES("parser"="english"),
+                INDEX idx_score (score) USING INVERTED
+            )
+            DUPLICATE KEY(id)
+            PARTITION BY RANGE(id) (
+                PARTITION p1 VALUES [("1"), ("100"))
+            )
+            DISTRIBUTED BY HASH(id) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "inverted_index_storage_format" = "V1"
+            );
+        """
+        
+        // Verify table was created with V1 format
+        def tableInfo = sql "SHOW CREATE TABLE ${tableName}"
+        assertTrue(tableInfo[0][1].contains("inverted_index_storage_format") && tableInfo[0][1].contains("V1"))
+        logger.info("Table created with V1 format successfully")
+        
+        // Insert some test data
+        sql "INSERT INTO ${tableName} VALUES (1, 'alice', 'alice loves 
programming', 95)"
+        sql "INSERT INTO ${tableName} VALUES (2, 'bob', 'bob enjoys coding', 
88)"
+        sql "INSERT INTO ${tableName} VALUES (50, 'charlie', 'charlie studies 
algorithms', 92)"
+        
+        // Sync data to ensure tablets are created
+        sql "SELECT * FROM ${tableName};"
+        
+        // Verify inverted index works with V1 format
+        qt_sql_v1_original "SELECT * FROM ${tableName} WHERE name MATCH 'alice' ORDER BY id"
+        qt_sql_v1_original_text "SELECT * FROM ${tableName} WHERE description MATCH 'programming' ORDER BY id"
+        qt_sql_v1_original_numeric "SELECT * FROM ${tableName} WHERE score = 95 ORDER BY id"
+        
+        // Verify p1 partition uses V1 format through API
+        def p1_format = get_tablet_inverted_index_format(tableName, "p1")
+        assertEquals("V1", p1_format)
+        
+        // Step 2: Enable config for V2 format upgrade  
+        set_fe_config("enable_new_partition_inverted_index_v2_format", "true")
+        
+        // Add new partition - should use V2 format due to config upgrade
+        sql "ALTER TABLE ${tableName} ADD PARTITION p2 VALUES [('100'), 
('200'))"
+        waitForJob(tableName)
+        
+        // Insert data into new partition
+        sql "INSERT INTO ${tableName} VALUES (150, 'david', 'david develops 
applications', 89)"
+        sql "INSERT INTO ${tableName} VALUES (180, 'eve', 'eve explores 
databases', 94)"
+        
+        // Sync data 
+        sql "SELECT * FROM ${tableName};"
+        
+        // Verify both partitions work correctly
+        qt_sql_mixed_all "SELECT * FROM ${tableName} ORDER BY id"
+        qt_sql_mixed_p1 "SELECT * FROM ${tableName} WHERE id < 100 AND name MATCH 'alice' ORDER BY id"
+        qt_sql_mixed_p2 "SELECT * FROM ${tableName} WHERE id >= 100 AND name MATCH 'david' ORDER BY id"
+        qt_sql_mixed_text "SELECT * FROM ${tableName} WHERE description MATCH 'programming databases' ORDER BY id"
+        
+        // Verify formats through API - this is the key validation
+        def p1_format_after = get_tablet_inverted_index_format(tableName, "p1")
+        def p2_format = get_tablet_inverted_index_format(tableName, "p2")
+        
+        assertEquals("V1", p1_format_after) // p1 should still be V1
+        assertEquals("V2", p2_format)       // p2 should be upgraded to V2
+        
+        logger.info("Mixed partition format test completed successfully - p1: 
${p1_format_after}, p2: ${p2_format}")
+        
+        // Test 2: V1 format preservation when config is disabled
+        logger.info("=== Test 2: V1 Format Preservation ===")
+        
+        def tableName2 = "${tableName}_v1_preserve"
+        set_fe_config("enable_new_partition_inverted_index_v2_format", "false")
+        
+        sql "DROP TABLE IF EXISTS ${tableName2}"
+        sql """
+            CREATE TABLE ${tableName2} (
+                id int(11) NOT NULL,
+                content varchar(255),
+                INDEX idx_content (content) USING INVERTED PROPERTIES("parser"="unicode")
+            )
+            DUPLICATE KEY(id)
+            PARTITION BY RANGE(id) (
+                PARTITION p1 VALUES [("1"), ("100"))
+            )
+            DISTRIBUTED BY HASH(id) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "inverted_index_storage_format" = "V1"
+            );
+        """
+        
+        sql "INSERT INTO ${tableName2} VALUES (1, 'test content for V1 
format')"
+        
+        // Sync data
+        sql "SELECT * FROM ${tableName2};"
+        
+        qt_sql_v1_preserve "SELECT * FROM ${tableName2} WHERE content MATCH 
'test' ORDER BY id"
+        
+        // Verify initial partition format
+        def initial_format = get_tablet_inverted_index_format(tableName2, "p1")
+        assertEquals("V1", initial_format)
+        
+        // Verify V1 format is preserved when adding partitions with config disabled
+        sql "ALTER TABLE ${tableName2} ADD PARTITION p2 VALUES [('100'), ('200'))"
+        waitForJob(tableName2)
+        
+        sql "INSERT INTO ${tableName2} VALUES (150, 'new content in second 
partition')"
+        
+        // Sync data
+        sql "SELECT * FROM ${tableName2};"
+        
+        qt_sql_v1_preserve_new "SELECT * FROM ${tableName2} WHERE content MATCH 'content' ORDER BY id"
+        
+        // Verify both partitions use V1 format when config is disabled  
+        def p1_v1_format = get_tablet_inverted_index_format(tableName2, "p1") // initial partition
+        def p2_v1_format = get_tablet_inverted_index_format(tableName2, "p2")
+        
+        // Both should be V1 since config is disabled
+        assertEquals("V1", p1_v1_format)
+        assertEquals("V1", p2_v1_format)
+        
+        logger.info("V1 format preservation test completed successfully")
+        
+        // Test 3: V2 format behavior - remains V2 regardless of config
+        logger.info("=== Test 3: V2 Format Behavior ===")
+        
+        def tableName3 = "${tableName}_v2_behavior"
+        set_fe_config("enable_new_partition_inverted_index_v2_format", "true")
+        
+        sql "DROP TABLE IF EXISTS ${tableName3}"
+        sql """
+            CREATE TABLE ${tableName3} (
+                id int(11) NOT NULL,
+                content varchar(255),
+                INDEX idx_content (content) USING INVERTED PROPERTIES("parser"="unicode")
+            )
+            DUPLICATE KEY(id)
+            PARTITION BY RANGE(id) (
+                PARTITION p1 VALUES [("1"), ("100"))
+            )
+            DISTRIBUTED BY HASH(id) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "inverted_index_storage_format" = "V2"
+            );
+        """
+        
+        sql "INSERT INTO ${tableName3} VALUES (1, 'test content for V2 
format')"
+        
+        // Sync data
+        sql "SELECT * FROM ${tableName3};"
+        
+        qt_sql_v2_enabled "SELECT * FROM ${tableName3} WHERE content MATCH 'test' ORDER BY id"
+        
+        // Verify initial partition uses V2 format
+        def p1_v2_format = get_tablet_inverted_index_format(tableName3, "p1")
+        assertEquals("V2", p1_v2_format)
+        
+        // Disable config and verify V2 format is still preserved
+        set_fe_config("enable_new_partition_inverted_index_v2_format", "false")
+        
+        sql "ALTER TABLE ${tableName3} ADD PARTITION p2 VALUES [('100'), 
('200'))"
+        waitForJob(tableName3)
+        
+        sql "INSERT INTO ${tableName3} VALUES (150, 'new content should still 
use V2')"
+        
+        // Sync data
+        sql "SELECT * FROM ${tableName3};"
+        
+        qt_sql_v2_disabled_config "SELECT * FROM ${tableName3} WHERE content MATCH 'content' ORDER BY id"
+        
+        // Verify both partitions still use V2 format even when config is disabled
+        def p1_v2_after = get_tablet_inverted_index_format(tableName3, "p1")  
+        def p2_v2_format = get_tablet_inverted_index_format(tableName3, "p2")
+        
+        assertEquals("V2", p1_v2_after) // p1 should remain V2
+        assertEquals("V2", p2_v2_format) // p2 should also be V2 (V2 table 
format preserved)
+        
+        // Verify table info still shows V2 format
+        def tableInfo3 = sql "SHOW CREATE TABLE ${tableName3}"
+        assertTrue(tableInfo3[0][1].contains("inverted_index_storage_format") && tableInfo3[0][1].contains("V2"))
+        
+        logger.info("V2 format behavior test completed successfully")
+        
+        // Test 4: Basic functionality comparison between V1 and V2 formats
+        logger.info("=== Test 4: Format Functionality Verification ===")
+        
+        def testData = [
+            [1, "apple", "red apple is sweet"],
+            [2, "banana", "yellow banana is nutritious"],
+            [3, "cherry", "red cherry is sour"],
+            [4, "date", "brown date is sweet"],
+            [5, "elderberry", "purple elderberry is rare"]
+        ]
+        
+        // Test with V1 format
+        set_fe_config("enable_new_partition_inverted_index_v2_format", "false")
+        def tableName4V1 = "${tableName}_func_v1"
+        sql "DROP TABLE IF EXISTS ${tableName4V1}"
+        sql """
+            CREATE TABLE ${tableName4V1} (
+                id int(11) NOT NULL,
+                name varchar(255),
+                description text,
+                INDEX idx_name (name) USING INVERTED,
+                INDEX idx_description (description) USING INVERTED PROPERTIES("parser"="english")
+            )
+            DUPLICATE KEY(id)
+            DISTRIBUTED BY HASH(id) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "inverted_index_storage_format" = "V1"
+            );
+        """
+        
+        // Insert test data for V1
+        for (data in testData) {
+            sql "INSERT INTO ${tableName4V1} VALUES (${data[0]}, '${data[1]}', 
'${data[2]}')"
+        }
+        
+        // Sync data
+        sql "SELECT * FROM ${tableName4V1};"
+        
+        // Test with V2 format
+        set_fe_config("enable_new_partition_inverted_index_v2_format", "true")
+        def tableName4V2 = "${tableName}_func_v2"
+        sql "DROP TABLE IF EXISTS ${tableName4V2}"
+        sql """
+            CREATE TABLE ${tableName4V2} (
+                id int(11) NOT NULL,
+                name varchar(255),
+                description text,
+                INDEX idx_name (name) USING INVERTED,
+                INDEX idx_description (description) USING INVERTED PROPERTIES("parser"="english")
+            )
+            DUPLICATE KEY(id)
+            DISTRIBUTED BY HASH(id) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "inverted_index_storage_format" = "V2"
+            );
+        """
+        
+        // Insert test data for V2
+        for (data in testData) {
+            sql "INSERT INTO ${tableName4V2} VALUES (${data[0]}, '${data[1]}', 
'${data[2]}')"
+        }
+        
+        // Sync data
+        sql "SELECT * FROM ${tableName4V2};"
+        
+        // Verify formats through API
+        def v1_format = get_tablet_inverted_index_format(tableName4V1)
+        def v2_format = get_tablet_inverted_index_format(tableName4V2)
+        
+        assertEquals("V1", v1_format)
+        assertEquals("V2", v2_format)
+        
+        // Compare functionality between V1 and V2
+        qt_sql_v1_func "SELECT * FROM ${tableName4V1} WHERE name MATCH 'apple' ORDER BY id"
+        qt_sql_v2_func "SELECT * FROM ${tableName4V2} WHERE name MATCH 'apple' ORDER BY id"
+        
+        qt_sql_v1_text_func "SELECT * FROM ${tableName4V1} WHERE description MATCH 'sweet' ORDER BY id"
+        qt_sql_v2_text_func "SELECT * FROM ${tableName4V2} WHERE description MATCH 'sweet' ORDER BY id"
+        
+        logger.info("Format functionality verification test completed 
successfully")
+        
+        // Clean up test tables
+        sql "DROP TABLE IF EXISTS ${tableName}"
+        sql "DROP TABLE IF EXISTS ${tableName}_v1_preserve"
+        sql "DROP TABLE IF EXISTS ${tableName}_v2_behavior"  
+        sql "DROP TABLE IF EXISTS ${tableName4V1}"
+        sql "DROP TABLE IF EXISTS ${tableName4V2}"
+        
+        logger.info("All inverted index storage format upgrade tests completed 
successfully")
+        
+    } finally {
+        // Restore original config
+        if (originalConfigValue != null) {
+            set_fe_config("enable_new_partition_inverted_index_v2_format", 
originalConfigValue)
+            logger.info("Restored 
enable_new_partition_inverted_index_v2_format to: ${originalConfigValue}")
+        }
+    }
+}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
