This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 29942322d23 [Pick](Variant) pick some fix (#37926)
29942322d23 is described below

commit 29942322d2311c51895ff8f41313f596425a0d1f
Author: lihangyu <15605149...@163.com>
AuthorDate: Wed Jul 17 09:41:51 2024 +0800

    [Pick](Variant) pick some fix (#37926)
    
    #37674
    #37839
    #37883
    #37857
    #37794
    #37640
    #37557
    #37217
    #37121
---
 be/src/cloud/cloud_meta_mgr.cpp                    |   7 +-
 be/src/cloud/cloud_tablet.cpp                      |   7 +-
 be/src/common/config.cpp                           |   1 +
 be/src/common/config.h                             |   2 +
 be/src/olap/rowset/segment_v2/column_reader.cpp    |   6 +
 be/src/vec/columns/column_object.cpp               | 129 +++++++++----
 be/src/vec/columns/column_object.h                 | 184 ++++++++++++++----
 be/src/vec/common/schema_util.cpp                  |  27 ++-
 be/src/vec/common/schema_util.h                    |   1 +
 .../data_types/serde/data_type_object_serde.cpp    |  10 +
 .../vec/data_types/serde/data_type_object_serde.h  |   4 +-
 be/src/vec/json/parse2column.cpp                   |  10 +-
 cloud/src/meta-service/meta_service.cpp            |   5 +-
 .../java/org/apache/doris/catalog/OlapTable.java   |  50 +++++
 .../common/proc/RemoteIndexSchemaProcDir.java      |   4 +-
 .../common/proc/RemoteIndexSchemaProcNode.java     |   9 +
 .../common/util/FetchRemoteTabletSchemaUtil.java   |   2 +
 .../java/org/apache/doris/qe/SessionVariable.java  |   5 +
 .../apache/doris/qe/ShortCircuitQueryContext.java  |  12 +-
 .../export_p0/test_outfile_csv_variant_type.out    |  41 ++++
 .../data/point_query_p0/test_point_query.out       | 146 ++++----------
 regression-test/data/variant_p0/desc.out           |   8 +
 regression-test/data/variant_p0/rqg/rqg1.out       | 215 +++++++++++++++++++++
 .../export_p0/test_outfile_csv_variant_type.groovy | 141 ++++++++++++++
 .../suites/point_query_p0/test_point_query.groovy  |  24 +++
 .../suites/prepared_stmt_p0/prepared_stmt.groovy   |   4 +-
 .../suites/variant_p0/delete_update.groovy         |   4 +-
 regression-test/suites/variant_p0/desc.groovy      |  20 ++
 regression-test/suites/variant_p0/mtmv.groovy      |  95 +++++++++
 regression-test/suites/variant_p0/rqg/load.groovy  |  23 +++
 regression-test/suites/variant_p0/rqg/rqg1.sql     |  37 ++++
 31 files changed, 1027 insertions(+), 206 deletions(-)

diff --git a/be/src/cloud/cloud_meta_mgr.cpp b/be/src/cloud/cloud_meta_mgr.cpp
index 732f3023e91..ce40f7ed539 100644
--- a/be/src/cloud/cloud_meta_mgr.cpp
+++ b/be/src/cloud/cloud_meta_mgr.cpp
@@ -747,7 +747,12 @@ Status CloudMetaMgr::update_tmp_rowset(const RowsetMeta& rs_meta) {
     CreateRowsetResponse resp;
     req.set_cloud_unique_id(config::cloud_unique_id);
 
-    RowsetMetaPB rs_meta_pb = rs_meta.get_rowset_pb(true);
+    // The variant schema may be updated, so the schema needs to be updated as well.
+    // The rowset meta updated by `rowset->merge_rowset_meta` in `BaseTablet::update_delete_bitmap`
+    // would be lost in `update_tmp_rowset` if skip_schema were set. So, to keep the latest
+    // schema for variant types, we keep the schema in `update_tmp_rowset`.
+    bool skip_schema = rs_meta.tablet_schema()->num_variant_columns() == 0;
+    RowsetMetaPB rs_meta_pb = rs_meta.get_rowset_pb(skip_schema);
     doris_rowset_meta_to_cloud(req.mutable_rowset_meta(), std::move(rs_meta_pb));
     Status st =
             retry_rpc("update committed rowset", req, &resp, 
&MetaService_Stub::update_tmp_rowset);
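
The rule this hunk encodes: the tablet schema may only be dropped from the RPC payload when it cannot have changed. A minimal standalone sketch of the decision, with TabletSchema/RowsetMeta reduced to hypothetical stand-ins:

    #include <memory>

    // Hypothetical stand-ins, for illustration only.
    struct TabletSchema {
        int variant_cols = 0;
        int num_variant_columns() const { return variant_cols; }
    };
    struct RowsetMeta {
        std::shared_ptr<TabletSchema> schema;
        std::shared_ptr<TabletSchema> tablet_schema() const { return schema; }
    };

    // Variant sub-columns evolve per rowset, so their schema must travel with
    // the rowset meta; a schema without variant columns is static and can be
    // skipped to keep the RPC payload small.
    bool should_skip_schema(const RowsetMeta& rs_meta) {
        return rs_meta.tablet_schema()->num_variant_columns() == 0;
    }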
diff --git a/be/src/cloud/cloud_tablet.cpp b/be/src/cloud/cloud_tablet.cpp
index 5f77c7f44c1..d2596d8a7d2 100644
--- a/be/src/cloud/cloud_tablet.cpp
+++ b/be/src/cloud/cloud_tablet.cpp
@@ -427,8 +427,11 @@ Result<std::unique_ptr<RowsetWriter>> CloudTablet::create_transient_rowset_write
     RowsetWriterContext context;
     context.rowset_state = PREPARED;
     context.segments_overlap = OVERLAPPING;
-    context.tablet_schema = std::make_shared<TabletSchema>();
-    context.tablet_schema->copy_from(*(rowset.tablet_schema()));
+    // During a partial update, the extracted columns of a variant should not be included in the tablet schema.
+    // This is because the partial update for a variant needs to ignore the extracted columns.
+    // Otherwise, the schema types in different rowsets might be inconsistent. When performing a partial update,
+    // the complete variant is constructed by reading all the sub-columns of the variant.
+    context.tablet_schema = rowset.tablet_schema()->copy_without_variant_extracted_columns();
     context.newest_write_timestamp = UnixSeconds();
     context.tablet_id = table_id();
     context.enable_segcompaction = false;
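
A rough sketch of what copy_without_variant_extracted_columns must do (its real definition lives on TabletSchema, outside this diff), assuming a hypothetical flat schema where extracted sub-columns are flagged:

    #include <memory>
    #include <string>
    #include <vector>

    // Hypothetical minimal schema: extracted variant sub-columns such as
    // `v.a.b` carry a flag set by the load path.
    struct ColumnDesc {
        std::string name;
        bool extracted_from_variant = false;
    };

    struct SchemaSketch {
        std::vector<ColumnDesc> columns;

        // Drop extracted sub-columns: a partial update rebuilds the full variant
        // by reading the variant column itself, so per-rowset extracted types
        // must not leak into the schema and diverge between rowsets.
        std::shared_ptr<SchemaSketch> copy_without_variant_extracted_columns() const {
            auto copy = std::make_shared<SchemaSketch>();
            for (const auto& col : columns) {
                if (!col.extracted_from_variant) {
                    copy->columns.push_back(col);
                }
            }
            return copy;
        }
    };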
diff --git a/be/src/common/config.cpp b/be/src/common/config.cpp
index c8e62465b27..3590c7afd30 100644
--- a/be/src/common/config.cpp
+++ b/be/src/common/config.cpp
@@ -1000,6 +1000,7 @@ DEFINE_Bool(enable_index_apply_preds_except_leafnode_of_andnode, "true");
 DEFINE_mBool(variant_enable_flatten_nested, "false");
 DEFINE_mDouble(variant_ratio_of_defaults_as_sparse_column, "1");
 DEFINE_mInt64(variant_threshold_rows_to_estimate_sparse_column, "1000");
+DEFINE_mBool(variant_throw_exeception_on_invalid_json, "false");
 
 // block file cache
 DEFINE_Bool(enable_file_cache, "false");
diff --git a/be/src/common/config.h b/be/src/common/config.h
index 9e348698929..a52a0357eb1 100644
--- a/be/src/common/config.h
+++ b/be/src/common/config.h
@@ -1182,6 +1182,8 @@ DECLARE_mDouble(variant_ratio_of_defaults_as_sparse_column);
 // Threshold to estimate a column is sparsed
 // Notice: TEST ONLY
 DECLARE_mInt64(variant_threshold_rows_to_estimate_sparse_column);
+// If false, treat a string with invalid JSON format as a plain string instead of throwing an exception
+DECLARE_mBool(variant_throw_exeception_on_invalid_json);
 
 DECLARE_mBool(enable_merge_on_write_correctness_check);
 // USED FOR DEBUGGING
diff --git a/be/src/olap/rowset/segment_v2/column_reader.cpp b/be/src/olap/rowset/segment_v2/column_reader.cpp
index d0f2830712d..2891e8aaa12 100644
--- a/be/src/olap/rowset/segment_v2/column_reader.cpp
+++ b/be/src/olap/rowset/segment_v2/column_reader.cpp
@@ -1601,6 +1601,9 @@ Status VariantRootColumnIterator::next_batch(size_t* n, vectorized::MutableColum
     if (obj.is_null_root()) {
         obj.create_root();
     }
+    if (!obj.is_finalized()) {
+        obj.finalize();
+    }
     auto root_column = obj.get_root();
     RETURN_IF_ERROR(_inner_iter->next_batch(n, root_column, has_null));
     obj.incr_num_rows(*n);
@@ -1634,6 +1637,9 @@ Status VariantRootColumnIterator::read_by_rowids(const rowid_t* rowids, const si
     if (obj.is_null_root()) {
         obj.create_root();
     }
+    if (!obj.is_finalized()) {
+        obj.finalize();
+    }
     auto root_column = obj.get_root();
     RETURN_IF_ERROR(_inner_iter->read_by_rowids(rowids, count, root_column));
     obj.incr_num_rows(count);
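
Both read paths now apply the same guard before handing the root to the inner iterator: create the root if it is missing, and finalize if inserts are still buffered. A self-contained mock of the shared pattern:

    // Mock of the guard shared by next_batch/read_by_rowids: the variant's
    // root must exist and be finalized (pending sub-column buffers merged to
    // their least common type) before the inner iterator writes into it.
    struct VariantMock {
        bool has_root = false;
        bool finalized = false;
        bool is_null_root() const { return !has_root; }
        void create_root() { has_root = true; }
        bool is_finalized() const { return finalized; }
        void finalize() { finalized = true; }
    };

    void prepare_root_for_read(VariantMock& obj) {
        if (obj.is_null_root()) {
            obj.create_root();
        }
        if (!obj.is_finalized()) {
            obj.finalize();
        }
    }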
diff --git a/be/src/vec/columns/column_object.cpp b/be/src/vec/columns/column_object.cpp
index 043c442e275..b56c4857334 100644
--- a/be/src/vec/columns/column_object.cpp
+++ b/be/src/vec/columns/column_object.cpp
@@ -21,6 +21,7 @@
 #include "vec/columns/column_object.h"
 
 #include <assert.h>
+#include <fmt/core.h>
 #include <fmt/format.h>
 #include <glog/logging.h>
 #include <parallel_hashmap/phmap.h>
@@ -34,6 +35,7 @@
 #include <map>
 #include <memory>
 #include <optional>
+#include <sstream>
 #include <vector>
 
 #include "common/compiler_util.h" // IWYU pragma: keep
@@ -677,8 +679,6 @@ void ColumnObject::check_consistency() const {
     }
     for (const auto& leaf : subcolumns) {
         if (num_rows != leaf->data.size()) {
-            // LOG(FATAL) << "unmatched column:" << leaf->path.get_path()
-            //            << ", expeted rows:" << num_rows << ", but meet:" << leaf->data.size();
             throw doris::Exception(doris::ErrorCode::INTERNAL_ERROR,
                                    "unmatched column: {}, expeted rows: {}, but meet: {}",
                                    leaf->path.get_path(), num_rows, leaf->data.size());
@@ -791,26 +791,61 @@ void ColumnObject::insert_default() {
     ++num_rows;
 }
 
-Field ColumnObject::operator[](size_t n) const {
-    if (!is_finalized()) {
-        const_cast<ColumnObject*>(this)->finalize();
-    }
-    VariantMap map;
-    for (const auto& entry : subcolumns) {
-        if (WhichDataType(remove_nullable(entry->data.data_types.back())).is_json()) {
+void ColumnObject::Subcolumn::get(size_t n, Field& res) const {
+    if (is_finalized()) {
+        if (least_common_type.get_base_type_id() == TypeIndex::JSONB) {
             // JsonbField is a special case
-            Field f = JsonbField();
-            (*entry->data.data.back()).get(n, f);
-            map[entry->path.get_path()] = std::move(f);
-            continue;
+            res = JsonbField();
+        }
+        get_finalized_column().get(n, res);
+        return;
+    }
+
+    size_t ind = n;
+    if (ind < num_of_defaults_in_prefix) {
+        if (least_common_type.get_base_type_id() == TypeIndex::Nothing) {
+            res = Null();
+            return;
         }
-        map[entry->path.get_path()] = (*entry->data.data.back())[n];
+        res = least_common_type.get()->get_default();
+        return;
     }
-    return map;
+
+    ind -= num_of_defaults_in_prefix;
+    for (size_t i = 0; i < data.size(); ++i) {
+        const auto& part = data[i];
+        const auto& part_type = data_types[i];
+        if (ind < part->size()) {
+            res = vectorized::remove_nullable(part_type)->get_default();
+            part->get(ind, res);
+            Field new_field;
+            convert_field_to_type(res, *least_common_type.get(), &new_field);
+            res = new_field;
+            return;
+        }
+
+        ind -= part->size();
+    }
+
+    throw doris::Exception(ErrorCode::OUT_OF_BOUND, "Index ({}) for getting field is out of range",
+                           n);
+}
+
+Field ColumnObject::operator[](size_t n) const {
+    Field object;
+    get(n, object);
+    return object;
 }
 
 void ColumnObject::get(size_t n, Field& res) const {
-    res = (*this)[n];
+    assert(n < size());
+    res = VariantMap();
+    auto& object = res.get<VariantMap&>();
+
+    for (const auto& entry : subcolumns) {
+        auto it = object.try_emplace(entry->path.get_path()).first;
+        entry->data.get(n, it->second);
+    }
 }
 
 Status ColumnObject::try_insert_indices_from(const IColumn& src, const int* indices_begin,
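
The heart of the new Subcolumn::get is index arithmetic over a prefix of implicit defaults followed by the typed parts appended as the column's type widened. A self-contained sketch of that resolution, with int standing in for Field and the least-common-type conversion noted in a comment:

    #include <cstddef>
    #include <optional>
    #include <stdexcept>
    #include <vector>

    // Row n lives either in the prefix of implicit defaults or in one of the
    // typed parts; walk the parts, subtracting their sizes, until it is found.
    std::optional<int> get_row(const std::vector<std::vector<int>>& parts,
                               size_t defaults_in_prefix, size_t n) {
        if (n < defaults_in_prefix) {
            return std::nullopt; // default value (Null or the type's default)
        }
        size_t ind = n - defaults_in_prefix;
        for (const auto& part : parts) {
            if (ind < part.size()) {
                return part[ind]; // the real code also converts to the least common type
            }
            ind -= part.size();
        }
        throw std::out_of_range("index for getting field is out of range");
    }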
@@ -1380,7 +1415,10 @@ void ColumnObject::strip_outer_array() {
 
 ColumnPtr ColumnObject::filter(const Filter& filter, ssize_t count) const {
     if (!is_finalized()) {
-        const_cast<ColumnObject*>(this)->finalize();
+        auto finalized = clone_finalized();
+        auto& finalized_object = assert_cast<ColumnObject&>(*finalized);
+        return finalized_object.apply_for_subcolumns(
+                [&](const auto& subcolumn) { return subcolumn.filter(filter, count); });
     }
     auto new_column = ColumnObject::create(true, false);
     for (auto& entry : subcolumns) {
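
This hunk also swaps the old const_cast-based in-place finalize for a const-correct clone-and-delegate. A standalone mock of the pattern:

    #include <memory>

    // Clone a finalized copy and delegate to it, leaving the original
    // (possibly non-finalized) column untouched from a const method.
    struct ColumnMock {
        bool finalized = false;
        std::shared_ptr<ColumnMock> clone_finalized() const {
            auto copy = std::make_shared<ColumnMock>(*this);
            copy->finalized = true; // pending parts are merged in the copy only
            return copy;
        }
        int filter_impl() const { return 42; } // placeholder for real filtering
        int filter() const {
            if (!finalized) {
                return clone_finalized()->filter(); // delegate; *this stays const
            }
            return filter_impl();
        }
    };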
@@ -1545,22 +1583,13 @@ void ColumnObject::insert_indices_from(const IColumn& src, const uint32_t* indic
     }
 }
 
-void ColumnObject::update_hash_with_value(size_t n, SipHash& hash) const {
+void ColumnObject::for_each_imutable_subcolumn(ImutableColumnCallback callback) const {
     if (!is_finalized()) {
-        // finalize has no side effect and can be safely used in const functions
-        const_cast<ColumnObject*>(this)->finalize();
+        auto finalized = clone_finalized();
+        auto& finalized_object = assert_cast<ColumnObject&>(*finalized);
+        finalized_object.for_each_imutable_subcolumn(callback);
+        return;
     }
-    for_each_imutable_subcolumn([&](const auto& subcolumn) {
-        if (n >= subcolumn.size()) {
-            LOG(FATAL) << n << " greater than column size " << subcolumn.size()
-                       << " sub_column_info:" << subcolumn.dump_structure()
-                       << " total lines of this column " << num_rows;
-        }
-        return subcolumn.update_hash_with_value(n, hash);
-    });
-}
-
-void ColumnObject::for_each_imutable_subcolumn(ImutableColumnCallback callback) const {
     for (const auto& entry : subcolumns) {
         for (auto& part : entry->data.data) {
             callback(*part);
@@ -1568,6 +1597,40 @@ void ColumnObject::for_each_imutable_subcolumn(ImutableColumnCallback callback)
     }
 }
 
+void ColumnObject::update_hash_with_value(size_t n, SipHash& hash) const {
+    for_each_imutable_subcolumn(
+            [&](const auto& subcolumn) { return subcolumn.update_hash_with_value(n, hash); });
+}
+
+void ColumnObject::update_hashes_with_value(uint64_t* __restrict hashes,
+                                            const uint8_t* __restrict null_data) const {
+    for_each_imutable_subcolumn([&](const auto& subcolumn) {
+        return subcolumn.update_hashes_with_value(hashes, nullptr);
+    });
+}
+
+void ColumnObject::update_xxHash_with_value(size_t start, size_t end, uint64_t& hash,
+                                            const uint8_t* __restrict null_data) const {
+    for_each_imutable_subcolumn([&](const auto& subcolumn) {
+        return subcolumn.update_xxHash_with_value(start, end, hash, nullptr);
+    });
+}
+
+void ColumnObject::update_crcs_with_value(uint32_t* __restrict hash, PrimitiveType type,
+                                          uint32_t rows, uint32_t offset,
+                                          const uint8_t* __restrict null_data) const {
+    for_each_imutable_subcolumn([&](const auto& subcolumn) {
+        return subcolumn.update_crcs_with_value(hash, type, rows, offset, nullptr);
+    });
+}
+
+void ColumnObject::update_crc_with_value(size_t start, size_t end, uint32_t& hash,
+                                         const uint8_t* __restrict null_data) const {
+    for_each_imutable_subcolumn([&](const auto& subcolumn) {
+        return subcolumn.update_crc_with_value(start, end, hash, nullptr);
+    });
+}
+
 std::string ColumnObject::debug_string() const {
     std::stringstream res;
     res << get_family_name() << "(num_row = " << num_rows;
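
All five hash/CRC overrides reduce to the same dispatch: traverse every immutable part of every sub-column and forward the call (note they deliberately forward nullptr as null_data to the sub-columns). A standalone sketch of that shape:

    #include <cstdint>
    #include <functional>
    #include <vector>

    // One traversal primitive, many thin forwarding wrappers.
    struct PartMock {
        uint32_t seed = 1;
        void update_crc(uint32_t& crc) const { crc = crc * 31 + seed; }
    };

    struct ObjectMock {
        std::vector<std::vector<PartMock>> subcolumns; // parts per sub-column

        void for_each_immutable_part(const std::function<void(const PartMock&)>& cb) const {
            for (const auto& sub : subcolumns) {
                for (const auto& part : sub) {
                    cb(part);
                }
            }
        }

        void update_crc_with_value(uint32_t& crc) const {
            for_each_immutable_part([&](const PartMock& p) { p.update_crc(crc); });
        }
    };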
@@ -1600,8 +1663,4 @@ Status ColumnObject::sanitize() const {
     return Status::OK();
 }
 
-void ColumnObject::replace_column_data(const IColumn& col, size_t row, size_t self_row) {
-    LOG(FATAL) << "Method replace_column_data is not supported for " << get_name();
-}
-
 } // namespace doris::vectorized
diff --git a/be/src/vec/columns/column_object.h b/be/src/vec/columns/column_object.h
index f19d51796a8..441589bdfbb 100644
--- a/be/src/vec/columns/column_object.h
+++ b/be/src/vec/columns/column_object.h
@@ -123,6 +123,8 @@ public:
 
         size_t get_dimensions() const { return least_common_type.get_dimensions(); }
 
+        void get(size_t n, Field& res) const;
+
         /// Checks the consistency of column's parts stored in @data.
         void checkTypes() const;
 
@@ -295,16 +297,6 @@ public:
     // return null if not found
     const Subcolumn* get_subcolumn(const PathInData& key, size_t index_hint) const;
 
-    /** More efficient methods of manipulation */
-    [[noreturn]] IColumn& get_data() {
-        LOG(FATAL) << "Not implemented method get_data()";
-        __builtin_unreachable();
-    }
-    [[noreturn]] const IColumn& get_data() const {
-        LOG(FATAL) << "Not implemented method get_data()";
-        __builtin_unreachable();
-    }
-
     // return null if not found
     Subcolumn* get_subcolumn(const PathInData& key);
 
@@ -429,35 +421,11 @@ public:
 
     void get(size_t n, Field& res) const override;
 
-    /// All other methods throw exception.
-    StringRef get_data_at(size_t) const override {
-        LOG(FATAL) << "should not call the method in column object";
-        return StringRef();
-    }
-
     Status try_insert_indices_from(const IColumn& src, const int* indices_begin,
                                    const int* indices_end);
 
-    StringRef serialize_value_into_arena(size_t n, Arena& arena,
-                                         char const*& begin) const override {
-        LOG(FATAL) << "should not call the method in column object";
-        return StringRef();
-    }
-
-    void for_each_imutable_subcolumn(ImutableColumnCallback callback) const;
-
-    const char* deserialize_and_insert_from_arena(const char* pos) override {
-        LOG(FATAL) << "should not call the method in column object";
-        return nullptr;
-    }
-
     void update_hash_with_value(size_t n, SipHash& hash) const override;
 
-    void insert_data(const char* pos, size_t length) override {
-        LOG(FATAL) << "should not call the method in column object";
-        __builtin_unreachable();
-    }
-
     ColumnPtr filter(const Filter&, ssize_t) const override;
 
     Status filter_by_selector(const uint16_t* sel, size_t sel_size, IColumn* col_ptr) override;
@@ -468,11 +436,11 @@ public:
 
     bool is_variable_length() const override { return true; }
 
-    void replace_column_data(const IColumn&, size_t row, size_t self_row) override;
-
     template <typename Func>
     MutableColumnPtr apply_for_subcolumns(Func&& func) const;
 
+    void for_each_imutable_subcolumn(ImutableColumnCallback callback) const;
+
     // Extract path from root column and replace root with new extracted column,
     // root must be jsonb type
     Status extract_root(const PathInData& path);
@@ -488,6 +456,150 @@ public:
     Status sanitize() const;
 
     std::string debug_string() const;
+
+    void update_hashes_with_value(uint64_t* __restrict hashes,
+                                  const uint8_t* __restrict null_data = nullptr) const override;
+
+    void update_xxHash_with_value(size_t start, size_t end, uint64_t& hash,
+                                  const uint8_t* __restrict null_data) const override;
+
+    void update_crcs_with_value(uint32_t* __restrict hash, PrimitiveType type, uint32_t rows,
+                                uint32_t offset = 0,
+                                const uint8_t* __restrict null_data = nullptr) const override;
+
+    void update_crc_with_value(size_t start, size_t end, uint32_t& hash,
+                               const uint8_t* __restrict null_data) const override;
+
+    // Not implemented
+    MutableColumnPtr get_shrinked_column() override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "get_shrinked_column" + std::string(get_family_name()));
+    }
+
+    Int64 get_int(size_t /*n*/) const override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "get_int" + std::string(get_family_name()));
+    }
+
+    bool get_bool(size_t /*n*/) const override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "get_bool" + std::string(get_family_name()));
+    }
+
+    void insert_many_fix_len_data(const char* pos, size_t num) override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "insert_many_fix_len_data" + std::string(get_family_name()));
+    }
+
+    void insert_many_dict_data(const int32_t* data_array, size_t start_index, const StringRef* dict,
+                               size_t data_num, uint32_t dict_num = 0) override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "insert_many_dict_data" + std::string(get_family_name()));
+    }
+
+    void insert_many_binary_data(char* data_array, uint32_t* len_array,
+                                 uint32_t* start_offset_array, size_t num) override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "insert_many_binary_data" + std::string(get_family_name()));
+    }
+
+    void insert_many_continuous_binary_data(const char* data, const uint32_t* offsets,
+                                            const size_t num) override {
+        throw doris::Exception(
+                ErrorCode::NOT_IMPLEMENTED_ERROR,
+                "insert_many_continuous_binary_data" + std::string(get_family_name()));
+    }
+
+    void insert_many_strings(const StringRef* strings, size_t num) override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "insert_many_strings" + std::string(get_family_name()));
+    }
+
+    void insert_many_strings_overflow(const StringRef* strings, size_t num,
+                                      size_t max_length) override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "insert_many_strings_overflow" + std::string(get_family_name()));
+    }
+
+    void insert_many_raw_data(const char* pos, size_t num) override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "insert_many_raw_data" + std::string(get_family_name()));
+    }
+
+    size_t get_max_row_byte_size() const override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "get_max_row_byte_size" + std::string(get_family_name()));
+    }
+
+    void serialize_vec(std::vector<StringRef>& keys, size_t num_rows,
+                       size_t max_row_byte_size) const override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "serialize_vec" + std::string(get_family_name()));
+    }
+
+    void serialize_vec_with_null_map(std::vector<StringRef>& keys, size_t num_rows,
+                                     const uint8_t* null_map) const override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "serialize_vec_with_null_map" + std::string(get_family_name()));
+    }
+
+    void deserialize_vec(std::vector<StringRef>& keys, const size_t num_rows) override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "deserialize_vec" + std::string(get_family_name()));
+    }
+
+    void deserialize_vec_with_null_map(std::vector<StringRef>& keys, const size_t num_rows,
+                                       const uint8_t* null_map) override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "deserialize_vec_with_null_map" + std::string(get_family_name()));
+    }
+
+    Status filter_by_selector(const uint16_t* sel, size_t sel_size, IColumn* col_ptr) const {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "filter_by_selector" + std::string(get_family_name()));
+    }
+
+    bool structure_equals(const IColumn&) const override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "structure_equals" + std::string(get_family_name()));
+    }
+
+    StringRef get_raw_data() const override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "get_raw_data" + std::string(get_family_name()));
+    }
+
+    size_t size_of_value_if_fixed() const override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "size_of_value_if_fixed" + std::string(get_family_name()));
+    }
+
+    StringRef get_data_at(size_t) const override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "get_data_at" + std::string(get_family_name()));
+    }
+
+    StringRef serialize_value_into_arena(size_t n, Arena& arena,
+                                         char const*& begin) const override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "serialize_value_into_arena" + std::string(get_family_name()));
+    }
+
+    const char* deserialize_and_insert_from_arena(const char* pos) override {
+        throw doris::Exception(
+                ErrorCode::NOT_IMPLEMENTED_ERROR,
+                "deserialize_and_insert_from_arena" + std::string(get_family_name()));
+    }
+
+    void insert_data(const char* pos, size_t length) override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "insert_data" + std::string(get_family_name()));
+    }
+
+    void replace_column_data(const IColumn&, size_t row, size_t self_row) override {
+        throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR,
+                               "replace_column_data" + std::string(get_family_name()));
+    }
 };
 
 } // namespace doris::vectorized
diff --git a/be/src/vec/common/schema_util.cpp b/be/src/vec/common/schema_util.cpp
index 4a048973c96..b85efe17148 100644
--- a/be/src/vec/common/schema_util.cpp
+++ b/be/src/vec/common/schema_util.cpp
@@ -475,7 +475,7 @@ Status _parse_variant_columns(Block& block, const std::vector<int>& variant_pos,
             // already parsed
             continue;
         }
-        ColumnPtr raw_json_column;
+        ColumnPtr scalar_root_column;
         if (WhichDataType(remove_nullable(var.get_root_type())).is_json()) {
             // TODO more efficient way to parse jsonb type, currently we just convert jsonb to
             // json str and parse them into variant
@@ -483,22 +483,31 @@ Status _parse_variant_columns(Block& block, const std::vector<int>& variant_pos,
                                         var.get_root()->is_nullable()
                                                 ? make_nullable(std::make_shared<DataTypeString>())
                                                 : std::make_shared<DataTypeString>(),
-                                        &raw_json_column));
-            if (raw_json_column->is_nullable()) {
-                raw_json_column = assert_cast<const ColumnNullable*>(raw_json_column.get())
-                                          ->get_nested_column_ptr();
+                                        &scalar_root_column));
+            if (scalar_root_column->is_nullable()) {
+                scalar_root_column = assert_cast<const ColumnNullable*>(scalar_root_column.get())
+                                             ->get_nested_column_ptr();
             }
         } else {
             const auto& root = *var.get_root();
-            raw_json_column =
+            scalar_root_column =
                     root.is_nullable()
                             ? assert_cast<const ColumnNullable&>(root).get_nested_column_ptr()
                             : var.get_root();
         }
 
-        variant_column = ColumnObject::create(true);
-        parse_json_to_variant(*variant_column.get(),
-                              assert_cast<const ColumnString&>(*raw_json_column));
+        if (scalar_root_column->is_column_string()) {
+            variant_column = ColumnObject::create(true);
+            parse_json_to_variant(*variant_column.get(),
+                                  assert_cast<const ColumnString&>(*scalar_root_column));
+        } else {
+            // The root may be a type other than string, e.g. ColumnObject(Int32).
+            // In this case, we should finalize the root and cast it to the JSON type.
+            auto expected_root_type =
+                    make_nullable(std::make_shared<ColumnObject::MostCommonType>());
+            const_cast<ColumnObject&>(var).ensure_root_node_type(expected_root_type);
+            variant_column = var.assume_mutable();
+        }
 
         // Wrap variant with nullmap if it is nullable
         ColumnPtr result = variant_column->get_ptr();
diff --git a/be/src/vec/common/schema_util.h b/be/src/vec/common/schema_util.h
index 3a62c2bd6fe..b012c7ccfc0 100644
--- a/be/src/vec/common/schema_util.h
+++ b/be/src/vec/common/schema_util.h
@@ -83,6 +83,7 @@ struct ParseContext {
     // record an extract json column, used for encoding row store
     bool record_raw_json_column = false;
 };
+
 // three steps to parse and encode variant columns into flattened columns
 // 1. parse variant from raw json string
 // 2. finalize variant column to each subcolumn's least common type, ignoring sparse sub columns by default
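
A hypothetical driver for the three steps above (placeholder names; the real routines live in vectorized::schema_util):

    // 1. parse raw JSON strings into variant columns
    // 2. finalize each variant sub-column to its least common type
    // 3. encode the finalized sub-columns as flattened columns
    Status parse_and_encode_variants(Block& block, const std::vector<int>& variant_pos) {
        RETURN_IF_ERROR(parse_variant_columns(block, variant_pos));
        RETURN_IF_ERROR(finalize_variant_columns(block, variant_pos));
        return encode_variant_columns(block, variant_pos);
    }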
diff --git a/be/src/vec/data_types/serde/data_type_object_serde.cpp b/be/src/vec/data_types/serde/data_type_object_serde.cpp
index b72c295d8d0..c228f074d2e 100644
--- a/be/src/vec/data_types/serde/data_type_object_serde.cpp
+++ b/be/src/vec/data_types/serde/data_type_object_serde.cpp
@@ -111,6 +111,16 @@ void DataTypeObjectSerDe::read_one_cell_from_jsonb(IColumn& column, const JsonbV
     variant.insert(field);
 }
 
+Status DataTypeObjectSerDe::serialize_one_cell_to_json(const IColumn& column, int row_num,
+                                                       BufferWritable& bw,
+                                                       FormatOptions& options) const {
+    const auto* var = check_and_get_column<ColumnObject>(column);
+    if (!var->serialize_one_row_to_string(row_num, bw)) {
+        return Status::InternalError("Failed to serialize variant {}", var->dump_structure());
+    }
+    return Status::OK();
+}
+
 void DataTypeObjectSerDe::write_column_to_arrow(const IColumn& column, const NullMap* null_map,
                                                 arrow::ArrayBuilder* array_builder, int start,
                                                 int end, const cctz::time_zone& ctz) const {
diff --git a/be/src/vec/data_types/serde/data_type_object_serde.h b/be/src/vec/data_types/serde/data_type_object_serde.h
index 7bf8da438c5..89ac6d5cff3 100644
--- a/be/src/vec/data_types/serde/data_type_object_serde.h
+++ b/be/src/vec/data_types/serde/data_type_object_serde.h
@@ -39,9 +39,7 @@ public:
     DataTypeObjectSerDe(int nesting_level = 1) : DataTypeSerDe(nesting_level) {};
 
     Status serialize_one_cell_to_json(const IColumn& column, int row_num, BufferWritable& bw,
-                                      FormatOptions& options) const override {
-        return Status::NotSupported("serialize_one_cell_to_json with type [{}]", column.get_name());
-    }
+                                      FormatOptions& options) const override;
 
     Status serialize_column_to_json(const IColumn& column, int start_idx, int end_idx,
                                     BufferWritable& bw, FormatOptions& options) const override {
diff --git a/be/src/vec/json/parse2column.cpp b/be/src/vec/json/parse2column.cpp
index a154ad14333..a62405a7116 100644
--- a/be/src/vec/json/parse2column.cpp
+++ b/be/src/vec/json/parse2column.cpp
@@ -143,8 +143,14 @@ void parse_json_to_variant(IColumn& column, const char* src, size_t length,
     }
     if (!result) {
         VLOG_DEBUG << "failed to parse " << std::string_view(src, length) << 
", length= " << length;
-        throw doris::Exception(ErrorCode::INVALID_ARGUMENT, "Failed to parse 
object {}",
-                               std::string_view(src, length));
+        if (config::variant_throw_exeception_on_invalid_json) {
+            throw doris::Exception(ErrorCode::INVALID_ARGUMENT, "Failed to 
parse object {}",
+                                   std::string_view(src, length));
+        }
+        // Treat as string
+        PathInData root_path;
+        Field field(src, length);
+        result = ParseResult {{root_path}, {field}};
     }
     auto& [paths, values] = *result;
     assert(paths.size() == values.size());
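
With variant_throw_exeception_on_invalid_json left at its default of false, a malformed document now degrades to a single string value at the variant root instead of failing the whole load. A standalone sketch of this try-parse-else-string policy:

    #include <stdexcept>
    #include <string>
    #include <string_view>

    // Toy stand-in for the real JSON parser: accept only objects/arrays.
    bool looks_like_json(std::string_view s) {
        return !s.empty() && (s.front() == '{' || s.front() == '[');
    }

    // Strict mode throws on a bad document; lenient mode (the new default)
    // keeps the raw text verbatim as one string value at the root.
    std::string parse_or_degrade(std::string_view src, bool strict) {
        if (looks_like_json(src)) {
            return "<parsed json>"; // placeholder for the structured result
        }
        if (strict) {
            throw std::invalid_argument("Failed to parse object " + std::string(src));
        }
        return std::string(src); // treat the whole document as a string root
    }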
diff --git a/cloud/src/meta-service/meta_service.cpp b/cloud/src/meta-service/meta_service.cpp
index 189a687b26a..9edeb0cb4f9 100644
--- a/cloud/src/meta-service/meta_service.cpp
+++ b/cloud/src/meta-service/meta_service.cpp
@@ -1216,7 +1216,10 @@ void MetaServiceImpl::update_tmp_rowset(::google::protobuf::RpcController* contr
         msg = fmt::format("failed to check whether rowset exists, err={}", err);
         return;
     }
-
+    if (rowset_meta.has_variant_type_in_schema()) {
+        write_schema_dict(code, msg, instance_id, txn.get(), &rowset_meta);
+        if (code != MetaServiceCode::OK) return;
+    }
     DCHECK_GT(rowset_meta.txn_expiration(), 0);
     if (!rowset_meta.SerializeToString(&update_val)) {
         code = MetaServiceCode::PROTOBUF_SERIALIZE_ERR;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
index 471cbdf0ec6..81666227986 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -101,6 +101,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -2761,6 +2762,55 @@ public class OlapTable extends Table implements MTMVRelatedTableIf, GsonPostProc
         return tablets;
     }
 
+    // Get sample tablets for remote desc schema
+    // 1. Estimate the number of tablets per partition, at least 1
+    // 2. Pick partitions sorted by id in descending order; a greater id means a newer partition
+    // 3. Truncate to sampleSize
+    public List<Tablet> getSampleTablets(int sampleSize) {
+        List<Tablet> sampleTablets = new ArrayList<>();
+        // Filter out partitions with no data
+        Collection<Partition> partitions = getPartitions()
+                .stream()
+                .filter(partition -> partition.getVisibleVersion() > Partition.PARTITION_INIT_VERSION)
+                .collect(Collectors.toList());
+        if (partitions.isEmpty()) {
+            return sampleTablets;
+        }
+        // 1. Estimate the number of tablets per partition, at least 1
+        int estimatePartitionTablets = Math.max(sampleSize / partitions.size(), 1);
+
+        // 2. Sort the partitions by id in descending order (a greater id means a newer partition)
+        List<Partition> sortedPartitions = partitions.stream().sorted(new Comparator<Partition>() {
+            @Override
+            public int compare(Partition p1, Partition p2) {
+                // compare in descending order
+                return Long.compare(p2.getId(), p1.getId());
+            }
+        }).collect(Collectors.toList());
+
+        // 3. Collect tablets from partitions
+        for (Partition partition : sortedPartitions) {
+            List<Tablet> targetTablets = new ArrayList<>(partition.getBaseIndex().getTablets());
+            Collections.shuffle(targetTablets);
+            if (!targetTablets.isEmpty()) {
+                // Ensure we do not exceed the available number of tablets
+                int tabletsToFetch = Math.min(targetTablets.size(), estimatePartitionTablets);
+                sampleTablets.addAll(targetTablets.subList(0, tabletsToFetch));
+            }
+
+            if (sampleTablets.size() >= sampleSize) {
+                break;
+            }
+        }
+
+        // 4. Truncate to sample size if needed
+        if (sampleTablets.size() > sampleSize) {
+            sampleTablets = sampleTablets.subList(0, sampleSize);
+        }
+
+        return sampleTablets;
+    }
+
     // During `getNextVersion` and `updateVisibleVersionAndTime` period,
     // the write lock on the table should be held continuously
     public void updateVisibleVersionAndTime(long visibleVersion, long visibleVersionTime) {
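
The same proportional-sampling idea, condensed: take budget / partitions tablets from each partition, newest partitions first, then truncate to the budget. A self-contained C++ sketch with hypothetical Partition/Tablet stand-ins:

    #include <algorithm>
    #include <random>
    #include <vector>

    struct TabletS { long id; };
    struct PartitionS { long id; std::vector<TabletS> tablets; };

    std::vector<TabletS> sample_tablets(std::vector<PartitionS> partitions, size_t sample_size) {
        std::vector<TabletS> out;
        if (partitions.empty() || sample_size == 0) return out;
        // At least one tablet per partition; newest (largest id) partitions first.
        size_t per_partition = std::max<size_t>(sample_size / partitions.size(), 1);
        std::sort(partitions.begin(), partitions.end(),
                  [](const PartitionS& a, const PartitionS& b) { return a.id > b.id; });
        std::mt19937 rng{std::random_device{}()};
        for (auto& p : partitions) {
            std::shuffle(p.tablets.begin(), p.tablets.end(), rng);
            size_t take = std::min(per_partition, p.tablets.size());
            out.insert(out.end(), p.tablets.begin(), p.tablets.begin() + take);
            if (out.size() >= sample_size) break;
        }
        if (out.size() > sample_size) out.resize(sample_size);
        return out;
    }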
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/RemoteIndexSchemaProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/RemoteIndexSchemaProcDir.java
index 195e66b86e0..f2531b7ec15 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/RemoteIndexSchemaProcDir.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/RemoteIndexSchemaProcDir.java
@@ -24,6 +24,7 @@ import org.apache.doris.catalog.TableIf;
 import org.apache.doris.catalog.Tablet;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.util.FetchRemoteTabletSchemaUtil;
+import org.apache.doris.qe.ConnectContext;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
@@ -62,7 +63,8 @@ public class RemoteIndexSchemaProcDir implements ProcDirInterface {
         table.readLock();
         try {
             OlapTable olapTable = (OlapTable) table;
-            tablets = olapTable.getAllTablets();
+            // Get sample tablets for remote desc schema
+            tablets = olapTable.getSampleTablets(ConnectContext.get().getSessionVariable().maxFetchRemoteTabletCount);
         } finally {
             table.readUnlock();
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/RemoteIndexSchemaProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/RemoteIndexSchemaProcNode.java
index 8176b09bbf7..cdb1bbc133e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/RemoteIndexSchemaProcNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/RemoteIndexSchemaProcNode.java
@@ -23,11 +23,13 @@ import org.apache.doris.catalog.Partition;
 import org.apache.doris.catalog.Tablet;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.util.FetchRemoteTabletSchemaUtil;
+import org.apache.doris.qe.ConnectContext;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
+import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
@@ -62,6 +64,13 @@ public class RemoteIndexSchemaProcNode implements ProcNodeInterface {
                 tablets.add(tablet);
             }
         }
+        // Get the maximum number of remote tablets that can be fetched
+        int maxFetchCount = ConnectContext.get().getSessionVariable().maxFetchRemoteTabletCount;
+        // If the number of tablets is greater than the maximum fetch count, randomly select maxFetchCount tablets
+        if (tablets.size() > maxFetchCount) {
+            Collections.shuffle(tablets);
+            tablets = tablets.subList(0, maxFetchCount);
+        }
         List<Column> remoteSchema = new FetchRemoteTabletSchemaUtil(tablets).fetch();
         this.schema.addAll(remoteSchema);
         return IndexSchemaProcNode.createResult(this.schema, this.bfColumns);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/FetchRemoteTabletSchemaUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/FetchRemoteTabletSchemaUtil.java
index db9700f7448..0e96dc8c593 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/FetchRemoteTabletSchemaUtil.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/FetchRemoteTabletSchemaUtil.java
@@ -92,6 +92,8 @@ public class FetchRemoteTabletSchemaUtil {
             Long backendId = entry.getKey();
             Set<Long> tabletIds = entry.getValue();
             Backend backend = Env.getCurrentEnv().getCurrentSystemInfo().getBackend(backendId);
+            LOG.debug("fetch schema from coord backend {}, sample tablets count {}",
+                            backend.getId(), tabletIds.size());
             // only need alive be
             if (!backend.isAlive()) {
                 continue;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
index 7aa78c2bbb2..e9189a23635 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
@@ -617,6 +617,8 @@ public class SessionVariable implements Serializable, Writable {
 
     public static final String FETCH_REMOTE_SCHEMA_TIMEOUT_SECONDS = "fetch_remote_schema_timeout_seconds";
 
+    public static final String MAX_FETCH_REMOTE_TABLET_COUNT = "max_fetch_remote_schema_tablet_count";
+
     // CLOUD_VARIABLES_BEGIN
     public static final String CLOUD_CLUSTER = "cloud_cluster";
     public static final String DISABLE_EMPTY_PARTITION_PRUNE = "disable_empty_partition_prune";
@@ -1920,6 +1922,9 @@ public class SessionVariable implements Serializable, Writable {
     // fetch remote schema rpc timeout
     @VariableMgr.VarAttr(name = FETCH_REMOTE_SCHEMA_TIMEOUT_SECONDS, fuzzy = true)
     public long fetchRemoteSchemaTimeoutSeconds = 120;
+    // max tablet count for fetch remote schema
+    @VariableMgr.VarAttr(name = MAX_FETCH_REMOTE_TABLET_COUNT, fuzzy = true)
+    public int maxFetchRemoteTabletCount = 512;
 
     @VariableMgr.VarAttr(
             name = ENABLE_JOIN_SPILL,
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShortCircuitQueryContext.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShortCircuitQueryContext.java
index 727eee11752..2840dd65239 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShortCircuitQueryContext.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShortCircuitQueryContext.java
@@ -36,6 +36,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
+import java.util.stream.Collectors;
 
 public class ShortCircuitQueryContext {
     // Cached for better CPU performance, since serialize DescriptorTable and
@@ -66,8 +67,15 @@ public class ShortCircuitQueryContext {
         this.serializedQueryOptions = ByteString.copyFrom(
                 new TSerializer().serialize(options));
         List<TExpr> exprs = new ArrayList<>();
-        for (Expr expr : planner.getFragments().get(1).getPlanRoot().getProjectList()) {
-            exprs.add(expr.treeToThrift());
+        OlapScanNode olapScanNode = (OlapScanNode) planner.getFragments().get(1).getPlanRoot();
+        if (olapScanNode.getProjectList() != null) {
+            // project on scan node
+            exprs.addAll(olapScanNode.getProjectList().stream()
+                    .map(Expr::treeToThrift).collect(Collectors.toList()));
+        } else {
+            // add output slots
+            exprs.addAll(planner.getFragments().get(0).getOutputExprs().stream()
+                    .map(Expr::treeToThrift).collect(Collectors.toList()));
         }
         TExprList exprList = new TExprList(exprs);
         serializedOutputExpr = ByteString.copyFrom(
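
The fix covers short-circuit plans whose scan node carries no projection list: the serialized output expressions then come from the root fragment's output exprs. A minimal sketch of the selection rule, with a placeholder expression type:

    #include <optional>
    #include <string>
    #include <vector>

    using ExprSketch = std::string; // placeholder for a thrift-serializable expr

    // Prefer the scan node's projections; otherwise fall back to the root
    // fragment's output expressions.
    std::vector<ExprSketch> collect_output_exprs(
            const std::optional<std::vector<ExprSketch>>& scan_project_list,
            const std::vector<ExprSketch>& fragment_output_exprs) {
        return scan_project_list.has_value() ? *scan_project_list : fragment_output_exprs;
    }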
diff --git a/regression-test/data/export_p0/test_outfile_csv_variant_type.out b/regression-test/data/export_p0/test_outfile_csv_variant_type.out
new file mode 100644
index 00000000000..8016918c79b
--- /dev/null
+++ b/regression-test/data/export_p0/test_outfile_csv_variant_type.out
@@ -0,0 +1,41 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !select_base1 --
+1      doris1  [9,99,999]
+2      doris2  [8,88]
+3      doris3  {"a":123}
+4      doris4  \N
+5      doris5  [1,null,2]
+6      doris6  {"aaaa":"111111"}
+7      doris7  {"bbbb":1.1111}
+8      doris8  {"xxx":[111.11]}
+
+-- !select_load1 --
+1      doris1  [9,99,999]
+2      doris2  [8,88]
+3      doris3  {"a":123}
+4      doris4  \N
+5      doris5  [1,null,2]
+6      doris6  {"aaaa":"111111"}
+7      doris7  {"bbbb":1.1111}
+8      doris8  {"xxx":[111.11]}
+
+-- !select_base2 --
+1      doris1  [9,99,999]
+2      doris2  [8,88]
+3      doris3  {"a":123}
+4      doris4  {}
+5      doris5  [1,null,2]
+6      doris6  {"aaaa":"111111"}
+7      doris7  {"bbbb":1.1111}
+8      doris8  {"xxx":[111.11]}
+
+-- !select_load2 --
+1      doris1  [9,99,999]
+2      doris2  [8,88]
+3      doris3  {"a":123}
+4      doris4  {}
+5      doris5  [1,null,2]
+6      doris6  {"aaaa":"111111"}
+7      doris7  {"bbbb":1.1111}
+8      doris8  {"xxx":[111.11]}
+
diff --git a/regression-test/data/point_query_p0/test_point_query.out b/regression-test/data/point_query_p0/test_point_query.out
index ff4b1932b3a..5a4e0b66178 100644
--- a/regression-test/data/point_query_p0/test_point_query.out
+++ b/regression-test/data/point_query_p0/test_point_query.out
@@ -1,27 +1,27 @@
 -- This file is automatically generated. You should know what you did if you want to edit this
 -- !point_select --
-1231   119291.110000000        ddd     laooq   \N      2020-01-01 12:36:38     
\N      1022-01-01      \N      1.111   \N      [119181.111100000, 
819019.119100000, null]
+1231   119291.110000000        ddd     laooq   \N      2020-01-01T12:36:38     
\N      1022-01-01      \N      1.111   \N      [119181.111100000, 
819019.119100000, null]
 
 -- !point_select --
-1231   119291.110000000        ddd     laooq   \N      2020-01-01 12:36:38     
\N      1022-01-01      \N      1.111   \N      [119181.111100000, 
819019.119100000, null]
+1231   119291.110000000        ddd     laooq   \N      2020-01-01T12:36:38     
\N      1022-01-01      \N      1.111   \N      [119181.111100000, 
819019.119100000, null]
 
 -- !point_select --
-1237   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   90696620686827832.374   
[1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000]       []
+1237   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   90696620686827832.374   
[1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000]       []
 
 -- !point_select --
-1232   12222.991211350 xxx     laooq   2023-01-02      2020-01-01 12:36:38     
522.762 2022-01-01      true    212.111 \N      \N
+1232   12222.991211350 xxx     laooq   2023-01-02      2020-01-01T12:36:38     
522.762 2022-01-01      true    212.111 \N      \N
 
 -- !point_select --
-251    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
     laooq   2030-01-02      2020-01-01 12:36:38     251.0   7022-01-01      
true    90696620686827832.374   [11111.000000000]       []
+251    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
     laooq   2030-01-02      2020-01-01T12:36:38     251.0   7022-01-01      
true    90696620686827832.374   [11111.000000000]       []
 
 -- !point_select --
-252    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
    laooq   2030-01-02      2020-01-01 12:36:38     252.0   7022-01-01      
false   90696620686827832.374   \N      [0.000000000]
+252    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
    laooq   2030-01-02      2020-01-01T12:36:38     252.0   7022-01-01      
false   90696620686827832.374   \N      [0.000000000]
 
 -- !point_select --
-298    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
      laooq   2030-01-02      2020-01-01 12:36:38     298.0   7022-01-01      
true    90696620686827832.374   []      []
+298    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
      laooq   2030-01-02      2020-01-01T12:36:38     298.0   7022-01-01      
true    90696620686827832.374   []      []
 
 -- !point_select --
-1235   991129292901.111380000  dd      \N      2120-01-02      2020-01-01 
12:36:38     652.692 5022-01-01      false   90696620686827832.374   
[119181.111100000]      
["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"]
+1235   991129292901.111380000  dd      \N      2120-01-02      
2020-01-01T12:36:38     652.692 5022-01-01      false   90696620686827832.374   
[119181.111100000]      
["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"]
 
 -- !point_select --
 646464 6C616F6F71
@@ -33,34 +33,34 @@
 646464 6C616F6F71
 
 -- !point_select --
-1235   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      true    1.111   
[119291.192910000]      ["111", "222", "333"]   1
+1235   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      true    1.111   
[119291.192910000]      ["111", "222", "333"]   1
 
 -- !point_select --
-1235   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      true    1.111   
[119291.192910000]      ["111", "222", "333"]   1
+1235   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      true    1.111   
[119291.192910000]      ["111", "222", "333"]   1
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
 
 -- !sql --
 1231   119291.110000000        ddd     laooq   \N      2020-01-01T12:36:38     
\N      1022-01-01      \N      1.111   [119181.111100000, 819019.119100000, 
null]      \N      0       0
@@ -75,28 +75,28 @@
 0      1       2       3
 
 -- !point_select --
-1231   119291.110000000        ddd     laooq   \N      2020-01-01 12:36:38     
\N      1022-01-01      \N      1.111   \N      [119181.111100000, 
819019.119100000, null]
+1231   119291.110000000        ddd     laooq   \N      2020-01-01T12:36:38     
\N      1022-01-01      \N      1.111   \N      [119181.111100000, 
819019.119100000, null]
 
 -- !point_select --
-1231   119291.110000000        ddd     laooq   \N      2020-01-01 12:36:38     
\N      1022-01-01      \N      1.111   \N      [119181.111100000, 
819019.119100000, null]
+1231   119291.110000000        ddd     laooq   \N      2020-01-01T12:36:38     
\N      1022-01-01      \N      1.111   \N      [119181.111100000, 
819019.119100000, null]
 
 -- !point_select --
-1237   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   90696620686827832.374   
[1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000]       []
+1237   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   90696620686827832.374   
[1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000]       []
 
 -- !point_select --
-1232   12222.991211350 xxx     laooq   2023-01-02      2020-01-01 12:36:38     
522.762 2022-01-01      true    212.111 \N      \N
+1232   12222.991211350 xxx     laooq   2023-01-02      2020-01-01T12:36:38     
522.762 2022-01-01      true    212.111 \N      \N
 
 -- !point_select --
-251    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
     laooq   2030-01-02      2020-01-01 12:36:38     251.0   7022-01-01      
true    90696620686827832.374   [11111.000000000]       []
+251    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
     laooq   2030-01-02      2020-01-01T12:36:38     251.0   7022-01-01      
true    90696620686827832.374   [11111.000000000]       []
 
 -- !point_select --
-252    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
    laooq   2030-01-02      2020-01-01 12:36:38     252.0   7022-01-01      
false   90696620686827832.374   \N      [0.000000000]
+252    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
    laooq   2030-01-02      2020-01-01T12:36:38     252.0   7022-01-01      
false   90696620686827832.374   \N      [0.000000000]
 
 -- !point_select --
-298    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
      laooq   2030-01-02      2020-01-01 12:36:38     298.0   7022-01-01      
true    90696620686827832.374   []      []
+298    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
      laooq   2030-01-02      2020-01-01T12:36:38     298.0   7022-01-01      
true    90696620686827832.374   []      []
 
 -- !point_select --
-1235   991129292901.111380000  dd      \N      2120-01-02      2020-01-01 
12:36:38     652.692 5022-01-01      false   90696620686827832.374   
[119181.111100000]      
["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"]
+1235   991129292901.111380000  dd      \N      2120-01-02      
2020-01-01T12:36:38     652.692 5022-01-01      false   90696620686827832.374   
[119181.111100000]      
["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"]
 
 -- !point_select --
 646464 6C616F6F71
@@ -108,34 +108,34 @@
 646464 6C616F6F71
 
 -- !point_select --
-1235   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      true    1.111   
[119291.192910000]      ["111", "222", "333"]   1
+1235   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      true    1.111   
[119291.192910000]      ["111", "222", "333"]   1
 
 -- !point_select --
-1235   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      true    1.111   
[119291.192910000]      ["111", "222", "333"]   1
+1235   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      true    1.111   
[119291.192910000]      ["111", "222", "333"]   1
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
 
 -- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
+1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
 
 -- !sql --
 1231   119291.110000000        ddd     laooq   \N      2020-01-01T12:36:38     
\N      1022-01-01      \N      1.111   [119181.111100000, 819019.119100000, 
null]      \N      0       0
@@ -149,78 +149,6 @@
 -- !sql --
 0      1       2       3
 
--- !point_select --
-1231   119291.110000000        ddd     laooq   \N      2020-01-01 12:36:38     
\N      1022-01-01      \N      1.111   \N      [119181.111100000, 
819019.119100000, null]
-
--- !point_select --
-1231   119291.110000000        ddd     laooq   \N      2020-01-01 12:36:38     
\N      1022-01-01      \N      1.111   \N      [119181.111100000, 
819019.119100000, null]
-
--- !point_select --
-1237   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   90696620686827832.374   
[1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000]       []
-
--- !point_select --
-1232   12222.991211350 xxx     laooq   2023-01-02      2020-01-01 12:36:38     
522.762 2022-01-01      true    212.111 \N      \N
-
--- !point_select --
-251    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
     laooq   2030-01-02      2020-01-01 12:36:38     251.0   7022-01-01      
true    90696620686827832.374   [11111.000000000]       []
-
--- !point_select --
-252    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
    laooq   2030-01-02      2020-01-01 12:36:38     252.0   7022-01-01      
false   90696620686827832.374   \N      [0.000000000]
-
--- !point_select --
-298    120939.111300000        
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
      laooq   2030-01-02      2020-01-01 12:36:38     298.0   7022-01-01      
true    90696620686827832.374   []      []
-
--- !point_select --
-1235   991129292901.111380000  dd      \N      2120-01-02      2020-01-01 
12:36:38     652.692 5022-01-01      false   90696620686827832.374   
[119181.111100000]      
["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"]
-
--- !point_select --
-646464 6C616F6F71
-
--- !point_select --
-646464 6C616F6F71
-
--- !point_select --
-646464 6C616F6F71
-
--- !point_select --
-1235   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      true    1.111   
[119291.192910000]      ["111", "222", "333"]   1
-
--- !point_select --
-1235   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      true    1.111   
[119291.192910000]      ["111", "222", "333"]   1
-
--- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
-
--- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
-
--- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
-
--- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
-
--- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
-
--- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
-
--- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2
-
--- !point_select --
-1235   120939.111300000        a    ddd        xxxxxx  2030-01-02      
2020-01-01 12:36:38     22.822  7022-01-01      false   1929111.111     
[119291.192910000]      ["111", "222", "333"]   2       0
-
 -- !sql --
-1231   119291.110000000        ddd     laooq   \N      2020-01-01T12:36:38     
\N      1022-01-01      \N      1.111   [119181.111100000, 819019.119100000, 
null]      \N      0       0
-
--- !sql --
-1237   120939.111300000        a    ddd        laooq   2030-01-02      
2020-01-01T12:36:38     22.822  7022-01-01      false   90696620686827832.374   
[1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000]       []      
0       0
-
--- !sql --
-6120202020646464       6C616F6F71      32.92200050354004
-
--- !sql --
-0      1       2       3
+0      1111111
 
diff --git a/regression-test/data/variant_p0/desc.out 
b/regression-test/data/variant_p0/desc.out
index b46b5f9b4b0..b3ebce2b887 100644
--- a/regression-test/data/variant_p0/desc.out
+++ b/regression-test/data/variant_p0/desc.out
@@ -198,3 +198,11 @@ v.金额       SMALLINT        Yes     false   \N      NONE
 k      BIGINT  Yes     true    \N      
 v      VARIANT Yes     false   \N      NONE
 
+-- !sql15 --
+k      BIGINT  Yes     true    \N      
+v      VARIANT Yes     false   \N      NONE
+v.a    TINYINT Yes     false   \N      NONE
+v.b    TINYINT Yes     false   \N      NONE
+v.c    TINYINT Yes     false   \N      NONE
+v.d    TINYINT Yes     false   \N      NONE
+
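
The `!sql15` block above is the expected `DESC` output once subcolumns have
been materialized for the variant column `v`. A minimal sketch that produces
this shape of output (the table name and the inserted literal are
illustrative, not part of this commit):

    CREATE TABLE IF NOT EXISTS t_desc_sketch (k BIGINT, v VARIANT NULL)
        DUPLICATE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1
        PROPERTIES("replication_num" = "1");
    INSERT INTO t_desc_sketch VALUES (0, '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}');
    SET describe_extend_variant_column = true;
    DESC t_desc_sketch;  -- lists v plus inferred subcolumns v.a .. v.d (TINYINT here)
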
diff --git a/regression-test/data/variant_p0/rqg/rqg1.out 
b/regression-test/data/variant_p0/rqg/rqg1.out
new file mode 100644
index 00000000000..7d5d493b12d
--- /dev/null
+++ b/regression-test/data/variant_p0/rqg/rqg1.out
@@ -0,0 +1,215 @@
+-- This file is automatically generated. You should know what you did if you 
want to edit this
+-- !rqg1 --
+0
+
+-- !rqg1_2 --
+24
+
+-- !rqg1_3 --
+0
+
+-- !rqg1_4 --
+24
+
+-- !rqg1_5 --
+0
+
+-- !rqg1_6 --
+24
+
+-- !rqg1_7 --
+0
+
+-- !rqg1_8 --
+24
+
+-- !rqg1_9 --
+0
+
+-- !rqg1_10 --
+25
+
+-- !rqg1_11 --
+0
+
+-- !rqg1_12 --
+24
+
+-- !rqg1_13 --
+0
+
+-- !rqg1_14 --
+24
+
+-- !rqg1_15 --
+0
+
+-- !rqg1_16 --
+0
+
+-- !rqg1_17 --
+24
+
+-- !rqg1_18 --
+24
+
+-- !rqg1_19 --
+0
+
+-- !rqg1_20 --
+24
+
+-- !rqg1_21 --
+0
+
+-- !rqg1_22 --
+0
+
+-- !rqg1_23 --
+24
+
+-- !rqg1_24 --
+25
+
+-- !rqg1_25 --
+0
+
+-- !rqg1_26 --
+25
+
+-- !rqg1_27 --
+0
+
+-- !rqg1_28 --
+24
+
+-- !rqg1_29 --
+0
+
+-- !rqg1_30 --
+24
+
+-- !rqg1_31 --
+0
+
+-- !rqg1_32 --
+24
+
+-- !rqg1_33 --
+0
+
+-- !rqg1_34 --
+24
+
+-- !rqg1_35 --
+0
+
+-- !rqg1_36 --
+25
+
+-- !rqg1_37 --
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+
diff --git 
a/regression-test/suites/export_p0/test_outfile_csv_variant_type.groovy 
b/regression-test/suites/export_p0/test_outfile_csv_variant_type.groovy
new file mode 100644
index 00000000000..9ea95a287b7
--- /dev/null
+++ b/regression-test/suites/export_p0/test_outfile_csv_variant_type.groovy
@@ -0,0 +1,141 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+import java.nio.charset.StandardCharsets
+import java.nio.file.Files
+import java.nio.file.Paths
+
+suite("test_outfile_csv_variant_type", "p0") {
+    // enable the Nereids planner
+    sql """ set enable_nereids_planner=true """
+    sql """ set enable_fallback_to_original_planner=false """
+
+    String ak = getS3AK()
+    String sk = getS3SK()
+    String s3_endpoint = getS3Endpoint()
+    String region = getS3Region()
+    String bucket = context.config.otherConfigs.get("s3BucketName");
+
+
+    def export_table_name = "outfile_csv_variant_export_test"
+    def load_table_name = "outfile_csv_variant_type_load_test"
+    def outFilePath = "${bucket}/outfile/csv/variant_type/exp_"
+
+
+    def create_table = {table_name, struct_field ->
+        sql """ DROP TABLE IF EXISTS ${table_name} """
+        sql """
+        CREATE TABLE IF NOT EXISTS ${table_name} (
+            `user_id` LARGEINT NOT NULL COMMENT "user id",
+            `name` STRING COMMENT "user name",
+            ${struct_field}
+            )
+            DISTRIBUTED BY HASH(user_id) PROPERTIES("replication_num" = "1");
+        """
+    }
+
+    def outfile_to_S3 = {
+        // select ... into outfile ...
+        def res = sql """
+            SELECT * FROM ${export_table_name} t ORDER BY user_id
+            INTO OUTFILE "s3://${outFilePath}"
+            FORMAT AS CSV
+            PROPERTIES (
+                "s3.endpoint" = "${s3_endpoint}",
+                "s3.region" = "${region}",
+                "s3.secret_key"="${sk}",
+                "s3.access_key" = "${ak}"
+            );
+        """
+
+        return res[0][3]
+    }
+
+
+    // 1. test a nullable VARIANT column
+    try {
+        def struct_field_define = "`a_info` VARIANT NULL"
+        // create table to export data
+        create_table(export_table_name, struct_field_define)
+        // create table to load data
+        create_table(load_table_name, struct_field_define)
+
+
+        // insert data
+        sql """ insert into ${export_table_name} values (1, 'doris1', '[9, 99, 
999]'), (2, 'doris2', '[8, 88]'); """
+        sql """ insert into ${export_table_name} values (3, 'doris3', '{"a" : 
123}'); """
+        sql """ insert into ${export_table_name} values (4, 'doris4', null); 
"""
+        sql """ insert into ${export_table_name} values (5, 'doris5', '[1, 
null, 2]'); """
+        sql """ insert into ${export_table_name} values (6, 'doris6', '{"aaaa" 
: "111111"}'); """
+        sql """ insert into ${export_table_name} values (7, 'doris7', '{"bbbb" 
: 1.1111}'); """
+        sql """ insert into ${export_table_name} values (8, 'doris8', '{"xxx" 
: [111.11]}'); """
+
+
+        // test base data
+        qt_select_base1 """ SELECT * FROM ${export_table_name} t ORDER BY 
user_id; """
+
+        def outfile_url = outfile_to_S3()
+
+        qt_select_load1 """ SELECT * FROM S3 (
+                "uri" = 
"http://${bucket}.${s3_endpoint}${outfile_url.substring(5 + bucket.length(), 
outfile_url.length() - 1)}0.csv",
+                "ACCESS_KEY"= "${ak}",
+                "SECRET_KEY" = "${sk}",
+                "format" = "csv",
+                "region" = "${region}"
+            );
+            """
+    } finally {
+    }
+
+
+    // 2. test a NOT NULL VARIANT column
+    try {
+        def struct_field_define = "`a_info` VARIANT NOT NULL"
+        // create table to export data
+        create_table(export_table_name, struct_field_define)
+        // create table to load data
+        create_table(load_table_name, struct_field_define)
+
+
+        // insert data
+        sql """ insert into ${export_table_name} values (1, 'doris1', '[9, 99, 
999]'), (2, 'doris2', '[8, 88]'); """
+        sql """ insert into ${export_table_name} values (3, 'doris3', '{"a" : 
123}'); """
+        sql """ insert into ${export_table_name} values (4, 'doris4', '{}'); 
"""
+        sql """ insert into ${export_table_name} values (5, 'doris5', '[1, 
null, 2]'); """
+        sql """ insert into ${export_table_name} values (6, 'doris6', '{"aaaa" 
: "111111"}'); """
+        sql """ insert into ${export_table_name} values (7, 'doris7', '{"bbbb" 
: 1.1111}'); """
+        sql """ insert into ${export_table_name} values (8, 'doris8', '{"xxx" 
: [111.11]}'); """
+
+        // test base data
+        qt_select_base2 """ SELECT * FROM ${export_table_name} t ORDER BY 
user_id; """
+
+        def outfile_url = outfile_to_S3()
+
+        qt_select_load2 """ SELECT * FROM S3 (
+                "uri" = 
"http://${bucket}.${s3_endpoint}${outfile_url.substring(5 + bucket.length(), 
outfile_url.length() - 1)}0.csv",
+                "ACCESS_KEY"= "${ak}",
+                "SECRET_KEY" = "${sk}",
+                "format" = "csv",
+                "region" = "${region}"
+            );
+            """
+    } finally {
+    }
+}
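
Stripped of the Groovy harness, the suite above round-trips VARIANT values
through `SELECT ... INTO OUTFILE` and reads them back with the `S3()` table
function. The core statements look roughly like this (bucket, endpoint,
credentials, and the result-file suffix are placeholders):

    SELECT * FROM outfile_csv_variant_export_test ORDER BY user_id
    INTO OUTFILE "s3://<bucket>/outfile/csv/variant_type/exp_"
    FORMAT AS CSV
    PROPERTIES("s3.endpoint" = "<endpoint>", "s3.region" = "<region>",
               "s3.access_key" = "<ak>", "s3.secret_key" = "<sk>");

    SELECT * FROM S3(
        "uri" = "http://<bucket>.<endpoint>/outfile/csv/variant_type/exp_<id>_0.csv",
        "format" = "csv", "region" = "<region>",
        "ACCESS_KEY" = "<ak>", "SECRET_KEY" = "<sk>");
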
diff --git a/regression-test/suites/point_query_p0/test_point_query.groovy 
b/regression-test/suites/point_query_p0/test_point_query.groovy
index 2fcbfd51434..0d4df448286 100644
--- a/regression-test/suites/point_query_p0/test_point_query.groovy
+++ b/regression-test/suites/point_query_p0/test_point_query.groovy
@@ -274,6 +274,30 @@ suite("test_point_query", "nonConcurrent") {
         """                
         sql "insert into test_ODS_EBA_LLREPORT(RPTNO) values('567890')"
         sql "select  /*+ SET_VAR(enable_nereids_planner=true) */  
substr(RPTNO,2,5) from test_ODS_EBA_LLREPORT where  RPTNO = '567890'"
+
+        sql "DROP TABLE IF EXISTS test_cc_aaaid2";
+        sql """
+        CREATE TABLE `test_cc_aaaid2` (
+          `aaaid` VARCHAR(13) NULL COMMENT '3aid'
+        ) ENGINE=OLAP
+        UNIQUE KEY(`aaaid`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`aaaid`) BUCKETS 1
+        PROPERTIES (
+        "replication_allocation" = "tag.location.default: 1",
+        "enable_unique_key_merge_on_write" = "true",
+        "store_row_column" = "true"
+        );
+        """                
+        sql """insert into `test_cc_aaaid2` values('1111111')"""
+        qt_sql """SELECT
+             `__DORIS_DELETE_SIGN__`,
+             aaaid
+
+            FROM
+             `test_cc_aaaid2` 
+            WHERE
+             aaaid = '1111111'"""
     } finally {
         set_be_config.call("disable_storage_row_cache", "true")
         sql """set global enable_nereids_planner=true"""
diff --git a/regression-test/suites/prepared_stmt_p0/prepared_stmt.groovy 
b/regression-test/suites/prepared_stmt_p0/prepared_stmt.groovy
index 0ed0a48e65c..18fd7978a54 100644
--- a/regression-test/suites/prepared_stmt_p0/prepared_stmt.groovy
+++ b/regression-test/suites/prepared_stmt_p0/prepared_stmt.groovy
@@ -201,9 +201,9 @@ suite("test_prepared_stmt", "nonConcurrent") {
         sql "set global max_prepared_stmt_count = 1"
         stmt_read = prepareStatement "SELECT 1" 
         qe_select13 stmt_read
-        assertEquals(stmt_read.class, 
com.mysql.cj.jdbc.ClientPreparedStatement);
+        // assertEquals(stmt_read.class, 
com.mysql.cj.jdbc.ClientPreparedStatement);
         stmt_read = prepareStatement "SELECT 1" 
-        assertEquals(stmt_read.class, 
com.mysql.cj.jdbc.ClientPreparedStatement);
+        // assertEquals(stmt_read.class, 
com.mysql.cj.jdbc.ClientPreparedStatement);
         // set back
         sql "set global max_prepared_stmt_count = 1000000"
 
diff --git a/regression-test/suites/variant_p0/delete_update.groovy 
b/regression-test/suites/variant_p0/delete_update.groovy
index 2b126b4c3a6..ae09fbc8878 100644
--- a/regression-test/suites/variant_p0/delete_update.groovy
+++ b/regression-test/suites/variant_p0/delete_update.groovy
@@ -166,7 +166,5 @@ suite("regression_test_variant_delete_and_update", 
"variant_type"){
 
     sql "sync"
 
-    if (!isCloudMode()) {
-        qt_sql """ select * from ${tableName} order by id;"""
-    }
+    qt_sql """ select * from ${tableName} order by id;"""
 }
\ No newline at end of file
diff --git a/regression-test/suites/variant_p0/desc.groovy 
b/regression-test/suites/variant_p0/desc.groovy
index f600496eae5..dfb5b40794e 100644
--- a/regression-test/suites/variant_p0/desc.groovy
+++ b/regression-test/suites/variant_p0/desc.groovy
@@ -238,6 +238,26 @@ suite("regression_test_variant_desc", "nonConcurrent"){
         sql """ insert into ${table_name} values (0, '100')"""
         sql """set describe_extend_variant_column = true"""
         qt_sql_12 """desc ${table_name}"""
+
+
+        // desc on a table with a large number of tablets
+        table_name = "large_tablets"
+        create_table_partition.call(table_name, "200") 
+        sql """insert into large_tablets values (1, '{"a" : 10}')"""
+        sql """insert into large_tablets values (3001, '{"b" : 10}')"""
+        sql """insert into large_tablets values (50001, '{"c" : 10}')"""
+        sql """insert into large_tablets values (99999, '{"d" : 10}')"""
+        sql """set max_fetch_remote_schema_tablet_count = 2"""
+        sql "desc large_tablets"
+        sql """set max_fetch_remote_schema_tablet_count = 128"""
+        sql "desc large_tablets"
+        sql """set max_fetch_remote_schema_tablet_count = 512"""
+        sql "desc large_tablets"
+        sql """set max_fetch_remote_schema_tablet_count = 2048"""
+        qt_sql15 "desc large_tablets"
+
+        sql "truncate table large_tablets"
+        sql "desc large_tablets"
     } finally {
         // reset flags
         set_be_config.call("variant_ratio_of_defaults_as_sparse_column", 
"0.95")
diff --git a/regression-test/suites/variant_p0/mtmv.groovy 
b/regression-test/suites/variant_p0/mtmv.groovy
new file mode 100644
index 00000000000..e411df243ad
--- /dev/null
+++ b/regression-test/suites/variant_p0/mtmv.groovy
@@ -0,0 +1,95 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("regression_test_variant_mtmv"){
+    sql "SET enable_nereids_planner=true"
+    sql "SET enable_fallback_to_original_planner=false"
+    sql "SET enable_materialized_view_rewrite=true"
+    sql "SET enable_materialized_view_nest_rewrite = true"
+    def load_json_data = {table_name, file_name ->
+        // load the json data
+        streamLoad {
+            table "${table_name}"
+
+            // set http request header params
+            set 'read_json_by_line', 'true' 
+            set 'format', 'json' 
+            set 'max_filter_ratio', '0.1'
+            set 'jsonpaths', '[\"$.v.id\", \"$.v.type\", \"$.v.actor\", 
\"$.v.repo\", \"$.v.payload\", \"$.v.public\", \"$.v.created_at\"]'
+            file file_name // import json file
+            time 10000 // limit inflight 10s
+
+            // If a check callback is declared, the default check condition is
+            // ignored, so every condition must be verified explicitly here.
+
+            check { result, exception, startTime, endTime ->
+                if (exception != null) {
+                        throw exception
+                }
+                logger.info("Stream load ${file_name} result: 
${result}".toString())
+                def json = parseJson(result)
+                assertEquals("success", json.Status.toLowerCase())
+                // assertEquals(json.NumberTotalRows, json.NumberLoadedRows + 
json.NumberUnselectedRows)
+                assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
+            }
+        }
+    }
+
+    def table_name = "github_events_mtmv"
+    sql """DROP TABLE IF EXISTS ${table_name}"""
+    sql """
+        CREATE TABLE `${table_name}` (
+          `id` BIGINT NOT NULL,
+          `type` VARCHAR(30) NULL,
+          `actor` VARIANT NULL,
+          `repo` VARIANT NULL,
+          `payload` VARIANT NULL,
+          `public` BOOLEAN NULL,
+          `created_at` DATETIME NULL,
+          INDEX idx_payload (`payload`) USING INVERTED PROPERTIES("parser" = 
"english", "lower_case" = "true") COMMENT 'inverted index for payload'
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+        "replication_allocation" = "tag.location.default: 1"
+        );  
+    """
+    load_json_data.call(table_name, """${getS3Url() + 
'/regression/gharchive.m/2015-01-01-3.json'}""")
+    load_json_data.call(table_name, """${getS3Url() + 
'/regression/gharchive.m/2022-11-07-16.json'}""")
+    sql """DROP MATERIALIZED VIEW IF EXISTS mv1"""
+    sql """
+        CREATE MATERIALIZED VIEW mv1
+        BUILD IMMEDIATE REFRESH AUTO ON MANUAL DISTRIBUTED BY RANDOM BUCKETS 2
+        PROPERTIES ('replication_num' = '1') 
+            as SELECT id, type, actor['id'], actor['display_login'], actor, 
payload['ref'] FROM github_events_mtmv limit 1024;
+    """ 
+    String db = context.config.getDbNameByFile(context.file)
+    def job_name_1 = getJobName(db, "mv1") 
+    waitingMTMVTaskFinished(job_name_1)
+
+    sql """DROP MATERIALIZED VIEW IF EXISTS mv2"""
+    sql """
+        CREATE MATERIALIZED VIEW mv2
+        BUILD IMMEDIATE REFRESH AUTO ON MANUAL DISTRIBUTED BY RANDOM BUCKETS 2
+        PROPERTIES ('replication_num' = '1') 
+            as SELECT id, cast(actor['id'] as bigint), payload  FROM 
github_events_mtmv limit 1024;
+    """ 
+    def job_name_2 = getJobName(db, "mv2") 
+    waitingMTMVTaskFinished(job_name_2)
+}
\ No newline at end of file
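
Both materialized views select variant subcolumns (`actor['id']`,
`payload['ref']`) from `github_events_mtmv`, which is the variant-specific
behavior under test. Trimmed to the essential pattern, with a follow-up query
(the COUNT is an illustrative check, not part of the suite):

    CREATE MATERIALIZED VIEW mv2
    BUILD IMMEDIATE REFRESH AUTO ON MANUAL
    DISTRIBUTED BY RANDOM BUCKETS 2
    PROPERTIES ('replication_num' = '1')
    AS SELECT id, CAST(actor['id'] AS BIGINT), payload
       FROM github_events_mtmv LIMIT 1024;

    -- once the refresh task finishes, the MTMV can be queried like a table
    SELECT COUNT(*) FROM mv2;
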
diff --git a/regression-test/suites/variant_p0/rqg/load.groovy 
b/regression-test/suites/variant_p0/rqg/load.groovy
new file mode 100644
index 00000000000..a524d3525b5
--- /dev/null
+++ b/regression-test/suites/variant_p0/rqg/load.groovy
@@ -0,0 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("regression_test_variant_rqg_p0", "p0"){
+    sql """DROP DATABASE regression_test_variant_p0_rqg""" 
+    sql """CREATE DATABASE regression_test_variant_p0_rqg""" 
+}
\ No newline at end of file
diff --git a/regression-test/suites/variant_p0/rqg/rqg1.sql 
b/regression-test/suites/variant_p0/rqg/rqg1.sql
new file mode 100644
index 00000000000..0113eae7dad
--- /dev/null
+++ b/regression-test/suites/variant_p0/rqg/rqg1.sql
@@ -0,0 +1,37 @@
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by5 ( pk int, var 
VARIANT NULL ) engine=olap DUPLICATE KEY(pk) distributed by hash(pk) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by5(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_varchar_10__undef_signed\": 
\"\"}'),('1','{\"col_int_undef_signed\": 8, \"col_varchar_10__undef_signed\": 
\"\"}'),('2','{\"col_int_undef_signed\": null, 
\"col_varchar_10__undef_signed\": null}'),('3','{\"col_int_undef_signed\": 
null, \"col_varchar_10__undef_signed\": 
\"with\"}'),('4','{\"col_int_undef_signed\": null, 
\"col_varchar_10__undef_signed\": \"\"}' [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by5 ( pk int, var 
VARIANT NULL ) engine=olap DUPLICATE KEY(pk) distributed by hash(pk) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by5(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_varchar_10__undef_signed\": \"o\", 
\"col_varchar_1024__undef_signed\": \"c\"}'),('1','{\"col_int_undef_signed\": 
null, \"col_varchar_10__undef_signed\": \"m\", 
\"col_varchar_1024__undef_signed\": \"j\"}'),('2','{\"col_int_undef_signed\": 
null, \"col_varchar_10__undef_signed\": \"r\", 
\"col_varchar_1024__undef_signed\": \"had\"}'),('3','{\"col_int_undef_sign [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by52 ( pk int, var 
VARIANT NULL ) engine=olap DUPLICATE KEY(pk) distributed by hash(pk) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by52(pk,var) VALUES 
('0','{\"col_int_undef_signed\": 4, \"col_varchar_10__undef_signed\": \"well\", 
\"col_varchar_1024__undef_signed\": 
\"well\"}'),('1','{\"col_int_undef_signed\": null, 
\"col_varchar_10__undef_signed\": \"b\", \"col_varchar_1024__undef_signed\": 
\"how\"}'),('2','{\"col_int_undef_signed\": 9, 
\"col_varchar_10__undef_signed\": \"then\", \"col_varchar_1024__undef_signed\": 
\"n\"}'),('3','{\"col_int_undef_ [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by53 ( pk int, var 
VARIANT NULL ) engine=olap DUPLICATE KEY(pk) distributed by hash(pk) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by53(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_varchar_10__undef_signed\": \"v\", 
\"col_varchar_1024__undef_signed\": \"l\"}'),('1','{\"col_int_undef_signed\": 
1, \"col_varchar_10__undef_signed\": \"up\", 
\"col_varchar_1024__undef_signed\": 
\"there\"}'),('2','{\"col_int_undef_signed\": null, 
\"col_varchar_10__undef_signed\": \"on\", \"col_varchar_1024__undef_signed\": 
\"o\"}'),('3','{\"col_int_undef_si [...]
+CREATE TABLE IF NOT EXISTS 
table_25_undef_partitions2_keys3_properties4_distributed_by52 ( pk int, var 
VARIANT NULL ) engine=olap DUPLICATE KEY(pk) distributed by hash(pk) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_25_undef_partitions2_keys3_properties4_distributed_by52(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_varchar_10__undef_signed\": 
\"she\", \"col_varchar_1024__undef_signed\": 
\"his\"}'),('1','{\"col_int_undef_signed\": 7, 
\"col_varchar_10__undef_signed\": \"c\", \"col_varchar_1024__undef_signed\": 
\"i\"}'),('2','{\"col_int_undef_signed\": null, 
\"col_varchar_10__undef_signed\": \"were\", \"col_varchar_1024__undef_signed\": 
\"z\"}'),('3','{\"col_int_undef_s [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by5 ( 
`col_int_undef_signed` int, `col_varchar_10__undef_signed` varchar(10), 
`col_varchar_1024__undef_signed` varchar(1024), `pk` int, var VARIANT NULL ) 
engine=olap DUPLICATE KEY(`col_int_undef_signed`) distributed by 
hash(`col_int_undef_signed`) buckets 10 properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by5(pk,var) VALUES 
('0','{\"col_int_undef_signed\": 9, \"col_varchar_10__undef_signed\": \"c\", 
\"col_varchar_1024__undef_signed\": \"one\"}'),('1','{\"col_int_undef_signed\": 
2, \"col_varchar_10__undef_signed\": \"o\", \"col_varchar_1024__undef_signed\": 
\"up\"}'),('2','{\"col_int_undef_signed\": null, 
\"col_varchar_10__undef_signed\": \"c\", \"col_varchar_1024__undef_signed\": 
\"m\"}'),('3','{\"col_int_undef_signed\": [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by52 ( `pk` int, 
`col_int_undef_signed` int, `col_varchar_10__undef_signed` varchar(10), 
`col_varchar_1024__undef_signed` varchar(1024), var VARIANT NULL ) engine=olap 
DUPLICATE KEY(`pk`) distributed by hash(`pk`) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by52(pk,var) VALUES 
('0','{\"col_int_undef_signed\": 7, \"col_varchar_10__undef_signed\": \"say\", 
\"col_varchar_1024__undef_signed\": \"k\"}'),('1','{\"col_int_undef_signed\": 
null, \"col_varchar_10__undef_signed\": \"v\", 
\"col_varchar_1024__undef_signed\": \"or\"}'),('2','{\"col_int_undef_signed\": 
8, \"col_varchar_10__undef_signed\": \"v\", \"col_varchar_1024__undef_signed\": 
\"m\"}'),('3','{\"col_int_undef_signed\" [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by53 ( `pk` int, 
`col_varchar_10__undef_signed` varchar(10), `col_int_undef_signed` int, 
`col_varchar_1024__undef_signed` varchar(1024), var VARIANT NULL ) engine=olap 
DUPLICATE KEY(`pk`) distributed by hash(`pk`) buckets 10 
properties("replication_num" = "1");
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by5 ( 
`col_int_undef_signed` int, `col_varchar_10__undef_signed` varchar(10), 
`col_varchar_1024__undef_signed` varchar(1024), `pk` int, var VARIANT NULL ) 
engine=olap DUPLICATE KEY(`col_int_undef_signed`) distributed by 
hash(`col_int_undef_signed`) buckets 10 properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by5(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_varchar_10__undef_signed\": \"g\", 
\"col_varchar_1024__undef_signed\": \"in\"}'),('1','{\"col_int_undef_signed\": 
2, \"col_varchar_10__undef_signed\": \"not\", 
\"col_varchar_1024__undef_signed\": \"a\"}'),('2','{\"col_int_undef_signed\": 
0, \"col_varchar_10__undef_signed\": \"q\", \"col_varchar_1024__undef_signed\": 
\"z\"}'),('3','{\"col_int_undef_signed\": [...]
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by53(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_varchar_10__undef_signed\": \"k\", 
\"col_varchar_1024__undef_signed\": \"q\"}'),('1','{\"col_int_undef_signed\": 
1, \"col_varchar_10__undef_signed\": \"j\", \"col_varchar_1024__undef_signed\": 
\"this\"}'),('2','{\"col_int_undef_signed\": 1, 
\"col_varchar_10__undef_signed\": \"c\", \"col_varchar_1024__undef_signed\": 
\"from\"}'),('3','{\"col_int_undef_signe [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by52 ( `pk` int, 
`col_int_undef_signed` int, `col_varchar_10__undef_signed` varchar(10), 
`col_varchar_1024__undef_signed` varchar(1024), var VARIANT NULL ) engine=olap 
DUPLICATE KEY(`pk`) distributed by hash(`pk`) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by52(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_varchar_10__undef_signed\": \"c\", 
\"col_varchar_1024__undef_signed\": \"o\"}'),('1','{\"col_int_undef_signed\": 
5, \"col_varchar_10__undef_signed\": \"you\", 
\"col_varchar_1024__undef_signed\": 
\"when\"}'),('2','{\"col_int_undef_signed\": 1, 
\"col_varchar_10__undef_signed\": \"q\", \"col_varchar_1024__undef_signed\": 
\"r\"}'),('3','{\"col_int_undef_signed [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by53 ( `pk` int, 
`col_varchar_10__undef_signed` varchar(10), `col_int_undef_signed` int, 
`col_varchar_1024__undef_signed` varchar(1024), var VARIANT NULL ) engine=olap 
DUPLICATE KEY(`pk`) distributed by hash(`pk`) buckets 10 
properties("replication_num" = "1");
+CREATE TABLE IF NOT EXISTS 
table_25_undef_partitions2_keys3_properties4_distributed_by52 ( `pk` int, 
`col_int_undef_signed` int, `col_varchar_10__undef_signed` varchar(10), 
`col_varchar_1024__undef_signed` varchar(1024), var VARIANT NULL ) engine=olap 
DUPLICATE KEY(`pk`) distributed by hash(`pk`) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by53(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_varchar_10__undef_signed\": 
\"he\", \"col_varchar_1024__undef_signed\": 
\"good\"}'),('1','{\"col_int_undef_signed\": null, 
\"col_varchar_10__undef_signed\": \"was\", \"col_varchar_1024__undef_signed\": 
\"how\"}'),('2','{\"col_int_undef_signed\": null, 
\"col_varchar_10__undef_signed\": \"t\", \"col_varchar_1024__undef_signed\": 
\"p\"}'),('3','{\"col_int_und [...]
+INSERT INTO 
table_25_undef_partitions2_keys3_properties4_distributed_by52(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_varchar_10__undef_signed\": 
\"look\", \"col_varchar_1024__undef_signed\": 
\"x\"}'),('1','{\"col_int_undef_signed\": 0, \"col_varchar_10__undef_signed\": 
\"k\", \"col_varchar_1024__undef_signed\": 
\"r\"}'),('2','{\"col_int_undef_signed\": null, 
\"col_varchar_10__undef_signed\": \"e\", \"col_varchar_1024__undef_signed\": 
\"b\"}'),('3','{\"col_int_undef_signe [...]
+CREATE TABLE IF NOT EXISTS 
table_25_undef_partitions2_keys3_properties4_distributed_by52 ( `pk` int, 
`col_int_undef_signed` int, `col_varchar_10__undef_signed` varchar(10), 
`col_varchar_1024__undef_signed` varchar(1024), var VARIANT NULL ) engine=olap 
DUPLICATE KEY(`pk`) distributed by hash(`pk`) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_25_undef_partitions2_keys3_properties4_distributed_by52(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_varchar_10__undef_signed\": 
\"some\", \"col_varchar_1024__undef_signed\": 
\"some\"}'),('1','{\"col_int_undef_signed\": null, 
\"col_varchar_10__undef_signed\": \"x\", \"col_varchar_1024__undef_signed\": 
\"so\"}'),('2','{\"col_int_undef_signed\": null, 
\"col_varchar_10__undef_signed\": \"say\", \"col_varchar_1024__undef_signed\": 
\"f\"}'),('3','{\"col_int_un [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by5 ( pk int, var 
VARIANT NULL ) engine=olap DUPLICATE KEY(pk) distributed by hash(pk) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by5(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_int_undef_signed_not_null\": 7, 
\"col_date_undef_signed\": \"2023-12-12\", \"col_date_undef_signed_not_null\": 
\"2023-12-19\", \"col_varchar_10__undef_signed\": \"i\", 
\"col_varchar_10__undef_signed_not_null\": \"been\", 
\"col_varchar_1024__undef_signed\": \"s\", 
\"col_varchar_1024__undef_signed_not_null\": 
\"i\"}'),('1','{\"col_int_undef_signed\": 5, \"col [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by52 ( pk int, var 
VARIANT NULL ) engine=olap DUPLICATE KEY(pk) distributed by hash(pk) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by52(pk,var) VALUES 
('0','{\"col_int_undef_signed\": 1, \"col_int_undef_signed_not_null\": 8, 
\"col_date_undef_signed\": \"2023-12-15\", \"col_date_undef_signed_not_null\": 
\"2023-12-19\", \"col_varchar_10__undef_signed\": \"he\", 
\"col_varchar_10__undef_signed_not_null\": \"it\", 
\"col_varchar_1024__undef_signed\": \"can\", 
\"col_varchar_1024__undef_signed_not_null\": 
\"r\"}'),('1','{\"col_int_undef_signed\": null, \"c [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by53 ( pk int, var 
VARIANT NULL ) engine=olap DUPLICATE KEY(pk) distributed by hash(pk) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by53(pk,var) VALUES 
('0','{\"col_int_undef_signed\": null, \"col_int_undef_signed_not_null\": 0, 
\"col_date_undef_signed\": \"2023-12-20\", \"col_date_undef_signed_not_null\": 
\"2023-12-19\", \"col_varchar_10__undef_signed\": \"or\", 
\"col_varchar_10__undef_signed_not_null\": \"I\'\'m\", 
\"col_varchar_1024__undef_signed\": \"o\", 
\"col_varchar_1024__undef_signed_not_null\": 
\"b\"}'),('1','{\"col_int_undef_signed\": 0, \ [...]
+CREATE TABLE IF NOT EXISTS 
table_24_undef_partitions2_keys3_properties4_distributed_by54 ( pk int, var 
VARIANT NULL ) engine=olap DUPLICATE KEY(pk) distributed by hash(pk) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_24_undef_partitions2_keys3_properties4_distributed_by54(pk,var) VALUES 
('0','{\"col_int_undef_signed\": 2, \"col_int_undef_signed_not_null\": 0, 
\"col_date_undef_signed\": \"2023-12-09\", \"col_date_undef_signed_not_null\": 
\"2023-12-20\", \"col_varchar_10__undef_signed\": \"we\", 
\"col_varchar_10__undef_signed_not_null\": \"k\", 
\"col_varchar_1024__undef_signed\": \"m\", 
\"col_varchar_1024__undef_signed_not_null\": 
\"your\"}'),('1','{\"col_int_undef_signed\": null, \"c [...]
+CREATE TABLE IF NOT EXISTS 
table_25_undef_partitions2_keys3_properties4_distributed_by52 ( pk int, var 
VARIANT NULL ) engine=olap DUPLICATE KEY(pk) distributed by hash(pk) buckets 10 
properties("replication_num" = "1");
+INSERT INTO 
table_25_undef_partitions2_keys3_properties4_distributed_by52(pk,var) VALUES 
('0','{\"col_int_undef_signed\": 9, \"col_int_undef_signed_not_null\": 7, 
\"col_date_undef_signed\": \"2023-12-11\", \"col_date_undef_signed_not_null\": 
\"2023-12-09\", \"col_varchar_10__undef_signed\": \"x\", 
\"col_varchar_10__undef_signed_not_null\": \"and\", 
\"col_varchar_1024__undef_signed\": \"see\", 
\"col_varchar_1024__undef_signed_not_null\": 
\"h\"}'),('1','{\"col_int_undef_signed\": 5, \"col_ [...]
+SELECT  CAST(alias2 . var['col_int_undef_signed'] AS int)  AS field1 FROM 
regression_test_variant_p0_rqg.table_25_undef_partitions2_keys3_properties4_distributed_by52
 AS alias1 LEFT OUTER JOIN 
regression_test_variant_p0_rqg.table_24_undef_partitions2_keys3_properties4_distributed_by5
 AS alias2 ON (alias2 . `pk` = alias1 . `pk` )   HAVING field1 < 4 ORDER BY 
field1 ;
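
Reflowed for readability, the final query above is a LEFT OUTER JOIN on `pk`
that casts a variant subcolumn and filters on the aliased result:

    SELECT CAST(alias2.var['col_int_undef_signed'] AS INT) AS field1
    FROM regression_test_variant_p0_rqg.table_25_undef_partitions2_keys3_properties4_distributed_by52 AS alias1
    LEFT OUTER JOIN regression_test_variant_p0_rqg.table_24_undef_partitions2_keys3_properties4_distributed_by5 AS alias2
        ON alias2.pk = alias1.pk
    HAVING field1 < 4
    ORDER BY field1;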


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org

