This is an automated email from the ASF dual-hosted git repository.

lihaopeng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 95d7d88c34c [chore](thrift) remove some useless thrift definitions (#46492)
95d7d88c34c is described below

commit 95d7d88c34c250fba8667695296f0421ad7e5e48
Author: yiguolei <guo...@selectdb.com>
AuthorDate: Tue Jan 7 19:03:49 2025 +0800

    [chore](thrift) remove some useless thrift definitions (#46492)
---
 be/src/common/config.h                             |   2 +-
 be/src/exec/schema_scanner/schema_helper.cpp       |   2 -
 be/src/exec/schema_scanner/schema_helper.h         |   2 -
 be/src/pipeline/exec/schema_scan_operator.cpp      |   8 -
 be/src/service/backend_service.cpp                 |  41 ---
 be/src/service/backend_service.h                   |   2 -
 be/src/service/internal_service.cpp                |  15 -
 be/src/service/internal_service.h                  |  15 -
 be/src/util/debug_util.h                           |   1 -
 .../main/java/org/apache/doris/qe/Coordinator.java |   1 -
 .../apache/doris/service/FrontendServiceImpl.java  |  70 ----
 .../org/apache/doris/common/GenericPoolTest.java   |   7 -
 .../apache/doris/utframe/MockedBackendFactory.java |  14 -
 gensrc/proto/internal_service.proto                |   2 -
 gensrc/thrift/BackendService.thrift                |   5 -
 gensrc/thrift/Data.thrift                          |  26 --
 gensrc/thrift/Ddl.thrift                           | 365 ---------------------
 gensrc/thrift/FrontendService.thrift               | 218 ------------
 gensrc/thrift/PaloInternalService.thrift           |  69 ----
 gensrc/thrift/PaloService.thrift                   | 100 ------
 gensrc/thrift/PlanNodes.thrift                     |  15 -
 gensrc/thrift/Types.thrift                         |   7 -
 22 files changed, 1 insertion(+), 986 deletions(-)

diff --git a/be/src/common/config.h b/be/src/common/config.h
index ddca52c607b..6c84d89faba 100644
--- a/be/src/common/config.h
+++ b/be/src/common/config.h
@@ -586,7 +586,7 @@ DECLARE_mInt64(load_error_log_limit_bytes);
 
 // be brpc interface is classified into two categories: light and heavy
 // each category has diffrent thread number
-// threads to handle heavy api interface, such as transmit_data/transmit_block etc
+// threads to handle heavy api interface, such as transmit_block etc
 DECLARE_Int32(brpc_heavy_work_pool_threads);
 // threads to handle light api interface, such as exec_plan_fragment_prepare/exec_plan_fragment_start
 DECLARE_Int32(brpc_light_work_pool_threads);
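
For readers unfamiliar with the pool split referenced above: heavy RPCs (bulk data movement such as transmit_block) and light RPCs (small control messages such as exec_plan_fragment_prepare/exec_plan_fragment_start) are served by separately sized thread pools. A minimal stand-alone C++ sketch of that dispatch idea follows; the pool sizes and the classification rule here are illustrative assumptions, not Doris's actual brpc wiring.

#include <iostream>
#include <string>
#include <unordered_set>

int main() {
    // Illustrative sizes; in Doris these come from the two config knobs above.
    const int heavy_pool_threads = 4;  // cf. brpc_heavy_work_pool_threads
    const int light_pool_threads = 2;  // cf. brpc_light_work_pool_threads

    // Assumed classification: data-moving RPCs are "heavy", control RPCs "light".
    const std::unordered_set<std::string> heavy_apis = {"transmit_block"};

    for (const std::string& rpc : {"transmit_block", "exec_plan_fragment_prepare",
                                   "exec_plan_fragment_start"}) {
        bool heavy = heavy_apis.count(rpc) > 0;
        std::cout << rpc << " -> " << (heavy ? "heavy" : "light") << " pool ("
                  << (heavy ? heavy_pool_threads : light_pool_threads) << " threads)\n";
    }
    return 0;
}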
diff --git a/be/src/exec/schema_scanner/schema_helper.cpp b/be/src/exec/schema_scanner/schema_helper.cpp
index 2819dc603f7..7cf95187b02 100644
--- a/be/src/exec/schema_scanner/schema_helper.cpp
+++ b/be/src/exec/schema_scanner/schema_helper.cpp
@@ -23,8 +23,6 @@
 #include "util/thrift_rpc_helper.h"
 
 namespace doris {
-class TDescribeTableParams;
-class TDescribeTableResult;
 class TDescribeTablesParams;
 class TDescribeTablesResult;
 class TGetDbsParams;
diff --git a/be/src/exec/schema_scanner/schema_helper.h b/be/src/exec/schema_scanner/schema_helper.h
index 752e282bb52..bc794093128 100644
--- a/be/src/exec/schema_scanner/schema_helper.h
+++ b/be/src/exec/schema_scanner/schema_helper.h
@@ -24,8 +24,6 @@
 #include "common/status.h"
 
 namespace doris {
-class TDescribeTableParams;
-class TDescribeTableResult;
 class TDescribeTablesParams;
 class TDescribeTablesResult;
 class TGetDbsParams;
diff --git a/be/src/pipeline/exec/schema_scan_operator.cpp b/be/src/pipeline/exec/schema_scan_operator.cpp
index 2e2f80f5e24..ea6f122df95 100644
--- a/be/src/pipeline/exec/schema_scan_operator.cpp
+++ b/be/src/pipeline/exec/schema_scan_operator.cpp
@@ -192,14 +192,6 @@ Status SchemaScanOperatorX::open(RuntimeState* state) {
 
     _tuple_idx = 0;
 
-    if (_common_scanner_param->user) {
-        TSetSessionParams param;
-        param.__set_user(*_common_scanner_param->user);
-        //TStatus t_status;
-        //RETURN_IF_ERROR(SchemaJniHelper::set_session(param, &t_status));
-        //RETURN_IF_ERROR(Status(t_status));
-    }
-
     return Status::OK();
 }
 
diff --git a/be/src/service/backend_service.cpp b/be/src/service/backend_service.cpp
index 55e18b4deb8..29504fef187 100644
--- a/be/src/service/backend_service.cpp
+++ b/be/src/service/backend_service.cpp
@@ -654,47 +654,6 @@ Status BaseBackendService::start_plan_fragment_execution(
                                                          QuerySource::INTERNAL_FRONTEND);
 }
 
-void BaseBackendService::transmit_data(TTransmitDataResult& return_val,
-                                       const TTransmitDataParams& params) {
-    VLOG_ROW << "transmit_data(): instance_id=" << params.dest_fragment_instance_id
-             << " node_id=" << params.dest_node_id << " #rows=" << params.row_batch.num_rows
-             << " eos=" << (params.eos ? "true" : "false");
-    // VLOG_ROW << "transmit_data params: " << apache::thrift::ThriftDebugString(params).c_str();
-
-    if (params.__isset.packet_seq) {
-        return_val.__set_packet_seq(params.packet_seq);
-        return_val.__set_dest_fragment_instance_id(params.dest_fragment_instance_id);
-        return_val.__set_dest_node_id(params.dest_node_id);
-    }
-
-    // TODO: fix Thrift so we can simply take ownership of thrift_batch instead
-    // of having to copy its data
-    if (params.row_batch.num_rows > 0) {
-        // Status status = _exec_env->stream_mgr()->add_data(
-        //         params.dest_fragment_instance_id,
-        //         params.dest_node_id,
-        //         params.row_batch,
-        //         params.sender_id);
-        // status.set_t_status(&return_val);
-
-        // if (!status.ok()) {
-        //     // should we close the channel here as well?
-        //     return;
-        // }
-    }
-
-    if (params.eos) {
-        // Status status = _exec_env->stream_mgr()->close_sender(
-        //        params.dest_fragment_instance_id,
-        //        params.dest_node_id,
-        //        params.sender_id,
-        //        params.be_number);
-        //VLOG_ROW << "params.eos: " << (params.eos ? "true" : "false")
-        //        << " close_sender status: " << status;
-        //status.set_t_status(&return_val);
-    }
-}
-
 void BaseBackendService::submit_export_task(TStatus& t_status, const TExportTaskRequest& request) {
     //    VLOG_ROW << "submit_export_task. request  is "
     //            << apache::thrift::ThriftDebugString(request).c_str();
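
One detail worth noting before the stub disappears: the removed transmit_data() still documented the ack contract, i.e. when the sender sets packet_seq, the receiver echoes the sequence number and the destination ids back so acks can be paired with packets. A self-contained sketch of that echo, using plain hypothetical structs in place of the Thrift-generated TTransmitDataParams/TTransmitDataResult:

#include <cstdint>
#include <iostream>
#include <optional>

// Hypothetical stand-ins for TTransmitDataParams / TTransmitDataResult.
struct Params {
    std::optional<int64_t> packet_seq;  // set only when the sender wants an ack
    int64_t dest_instance_id = 0;
    int32_t dest_node_id = 0;
};
struct Result {
    std::optional<int64_t> packet_seq;
    int64_t dest_instance_id = 0;
    int32_t dest_node_id = 0;
};

// Mirrors the removed transmit_data(): echo the sequence number and the
// destination ids so the sender can pair this ack with its packet.
Result ack(const Params& p) {
    Result r;
    if (p.packet_seq) {
        r.packet_seq = p.packet_seq;
        r.dest_instance_id = p.dest_instance_id;
        r.dest_node_id = p.dest_node_id;
    }
    return r;
}

int main() {
    Params p{42, 7, 3};
    Result r = ack(p);
    std::cout << "acked seq " << *r.packet_seq << " for node " << r.dest_node_id << "\n";
    return 0;
}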
diff --git a/be/src/service/backend_service.h b/be/src/service/backend_service.h
index 1d4219e2191..d9871643597 100644
--- a/be/src/service/backend_service.h
+++ b/be/src/service/backend_service.h
@@ -92,8 +92,6 @@ public:
     void cancel_plan_fragment(TCancelPlanFragmentResult& return_val,
                              const TCancelPlanFragmentParams& params) override {};
 
-    void transmit_data(TTransmitDataResult& return_val, const TTransmitDataParams& params) override;
-
     void submit_export_task(TStatus& t_status, const TExportTaskRequest& request) override;
 
     void get_export_status(TExportStatusResult& result, const TUniqueId& task_id) override;
diff --git a/be/src/service/internal_service.cpp b/be/src/service/internal_service.cpp
index fb0b2f090bc..3186d4914bc 100644
--- a/be/src/service/internal_service.cpp
+++ b/be/src/service/internal_service.cpp
@@ -272,21 +272,6 @@ PInternalService::~PInternalService() {
     CHECK_EQ(0, bthread_key_delete(AsyncIO::btls_io_ctx_key));
 }
 
-void PInternalService::transmit_data(google::protobuf::RpcController* controller,
-                                     const PTransmitDataParams* request,
-                                     PTransmitDataResult* response,
-                                     google::protobuf::Closure* done) {}
-
-void PInternalService::transmit_data_by_http(google::protobuf::RpcController* controller,
-                                             const PEmptyRequest* request,
-                                             PTransmitDataResult* response,
-                                             google::protobuf::Closure* done) {}
-
-void PInternalService::_transmit_data(google::protobuf::RpcController* controller,
-                                      const PTransmitDataParams* request,
-                                      PTransmitDataResult* response,
-                                      google::protobuf::Closure* done, const Status& extract_st) {}
-
 void PInternalService::tablet_writer_open(google::protobuf::RpcController* controller,
                                           const PTabletWriterOpenRequest* request,
                                           PTabletWriterOpenResult* response,
diff --git a/be/src/service/internal_service.h b/be/src/service/internal_service.h
index 66a0f867393..e3d03a6a449 100644
--- a/be/src/service/internal_service.h
+++ b/be/src/service/internal_service.h
@@ -64,16 +64,6 @@ public:
     PInternalService(ExecEnv* exec_env);
     ~PInternalService() override;
 
-    void transmit_data(::google::protobuf::RpcController* controller,
-                       const ::doris::PTransmitDataParams* request,
-                       ::doris::PTransmitDataResult* response,
-                       ::google::protobuf::Closure* done) override;
-
-    void transmit_data_by_http(::google::protobuf::RpcController* controller,
-                               const ::doris::PEmptyRequest* request,
-                               ::doris::PTransmitDataResult* response,
-                               ::google::protobuf::Closure* done) override;
-
     void exec_plan_fragment(google::protobuf::RpcController* controller,
                             const PExecPlanFragmentRequest* request,
                             PExecPlanFragmentResult* result,
@@ -253,11 +243,6 @@ private:
 
     Status _fold_constant_expr(const std::string& ser_request, PConstantExprResult* response);
 
-    void _transmit_data(::google::protobuf::RpcController* controller,
-                        const ::doris::PTransmitDataParams* request,
-                        ::doris::PTransmitDataResult* response, ::google::protobuf::Closure* done,
-                        const Status& extract_st);
-
     void _transmit_block(::google::protobuf::RpcController* controller,
                          const ::doris::PTransmitDataParams* request,
                         ::doris::PTransmitDataResult* response, ::google::protobuf::Closure* done,
diff --git a/be/src/util/debug_util.h b/be/src/util/debug_util.h
index 31cc1f8f5ca..19485ca07d2 100644
--- a/be/src/util/debug_util.h
+++ b/be/src/util/debug_util.h
@@ -27,7 +27,6 @@
 namespace doris {
 
 std::string print_plan_node_type(const TPlanNodeType::type& type);
-std::string print_tstmt_type(const TStmtType::type& type);
 std::string print_query_state(const QueryState::type& type);
 std::string PrintTUnit(const TUnit::type& type);
 std::string PrintTMetricKind(const TMetricKind::type& type);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java
index 472f2462e4d..3bf5c44d564 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java
@@ -188,7 +188,6 @@ public class Coordinator implements CoordInterface {
 
     protected ImmutableMap<Long, Backend> idToBackend = ImmutableMap.of();
 
-    // copied from TQueryExecRequest; constant across all fragments
     private final TDescriptorTable descTable;
     private FragmentIdMapping<DistributedPlan> distributedPlans;
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
index f4f10bf331d..8f2c37602e9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
@@ -140,8 +140,6 @@ import org.apache.doris.thrift.TConfirmUnusedRemoteFilesRequest;
 import org.apache.doris.thrift.TConfirmUnusedRemoteFilesResult;
 import org.apache.doris.thrift.TCreatePartitionRequest;
 import org.apache.doris.thrift.TCreatePartitionResult;
-import org.apache.doris.thrift.TDescribeTableParams;
-import org.apache.doris.thrift.TDescribeTableResult;
 import org.apache.doris.thrift.TDescribeTablesParams;
 import org.apache.doris.thrift.TDescribeTablesResult;
 import org.apache.doris.thrift.TDropPlsqlPackageRequest;
@@ -824,74 +822,6 @@ public class FrontendServiceImpl implements FrontendService.Iface {
         return result;
     }
 
-    @Override
-    public TDescribeTableResult describeTable(TDescribeTableParams params) throws TException {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("get desc table request: {}", params);
-        }
-        TDescribeTableResult result = new TDescribeTableResult();
-        List<TColumnDef> columns = Lists.newArrayList();
-        result.setColumns(columns);
-
-        // database privs should be checked in analysis phrase
-        UserIdentity currentUser = null;
-        if (params.isSetCurrentUserIdent()) {
-            currentUser = UserIdentity.fromThrift(params.current_user_ident);
-        } else {
-            currentUser = UserIdentity.createAnalyzedUserIdentWithIp(params.user, params.user_ip);
-        }
-        String dbName = getDbNameFromMysqlTableSchema(params.catalog, params.db);
-        if (!Env.getCurrentEnv().getAccessManager()
-                .checkTblPriv(currentUser, params.catalog, dbName, params.getTableName(), PrivPredicate.SHOW)) {
-            return result;
-        }
-
-        String catalogName = Strings.isNullOrEmpty(params.catalog) ? InternalCatalog.INTERNAL_CATALOG_NAME
-                : params.catalog;
-        DatabaseIf<TableIf> db = Env.getCurrentEnv().getCatalogMgr()
-                .getCatalogOrException(catalogName, catalog -> new TException("Unknown catalog " + catalog))
-                .getDbNullable(dbName);
-        if (db != null) {
-            TableIf table = db.getTableNullableIfException(params.getTableName());
-            if (table != null) {
-                table.readLock();
-                try {
-                    List<Column> baseSchema = table.getBaseSchemaOrEmpty();
-                    for (Column column : baseSchema) {
-                        final TColumnDesc desc = new TColumnDesc(column.getName(), column.getDataType().toThrift());
-                        final Integer precision = column.getOriginType().getPrecision();
-                        if (precision != null) {
-                            desc.setColumnPrecision(precision);
-                        }
-                        final Integer columnLength = column.getOriginType().getColumnSize();
-                        if (columnLength != null) {
-                            desc.setColumnLength(columnLength);
-                        }
-                        final Integer decimalDigits = column.getOriginType().getDecimalDigits();
-                        if (decimalDigits != null) {
-                            desc.setColumnScale(decimalDigits);
-                        }
-                        desc.setIsAllowNull(column.isAllowNull());
-                        final TColumnDef colDef = new TColumnDef(desc);
-                        final String comment = column.getComment();
-                        if (comment != null) {
-                            colDef.setComment(comment);
-                        }
-                        if (column.isKey()) {
-                            if (table instanceof OlapTable) {
-                                desc.setColumnKey(((OlapTable) table).getKeysType().toMetadata());
-                            }
-                        }
-                        columns.add(colDef);
-                    }
-                } finally {
-                    table.readUnlock();
-                }
-            }
-        }
-        return result;
-    }
-
     @Override
     public TDescribeTablesResult describeTables(TDescribeTablesParams params) throws TException {
         if (LOG.isDebugEnabled()) {
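
With describeTable() gone, describeTables() is the remaining path, and callers batch table names into one request instead of issuing one RPC per table. A rough C++ sketch of the shape of that batching; the struct is a hypothetical stand-in for the Thrift-generated TDescribeTablesParams, not the real generated class:

#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for TDescribeTablesParams.
struct DescribeTablesParams {
    std::string db;
    std::vector<std::string> tables_name;  // one entry per table, single RPC
};

int main() {
    // One batched request replaces N describeTable() round trips.
    DescribeTablesParams req;
    req.db = "demo_db";
    req.tables_name = {"t1", "t2", "t3"};
    std::cout << "describing " << req.tables_name.size() << " tables from "
              << req.db << " in a single RPC\n";
    return 0;
}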
diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java
index 31fffe6a332..2885dc12caf 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java
@@ -55,8 +55,6 @@ import org.apache.doris.thrift.TStreamLoadRecordResult;
 import org.apache.doris.thrift.TSyncLoadForTabletsRequest;
 import org.apache.doris.thrift.TSyncLoadForTabletsResponse;
 import org.apache.doris.thrift.TTabletStatResult;
-import org.apache.doris.thrift.TTransmitDataParams;
-import org.apache.doris.thrift.TTransmitDataResult;
 import org.apache.doris.thrift.TUniqueId;
 import org.apache.doris.thrift.TWarmUpCacheAsyncRequest;
 import org.apache.doris.thrift.TWarmUpCacheAsyncResponse;
@@ -135,11 +133,6 @@ public class GenericPoolTest {
             return new TCancelPlanFragmentResult();
         }
 
-        @Override
-        public TTransmitDataResult transmitData(TTransmitDataParams params) {
-            return new TTransmitDataResult();
-        }
-
         @Override
         public TAgentResult submitTasks(List<TAgentTaskRequest> tasks) throws TException {
             return null;
diff --git a/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java b/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java
index 1a9a175366e..680b3e3641e 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java
@@ -80,8 +80,6 @@ import org.apache.doris.thrift.TSyncLoadForTabletsResponse;
 import org.apache.doris.thrift.TTabletInfo;
 import org.apache.doris.thrift.TTabletStatResult;
 import org.apache.doris.thrift.TTaskType;
-import org.apache.doris.thrift.TTransmitDataParams;
-import org.apache.doris.thrift.TTransmitDataResult;
 import org.apache.doris.thrift.TUniqueId;
 import org.apache.doris.thrift.TWarmUpCacheAsyncRequest;
 import org.apache.doris.thrift.TWarmUpCacheAsyncResponse;
@@ -366,11 +364,6 @@ public class MockedBackendFactory {
             return null;
         }
 
-        @Override
-        public TTransmitDataResult transmitData(TTransmitDataParams params) throws TException {
-            return null;
-        }
-
         @Override
         public TAgentResult submitTasks(List<TAgentTaskRequest> tasks) throws TException {
             for (TAgentTaskRequest request : tasks) {
@@ -507,13 +500,6 @@ public class MockedBackendFactory {
 
     // The default Brpc service.
     public static class DefaultPBackendServiceImpl extends PBackendServiceGrpc.PBackendServiceImplBase {
-        @Override
-        public void transmitData(InternalService.PTransmitDataParams request,
-                                 StreamObserver<InternalService.PTransmitDataResult> responseObserver) {
-            responseObserver.onNext(InternalService.PTransmitDataResult.newBuilder()
-                    .setStatus(Types.PStatus.newBuilder().setStatusCode(0)).build());
-            responseObserver.onCompleted();
-        }
 
         @Override
         public void execPlanFragment(InternalService.PExecPlanFragmentRequest request,
diff --git a/gensrc/proto/internal_service.proto b/gensrc/proto/internal_service.proto
index 547b2588168..837d3f4a941 100644
--- a/gensrc/proto/internal_service.proto
+++ b/gensrc/proto/internal_service.proto
@@ -990,8 +990,6 @@ message PGetBeResourceResponse {
 }
 
 service PBackendService {
-    rpc transmit_data(PTransmitDataParams) returns (PTransmitDataResult);
-    rpc transmit_data_by_http(PEmptyRequest) returns (PTransmitDataResult);
     // If #fragments of a query is < 3, use exec_plan_fragment directly.
     // If #fragments of a query is >=3, use exec_plan_fragment_prepare + exec_plan_fragment_start
     rpc exec_plan_fragment(PExecPlanFragmentRequest) returns (PExecPlanFragmentResult);
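
The comment kept in the service definition above encodes a small submission protocol: queries with fewer than 3 fragments go through the one-shot exec_plan_fragment, while larger queries use the two-phase exec_plan_fragment_prepare + exec_plan_fragment_start. A sketch of that dispatch rule; the free functions below stand in for the generated brpc stubs:

#include <iostream>

// Placeholder senders; the real calls are brpc stubs generated from
// internal_service.proto.
void exec_plan_fragment(int id) { std::cout << "exec fragment " << id << "\n"; }
void exec_plan_fragment_prepare(int id) { std::cout << "prepare fragment " << id << "\n"; }
void exec_plan_fragment_start(int id) { std::cout << "start fragment " << id << "\n"; }

void submit_query(int num_fragments) {
    if (num_fragments < 3) {
        // Small query: one-shot submission per fragment.
        for (int i = 0; i < num_fragments; ++i) exec_plan_fragment(i);
    } else {
        // Larger query: two-phase submit, so every fragment is prepared
        // before any of them starts.
        for (int i = 0; i < num_fragments; ++i) exec_plan_fragment_prepare(i);
        for (int i = 0; i < num_fragments; ++i) exec_plan_fragment_start(i);
    }
}

int main() {
    submit_query(2);
    submit_query(4);
    return 0;
}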
diff --git a/gensrc/thrift/BackendService.thrift b/gensrc/thrift/BackendService.thrift
index aed248adfe8..3c2f256bac9 100644
--- a/gensrc/thrift/BackendService.thrift
+++ b/gensrc/thrift/BackendService.thrift
@@ -357,11 +357,6 @@ service BackendService {
     PaloInternalService.TCancelPlanFragmentResult cancel_plan_fragment(
         1:PaloInternalService.TCancelPlanFragmentParams params);
 
-    // Called by sender to transmit single row batch. Returns error indication
-    // if params.fragmentId or params.destNodeId are unknown or if data couldn't be read.
-    PaloInternalService.TTransmitDataResult transmit_data(
-        1:PaloInternalService.TTransmitDataParams params);
-
     AgentService.TAgentResult submit_tasks(1:list<AgentService.TAgentTaskRequest> tasks);
 
     AgentService.TAgentResult make_snapshot(1:AgentService.TSnapshotRequest snapshot_request);
diff --git a/gensrc/thrift/Data.thrift b/gensrc/thrift/Data.thrift
index dc1190c6e42..17007a08526 100644
--- a/gensrc/thrift/Data.thrift
+++ b/gensrc/thrift/Data.thrift
@@ -20,32 +20,6 @@ namespace java org.apache.doris.thrift
 
 include "Types.thrift"
 
-// Serialized, self-contained version of a RowBatch (in be/src/runtime/row-batch.h).
-struct TRowBatch {
-  // total number of rows contained in this batch
-  1: required i32 num_rows
-
-  // row composition
-  2: required list<Types.TTupleId> row_tuples
-
-  // There are a total of num_rows * num_tuples_per_row offsets
-  // pointing into tuple_data.
-  // An offset of -1 records a NULL.
-  3: list<i32> tuple_offsets
-
-  // binary tuple data
-  // TODO: figure out how we can avoid copying the data during TRowBatch construction
-  4: string tuple_data
-
-  // Indicates whether tuple_data is snappy-compressed
-  5: bool is_compressed
-
-  // backend num, source
-  6: i32 be_number
-  // packet seq
-  7: i64 packet_seq
-}
-
 // this is a union over all possible return types
 struct TCell {
   // TODO: use <type>_val instead of camelcase
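
The removed TRowBatch documents its own wire layout: num_rows * tuples-per-row offsets into tuple_data, with an offset of -1 recording a NULL. A small decoding sketch of that convention, with a plain struct standing in for the generated Thrift class:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Plain stand-in for the removed TRowBatch.
struct RowBatch {
    int32_t num_rows;
    int32_t tuples_per_row;
    std::vector<int32_t> tuple_offsets;  // num_rows * tuples_per_row entries; -1 == NULL
    std::string tuple_data;              // packed binary tuples
};

int main() {
    // Two rows, one tuple per row; row 1's tuple is NULL.
    RowBatch b{2, 1, {0, -1}, std::string("\x2a\x00\x00\x00", 4)};
    for (int32_t row = 0; row < b.num_rows; ++row) {
        for (int32_t t = 0; t < b.tuples_per_row; ++t) {
            int32_t off = b.tuple_offsets[row * b.tuples_per_row + t];
            if (off == -1) {
                std::cout << "row " << row << ": NULL tuple\n";
            } else {
                std::cout << "row " << row << ": tuple at offset " << off << "\n";
            }
        }
    }
    return 0;
}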
diff --git a/gensrc/thrift/Ddl.thrift b/gensrc/thrift/Ddl.thrift
deleted file mode 100644
index 9696230af90..00000000000
--- a/gensrc/thrift/Ddl.thrift
+++ /dev/null
@@ -1,365 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-namespace cpp doris
-namespace java org.apache.doris.thrift
-
-include "Partitions.thrift"
-include "Types.thrift"
-include "Status.thrift"
-
-struct TDdlResult {
-  // required in V1
-  1: optional Status.TStatus status
-}
-
-enum TCommonDdlType {
-    CREATE_DATABASE
-    DROP_DATABASE
-    CREATE_TABLE
-    DROP_TABLE
-    LOAD 
-}
-
-// Parameters of CREATE DATABASE command
-struct TCreateDbParams {
-    // database name to create
-    1: required string database_name
-}
-
-// Parameters of DROP DATABASE command
-struct TDropDbParams {
-    // database name to drop
-    1: required string database_name
-}
-
-// database_name + table_name
-struct TTableName {
-    1: required string db_name
-    2: required string table_name
-}
-
-// supported aggregation type
-enum TAggType {
-    AGG_SUM
-    AGG_MIN
-    AGG_MAX
-    AGG_REPLACE
-}
-
-// column defination
-//struct TColumn {
-//    // column name
-//    1: required string column_name
-//
-//    // column type
-//    2: required Types.TColumnType column_type
-//
-//    // aggregation type, if not set, this column is a KEY column, otherwise is a value column
-//    3: optional TAggType agg_type
-//    
-//    // default value
-//    4: optional string default_value 
-//}
-
-enum THashType {
-    CRC32
-}
-
-// random partition info
-struct TRandomPartitionDesc {
-}
-
-// hash partition info
-struct THashPartitionDesc {
-    // column to compute hash value
-    1: required list<string> column_list
-
-    // hash buckets
-    2: required i32 hash_buckets
-
-    // type to compute hash value. if not set, use CRC32
-    3: optional THashType hash_type
-}
-
-// value used to represents one column value in one range value
-struct TValue {
-    1: optional string value
-
-    // if this sign is set and is true, this value is stand for MAX value
-    2: optional bool max_sign
-}
-
-// one range value
-struct TRangeValue {
-    1: required list<TValue> value_list
-}
-
-// range partition defination
-struct TRangePartitionDesc {
-    // column used to compute range
-    1: required list<string> column_list
-
-    // range value for range, if not set, all in one range
-    2: optional list<TRangeValue> range_value
-}
-
-// partition info
-struct TPartitionDesc {
-    // partition type
-    1: required Partitions.TPartitionType type
-    // hash buckets
-    2: required i32 partition_num
-
-    // hash partition information
-    3: optional THashPartitionDesc hash_partition
-
-    // range partition information
-    4: optional TRangePartitionDesc range_partition
-
-    // random partition infomation
-    5: optional TRandomPartitionDesc random_partition
-}
-
-// Parameters of CREATE TABLE command
-struct TCreateTableParams {
-    // table name to create
-    1: required TTableName table_name
-
-    // column defination.
-    // 2: required list<TColumn> columns
-
-    // engine type, if not set, use the default type.
-    3: optional string engine_name
-
-    // if set and true, no error when there is already table with same name
-    4: optional bool if_not_exists
-
-    // partition info, if not set, use the default partition type which meta define.
-    5: optional TPartitionDesc partition_desc
-
-    // used to set row format, maybe columnar or row format
-    6: optional string row_format_type
-
-    // other properties
-    7: optional map<string, string> properties
-}
-
-// Parameters of DROP TABLE command
-struct TDropTableParams {
-    // table name to drop
-    1: required TTableName table_name
-
-    // If true, no error is raised if the target db does not exist
-    2: optional bool if_exists
-}
-
-// Parameters to CREATE ROLLUP
-struct TCreateRollupParams {
-    // table name which create rollup
-    1: required TTableName table_name
-
-    // column names ROLLUP contains
-    2: required list<string> column_names
-
-    // rollup name, if not set, meta will assign a default value
-    3: optional string rollup_name
-
-    // partition info, if not set, use the base table's 
-    4: optional TPartitionDesc partition_desc
-}
-
-// Parameters to DROP ROLLUP
-struct TDropRollupParams {
-    // table name which create rollup
-    1: required TTableName table_name
-
-    // rollup name to drop
-    2: required string rollup_name
-}
-
-// Parameters for SCHEMA CHANGE
-// struct TShcemaChangeParams {
-//     // table name need to schema change
-//     1: required TTableName table_name
-// 
-//     // column definations for this table
-//     2: required list<TColumn> column_defs
-// 
-//     // rollup schema, map is 'rollup_name' -> 'list of column_name'
-//     3: required map<string, list<string>> rollup_defs
-// }
-
-// Parameters to create function
-struct TCreateFunctionParams {
-    // database name which function to create is in
-    1: required string db_name
-
-    // function name to create
-    2: required string function_name
-
-    // function argument type
-    3: required list<Types.TColumnType> argument_type
-
-    // function return type
-    4: required Types.TColumnType return_type
-
-    // function dynamic library path
-    5: required string so_file_path
-
-    // other properties
-    6: optional map<string, string> properties
-}
-
-// Parameters to drop function
-struct TDropFunctionParams {
-    // database name which function to drop is in
-    1: required string db_name
-
-    // function name to drop
-    2: required string function_name
-}
-
-// enum TSetType {
-//     SESSION
-//     GLOBAL
-// }
-// 
-// // Parameters to SET opration
-// struct TSetParams {
-//     // set type, GLOBAL\SESSION
-//     1: required TSetType type
-// 
-//     // set pairs, one name and one Expr
-//     // 2: required map<string, TExpr> set_content
-// }
-
-struct TUserSpecification {
-    1: required string user_name
-    2: optional string host_name
-}
-
-// Parameters to create user
-struct TCreateUserParams {
-    1: required TUserSpecification user_spec
-
-    // user's password
-    2: optional string password 
-}
-
-// Parameters to drop user
-struct TDropUserParams {
-    // user name to drop
-    1: required string user_spec
-}
-
-// Parameters to SET PASSWORD
-struct TSetPasswordParams {
-    1: required TUserSpecification user_spec
-
-    // password will changed to after this opration
-    3: required string password
-}
-
-enum TPrivType {
-    PRIVILEGE_READ_ONLY
-    PRIVILEGE_READ_WRITE
-}
-
-// Parameters to GRANT
-struct TGrantParams {
-    1: required TUserSpecification user_spec
-
-    // database to grant
-    3: required string db_name
-
-    // privileges to grant
-    4: required list<TPrivType> priv_types
-}
-
-// Data info 
-struct TDataSpecification {
-    // database name which table belongs to
-    1: required TTableName table_name
-
-    // all file pathes need to load
-    3: required list<string> file_path
-
-    // column names in file
-    4: optional list<string> columns
-
-    // column separator
-    5: optional string column_separator
-
-    // line separator
-    6: optional string line_separator
-
-    // if true, value will be multiply with -1
-    7: optional bool is_negative
-}
-
-struct TLabelName {
-    // database name which load_label belongs to 
-    1: required string db_name
-
-    // load label which to be canceled.
-    2: required string load_label
-}
-
-// Parameters to LOAD file
-struct TLoadParams {
-    // label belong to this load job, used when cancel load, show load
-    1: required TLabelName load_label
-
-    // data profiles used to load in this job
-    2: required list<TDataSpecification> data_profiles
-    
-    // task info
-    3: optional map<string, string> properties
-}
-
-// Parameters to CANCEL LOAD file
-struct TCancelLoadParams {
-    1: required TLabelName load_label
-}
-
-enum TPaloInternalServiceVersion {
-    V1
-}
-
-struct TMasterDdlRequest {
-    1: required TPaloInternalServiceVersion protocol_version
-    2: required TCommonDdlType ddl_type
-    3: optional TCreateDbParams create_db_params
-    4: optional TDropDbParams drop_db_params
-    // 5: optional TCreateTableParams create_table_params
-    6: optional TDropTableParams drop_table_params
-    7: optional TLoadParams load_params
-    8: optional TCancelLoadParams cancel_load_params
-    9: optional TCreateUserParams create_user_params
-    10: optional TDropUserParams drop_user_params
-    11: optional TCreateRollupParams create_rollup_params
-    12: optional TDropRollupParams drop_rollup_params
-    13: optional TCreateFunctionParams create_function_params
-    14: optional TDropFunctionParams drop_function_params
-}
-
-struct TMasterDdlResponse {
-    1: required TPaloInternalServiceVersion protocol_version
-    2: required TCommonDdlType ddl_type
-    3: optional Status.TStatus status
-}
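
Of the deleted Ddl.thrift structures, THashPartitionDesc carried the most reusable idea: hash the listed columns (CRC32 by default, per THashType) and take the result modulo hash_buckets. A stand-alone sketch of that bucket assignment; concatenating the column values and the bitwise CRC32 are simplifications for illustration:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Minimal reflected CRC32 (poly 0xEDB88320), for illustration only.
uint32_t crc32(const std::string& s) {
    uint32_t crc = 0xFFFFFFFFu;
    for (unsigned char c : s) {
        crc ^= c;
        for (int i = 0; i < 8; ++i)
            crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
    }
    return ~crc;
}

// Mirrors the removed THashPartitionDesc: hash the partition columns'
// values and bucket by modulo hash_buckets.
int bucket_for(const std::vector<std::string>& column_values, int hash_buckets) {
    std::string key;
    for (const auto& v : column_values) key += v;
    return static_cast<int>(crc32(key) % static_cast<uint32_t>(hash_buckets));
}

int main() {
    std::cout << bucket_for({"user_42", "2025-01-07"}, 16) << "\n";
    return 0;
}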
diff --git a/gensrc/thrift/FrontendService.thrift b/gensrc/thrift/FrontendService.thrift
index 8f7e3432a78..793f6f669f2 100644
--- a/gensrc/thrift/FrontendService.thrift
+++ b/gensrc/thrift/FrontendService.thrift
@@ -35,15 +35,6 @@ include "HeartbeatService.thrift"
 // These are supporting structs for JniFrontend.java, which serves as the glue
 // between our C++ execution environment and the Java frontend.
 
-struct TSetSessionParams {
-    1: required string user
-}
-
-struct TAuthenticateParams {
-    1: required string user
-    2: required string passwd
-}
-
 struct TColumnDesc {
   1: required string columnName
   2: required Types.TPrimitiveType columnType
@@ -64,23 +55,6 @@ struct TColumnDef {
   2: optional string comment
 }
 
-// Arguments to DescribeTable, which returns a list of column descriptors for a
-// given table
-struct TDescribeTableParams {
-  1: optional string db
-  2: required string table_name
-  3: optional string user   // deprecated
-  4: optional string user_ip    // deprecated
-  5: optional Types.TUserIdentity current_user_ident // to replace the user and user ip
-  6: optional bool show_hidden_columns = false
-  7: optional string catalog
-}
-
-// Results of a call to describeTable()
-struct TDescribeTableResult {
-  1: required list<TColumnDef> columns
-}
-
 // Arguments to DescribeTables, which returns a list of column descriptors for
 // given tables
 struct TDescribeTablesParams {
@@ -110,197 +84,6 @@ struct TShowVariableResult {
     1: required list<list<string>> variables
 }
 
-// Valid table file formats
-enum TFileFormat {
-  PARQUETFILE,
-  RCFILE,
-  SEQUENCEFILE,
-  TEXTFILE,
-}
-
-// set type
-enum TSetType {
-  OPT_DEFAULT,
-  OPT_GLOBAL,
-  OPT_SESSION,
-}
-
-// The row format specifies how to interpret the fields (columns) and lines (rows) in a
-// data file when creating a new table.
-struct TTableRowFormat {
-  // Optional terminator string used to delimit fields (columns) in the table
-  1: optional string field_terminator
-
-  // Optional terminator string used to delimit lines (rows) in a table
-  2: optional string line_terminator
-
-  // Optional string used to specify a special escape character sequence
-  3: optional string escaped_by
-}
-
-
-// Represents a single item in a partition spec (column name + value)
-struct TPartitionKeyValue {
-  // Partition column name
-  1: required string name,
-
-  // Partition value
-  2: required string value
-}
-
-// Per-client session state
-struct TSessionState {
-  // The default database, changed by USE <database> queries.
-  1: required string database
-
-  // The user who this session belongs to.
-  2: required string user
-
-  // The user who this session belongs to.
-  3: required i64 connection_id
-}
-
-struct TClientRequest {
-  // select stmt to be executed
-  1: required string stmt
-
-  // query options
-  2: required PaloInternalService.TQueryOptions queryOptions
-
-  // session state
-  3: required TSessionState sessionState;
-}
-
-
-// Parameters for SHOW DATABASES commands
-struct TExplainParams {
-  // Optional pattern to match database names. If not set, all databases are returned.
-  1: required string explain
-}
-
-struct TSetVar{
-    1: required TSetType type
-    2: required string variable
-    3: required Exprs.TExpr value
-}
-// Parameters for Set commands
-struct TSetParams {
-  // Optional pattern to match database names. If not set, all databases are returned.
-  1: required list<TSetVar> set_vars
-}
-
-struct TKillParams {
-  // Optional pattern to match database names. If not set, all databases are returned.
-  1: required bool is_kill_connection
-  2: required i64 connection_id
-}
-
-struct TCommonDdlParams {
-  //1: required Ddl.TCommonDdlType ddl_type
-  //2: optional Ddl.TCreateDbParams create_db_params
-  //3: optional Ddl.TCreateTableParams create_table_params
-  //4: optional Ddl.TLoadParams load_params
-}
-
-// Parameters for the USE db command
-struct TUseDbParams {
-  1: required string db
-}
-
-struct TResultSetMetadata {
-  1: required list<TColumnDesc> columnDescs
-}
-
-// Result of call to PaloPlanService/JniFrontend.CreateQueryRequest()
-struct TQueryExecRequest {
-  // global descriptor tbl for all fragments
-  1: optional Descriptors.TDescriptorTable desc_tbl
-
-  // fragments[i] may consume the output of fragments[j > i];
-  // fragments[0] is the root fragment and also the coordinator fragment, if
-  // it is unpartitioned.
-  2: required list<Planner.TPlanFragment> fragments
-
-  // Specifies the destination fragment of the output of each fragment.
-  // parent_fragment_idx.size() == fragments.size() - 1 and
-  // fragments[i] sends its output to fragments[dest_fragment_idx[i-1]]
-  3: optional list<i32> dest_fragment_idx
-
-  // A map from scan node ids to a list of scan range locations.
-  // The node ids refer to scan nodes in fragments[].plan_tree
-  4: optional map<Types.TPlanNodeId, list<Planner.TScanRangeLocations>>
-      per_node_scan_ranges
-
-  // Metadata of the query result set (only for select)
-  5: optional TResultSetMetadata result_set_metadata
-
-  7: required PaloInternalService.TQueryGlobals query_globals
-
-  // The statement type governs when the coordinator can judge a query to be finished.
-  // DML queries are complete after Wait(), SELECTs may not be.
-  9: required Types.TStmtType stmt_type
-
-  // The statement type governs when the coordinator can judge a query to be finished.
-  // DML queries are complete after Wait(), SELECTs may not be.
-  10: optional bool is_block_query;
-}
-
-enum TDdlType {
-  USE,
-  DESCRIBE,
-  SET,
-  EXPLAIN,
-  KILL,
-  COMMON
-}
-
-struct TDdlExecRequest {
-  1: required TDdlType ddl_type
-
-  // Parameters for USE commands
-  2: optional TUseDbParams use_db_params;
-
-  // Parameters for DESCRIBE table commands
-  3: optional TDescribeTableParams describe_table_params
-
-  10: optional TExplainParams explain_params
-
-  11: optional TSetParams set_params
-  12: optional TKillParams kill_params
-  //13: optional Ddl.TMasterDdlRequest common_params
-}
-
-// Results of an EXPLAIN
-struct TExplainResult {
-    // each line in the explain plan occupies an entry in the list
-    1: required list<Data.TResultRow> results
-}
-
-// Result of call to createExecRequest()
-struct TExecRequest {
-  1: required Types.TStmtType stmt_type;
-
-  2: optional string sql_stmt;
-
-  // Globally unique id for this request. Assigned by the planner.
-  3: required Types.TUniqueId request_id
-
-  // Copied from the corresponding TClientRequest
-  4: required PaloInternalService.TQueryOptions query_options;
-
-  // TQueryExecRequest for the backend
-  // Set iff stmt_type is QUERY or DML
-  5: optional TQueryExecRequest query_exec_request
-
-  // Set iff stmt_type is DDL
-  6: optional TDdlExecRequest ddl_exec_request
-
-  // Metadata of the query result set (not set for DML)
-  7: optional TResultSetMetadata result_set_metadata
-
-  // Result of EXPLAIN. Set iff stmt_type is EXPLAIN
-  8: optional TExplainResult explain_result
-}
 
 // Arguments to getDbNames, which returns a list of dbs that match an optional
 // pattern
@@ -1718,7 +1501,6 @@ struct TFetchRunningQueriesRequest {
 service FrontendService {
     TGetDbsResult getDbNames(1: TGetDbsParams params)
     TGetTablesResult getTableNames(1: TGetTablesParams params)
-    TDescribeTableResult describeTable(1: TDescribeTableParams params)
     TDescribeTablesResult describeTables(1: TDescribeTablesParams params)
     TShowVariableResult showVariables(1: TShowVariableRequest params)
     TReportExecStatusResult reportExecStatus(1: TReportExecStatusParams params)
diff --git a/gensrc/thrift/PaloInternalService.thrift b/gensrc/thrift/PaloInternalService.thrift
index 39f3b65818f..c219daef5d1 100644
--- a/gensrc/thrift/PaloInternalService.thrift
+++ b/gensrc/thrift/PaloInternalService.thrift
@@ -27,7 +27,6 @@ include "Planner.thrift"
 include "DataSinks.thrift"
 include "Data.thrift"
 include "RuntimeProfile.thrift"
-include "PaloService.thrift"
 
 // constants for TQueryOptions.num_nodes
 const i32 NUM_NODES_ALL = 0
@@ -623,11 +622,6 @@ struct TCancelPlanFragmentResult {
   1: optional Status.TStatus status
 }
 
-// fold constant expr
-struct TExprMap {
-  1: required map<string, Exprs.TExpr> expr_map
-}
-
 struct TFoldConstantParams {
   1: required map<string, map<string, Exprs.TExpr>> expr_map
   2: required TQueryGlobals query_globals
@@ -650,9 +644,6 @@ struct TTransmitDataParams {
   // required in V1
   4: optional Types.TPlanNodeId dest_node_id
 
-  // required in V1
-  5: optional Data.TRowBatch row_batch
-
   // if set to true, indicates that no more row batches will be sent
   // for this dest_node_id
   6: optional bool eos
@@ -677,66 +668,6 @@ struct TTabletWithPartition {
     2: required i64 tablet_id
 }
 
-// open a tablet writer
-struct TTabletWriterOpenParams {
-    1: required Types.TUniqueId id
-    2: required i64 index_id
-    3: required i64 txn_id
-    4: required Descriptors.TOlapTableSchemaParam schema
-    5: required list<TTabletWithPartition> tablets
-
-    6: required i32 num_senders
-}
-
-struct TTabletWriterOpenResult {
-    1: required Status.TStatus status
-}
-
-// add batch to tablet writer
-struct TTabletWriterAddBatchParams {
-    1: required Types.TUniqueId id
-    2: required i64 index_id
-
-    3: required i64 packet_seq
-    4: required list<Types.TTabletId> tablet_ids
-    5: required Data.TRowBatch row_batch
-
-    6: required i32 sender_no
-}
-
-struct TTabletWriterAddBatchResult {
-    1: required Status.TStatus status
-}
-
-struct TTabletWriterCloseParams {
-    1: required Types.TUniqueId id
-    2: required i64 index_id
-
-    3: required i32 sender_no
-}
-
-struct TTabletWriterCloseResult {
-    1: required Status.TStatus status
-}
-
-//
-struct TTabletWriterCancelParams {
-    1: required Types.TUniqueId id
-    2: required i64 index_id
-
-    3: required i32 sender_no
-}
-
-struct TTabletWriterCancelResult {
-}
-
-struct TFetchDataParams {
-  1: required PaloInternalServiceVersion protocol_version
-  // required in V1
-  // query id which want to fetch data
-  2: required Types.TUniqueId fragment_instance_id
-}
-
 struct TFetchDataResult {
     // result batch
     1: required Data.TResultBatch result_batch
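
Note that TTransmitDataParams loses only its row_batch payload; the eos flag survives, and per its comment it is the stream terminator: eos=true means no more batches will be sent for that dest_node_id. A receiver-side sketch of that convention, where the map-based channel registry is a hypothetical simplification:

#include <cstdint>
#include <iostream>
#include <map>
#include <utility>

// Receiver-side bookkeeping for eos, as described by the thrift comment:
// eos=true means no more batches for this (instance, node) stream.
int main() {
    std::map<std::pair<int64_t, int32_t>, bool> open_streams;
    auto on_transmit = [&](int64_t instance_id, int32_t node_id, bool eos) {
        auto key = std::make_pair(instance_id, node_id);
        open_streams[key] = true;  // (re)mark stream active on any packet
        if (eos) {
            open_streams.erase(key);
            std::cout << "closed stream node=" << node_id << "\n";
        }
    };
    on_transmit(7, 3, /*eos=*/false);
    on_transmit(7, 3, /*eos=*/true);
    std::cout << open_streams.size() << " streams still open\n";
    return 0;
}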
diff --git a/gensrc/thrift/PaloService.thrift b/gensrc/thrift/PaloService.thrift
deleted file mode 100644
index 6dd7743244e..00000000000
--- a/gensrc/thrift/PaloService.thrift
+++ /dev/null
@@ -1,100 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-namespace cpp doris
-namespace java org.apache.doris.thrift
-
-include "Status.thrift"
-
-// PaloService accepts query execution options through beeswax.Query.configuration in
-// key:value form. For example, the list of strings could be:
-//     "num_nodes:1", "abort_on_error:false"
-// The valid keys are listed in this enum. They map to TQueryOptions.
-// Note: If you add an option or change the default, you also need to update:
-// - PaloService.DEFAULT_QUERY_OPTIONS
-// - PaloInternalService.thrift: TQueryOptions
-// - PalodClientExecutor.getBeeswaxQueryConfigurations()
-// - PaloServer::SetQueryOptions()
-// - PaloServer::TQueryOptionsToMap()
-enum TPaloQueryOptions {
-  // if true, abort execution on the first error
-  ABORT_ON_ERROR,
-  
-  // maximum # of errors to be reported; Unspecified or 0 indicates backend default
-  MAX_ERRORS,
-  
-  // if true, disable llvm codegen
-  DISABLE_CODEGEN,
-  
-  // batch size to be used by backend; Unspecified or a size of 0 indicates backend
-  // default
-  BATCH_SIZE,
-
-  // a per-machine approximate limit on the memory consumption of this query;
-  // unspecified or a limit of 0 means no limit;
-  // otherwise specified either as:
-  // a) an int (= number of bytes);
-  // b) a float followed by "M" (MB) or "G" (GB)
-  MEM_LIMIT,
-   
-  // specifies the degree of parallelism with which to execute the query;
-  // 1: single-node execution
-  // NUM_NODES_ALL: executes on all nodes that contain relevant data
-  // NUM_NODES_ALL_RACKS: executes on one node per rack that holds relevant data
-  // > 1: executes on at most that many nodes at any point in time (ie, there can be
-  //      more nodes than numNodes with plan fragments for this query, but at most
-  //      numNodes would be active at any point in time)
-  // Constants (NUM_NODES_ALL, NUM_NODES_ALL_RACKS) are defined in JavaConstants.thrift.
-  NUM_NODES,
-  
-  // maximum length of the scan range; only applicable to HDFS scan range; Unspecified or
-  // a length of 0 indicates backend default;  
-  MAX_SCAN_RANGE_LENGTH,
-  
-  // Maximum number of io buffers (per disk)
-  MAX_IO_BUFFERS,
-
-  // Number of scanner threads.
-  NUM_SCANNER_THREADS,
-
-  QUERY_TIMEOUT,
-
-  // If true, Palo will try to execute on file formats that are not fully supported yet
-  ALLOW_UNSUPPORTED_FORMATS,
-
-  // if set and > -1, specifies the default limit applied to a top-level SELECT statement
-  // with an ORDER BY but without a LIMIT clause (ie, if the SELECT statement also has
-  // a LIMIT clause, this default is ignored)
-  DEFAULT_ORDER_BY_LIMIT,
-
-  // DEBUG ONLY:
-  // If set to
-  //   "[<backend number>:]<node id>:<TExecNodePhase>:<TDebugAction>",
-  // the exec node with the given id will perform the specified action in the given
-  // phase. If the optional backend number (starting from 0) is specified, only that
-  // backend instance will perform the debug action, otherwise all backends will behave
-  // in that way.
-  // If the string doesn't have the required format or if any of its components is
-  // invalid, the option is ignored. 
-  DEBUG_ACTION,
-  
-  // If true, raise an error when the DEFAULT_ORDER_BY_LIMIT has been reached.
-  ABORT_ON_DEFAULT_LIMIT_EXCEEDED,
-
-  // If false, the backend dosn't report the success status to coordiator
-  IS_REPORT_SUCCESS,
-}
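
Even deleted, the TPaloQueryOptions comments still pin down the option grammar; MEM_LIMIT, for instance, accepts a plain integer byte count or a float followed by "M" (MB) or "G" (GB). A parsing sketch of exactly that grammar; the error handling is deliberately minimal, and returning -1 on bad input is an assumption of this sketch:

#include <cstdint>
#include <iostream>
#include <string>

// Parse the MEM_LIMIT grammar described above: an integer byte count,
// or a float followed by "M" (MB) or "G" (GB). Returns -1 on bad input.
int64_t parse_mem_limit(const std::string& s) {
    if (s.empty()) return -1;
    char suffix = s.back();
    try {
        if (suffix == 'M' || suffix == 'G') {
            double v = std::stod(s.substr(0, s.size() - 1));
            int64_t mult = (suffix == 'M') ? (1LL << 20) : (1LL << 30);
            return static_cast<int64_t>(v * mult);
        }
        return std::stoll(s);  // plain number of bytes
    } catch (...) {
        return -1;
    }
}

int main() {
    std::cout << parse_mem_limit("1048576") << "\n";  // 1048576
    std::cout << parse_mem_limit("1.5M") << "\n";     // 1572864
    std::cout << parse_mem_limit("2G") << "\n";       // 2147483648
    return 0;
}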
diff --git a/gensrc/thrift/PlanNodes.thrift b/gensrc/thrift/PlanNodes.thrift
index 1345cac66cb..9e7c90908f4 100644
--- a/gensrc/thrift/PlanNodes.thrift
+++ b/gensrc/thrift/PlanNodes.thrift
@@ -61,21 +61,6 @@ enum TPlanNodeType {
   GROUP_COMMIT_SCAN_NODE
 }
 
-// phases of an execution node
-enum TExecNodePhase {
-  PREPARE,
-  OPEN,
-  GETNEXT,
-  CLOSE,
-  INVALID
-}
-
-// what to do when hitting a debug point (TPaloQueryOptions.DEBUG_ACTION)
-enum TDebugAction {
-  WAIT,
-  FAIL
-}
-
 struct TKeyRange {
   1: required i64 begin_key
   2: required i64 end_key
diff --git a/gensrc/thrift/Types.thrift b/gensrc/thrift/Types.thrift
index c40e6a04838..4dbc1d06054 100644
--- a/gensrc/thrift/Types.thrift
+++ b/gensrc/thrift/Types.thrift
@@ -243,13 +243,6 @@ enum TTaskType {
     CALCULATE_DELETE_BITMAP = 1000
 }
 
-enum TStmtType {
-  QUERY,
-  DDL,  // Data definition, e.g. CREATE TABLE (includes read-only functions e.g. SHOW)
-  DML,  // Data modification e.g. INSERT
-  EXPLAIN   // EXPLAIN
-}
-
 // level of verboseness for "explain" output
 // TODO: should this go somewhere else?
 enum TExplainLevel {

