morningman commented on code in PR #30198: URL: https://github.com/apache/doris/pull/30198#discussion_r1499490325
########## fe/fe-core/src/main/java/org/apache/doris/catalog/external/ExternalMetadataOperations.java: ########## @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one Review Comment: Move this file and `NamedExternalTable.java` to the package `datasource.operations`. In the future, all classes related to multi-catalog are better placed in the `datasource` package. ########## fe/fe-core/src/main/java/org/apache/doris/catalog/external/NamedExternalTable.java: ########## @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.catalog.external; + +import org.apache.doris.datasource.ExternalCatalog; +import org.apache.doris.datasource.ExternalTable; + +/** + * use to save table info. + */ +public class NamedExternalTable extends ExternalTable { Review Comment: This class looks like a common class, but is it only for `HMS_EXTERNAL_TABLE`? ########## fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalCatalog.java: ########## @@ -195,14 +192,15 @@ public void dropDatabase(String dbName) { LOG.warn("drop database [{}] failed", dbName); } idToDb.remove(dbId); + Env.getCurrentEnv().getExtMetaCacheMgr().invalidateDbCache(getId(), dbName); } @Override - public void createDatabase(long dbId, String dbName) { + public void registerDatabase(long dbId, String dbName) { if (LOG.isDebugEnabled()) { LOG.debug("create database [{}]", dbName); } - dbNameToId.put(dbName, dbId); + dbNameToId.put(ClusterNamespace.getNameFromFullName(dbName), dbId); Review Comment: No need to call `ClusterNamespace.getNameFromFullName()` ########## fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergHMSExternalCatalog.java: ########## @@ -44,8 +53,34 @@ protected void initLocalObjectsImpl() { // initialize hive catalog Map<String, String> catalogProperties = new HashMap<>(); String metastoreUris = catalogProperty.getOrDefault(HMSProperties.HIVE_METASTORE_URIS, ""); - catalogProperties.put(CatalogProperties.URI, metastoreUris); + HiveConf hiveConf = new HiveConf(); + for (Map.Entry<String, String> kv : catalogProperty.getHadoopProperties().entrySet()) { + hiveConf.set(kv.getKey(), kv.getValue()); + } + hiveConf.set(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT.name(), + String.valueOf(Config.hive_metastore_client_timeout_second)); + String authentication = catalogProperty.getOrDefault( + AuthenticationConfig.HADOOP_SECURITY_AUTHENTICATION, ""); + if (AuthType.KERBEROS.getDesc().equals(authentication)) { Review Comment: This duplicates code in `HadoopUGI.java`. ########## fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetadataOps.java: ########## @@ -0,0 +1,188 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements.
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.datasource.hive; + +import org.apache.doris.analysis.CreateDbStmt; +import org.apache.doris.analysis.CreateTableStmt; +import org.apache.doris.analysis.DropDbStmt; +import org.apache.doris.analysis.DropTableStmt; +import org.apache.doris.catalog.Env; +import org.apache.doris.catalog.JdbcResource; +import org.apache.doris.catalog.external.NamedExternalTable; +import org.apache.doris.common.Config; +import org.apache.doris.common.DdlException; +import org.apache.doris.common.UserException; +import org.apache.doris.datasource.ExternalDatabase; +import org.apache.doris.datasource.ExternalMetaIdMgr; +import org.apache.doris.datasource.jdbc.client.JdbcClient; +import org.apache.doris.datasource.jdbc.client.JdbcClientConfig; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class HiveMetadataOps implements ExternalMetadataOps { + private static final Logger LOG = LogManager.getLogger(HiveMetadataOps.class); + private static final int MIN_CLIENT_POOL_SIZE = 8; + private JdbcClientConfig jdbcClientConfig; + private HiveConf hiveConf; + private HMSExternalCatalog catalog; + private HMSCachedClient client; + + public HiveMetadataOps(HiveConf hiveConf, JdbcClientConfig jdbcClientConfig, HMSExternalCatalog catalog) { + this.catalog = catalog; + this.hiveConf = hiveConf; + this.jdbcClientConfig = jdbcClientConfig; + this.client = createCachedClient(hiveConf, + Math.max(MIN_CLIENT_POOL_SIZE, Config.max_external_cache_loader_thread_pool_size), jdbcClientConfig); + } + + public HMSCachedClient getClient() { + return client; + } + + public static HMSCachedClient createCachedClient(HiveConf hiveConf, int thriftClientPoolSize, + JdbcClientConfig jdbcClientConfig) { + if (hiveConf != null) { + return new ThriftHMSCachedClient(hiveConf, thriftClientPoolSize); + } + Preconditions.checkNotNull(jdbcClientConfig, "hiveConf and jdbcClientConfig are both null"); + String dbType = JdbcClient.parseDbType(jdbcClientConfig.getJdbcUrl()); + switch (dbType) { + case JdbcResource.POSTGRESQL: + return new PostgreSQLJdbcHMSCachedClient(jdbcClientConfig); + default: + throw new IllegalArgumentException("Unsupported DB type: " + dbType); + } + } + + @Override + public void createDb(CreateDbStmt stmt) throws DdlException { + String fullDbName = stmt.getFullDbName(); + Map<String, String> properties = stmt.getProperties(); + long dbId = Env.getCurrentEnv().getNextId(); + try { + HiveDatabaseMetadata catalogDatabase = new HiveDatabaseMetadata(); + catalogDatabase.setDbName(fullDbName); + catalogDatabase.setProperties(properties); + if 
(properties.containsKey("location_uri")) { + catalogDatabase.setLocationUri(properties.get("location_uri")); + } + catalogDatabase.setComment(properties.getOrDefault("comment", "")); + client.createDatabase(catalogDatabase); + catalog.onRefresh(true); + catalog.registerDatabase(dbId, fullDbName); Review Comment: Is it necessary to call `registerDatabase()` after `onRefresh()`? Because after the refresh, if we want to get a db, it will automatically sync all dbs. ########## fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java: ########## @@ -277,21 +276,13 @@ public List<Index> getIndexes() { @Override public void analyze(Analyzer analyzer) throws UserException { - if (Config.isCloudMode() && properties != null - && properties.containsKey(PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE)) { Review Comment: Why remove this? ########## fe/fe-common/src/main/java/org/apache/doris/common/Config.java: ########## @@ -2201,6 +2201,21 @@ public class Config extends ConfigBase { "Sample size for hive row count estimation."}) public static int hive_stats_partition_sample_size = 3000; + @ConfField(mutable = true, masterOnly = true, description = { + "Hive创建外部表默认指定的input format", + "Default hive input format for creating table."}) + public static String hive_default_input_format = "org.apache.hadoop.mapred.TextInputFormat"; Review Comment: How about defaulting to ORC? ########## fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreClientHelper.java: ########## @@ -584,6 +584,34 @@ private static int findNextNestedField(String commaSplitFields) { return commaSplitFields.length(); } + /** + * Convert doris type to hive type. + */ + public static String dorisTypeToHiveType(Type dorisType) { + if (dorisType.equals(Type.BOOLEAN)) { + return "boolean"; + } else if (dorisType.equals(Type.TINYINT)) { + return "tinyint"; + } else if (dorisType.equals(Type.SMALLINT)) { + return "smallint"; + } else if (dorisType.equals(Type.INT)) { + return "int"; + } else if (dorisType.equals(Type.BIGINT)) { + return "bigint"; + } else if (dorisType.equals(Type.DATE) || dorisType.equals(Type.DATEV2)) { + return "date"; + } else if (dorisType.equals(Type.DATETIME) || dorisType.equals(Type.DATETIMEV2)) { + return "timestamp"; + } else if (dorisType.equals(Type.FLOAT)) { + return "float"; + } else if (dorisType.equals(Type.DOUBLE)) { + return "double"; + } else if (dorisType.equals(Type.STRING)) { + return "string"; + } Review Comment: How about nested types? ########## fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetadataOps.java: ########## @@ -0,0 +1,188 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +package org.apache.doris.datasource.hive; + +import org.apache.doris.analysis.CreateDbStmt; +import org.apache.doris.analysis.CreateTableStmt; +import org.apache.doris.analysis.DropDbStmt; +import org.apache.doris.analysis.DropTableStmt; +import org.apache.doris.catalog.Env; +import org.apache.doris.catalog.JdbcResource; +import org.apache.doris.catalog.external.NamedExternalTable; +import org.apache.doris.common.Config; +import org.apache.doris.common.DdlException; +import org.apache.doris.common.UserException; +import org.apache.doris.datasource.ExternalDatabase; +import org.apache.doris.datasource.ExternalMetaIdMgr; +import org.apache.doris.datasource.jdbc.client.JdbcClient; +import org.apache.doris.datasource.jdbc.client.JdbcClientConfig; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class HiveMetadataOps implements ExternalMetadataOps { + private static final Logger LOG = LogManager.getLogger(HiveMetadataOps.class); + private static final int MIN_CLIENT_POOL_SIZE = 8; + private JdbcClientConfig jdbcClientConfig; + private HiveConf hiveConf; + private HMSExternalCatalog catalog; + private HMSCachedClient client; + + public HiveMetadataOps(HiveConf hiveConf, JdbcClientConfig jdbcClientConfig, HMSExternalCatalog catalog) { + this.catalog = catalog; + this.hiveConf = hiveConf; + this.jdbcClientConfig = jdbcClientConfig; + this.client = createCachedClient(hiveConf, + Math.max(MIN_CLIENT_POOL_SIZE, Config.max_external_cache_loader_thread_pool_size), jdbcClientConfig); + } + + public HMSCachedClient getClient() { + return client; + } + + public static HMSCachedClient createCachedClient(HiveConf hiveConf, int thriftClientPoolSize, + JdbcClientConfig jdbcClientConfig) { + if (hiveConf != null) { + return new ThriftHMSCachedClient(hiveConf, thriftClientPoolSize); + } + Preconditions.checkNotNull(jdbcClientConfig, "hiveConf and jdbcClientConfig are both null"); + String dbType = JdbcClient.parseDbType(jdbcClientConfig.getJdbcUrl()); + switch (dbType) { + case JdbcResource.POSTGRESQL: + return new PostgreSQLJdbcHMSCachedClient(jdbcClientConfig); + default: + throw new IllegalArgumentException("Unsupported DB type: " + dbType); + } + } + + @Override + public void createDb(CreateDbStmt stmt) throws DdlException { + String fullDbName = stmt.getFullDbName(); + Map<String, String> properties = stmt.getProperties(); + long dbId = Env.getCurrentEnv().getNextId(); + try { + HiveDatabaseMetadata catalogDatabase = new HiveDatabaseMetadata(); + catalogDatabase.setDbName(fullDbName); + catalogDatabase.setProperties(properties); + if (properties.containsKey("location_uri")) { + catalogDatabase.setLocationUri(properties.get("location_uri")); + } + catalogDatabase.setComment(properties.getOrDefault("comment", "")); + client.createDatabase(catalogDatabase); + catalog.onRefresh(true); + catalog.registerDatabase(dbId, fullDbName); + } catch (Exception e) { + throw new RuntimeException(e.getMessage(), e); + } + LOG.info("createDb dbName = " + fullDbName + ", id = " + dbId); + } + + @Override + public void dropDb(DropDbStmt stmt) throws DdlException { + String dbName = stmt.getDbName(); + try { + client.dropDatabase(dbName); + catalog.onRefresh(true); + catalog.unregisterDatabase(dbName); + } catch (Exception e) { + throw new 
RuntimeException(e.getMessage(), e); + } + } + + @Override + public void createTable(CreateTableStmt stmt) throws UserException { + String dbName = stmt.getDbName(); + String tblName = stmt.getTableName(); + ExternalDatabase<?> db = catalog.getDbNullable(dbName); + if (db == null) { + throw new UserException("Failed to get database: '" + dbName + "' in catalog: " + catalog.getName()); + } + try { + Map<String, String> props = stmt.getExtProperties(); + String inputFormat = props.getOrDefault("input_format", Config.hive_default_input_format); + String outputFormat = props.getOrDefault("output_format", Config.hive_default_output_format); + String serDe = props.getOrDefault("serde", Config.hive_default_serde); + HiveTableMetadata catalogTable = HiveTableMetadata.of(dbName, + tblName, + stmt.getColumns(), + parsePartitionKeys(props), + props, + inputFormat, + outputFormat, + serDe); + + client.createTable(catalogTable, stmt.isSetIfNotExists()); + catalog.onRefresh(true); Review Comment: Only need to call `db.setUnInitialized()`? ########## fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetadataOps.java: ########## @@ -0,0 +1,188 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.datasource.hive; + +import org.apache.doris.analysis.CreateDbStmt; +import org.apache.doris.analysis.CreateTableStmt; +import org.apache.doris.analysis.DropDbStmt; +import org.apache.doris.analysis.DropTableStmt; +import org.apache.doris.catalog.Env; +import org.apache.doris.catalog.JdbcResource; +import org.apache.doris.catalog.external.NamedExternalTable; +import org.apache.doris.common.Config; +import org.apache.doris.common.DdlException; +import org.apache.doris.common.UserException; +import org.apache.doris.datasource.ExternalDatabase; +import org.apache.doris.datasource.ExternalMetaIdMgr; +import org.apache.doris.datasource.jdbc.client.JdbcClient; +import org.apache.doris.datasource.jdbc.client.JdbcClientConfig; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class HiveMetadataOps implements ExternalMetadataOps { + private static final Logger LOG = LogManager.getLogger(HiveMetadataOps.class); + private static final int MIN_CLIENT_POOL_SIZE = 8; + private JdbcClientConfig jdbcClientConfig; + private HiveConf hiveConf; + private HMSExternalCatalog catalog; + private HMSCachedClient client; + + public HiveMetadataOps(HiveConf hiveConf, JdbcClientConfig jdbcClientConfig, HMSExternalCatalog catalog) { + this.catalog = catalog; + this.hiveConf = hiveConf; + this.jdbcClientConfig = jdbcClientConfig; + this.client = createCachedClient(hiveConf, + Math.max(MIN_CLIENT_POOL_SIZE, Config.max_external_cache_loader_thread_pool_size), jdbcClientConfig); + } + + public HMSCachedClient getClient() { + return client; + } + + public static HMSCachedClient createCachedClient(HiveConf hiveConf, int thriftClientPoolSize, + JdbcClientConfig jdbcClientConfig) { + if (hiveConf != null) { + return new ThriftHMSCachedClient(hiveConf, thriftClientPoolSize); + } + Preconditions.checkNotNull(jdbcClientConfig, "hiveConf and jdbcClientConfig are both null"); + String dbType = JdbcClient.parseDbType(jdbcClientConfig.getJdbcUrl()); + switch (dbType) { + case JdbcResource.POSTGRESQL: + return new PostgreSQLJdbcHMSCachedClient(jdbcClientConfig); + default: + throw new IllegalArgumentException("Unsupported DB type: " + dbType); + } + } + + @Override + public void createDb(CreateDbStmt stmt) throws DdlException { + String fullDbName = stmt.getFullDbName(); + Map<String, String> properties = stmt.getProperties(); + long dbId = Env.getCurrentEnv().getNextId(); + try { + HiveDatabaseMetadata catalogDatabase = new HiveDatabaseMetadata(); + catalogDatabase.setDbName(fullDbName); + catalogDatabase.setProperties(properties); + if (properties.containsKey("location_uri")) { + catalogDatabase.setLocationUri(properties.get("location_uri")); + } + catalogDatabase.setComment(properties.getOrDefault("comment", "")); + client.createDatabase(catalogDatabase); + catalog.onRefresh(true); + catalog.registerDatabase(dbId, fullDbName); + } catch (Exception e) { + throw new RuntimeException(e.getMessage(), e); + } + LOG.info("createDb dbName = " + fullDbName + ", id = " + dbId); + } + + @Override + public void dropDb(DropDbStmt stmt) throws DdlException { + String dbName = stmt.getDbName(); + try { + client.dropDatabase(dbName); + catalog.onRefresh(true); + catalog.unregisterDatabase(dbName); Review Comment: ditto ########## 
fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetadataOps.java: ########## @@ -0,0 +1,188 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.datasource.hive; + +import org.apache.doris.analysis.CreateDbStmt; +import org.apache.doris.analysis.CreateTableStmt; +import org.apache.doris.analysis.DropDbStmt; +import org.apache.doris.analysis.DropTableStmt; +import org.apache.doris.catalog.Env; +import org.apache.doris.catalog.JdbcResource; +import org.apache.doris.catalog.external.NamedExternalTable; +import org.apache.doris.common.Config; +import org.apache.doris.common.DdlException; +import org.apache.doris.common.UserException; +import org.apache.doris.datasource.ExternalDatabase; +import org.apache.doris.datasource.ExternalMetaIdMgr; +import org.apache.doris.datasource.jdbc.client.JdbcClient; +import org.apache.doris.datasource.jdbc.client.JdbcClientConfig; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class HiveMetadataOps implements ExternalMetadataOps { + private static final Logger LOG = LogManager.getLogger(HiveMetadataOps.class); + private static final int MIN_CLIENT_POOL_SIZE = 8; + private JdbcClientConfig jdbcClientConfig; + private HiveConf hiveConf; + private HMSExternalCatalog catalog; + private HMSCachedClient client; + + public HiveMetadataOps(HiveConf hiveConf, JdbcClientConfig jdbcClientConfig, HMSExternalCatalog catalog) { + this.catalog = catalog; + this.hiveConf = hiveConf; + this.jdbcClientConfig = jdbcClientConfig; + this.client = createCachedClient(hiveConf, + Math.max(MIN_CLIENT_POOL_SIZE, Config.max_external_cache_loader_thread_pool_size), jdbcClientConfig); + } + + public HMSCachedClient getClient() { + return client; + } + + public static HMSCachedClient createCachedClient(HiveConf hiveConf, int thriftClientPoolSize, + JdbcClientConfig jdbcClientConfig) { + if (hiveConf != null) { + return new ThriftHMSCachedClient(hiveConf, thriftClientPoolSize); + } + Preconditions.checkNotNull(jdbcClientConfig, "hiveConf and jdbcClientConfig are both null"); + String dbType = JdbcClient.parseDbType(jdbcClientConfig.getJdbcUrl()); + switch (dbType) { + case JdbcResource.POSTGRESQL: + return new PostgreSQLJdbcHMSCachedClient(jdbcClientConfig); + default: + throw new IllegalArgumentException("Unsupported DB type: " + dbType); + } + } + + @Override + public void createDb(CreateDbStmt stmt) throws DdlException { + String fullDbName = stmt.getFullDbName(); + Map<String, String> properties = stmt.getProperties(); + long 
dbId = Env.getCurrentEnv().getNextId(); + try { + HiveDatabaseMetadata catalogDatabase = new HiveDatabaseMetadata(); + catalogDatabase.setDbName(fullDbName); + catalogDatabase.setProperties(properties); + if (properties.containsKey("location_uri")) { + catalogDatabase.setLocationUri(properties.get("location_uri")); + } + catalogDatabase.setComment(properties.getOrDefault("comment", "")); + client.createDatabase(catalogDatabase); + catalog.onRefresh(true); + catalog.registerDatabase(dbId, fullDbName); + } catch (Exception e) { + throw new RuntimeException(e.getMessage(), e); + } + LOG.info("createDb dbName = " + fullDbName + ", id = " + dbId); + } + + @Override + public void dropDb(DropDbStmt stmt) throws DdlException { + String dbName = stmt.getDbName(); + try { + client.dropDatabase(dbName); + catalog.onRefresh(true); + catalog.unregisterDatabase(dbName); + } catch (Exception e) { + throw new RuntimeException(e.getMessage(), e); + } + } + + @Override + public void createTable(CreateTableStmt stmt) throws UserException { + String dbName = stmt.getDbName(); + String tblName = stmt.getTableName(); + ExternalDatabase<?> db = catalog.getDbNullable(dbName); + if (db == null) { + throw new UserException("Failed to get database: '" + dbName + "' in catalog: " + catalog.getName()); + } + try { + Map<String, String> props = stmt.getExtProperties(); + String inputFormat = props.getOrDefault("input_format", Config.hive_default_input_format); + String outputFormat = props.getOrDefault("output_format", Config.hive_default_output_format); + String serDe = props.getOrDefault("serde", Config.hive_default_serde); + HiveTableMetadata catalogTable = HiveTableMetadata.of(dbName, + tblName, + stmt.getColumns(), + parsePartitionKeys(props), + props, + inputFormat, + outputFormat, + serDe); + + client.createTable(catalogTable, stmt.isSetIfNotExists()); + catalog.onRefresh(true); + long tableId = Env.getCurrentEnv().getExternalMetaIdMgr().getTblId(catalog.getId(), dbName, tblName); + if (tableId == ExternalMetaIdMgr.META_ID_FOR_NOT_EXISTS) { + return; + } + db.registerTable(NamedExternalTable.of(tableId, tblName, dbName, catalog)); + } catch (Exception e) { + throw new UserException(e.getMessage(), e); + } + } + + private static List<FieldSchema> parsePartitionKeys(Map<String, String> props) { + List<FieldSchema> parsedKeys = new ArrayList<>(); + String pkStr = props.getOrDefault("partition_keys", ""); + if (pkStr.isEmpty()) { + return parsedKeys; + } else { + // TODO: parse string to partition keys list + return parsedKeys; + } + } + + @Override + public void dropTable(DropTableStmt stmt) throws DdlException { + String dbName = stmt.getDbName(); + ExternalDatabase<?> db = catalog.getDbNullable(stmt.getDbName()); + if (db == null) { + throw new DdlException("Failed to get database: '" + dbName + "' in catalog: " + catalog.getName()); + } + try { + client.dropTable(dbName, stmt.getTableName()); + catalog.onRefresh(true); Review Comment: ditto
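For the comment above about the Kerberos block in `IcebergHMSExternalCatalog` duplicating `HadoopUGI.java`: a minimal sketch of what delegating to the shared helper could look like. The `AuthenticationConfig.getKerberosConfig(...)` and `HadoopUGI.loginWithUGI(...)` names are assumptions about the existing security utilities in fe-core and should be checked against the actual classes before use.

    // Hedged sketch, not the PR's code: let the shared helper perform the Kerberos login
    // instead of repeating the branch inside initLocalObjectsImpl().
    // Both method names below are assumptions about HadoopUGI.java / AuthenticationConfig.
    AuthenticationConfig authConfig = AuthenticationConfig.getKerberosConfig(hiveConf); // assumed factory
    UserGroupInformation ugi = HadoopUGI.loginWithUGI(authConfig);                      // assumed helper
    // ... then continue building catalogProperties and creating the Iceberg HiveCatalog.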
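On the comment above asking about defaulting to ORC for `hive_default_input_format`: a minimal sketch of ORC-oriented defaults, assuming the three `Config` fields keep the names used in the diff (`hive_default_input_format`, `hive_default_output_format`, `hive_default_serde`). The class names are Hive's standard ORC input format, output format, and serde; the wrapper class is only for illustration and the `@ConfField` descriptions are omitted.

    // Hedged sketch: ORC-based defaults for the table-creation config entries.
    public class HiveCreateTableDefaults {
        public static String hive_default_input_format = "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat";
        public static String hive_default_output_format = "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat";
        public static String hive_default_serde = "org.apache.hadoop.hive.ql.io.orc.OrcSerde";
    }

Whether ORC or text is the better default is a product decision: ORC gives columnar storage and compression out of the box, while TextInputFormat matches Hive's own CREATE TABLE default.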
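On the comment above asking about nested types in `dorisTypeToHiveType()`: a hedged sketch of how the mapping could recurse into ARRAY/MAP/STRUCT before falling back to the scalar chain from the diff. It assumes the `ArrayType`, `MapType`, `StructType`, and `StructField` classes in `org.apache.doris.catalog` with the accessors shown (`getItemType()`, `getKeyType()`, `getValueType()`, `getFields()`, `getName()`, `getType()`); the exact names should be verified, and the scalar branches are assumed to live in a hypothetical helper.

    // Hedged sketch, not the PR's implementation: handle nested types by recursing,
    // then fall back to the scalar if/else chain shown in the diff.
    public static String dorisTypeToHiveType(Type dorisType) {
        if (dorisType instanceof ArrayType) {
            Type itemType = ((ArrayType) dorisType).getItemType();
            return "array<" + dorisTypeToHiveType(itemType) + ">";
        }
        if (dorisType instanceof MapType) {
            MapType mapType = (MapType) dorisType;
            return "map<" + dorisTypeToHiveType(mapType.getKeyType()) + ","
                    + dorisTypeToHiveType(mapType.getValueType()) + ">";
        }
        if (dorisType instanceof StructType) {
            StringBuilder sb = new StringBuilder("struct<");
            List<StructField> fields = ((StructType) dorisType).getFields();
            for (int i = 0; i < fields.size(); i++) {
                if (i > 0) {
                    sb.append(",");
                }
                sb.append(fields.get(i).getName()).append(":")
                        .append(dorisTypeToHiveType(fields.get(i).getType()));
            }
            return sb.append(">").toString();
        }
        // scalar branches (boolean, tinyint, ..., string) from the diff
        return scalarDorisTypeToHiveType(dorisType); // hypothetical helper wrapping the diff's chain
    }

Decimal, char, and varchar would also need length/precision-aware handling (e.g. decimal(p,s), varchar(n)), which the chain in the diff does not cover.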
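`parsePartitionKeys()` in the diff is still a TODO and the PR does not define the `partition_keys` property format, so the following is purely illustrative: assuming a hypothetical comma-separated `name:type` encoding, it could be parsed into the Hive `FieldSchema` list like this.

    // Illustrative sketch only: hypothetical "name1:type1,name2:type2" encoding.
    // FieldSchema(name, type, comment) is the Hive metastore API class already imported
    // by HiveMetadataOps.
    private static List<FieldSchema> parsePartitionKeys(Map<String, String> props) {
        List<FieldSchema> parsedKeys = new ArrayList<>();
        String pkStr = props.getOrDefault("partition_keys", "");
        if (pkStr.isEmpty()) {
            return parsedKeys;
        }
        for (String part : pkStr.split(",")) {
            String[] nameAndType = part.trim().split(":", 2);
            if (nameAndType.length != 2) {
                throw new IllegalArgumentException("Malformed partition key spec: " + part);
            }
            parsedKeys.add(new FieldSchema(nameAndType[0].trim(), nameAndType[1].trim(), ""));
        }
        return parsedKeys;
    }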