gaborgsomogyi commented on code in PR #13187:
URL: https://github.com/apache/iceberg/pull/13187#discussion_r2137150659


##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/security/IcebergHiveConnectorDelegationTokenProvider.java:
##########
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.security;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.spark.SparkCatalog;
+import org.apache.iceberg.spark.SparkSessionCatalog;
+import org.apache.spark.SparkConf;
+import org.apache.spark.security.HadoopDelegationTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.Option;
+import scala.Tuple2;
+
+public class IcebergHiveConnectorDelegationTokenProvider implements HadoopDelegationTokenProvider {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(IcebergHiveConnectorDelegationTokenProvider.class);
+
+  private static final String SERVICE_NAME = "iceberg_hive";
+  private static final String SPARK_SQL_CATALOG_PREFIX = "spark.sql.catalog.";
+  private static final String CATALOG_TYPE = "hive";
+  private static final String URI_KEY = ".uri";
+  private static final String PRINCIPAL_KEY = ".hive.metastore.kerberos.principal";
+  private static final String TYPE_KEY = ".type";
+
+  @Override
+  public String serviceName() {
+    return SERVICE_NAME;
+  }
+
+  /**
+   * Builds a HiveConf object for the specified catalog by merging the provided Hadoop configuration
+   * with catalog-specific configurations from the Spark configuration.
+   *
+   * @param sparkConf The SparkConf object containing Spark configurations.
+   * @param hadoopConf The Hadoop Configuration object to be used as the base for the HiveConf.
+   * @param catalogName The name of the catalog.
+   * @return An Optional containing the constructed HiveConf if successful, or an empty Optional if
+   *     an error occurs.
+   */
+  Optional<HiveConf> buildHiveConf(
+      SparkConf sparkConf, Configuration hadoopConf, String catalogName) {
+    try {
+      HiveConf hiveConf = new HiveConf(hadoopConf, HiveConf.class);
+      Arrays.stream(sparkConf.getAllWithPrefix(SPARK_SQL_CATALOG_PREFIX + catalogName))
+          .forEach(x -> hiveConf.set(x._1, x._2));
+      return Optional.of(hiveConf);
+    } catch (Exception e) {
+      LOG.warn("Fail to create Hive Configuration for catalog {}: {}", catalogName, e.getMessage());
+      return Optional.empty();
+    }
+  }
+
+  /**
+   * Extracts the names of Iceberg catalogs from the provided Spark configuration. This method
+   * filters the Spark configuration entries to identify those that correspond to Iceberg catalogs.
+   * It checks if the catalog type matches either `SparkSessionCatalog` or `SparkCatalog` and
+   * collects their names.
+   *
+   * @param sparkConf The SparkConf object containing Spark configurations.
+   * @return A Set of Strings representing the names of Iceberg catalogs.
+   */
+  private Set<String> extractIcebergCatalogNames(SparkConf sparkConf) {
+    return Arrays.stream(sparkConf.getAllWithPrefix(SPARK_SQL_CATALOG_PREFIX))
+        .filter(
+            entry -> {
+              String val = entry._2();
+              return val.contains(SparkSessionCatalog.class.getName())
+                  || val.contains(SparkCatalog.class.getName());
+            })
+        .map(Tuple2::_1)
+        .collect(Collectors.toSet());
+  }
+
+  @Override
+  public boolean delegationTokensRequired(SparkConf sparkConf, Configuration hadoopConf) {
+    return !getRequireTokenCatalogs(sparkConf).isEmpty();
+  }
+
+  private Set<String> getRequireTokenCatalogs(SparkConf sparkConf) {
+    return extractIcebergCatalogNames(sparkConf).stream()
+        .filter(catalog -> checkDelegationTokensRequired(sparkConf, catalog))
+        .collect(Collectors.toSet());
+  }
+
+  private boolean checkDelegationTokensRequired(SparkConf sparkConf, String catalogName) {
+    String metastoreUri = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + URI_KEY, "");
+    String principal = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + PRINCIPAL_KEY, "");
+    boolean isHiveType =
+        CATALOG_TYPE.equalsIgnoreCase(
+            sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + TYPE_KEY, ""));
+    if (metastoreUri.isEmpty()
+        || principal.isEmpty()
+        || !isHiveType
+        || !UserGroupInformation.isSecurityEnabled()) {
+      return false;
+    }
+
+    try {
+      Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
+      Token<?> currentToken = credentials.getToken(new Text(metastoreUri));
+      return currentToken == null;
+    } catch (IOException ex) {
+      LOG.error(
+          "Failed to get current user credentials for catalog {}: {}",
+          catalogName,
+          ex.getMessage(),
+          ex);
+      throw new RuntimeException(ex);
+    }
+  }
+
+  IMetaStoreClient createHmsClient(HiveConf conf) throws Exception {
+    return new HiveMetaStoreClient(conf, null, false);
+  }
+
+  @Override
+  public Option<Object> obtainDelegationTokens(
+      Configuration hadoopConf, SparkConf sparkConf, Credentials creds) {
+    Map<String, IMetaStoreClient> hmsClientCache = Maps.newHashMap();
+    Set<String> requireTokenCatalogs = getRequireTokenCatalogs(sparkConf);
+    LOG.debug("Require token Hive catalogs: {}", requireTokenCatalogs);
+    try {
+      for (String catalogName : requireTokenCatalogs) {
+        Optional<HiveConf> hiveConfOpt = buildHiveConf(sparkConf, hadoopConf, catalogName);
+        if (!hiveConfOpt.isPresent()) {
+          continue;

Review Comment:
   When there is no valid Hive conf but the catalog is listed, that's an error which should blow up, right?
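
   Something along these lines would fail fast instead of silently skipping the catalog (rough, untested sketch; the exception type and message are just placeholders):

```java
        // Rough sketch: blow up when a configured Iceberg Hive catalog cannot produce a HiveConf.
        HiveConf remoteHmsConf =
            buildHiveConf(sparkConf, hadoopConf, catalogName)
                .orElseThrow(
                    () ->
                        new IllegalStateException(
                            "Cannot build HiveConf for configured catalog: " + catalogName));
```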



##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/security/IcebergHiveConnectorDelegationTokenProvider.java:
##########
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.security;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.spark.SparkCatalog;
+import org.apache.iceberg.spark.SparkSessionCatalog;
+import org.apache.spark.SparkConf;
+import org.apache.spark.security.HadoopDelegationTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.Option;
+import scala.Tuple2;
+
+public class IcebergHiveConnectorDelegationTokenProvider implements HadoopDelegationTokenProvider {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(IcebergHiveConnectorDelegationTokenProvider.class);
+
+  private static final String SERVICE_NAME = "iceberg_hive";
+  private static final String SPARK_SQL_CATALOG_PREFIX = "spark.sql.catalog.";
+  private static final String CATALOG_TYPE = "hive";
+  private static final String URI_KEY = ".uri";
+  private static final String PRINCIPAL_KEY = ".hive.metastore.kerberos.principal";
+  private static final String TYPE_KEY = ".type";
+
+  @Override
+  public String serviceName() {
+    return SERVICE_NAME;
+  }
+
+  /**
+   * Builds a HiveConf object for the specified catalog by merging the provided Hadoop configuration
+   * with catalog-specific configurations from the Spark configuration.
+   *
+   * @param sparkConf The SparkConf object containing Spark configurations.
+   * @param hadoopConf The Hadoop Configuration object to be used as the base for the HiveConf.
+   * @param catalogName The name of the catalog.
+   * @return An Optional containing the constructed HiveConf if successful, or an empty Optional if
+   *     an error occurs.
+   */
+  Optional<HiveConf> buildHiveConf(
+      SparkConf sparkConf, Configuration hadoopConf, String catalogName) {
+    try {
+      HiveConf hiveConf = new HiveConf(hadoopConf, HiveConf.class);
+      Arrays.stream(sparkConf.getAllWithPrefix(SPARK_SQL_CATALOG_PREFIX + catalogName))
+          .forEach(x -> hiveConf.set(x._1, x._2));
+      return Optional.of(hiveConf);
+    } catch (Exception e) {
+      LOG.warn("Fail to create Hive Configuration for catalog {}: {}", catalogName, e.getMessage());
+      return Optional.empty();
+    }
+  }
+
+  /**
+   * Extracts the names of Iceberg catalogs from the provided Spark configuration. This method
+   * filters the Spark configuration entries to identify those that correspond to Iceberg catalogs.
+   * It checks if the catalog type matches either `SparkSessionCatalog` or `SparkCatalog` and
+   * collects their names.
+   *
+   * @param sparkConf The SparkConf object containing Spark configurations.
+   * @return A Set of Strings representing the names of Iceberg catalogs.
+   */
+  private Set<String> extractIcebergCatalogNames(SparkConf sparkConf) {
+    return Arrays.stream(sparkConf.getAllWithPrefix(SPARK_SQL_CATALOG_PREFIX))
+        .filter(
+            entry -> {
+              String val = entry._2();
+              return val.contains(SparkSessionCatalog.class.getName())
+                  || val.contains(SparkCatalog.class.getName());
+            })
+        .map(Tuple2::_1)
+        .collect(Collectors.toSet());
+  }
+
+  @Override
+  public boolean delegationTokensRequired(SparkConf sparkConf, Configuration hadoopConf) {
+    return !getRequireTokenCatalogs(sparkConf).isEmpty();
+  }
+
+  private Set<String> getRequireTokenCatalogs(SparkConf sparkConf) {
+    return extractIcebergCatalogNames(sparkConf).stream()
+        .filter(catalog -> checkDelegationTokensRequired(sparkConf, catalog))
+        .collect(Collectors.toSet());
+  }
+
+  private boolean checkDelegationTokensRequired(SparkConf sparkConf, String catalogName) {
+    String metastoreUri = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + URI_KEY, "");
+    String principal = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + PRINCIPAL_KEY, "");
+    boolean isHiveType =
+        CATALOG_TYPE.equalsIgnoreCase(
+            sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + TYPE_KEY, ""));
+    if (metastoreUri.isEmpty()
+        || principal.isEmpty()
+        || !isHiveType
+        || !UserGroupInformation.isSecurityEnabled()) {
+      return false;
+    }
+
+    try {
+      Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
+      Token<?> currentToken = credentials.getToken(new Text(metastoreUri));
+      return currentToken == null;
+    } catch (IOException ex) {
+      LOG.error(
+          "Failed to get current user credentials for catalog {}: {}",
+          catalogName,
+          ex.getMessage(),
+          ex);
+      throw new RuntimeException(ex);
+    }
+  }
+
+  IMetaStoreClient createHmsClient(HiveConf conf) throws Exception {
+    return new HiveMetaStoreClient(conf, null, false);
+  }
+
+  @Override
+  public Option<Object> obtainDelegationTokens(
+      Configuration hadoopConf, SparkConf sparkConf, Credentials creds) {
+    Map<String, IMetaStoreClient> hmsClientCache = Maps.newHashMap();
+    Set<String> requireTokenCatalogs = getRequireTokenCatalogs(sparkConf);
+    LOG.debug("Require token Hive catalogs: {}", requireTokenCatalogs);
+    try {
+      for (String catalogName : requireTokenCatalogs) {
+        Optional<HiveConf> hiveConfOpt = buildHiveConf(sparkConf, hadoopConf, catalogName);
+        if (!hiveConfOpt.isPresent()) {
+          continue;
+        }
+
+        HiveConf remoteHmsConf = hiveConfOpt.get();
+        String metastoreUri = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + URI_KEY);
+        String principal = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + PRINCIPAL_KEY);
+        try {
+          UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+          LOG.debug(
+              "Getting Hive delegation token for {} against {} at {}",
+              currentUser.getUserName(),
+              principal,
+              metastoreUri);
+          doAsRealUser(
+              () -> {
+                IMetaStoreClient hmsClient = hmsClientCache.get(metastoreUri);
+                if (hmsClient == null) {
+                  hmsClient = createHmsClient(remoteHmsConf);
+                  hmsClientCache.put(metastoreUri, hmsClient);
+                }
+
+                String tokenStr =
+                    hmsClient.getDelegationToken(currentUser.getUserName(), principal);
+                Token<DelegationTokenIdentifier> hive2Token = new Token<>();
+                hive2Token.decodeFromUrlString(tokenStr);
+                LOG.info("Get Token from hive metastore: {}", hive2Token);
+                creds.addToken(new Text(metastoreUri), hive2Token);
+                return null;
+              });
+        } catch (IOException | UndeclaredThrowableException e) {
+          LOG.error(
+              "Failed to obtain delegation token for catalog {}: {}",
+              catalogName,
+              e.getMessage(),
+              e);
+          throw new RuntimeException(e);
+        } catch (Throwable t) {
+          LOG.error(
+              "Unexpected error when obtaining delegation token for catalog {}: {}",
+              catalogName,
+              t.getMessage(),
+              t);
+          throw new RuntimeException(t);
+        }
+      }
+
+      return Option.empty();
+    } catch (NoClassDefFoundError e) {
+      throw new RuntimeException(e);
+    } finally {
+      close(hmsClientCache);
+    }
+  }
+
+  private void close(Map<String, IMetaStoreClient> hmsClientCache) {
+    hmsClientCache
+        .values()
+        .forEach(
+            client -> {
+              try {
+                client.close();
+              } catch (Exception e) {
+                throw new RuntimeException(
+                    "Failed to close HiveMetaStoreClient: " + e.getMessage(), e);
+              }
+            });
+  }
+
+  private <T> void doAsRealUser(PrivilegedExceptionAction<T> action) {
+    try {
+      UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+      UserGroupInformation realUser =
+          Optional.ofNullable(currentUser.getRealUser()).orElse(currentUser);
+      realUser.doAs(action);
+    } catch (UndeclaredThrowableException e) {
+      throw new RuntimeException("Fatal error in doAsRealUser: " + e.getCause());
+    } catch (IOException | InterruptedException e) {
+      throw new RuntimeException(e);

Review Comment:
   I think these can be merged, right?
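
   Something like this, as a rough, untested sketch of the merged form (the message wording is a placeholder):

```java
  private <T> void doAsRealUser(PrivilegedExceptionAction<T> action) {
    try {
      UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
      UserGroupInformation realUser =
          Optional.ofNullable(currentUser.getRealUser()).orElse(currentUser);
      realUser.doAs(action);
    } catch (UndeclaredThrowableException | IOException | InterruptedException e) {
      // Single multi-catch instead of two separate blocks that both wrap into RuntimeException.
      throw new RuntimeException("Fatal error in doAsRealUser", e);
    }
  }
```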



##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/security/IcebergHiveConnectorDelegationTokenProvider.java:
##########
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.security;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.spark.SparkCatalog;
+import org.apache.iceberg.spark.SparkSessionCatalog;
+import org.apache.spark.SparkConf;
+import org.apache.spark.security.HadoopDelegationTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.Option;
+import scala.Tuple2;
+
+public class IcebergHiveConnectorDelegationTokenProvider implements HadoopDelegationTokenProvider {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(IcebergHiveConnectorDelegationTokenProvider.class);
+
+  private static final String SERVICE_NAME = "iceberg_hive";
+  private static final String SPARK_SQL_CATALOG_PREFIX = "spark.sql.catalog.";
+  private static final String CATALOG_TYPE = "hive";
+  private static final String URI_KEY = ".uri";
+  private static final String PRINCIPAL_KEY = ".hive.metastore.kerberos.principal";
+  private static final String TYPE_KEY = ".type";
+
+  @Override
+  public String serviceName() {
+    return SERVICE_NAME;
+  }
+
+  /**
+   * Builds a HiveConf object for the specified catalog by merging the provided Hadoop configuration
+   * with catalog-specific configurations from the Spark configuration.
+   *
+   * @param sparkConf The SparkConf object containing Spark configurations.
+   * @param hadoopConf The Hadoop Configuration object to be used as the base for the HiveConf.
+   * @param catalogName The name of the catalog.
+   * @return An Optional containing the constructed HiveConf if successful, or an empty Optional if
+   *     an error occurs.
+   */
+  Optional<HiveConf> buildHiveConf(
+      SparkConf sparkConf, Configuration hadoopConf, String catalogName) {
+    try {
+      HiveConf hiveConf = new HiveConf(hadoopConf, HiveConf.class);
+      Arrays.stream(sparkConf.getAllWithPrefix(SPARK_SQL_CATALOG_PREFIX + catalogName))
+          .forEach(x -> hiveConf.set(x._1, x._2));
+      return Optional.of(hiveConf);
+    } catch (Exception e) {
+      LOG.warn("Fail to create Hive Configuration for catalog {}: {}", catalogName, e.getMessage());
+      return Optional.empty();
+    }
+  }
+
+  /**
+   * Extracts the names of Iceberg catalogs from the provided Spark configuration. This method
+   * filters the Spark configuration entries to identify those that correspond to Iceberg catalogs.
+   * It checks if the catalog type matches either `SparkSessionCatalog` or `SparkCatalog` and
+   * collects their names.
+   *
+   * @param sparkConf The SparkConf object containing Spark configurations.
+   * @return A Set of Strings representing the names of Iceberg catalogs.
+   */
+  private Set<String> extractIcebergCatalogNames(SparkConf sparkConf) {
+    return Arrays.stream(sparkConf.getAllWithPrefix(SPARK_SQL_CATALOG_PREFIX))
+        .filter(
+            entry -> {
+              String val = entry._2();
+              return val.contains(SparkSessionCatalog.class.getName())
+                  || val.contains(SparkCatalog.class.getName());
+            })
+        .map(Tuple2::_1)
+        .collect(Collectors.toSet());
+  }
+
+  @Override
+  public boolean delegationTokensRequired(SparkConf sparkConf, Configuration hadoopConf) {
+    return !getRequireTokenCatalogs(sparkConf).isEmpty();
+  }
+
+  private Set<String> getRequireTokenCatalogs(SparkConf sparkConf) {
+    return extractIcebergCatalogNames(sparkConf).stream()
+        .filter(catalog -> checkDelegationTokensRequired(sparkConf, catalog))
+        .collect(Collectors.toSet());
+  }
+
+  private boolean checkDelegationTokensRequired(SparkConf sparkConf, String catalogName) {
+    String metastoreUri = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + URI_KEY, "");
+    String principal = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + PRINCIPAL_KEY, "");
+    boolean isHiveType =
+        CATALOG_TYPE.equalsIgnoreCase(
+            sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + TYPE_KEY, ""));
+    if (metastoreUri.isEmpty()
+        || principal.isEmpty()
+        || !isHiveType
+        || !UserGroupInformation.isSecurityEnabled()) {
+      return false;
+    }
+
+    try {
+      Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
+      Token<?> currentToken = credentials.getToken(new Text(metastoreUri));
+      return currentToken == null;
+    } catch (IOException ex) {
+      LOG.error(
+          "Failed to get current user credentials for catalog {}: {}",
+          catalogName,
+          ex.getMessage(),
+          ex);
+      throw new RuntimeException(ex);
+    }
+  }
+
+  IMetaStoreClient createHmsClient(HiveConf conf) throws Exception {
+    return new HiveMetaStoreClient(conf, null, false);
+  }
+
+  @Override
+  public Option<Object> obtainDelegationTokens(
+      Configuration hadoopConf, SparkConf sparkConf, Credentials creds) {
+    Map<String, IMetaStoreClient> hmsClientCache = Maps.newHashMap();
+    Set<String> requireTokenCatalogs = getRequireTokenCatalogs(sparkConf);
+    LOG.debug("Require token Hive catalogs: {}", requireTokenCatalogs);
+    try {
+      for (String catalogName : requireTokenCatalogs) {
+        Optional<HiveConf> hiveConfOpt = buildHiveConf(sparkConf, hadoopConf, catalogName);
+        if (!hiveConfOpt.isPresent()) {
+          continue;
+        }
+
+        HiveConf remoteHmsConf = hiveConfOpt.get();
+        String metastoreUri = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + URI_KEY);
+        String principal = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + PRINCIPAL_KEY);
+        try {
+          UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+          LOG.debug(
+              "Getting Hive delegation token for {} against {} at {}",
+              currentUser.getUserName(),
+              principal,
+              metastoreUri);
+          doAsRealUser(
+              () -> {
+                IMetaStoreClient hmsClient = hmsClientCache.get(metastoreUri);

Review Comment:
   When we use any caching, we must make sure that when the client is corrupted (constantly throws exceptions) it is evicted and newly created, otherwise I don't see the gain of having a cache. Do we really need a cache here? I mean, maybe we can spare 2-3 seconds within a 4-hour period.
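
   If the cache stays, something along these lines would evict a broken client so the next attempt starts from a fresh connection (rough, untested sketch; the helper name and signature are just placeholders):

```java
  // Hypothetical helper, sketch only: fetch a token and drop the cached client on failure.
  private String getTokenEvictingOnFailure(
      Map<String, IMetaStoreClient> hmsClientCache,
      String metastoreUri,
      HiveConf remoteHmsConf,
      String user,
      String principal)
      throws Exception {
    IMetaStoreClient hmsClient = hmsClientCache.get(metastoreUri);
    if (hmsClient == null) {
      hmsClient = createHmsClient(remoteHmsConf);
      hmsClientCache.put(metastoreUri, hmsClient);
    }
    try {
      return hmsClient.getDelegationToken(user, principal);
    } catch (Exception e) {
      // Evict so the next call recreates the client instead of reusing a corrupted one.
      hmsClientCache.remove(metastoreUri);
      throw e;
    }
  }
```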



##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/security/IcebergHiveConnectorDelegationTokenProvider.java:
##########
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.security;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.spark.SparkCatalog;
+import org.apache.iceberg.spark.SparkSessionCatalog;
+import org.apache.spark.SparkConf;
+import org.apache.spark.security.HadoopDelegationTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.Option;
+import scala.Tuple2;
+
+public class IcebergHiveConnectorDelegationTokenProvider implements HadoopDelegationTokenProvider {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(IcebergHiveConnectorDelegationTokenProvider.class);
+
+  private static final String SERVICE_NAME = "iceberg_hive";
+  private static final String SPARK_SQL_CATALOG_PREFIX = "spark.sql.catalog.";
+  private static final String CATALOG_TYPE = "hive";
+  private static final String URI_KEY = ".uri";
+  private static final String PRINCIPAL_KEY = ".hive.metastore.kerberos.principal";
+  private static final String TYPE_KEY = ".type";
+
+  @Override
+  public String serviceName() {
+    return SERVICE_NAME;
+  }
+
+  /**
+   * Builds a HiveConf object for the specified catalog by merging the provided Hadoop configuration
+   * with catalog-specific configurations from the Spark configuration.
+   *
+   * @param sparkConf The SparkConf object containing Spark configurations.
+   * @param hadoopConf The Hadoop Configuration object to be used as the base for the HiveConf.
+   * @param catalogName The name of the catalog.
+   * @return An Optional containing the constructed HiveConf if successful, or an empty Optional if
+   *     an error occurs.
+   */
+  Optional<HiveConf> buildHiveConf(
+      SparkConf sparkConf, Configuration hadoopConf, String catalogName) {
+    try {
+      HiveConf hiveConf = new HiveConf(hadoopConf, HiveConf.class);
+      Arrays.stream(sparkConf.getAllWithPrefix(SPARK_SQL_CATALOG_PREFIX + catalogName))
+          .forEach(x -> hiveConf.set(x._1, x._2));
+      return Optional.of(hiveConf);
+    } catch (Exception e) {
+      LOG.warn("Fail to create Hive Configuration for catalog {}: {}", catalogName, e.getMessage());
+      return Optional.empty();
+    }
+  }
+
+  /**
+   * Extracts the names of Iceberg catalogs from the provided Spark configuration. This method
+   * filters the Spark configuration entries to identify those that correspond to Iceberg catalogs.
+   * It checks if the catalog type matches either `SparkSessionCatalog` or `SparkCatalog` and
+   * collects their names.
+   *
+   * @param sparkConf The SparkConf object containing Spark configurations.
+   * @return A Set of Strings representing the names of Iceberg catalogs.
+   */
+  private Set<String> extractIcebergCatalogNames(SparkConf sparkConf) {
+    return Arrays.stream(sparkConf.getAllWithPrefix(SPARK_SQL_CATALOG_PREFIX))
+        .filter(
+            entry -> {
+              String val = entry._2();
+              return val.contains(SparkSessionCatalog.class.getName())
+                  || val.contains(SparkCatalog.class.getName());
+            })
+        .map(Tuple2::_1)
+        .collect(Collectors.toSet());
+  }
+
+  @Override
+  public boolean delegationTokensRequired(SparkConf sparkConf, Configuration hadoopConf) {
+    return !getRequireTokenCatalogs(sparkConf).isEmpty();
+  }
+
+  private Set<String> getRequireTokenCatalogs(SparkConf sparkConf) {
+    return extractIcebergCatalogNames(sparkConf).stream()
+        .filter(catalog -> checkDelegationTokensRequired(sparkConf, catalog))
+        .collect(Collectors.toSet());
+  }
+
+  private boolean checkDelegationTokensRequired(SparkConf sparkConf, String catalogName) {
+    String metastoreUri = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + URI_KEY, "");
+    String principal = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + PRINCIPAL_KEY, "");
+    boolean isHiveType =
+        CATALOG_TYPE.equalsIgnoreCase(
+            sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + TYPE_KEY, ""));
+    if (metastoreUri.isEmpty()
+        || principal.isEmpty()
+        || !isHiveType
+        || !UserGroupInformation.isSecurityEnabled()) {
+      return false;
+    }
+
+    try {
+      Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
+      Token<?> currentToken = credentials.getToken(new Text(metastoreUri));
+      return currentToken == null;
+    } catch (IOException ex) {
+      LOG.error(
+          "Failed to get current user credentials for catalog {}: {}",
+          catalogName,
+          ex.getMessage(),
+          ex);
+      throw new RuntimeException(ex);
+    }
+  }
+
+  IMetaStoreClient createHmsClient(HiveConf conf) throws Exception {
+    return new HiveMetaStoreClient(conf, null, false);
+  }
+
+  @Override
+  public Option<Object> obtainDelegationTokens(
+      Configuration hadoopConf, SparkConf sparkConf, Credentials creds) {
+    Map<String, IMetaStoreClient> hmsClientCache = Maps.newHashMap();
+    Set<String> requireTokenCatalogs = getRequireTokenCatalogs(sparkConf);
+    LOG.debug("Require token Hive catalogs: {}", requireTokenCatalogs);
+    try {
+      for (String catalogName : requireTokenCatalogs) {
+        Optional<HiveConf> hiveConfOpt = buildHiveConf(sparkConf, hadoopConf, catalogName);
+        if (!hiveConfOpt.isPresent()) {
+          continue;
+        }
+
+        HiveConf remoteHmsConf = hiveConfOpt.get();
+        String metastoreUri = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + URI_KEY);
+        String principal = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + PRINCIPAL_KEY);
+        try {
+          UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+          LOG.debug(
+              "Getting Hive delegation token for {} against {} at {}",
+              currentUser.getUserName(),
+              principal,
+              metastoreUri);
+          doAsRealUser(
+              () -> {
+                IMetaStoreClient hmsClient = hmsClientCache.get(metastoreUri);
+                if (hmsClient == null) {
+                  hmsClient = createHmsClient(remoteHmsConf);
+                  hmsClientCache.put(metastoreUri, hmsClient);
+                }
+
+                String tokenStr =
+                    hmsClient.getDelegationToken(currentUser.getUserName(), principal);
+                Token<DelegationTokenIdentifier> hive2Token = new Token<>();
+                hive2Token.decodeFromUrlString(tokenStr);
+                LOG.info("Get Token from hive metastore: {}", hive2Token);
+                creds.addToken(new Text(metastoreUri), hive2Token);
+                return null;

Review Comment:
   Since it's returning null, that means `obtainDelegationTokens` will never be called again unless other tokens have expired. Is this the intention here?
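
   If I remember right, the `Option` returned by `obtainDelegationTokens` carries the next renewal time in milliseconds, so returning a value instead of `Option.empty()` is what drives re-obtaining the tokens. Rough, untested sketch only; the interval below is just a placeholder:

```java
      // Sketch: report the earliest renewal time so Spark schedules the next token fetch.
      // The one hour interval below is only a placeholder value.
      long nextRenewalTimeMs = System.currentTimeMillis() + 60 * 60 * 1000L;
      return Option.apply((Object) Long.valueOf(nextRenewalTimeMs));
```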



##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/security/IcebergHiveConnectorDelegationTokenProvider.java:
##########
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.security;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.spark.SparkCatalog;
+import org.apache.iceberg.spark.SparkSessionCatalog;
+import org.apache.spark.SparkConf;
+import org.apache.spark.security.HadoopDelegationTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.Option;
+import scala.Tuple2;
+
+public class IcebergHiveConnectorDelegationTokenProvider implements HadoopDelegationTokenProvider {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(IcebergHiveConnectorDelegationTokenProvider.class);
+
+  private static final String SERVICE_NAME = "iceberg_hive";
+  private static final String SPARK_SQL_CATALOG_PREFIX = "spark.sql.catalog.";
+  private static final String CATALOG_TYPE = "hive";
+  private static final String URI_KEY = ".uri";
+  private static final String PRINCIPAL_KEY = ".hive.metastore.kerberos.principal";
+  private static final String TYPE_KEY = ".type";
+
+  @Override
+  public String serviceName() {
+    return SERVICE_NAME;
+  }
+
+  /**
+   * Builds a HiveConf object for the specified catalog by merging the provided Hadoop configuration
+   * with catalog-specific configurations from the Spark configuration.
+   *
+   * @param sparkConf The SparkConf object containing Spark configurations.
+   * @param hadoopConf The Hadoop Configuration object to be used as the base for the HiveConf.
+   * @param catalogName The name of the catalog.
+   * @return An Optional containing the constructed HiveConf if successful, or an empty Optional if
+   *     an error occurs.
+   */
+  Optional<HiveConf> buildHiveConf(
+      SparkConf sparkConf, Configuration hadoopConf, String catalogName) {
+    try {
+      HiveConf hiveConf = new HiveConf(hadoopConf, HiveConf.class);
+      Arrays.stream(sparkConf.getAllWithPrefix(SPARK_SQL_CATALOG_PREFIX + catalogName))
+          .forEach(x -> hiveConf.set(x._1, x._2));
+      return Optional.of(hiveConf);
+    } catch (Exception e) {
+      LOG.warn("Fail to create Hive Configuration for catalog {}: {}", catalogName, e.getMessage());
+      return Optional.empty();
+    }
+  }
+
+  /**
+   * Extracts the names of Iceberg catalogs from the provided Spark configuration. This method
+   * filters the Spark configuration entries to identify those that correspond to Iceberg catalogs.
+   * It checks if the catalog type matches either `SparkSessionCatalog` or `SparkCatalog` and
+   * collects their names.
+   *
+   * @param sparkConf The SparkConf object containing Spark configurations.
+   * @return A Set of Strings representing the names of Iceberg catalogs.
+   */
+  private Set<String> extractIcebergCatalogNames(SparkConf sparkConf) {
+    return Arrays.stream(sparkConf.getAllWithPrefix(SPARK_SQL_CATALOG_PREFIX))
+        .filter(
+            entry -> {
+              String val = entry._2();
+              return val.contains(SparkSessionCatalog.class.getName())
+                  || val.contains(SparkCatalog.class.getName());
+            })
+        .map(Tuple2::_1)
+        .collect(Collectors.toSet());
+  }
+
+  @Override
+  public boolean delegationTokensRequired(SparkConf sparkConf, Configuration hadoopConf) {
+    return !getRequireTokenCatalogs(sparkConf).isEmpty();
+  }
+
+  private Set<String> getRequireTokenCatalogs(SparkConf sparkConf) {
+    return extractIcebergCatalogNames(sparkConf).stream()
+        .filter(catalog -> checkDelegationTokensRequired(sparkConf, catalog))
+        .collect(Collectors.toSet());
+  }
+
+  private boolean checkDelegationTokensRequired(SparkConf sparkConf, String catalogName) {
+    String metastoreUri = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + URI_KEY, "");
+    String principal = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + PRINCIPAL_KEY, "");
+    boolean isHiveType =
+        CATALOG_TYPE.equalsIgnoreCase(
+            sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + TYPE_KEY, ""));
+    if (metastoreUri.isEmpty()
+        || principal.isEmpty()
+        || !isHiveType
+        || !UserGroupInformation.isSecurityEnabled()) {
+      return false;
+    }
+
+    try {
+      Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
+      Token<?> currentToken = credentials.getToken(new Text(metastoreUri));
+      return currentToken == null;
+    } catch (IOException ex) {
+      LOG.error(
+          "Failed to get current user credentials for catalog {}: {}",
+          catalogName,
+          ex.getMessage(),
+          ex);
+      throw new RuntimeException(ex);
+    }
+  }
+
+  IMetaStoreClient createHmsClient(HiveConf conf) throws Exception {
+    return new HiveMetaStoreClient(conf, null, false);
+  }
+
+  @Override
+  public Option<Object> obtainDelegationTokens(
+      Configuration hadoopConf, SparkConf sparkConf, Credentials creds) {
+    Map<String, IMetaStoreClient> hmsClientCache = Maps.newHashMap();
+    Set<String> requireTokenCatalogs = getRequireTokenCatalogs(sparkConf);
+    LOG.debug("Require token Hive catalogs: {}", requireTokenCatalogs);
+    try {
+      for (String catalogName : requireTokenCatalogs) {
+        Optional<HiveConf> hiveConfOpt = buildHiveConf(sparkConf, hadoopConf, catalogName);
+        if (!hiveConfOpt.isPresent()) {
+          continue;
+        }
+
+        HiveConf remoteHmsConf = hiveConfOpt.get();
+        String metastoreUri = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + URI_KEY);
+        String principal = sparkConf.get(SPARK_SQL_CATALOG_PREFIX + catalogName + PRINCIPAL_KEY);
+        try {
+          UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+          LOG.debug(
+              "Getting Hive delegation token for {} against {} at {}",
+              currentUser.getUserName(),
+              principal,
+              metastoreUri);
+          doAsRealUser(
+              () -> {
+                IMetaStoreClient hmsClient = hmsClientCache.get(metastoreUri);
+                if (hmsClient == null) {
+                  hmsClient = createHmsClient(remoteHmsConf);
+                  hmsClientCache.put(metastoreUri, hmsClient);
+                }
+
+                String tokenStr =
+                    hmsClient.getDelegationToken(currentUser.getUserName(), principal);
+                Token<DelegationTokenIdentifier> hive2Token = new Token<>();
+                hive2Token.decodeFromUrlString(tokenStr);
+                LOG.info("Get Token from hive metastore: {}", hive2Token);
+                creds.addToken(new Text(metastoreUri), hive2Token);
+                return null;
+              });
+        } catch (IOException | UndeclaredThrowableException e) {
+          LOG.error(
+              "Failed to obtain delegation token for catalog {}: {}",
+              catalogName,
+              e.getMessage(),
+              e);
+          throw new RuntimeException(e);
+        } catch (Throwable t) {
+          LOG.error(
+              "Unexpected error when obtaining delegation token for catalog {}: {}",
+              catalogName,
+              t.getMessage(),
+              t);
+          throw new RuntimeException(t);
+        }
+      }
+
+      return Option.empty();
+    } catch (NoClassDefFoundError e) {
+      throw new RuntimeException(e);
+    } finally {
+      close(hmsClientCache);
+    }
+  }
+
+  private void close(Map<String, IMetaStoreClient> hmsClientCache) {
+    hmsClientCache
+        .values()
+        .forEach(
+            client -> {
+              try {
+                client.close();

Review Comment:
   I'm not 100% sure that clients can be closed all the time, for example when one is corrupted. In such a case, stopping the job by throwing `RuntimeException` is not advisable. I would try to close it silently with some warning.
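
   Something like this, as a rough, untested sketch of the best-effort close:

```java
  private void close(Map<String, IMetaStoreClient> hmsClientCache) {
    hmsClientCache
        .values()
        .forEach(
            client -> {
              try {
                client.close();
              } catch (Exception e) {
                // Best-effort close: log a warning instead of failing the whole job.
                LOG.warn("Failed to close HiveMetaStoreClient: {}", e.getMessage(), e);
              }
            });
  }
```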



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

