This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch hadoop-3.3.6
in repository https://gitbox.apache.org/repos/asf/doris-thirdparty.git
The following commit(s) were added to refs/heads/hadoop-3.3.6 by this push:
     new 031dce84cc8 Revert "[fix](tgt)hadoop kerberos support renew TGT by keytab (#173)" but remain principal interface (#298)
031dce84cc8 is described below

commit 031dce84cc80100e05288c94e219476350173f33
Author: Mingyu Chen (Rayner) <morning...@163.com>
AuthorDate: Tue Mar 18 11:57:06 2025 +0800

    Revert "[fix](tgt)hadoop kerberos support renew TGT by keytab (#173)" but remain principal interface (#298)

    Revert #173 but keep `hdfsBuilderSetPrincipal()` in hdfs.h.

    We cannot use `hdfsBuilderSetUserName()` alone to set the principal: if a
    user name is set, `UserGroupInformation.getBestUGI()` builds the UGI from
    that remote user rather than from the Kerberos subject.
---
 .../main/java/org/apache/hadoop/fs/FileSystem.java | 13 ---
 .../src/main/native/libhdfs/hdfs.c                 | 98 ++++++++--------------
 .../src/main/native/libhdfs/include/hdfs/hdfs.h    |  2 +-
 3 files changed, 36 insertions(+), 77 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 2fb9c7592f6..6caf1e7167a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -582,19 +582,6 @@ public abstract class FileSystem extends Configured
     });
   }
 
-  public synchronized static FileSystem newInstanceFromKeytab(final URI uri, final Configuration conf,
-      final String principal, final String keytabPath)
-      throws IOException, InterruptedException {
-    UserGroupInformation.setConfiguration(conf);
-    UserGroupInformation.loginUserFromKeytab(principal, keytabPath);
-    return UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<FileSystem>() {
-      @Override
-      public FileSystem run() throws IOException {
-        return newInstance(uri, conf);
-      }
-    });
-  }
-
   /**
    * Returns the FileSystem for this URI's scheme and authority.
    * The entire URI is passed to the FileSystem instance's initialize method.
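For context on the `getBestUGI()` behavior cited in the commit message: `UserGroupInformation.getBestUGI(ticketCachePath, user)` prefers a configured ticket cache, falls back to the current login user when no user name is given, and otherwise creates a remote-user UGI. A remote-user UGI does not carry the Kerberos credentials established by `loginUserFromKeytab()`, which is why the principal goes through the dedicated `hdfsBuilderSetPrincipal()` interface instead of `hdfsBuilderSetUserName()`. The sketch below is a minimal illustration of that difference, not code from this change; it assumes a reachable KDC, and the principal and keytab path are placeholders.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;

    // Minimal sketch: how a user name diverts getBestUGI() away from the
    // Kerberos login subject. Principal and keytab path are placeholders.
    public class BestUgiSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = new Configuration();
            UserGroupInformation.setConfiguration(conf);
            UserGroupInformation.loginUserFromKeytab(
                    "doris/host@EXAMPLE.COM", "/etc/doris/doris.keytab");

            // No user name: getBestUGI() returns the current (Kerberos) login
            // subject created by loginUserFromKeytab() above.
            UserGroupInformation kerberosUgi =
                    UserGroupInformation.getBestUGI(null, null);

            // User name set (the effect of relying on hdfsBuilderSetUserName()):
            // getBestUGI() builds a remote-user UGI without Kerberos credentials.
            UserGroupInformation remoteUgi =
                    UserGroupInformation.getBestUGI(null, "doris");

            System.out.println(kerberosUgi.getAuthenticationMethod()); // KERBEROS
            System.out.println(remoteUgi.getAuthenticationMethod());   // SIMPLE
        }
    }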
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index 323275cb0d3..942bf61bb06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -36,7 +36,7 @@
 #define JMETHOD1(X, R) "(" X ")" R
 #define JMETHOD2(X, Y, R) "(" X Y ")" R
 #define JMETHOD3(X, Y, Z, R) "(" X Y Z")" R
-#define JMETHOD4(X, Y, Z, A, R) "(" X Y Z A")" R
+#define JMETHOD4(X, Y, Z, A, R) "(" X Y Z A")" R
 
 #define KERBEROS_TICKET_CACHE_PATH "hadoop.security.kerberos.ticket.cache.path"
 
@@ -867,7 +867,7 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
         // fs = FileSytem#getLocal(conf);
         jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_FILE_SYSTEM, "getLocal",
-                JMETHOD1(JPARAM(HADOOP_CONF) , JPARAM(HADOOP_LOCALFS)),
+                JMETHOD1(JPARAM(HADOOP_CONF), JPARAM(HADOOP_LOCALFS)),
                 jConfiguration);
         if (jthr) {
             ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -922,7 +922,28 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
                                     hdfsBuilderToStr(bld, buf, sizeof(buf)));
         goto done;
     }
-    if (bld->kerbTicketCachePath) {
+    if (bld->kerb5ConfPath && bld->keyTabFile && bld->kerbPrincipal) {
+        jthr = invokeMethod(env, NULL, STATIC, NULL, JC_SECURITY_CONFIGURATION, "setConfiguration", JMETHOD1(JPARAM(HADOOP_CONF),JAVA_VOID), jConfiguration);
+        if (jthr) {
+            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,"hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
+            goto done;
+        }
+        jthr = newJavaStr(env, bld->keyTabFile, &jKeyTabString);
+        if (jthr) {
+            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,"hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
+            goto done;
+        }
+        jthr = newJavaStr(env, bld->kerbPrincipal, &jPrincipalString);
+        if (jthr) {
+            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,"hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
+            goto done;
+        }
+        jthr = invokeMethod(env, NULL, STATIC, NULL, JC_SECURITY_CONFIGURATION, "loginUserFromKeytab", JMETHOD2(JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID), jPrincipalString, jKeyTabString);
+        if (jthr) {
+            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,"hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
+            goto done;
+        }
+    } else if (bld->kerbTicketCachePath) {
         jthr = hadoopConfSetStr(env, jConfiguration,
             KERBEROS_TICKET_CACHE_PATH, bld->kerbTicketCachePath);
         if (jthr) {
@@ -935,69 +956,20 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
             JMETHOD1(JPARAM(HADOOP_CONF),JAVA_VOID), jConfiguration);
     }
     if (bld->forceNewInstance) {
-        // need kerb5ConfPath to enable kerberos authentication
-        if (bld->kerb5ConfPath && bld->kerbPrincipal && bld->keyTabFile) {
-            jthr = newJavaStr(env, bld->kerbPrincipal, &jPrincipalString);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                        "hdfsBuilderConnect(%s)",
-                        hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jthr = newJavaStr(env, bld->keyTabFile, &jKeyTabString);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,"hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jthr = invokeMethod(env, &jVal, STATIC, NULL,
-                    JC_FILE_SYSTEM, "newInstanceFromKeytab",
-                    JMETHOD4(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
-                    JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)), jURI,
-                    jConfiguration, jPrincipalString, jKeyTabString);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                        "hdfsBuilderConnect(%s)",
-                        hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-        } else {
-            jthr = invokeMethod(env, &jVal, STATIC, NULL,
-                    JC_FILE_SYSTEM, "newInstance",
-                    JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
-                    JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)), jURI,
-                    jConfiguration, jUserString);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                        "hdfsBuilderConnect(%s)",
-                        hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
+        jthr = invokeMethod(env, &jVal, STATIC, NULL,
+                JC_FILE_SYSTEM, "newInstance",
+                JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
+                JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)), jURI,
+                jConfiguration, jUserString);
+        if (jthr) {
+            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                    "hdfsBuilderConnect(%s)",
+                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
+            goto done;
         }
         jFS = jVal.l;
     } else {
-        if (bld->keyTabFile && bld->kerb5ConfPath && bld->kerbPrincipal) {
-            jthr = newJavaStr(env, bld->kerbPrincipal, &jPrincipalString);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                        "hdfsBuilderConnect(%s)",
-                        hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jthr = invokeMethod(env, NULL, STATIC, NULL, JC_SECURITY_CONFIGURATION, "setConfiguration", JMETHOD1(JPARAM(HADOOP_CONF),JAVA_VOID), jConfiguration);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,"hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jthr = newJavaStr(env, bld->keyTabFile, &jKeyTabString);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,"hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jthr = invokeMethod(env, NULL, STATIC, NULL, JC_SECURITY_CONFIGURATION, "loginUserFromKeytab", JMETHOD2(JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID), jPrincipalString, jKeyTabString);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,"hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
+        if (bld->keyTabFile && bld->kerb5ConfPath) {
             jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_FILE_SYSTEM, "get",
                     JMETHOD1(JPARAM(HADOOP_CONF), JPARAM(HADOOP_FS)), jConfiguration);
         } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
index ec389ad30af..263e8939e11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
@@ -362,7 +362,7 @@ extern "C" {
      */
     LIBHDFS_EXTERNAL
     void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName);
-
+
     /**
      * Set the principal to use when connecting to the HDFS cluster.
      *

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org