This is an automated email from the ASF dual-hosted git repository.

caolu pushed a commit to branch kylin5
in repository https://gitbox.apache.org/repos/asf/kylin.git

commit 510edafa1224336f493075a859263039a74e346a
Author: Yinghao Lin <[email protected]>
AuthorDate: Fri Mar 28 00:23:21 2025 +0800

    kylin 5.0.2 followup fix
---
 build/release/compress.sh                          |   7 +
 .../standalone-docker/all-in-one/Dockerfile        |   4 +-
 .../standalone-docker/all-in-one/README.md         | 323 +--------------------
 .../standalone-docker/all-in-one/build.sh          |  12 +-
 .../standalone-docker/all-in-one/run.sh            |   2 +-
 .../src/components/monitor/batchJobs/locales.js    |  25 +-
 pom.xml                                            |   4 -
 .../java/org/apache/kylin/common/KylinVersion.java |  12 +-
 .../src/main/resources/kylin-defaults0.properties  |   2 +
 .../kylin/job/constant/ExecutableConstants.java    |  14 +-
 .../apache/kylin/engine/spark/job/StageEnum.java   |   6 +-
 .../scala/org/apache/spark/sql/KylinSession.scala  |  19 --
 12 files changed, 56 insertions(+), 374 deletions(-)

diff --git a/build/release/compress.sh b/build/release/compress.sh
index 5fe084218a..bc590675a7 100755
--- a/build/release/compress.sh
+++ b/build/release/compress.sh
@@ -102,6 +102,13 @@ if [[ "${WITH_GLUTEN}" = "1" ]]; then
     cp -rf gluten/jars/spark33/* ${package_name}/lib/gluten/
     mv spark/libch.so ${package_name}/server/
     cp spark/jars/gluten.jar ${package_name}/lib/ext/
+    if [[ "$(uname)" == "Darwin" ]]; then
+    sed -i '' '22a\
+export LD_PRELOAD=${KYLIN_HOME}/server/libch.so
+' ${package_name}/sbin/spark-test.sh
+    else
+        sed -i '22aexport LD_PRELOAD=${KYLIN_HOME}/server/libch.so' ${package_name}/sbin/spark-test.sh
+    fi
 fi
 
 # cp -rf deploy/.keystore ${package_name}/server/
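
For context on the hunk above: BSD sed on macOS needs an explicit (empty) backup suffix for `-i` and requires the appended text on its own line after `a\`, while GNU sed accepts the text inline after `a`. A minimal sketch of the same insertion factored into one helper (the function name is illustrative; the target path and line number come from the hunk):

```
#!/usr/bin/env bash
# Append the LD_PRELOAD export after line 22 of spark-test.sh,
# handling the BSD (macOS) vs GNU sed syntax difference in one place.
insert_ld_preload() {
    local target="$1"   # e.g. ${package_name}/sbin/spark-test.sh
    local line='export LD_PRELOAD=${KYLIN_HOME}/server/libch.so'   # kept literal; expanded when spark-test.sh runs
    if [[ "$(uname)" == "Darwin" ]]; then
        # BSD sed: -i takes a (possibly empty) backup suffix, and "a\" must be
        # followed by the appended text on its own line.
        sed -i '' "22a\\
${line}
" "${target}"
    else
        # GNU sed: the appended text may follow "a" directly.
        sed -i "22a${line}" "${target}"
    fi
}
```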
diff --git a/dev-support/release-manager/standalone-docker/all-in-one/Dockerfile b/dev-support/release-manager/standalone-docker/all-in-one/Dockerfile
index 1e3a309836..be5cf1d3ec 100644
--- a/dev-support/release-manager/standalone-docker/all-in-one/Dockerfile
+++ b/dev-support/release-manager/standalone-docker/all-in-one/Dockerfile
@@ -74,8 +74,8 @@ RUN cp /opt/mysql-connector-j-8.0.33.jar $HIVE_HOME/lib/
 
 # install apache kylin
 RUN mkdir -p /home/kylin
-ADD package/apache-kylin-5.0.0-GA-bin.tar.gz /home/kylin
-ENV KYLIN_HOME=/home/kylin/apache-kylin-5.0.0-GA-bin
+ADD package/apache-kylin-5.0.2-bin.tar.gz /home/kylin
+ENV KYLIN_HOME=/home/kylin/apache-kylin-5.0.2-bin
 RUN cp /opt/mysql-connector-j-8.0.33.jar $KYLIN_HOME/lib/ext/
 COPY conf/kylin/kylin.properties.override $KYLIN_HOME/conf/
 
diff --git a/dev-support/release-manager/standalone-docker/all-in-one/README.md b/dev-support/release-manager/standalone-docker/all-in-one/README.md
index b3ba5dc040..d267d37dc0 100644
--- a/dev-support/release-manager/standalone-docker/all-in-one/README.md
+++ b/dev-support/release-manager/standalone-docker/all-in-one/README.md
@@ -1,13 +1,10 @@
 # Preview latest Kylin (5.x)
 
 ## [Image Tag Information](https://hub.docker.com/r/apachekylin/apache-kylin-standalone)
-| Tag                  | Image Contents                                                                  | Comment & Publish Date |
-|----------------------|---------------------------------------------------------------------------------|------------------------|
-| 5.0.0-GA             | [**Recommended for users**] The official 5.0.0-GA with Spark & Gluten bundled. | Uploaded at 2024-09-13 |
-| 5.0-beta             | The official 5.0.0-beta with Spark bundled. | Uploaded at 2023-09-08, worked fine on Docker Desktop Mac 4.3.0 & 4.22.1(and Windows) , |
-| kylin-4.0.1-mondrian | The official Kylin 4.0.1 with **MDX** function enabled | Uploaded at 2022-05-13 |
-| 5-dev                | [**For developer only**] Kylin 5.X package with some sample data/tools etc | Uploaded at 2023-11-21, this image for developer to debug and test Kylin 5.X source code if he/her didn't have a Hadoop env |
-| 5.x-base-dev-only    | [**For maintainer only**] Hadoop, Hive, Zookeeper, MySQL, JDK8 | Uploaded at 2023-09-07, this is the base image for all Kylin 5.X image, so it didn't contain Kylin package, see file `Dockerfile_hadoop` for information |
+| Tag                  | Image Contents                                                                  | Comment & Publish Date |
+|----------------------|---------------------------------------------------------------------------------|------------------------|
+| 5.0.2-GA             | [**Recommended for users**] The official 5.0.2-GA with Spark & Gluten bundled. | Uploaded at 2025-03-31 |
+| kylin-4.0.1-mondrian | The official Kylin 4.0.1 with **MDX** function enabled | Uploaded at 2022-05-13 |
 
 ## Why you need Kylin 5
 
@@ -55,320 +52,12 @@ docker run -d \
     -p 8032:8032 \
     -p 8042:8042 \
     -p 2181:2181 \
-    apachekylin/apache-kylin-standalone:5.0.0-GA
+    apachekylin/apache-kylin-standalone:5.0.2-GA
 
 docker logs --follow Kylin5-Machine
 ```
 
-When you enter these two commands, the logs will scroll
-out in terminal and the process will continue for 3-5 minutes.
-
-```
-===============================================================================
-*******************************************************************************
-|
-|   Start SSH server at Fri Sep 13 12:15:24 UTC 2024
-|   Command: /etc/init.d/ssh start
-|
- * Starting OpenBSD Secure Shell server sshd
-   ...done.
-[Start SSH server] succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Start MySQL at Fri Sep 13 12:15:25 UTC 2024
-|   Command: service mysql start
-|
- * Starting MySQL database server mysqld
-su: warning: cannot change directory to /nonexistent: No such file or directory
-   ...done.
-[Start MySQL] succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Create Database kylin at Fri Sep 13 12:15:36 UTC 2024
-|   Command: mysql -uroot -p123456 -e CREATE DATABASE IF NOT EXISTS kylin 
default charset utf8mb4 COLLATE utf8mb4_general_ci;
-|
-mysql: [Warning] Using a password on the command line interface can be 
insecure.
-[Create Database kylin] succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Create Database hive3 at Fri Sep 13 12:15:36 UTC 2024
-|   Command: mysql -uroot -p123456 -e CREATE DATABASE IF NOT EXISTS hive3 
default charset utf8mb4 COLLATE utf8mb4_general_ci;
-|
-mysql: [Warning] Using a password on the command line interface can be 
insecure.
-[Create Database hive3] succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Init Hive at Fri Sep 13 12:15:36 UTC 2024
-|   Command: schematool -initSchema -dbType mysql
-|
-SLF4J: Class path contains multiple SLF4J bindings.
-SLF4J: Found binding in 
[jar:file:/opt/apache-hive-3.1.3-bin/lib/log4j-slf4j-impl-2.17.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
-SLF4J: Found binding in 
[jar:file:/opt/hadoop-3.2.4/share/hadoop/common/lib/slf4j-reload4j-1.7.35.jar!/org/slf4j/impl/StaticLoggerBinder.class]
-SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an 
explanation.
-SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
-Metastore connection URL: 
jdbc:mysql://127.0.0.1:3306/hive3?useSSL=false&allowPublicKeyRetrieval=true&characterEncoding=UTF-8
-Metastore Connection Driver : com.mysql.cj.jdbc.Driver
-Metastore connection User: root
-Starting metastore schema initialization to 3.1.0
-Initialization script hive-schema-3.1.0.mysql.sql
-...
-Initialization script completed
-schemaTool completed
-[Init Hive] succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Format HDFS at Fri Sep 13 12:15:50 UTC 2024
-|   Command: hdfs namenode -format
-|
-WARNING: /opt/hadoop-3.2.4/logs does not exist. Creating.
-2024-09-13 12:15:51,423 INFO namenode.NameNode: STARTUP_MSG: 
-/************************************************************
-STARTUP_MSG: Starting NameNode
-STARTUP_MSG:   host = localhost/127.0.0.1
-STARTUP_MSG:   args = [-format]
-STARTUP_MSG:   version = 3.2.4
-STARTUP_MSG:   classpath = 
/opt/hadoop-3.2.4/etc/hadoop:/opt/hadoop-3.2.4/share/hadoop/common/lib/audience-annotations-0.5.0.jar:/opt/hadoop-3.2.4/share/hadoop/common/lib/httpclient-4.5.13.jar:/opt/hadoop-3.2.4/share/hadoop/common/lib/curator-client-2.13.0.jar:/opt/hadoop-3.2.4/share/hadoop/common/lib/jetty-server-9.4.43.v20210629.jar:/opt/hadoop-3.2.4/share/hadoop/common/lib/checker-qual-2.5.2.jar:/opt/hadoop-3.2.4/share/hadoop/common/lib/woodstox-core-5.3.0.jar:/opt/hadoop-3.2.4/share/
 [...]
-STARTUP_MSG:   build = Unknown -r 7e5d9983b388e372fe640f21f048f2f2ae6e9eba; 
compiled by 'ubuntu' on 2022-07-12T11:58Z
-STARTUP_MSG:   java = 1.8.0_422
-************************************************************/
-2024-09-13 12:15:51,434 INFO namenode.NameNode: registered UNIX signal 
handlers for [TERM, HUP, INT]
-2024-09-13 12:15:51,539 INFO namenode.NameNode: createNameNode [-format]
-Formatting using clusterid: CID-e9f0293c-adcd-40a6-9c7f-ab7537b2eedf
-2024-09-13 12:15:52,059 INFO namenode.FSEditLog: Edit logging is async:true
-2024-09-13 12:15:52,090 INFO namenode.FSNamesystem: KeyProvider: null
-2024-09-13 12:15:52,092 INFO namenode.FSNamesystem: fsLock is fair: true
-2024-09-13 12:15:52,092 INFO namenode.FSNamesystem: Detailed lock hold time 
metrics enabled: false
-2024-09-13 12:15:52,100 INFO namenode.FSNamesystem: fsOwner             = root 
(auth:SIMPLE)
-2024-09-13 12:15:52,100 INFO namenode.FSNamesystem: supergroup          = 
supergroup
-2024-09-13 12:15:52,100 INFO namenode.FSNamesystem: isPermissionEnabled = true
-2024-09-13 12:15:52,100 INFO namenode.FSNamesystem: HA Enabled: false
-2024-09-13 12:15:52,153 INFO common.Util: 
dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO 
profiling
-2024-09-13 12:15:52,165 INFO blockmanagement.DatanodeManager: 
dfs.block.invalidate.limit: configured=1000, counted=60, effected=1000
-2024-09-13 12:15:52,165 INFO blockmanagement.DatanodeManager: 
dfs.namenode.datanode.registration.ip-hostname-check=true
-2024-09-13 12:15:52,169 INFO blockmanagement.BlockManager: 
dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
-2024-09-13 12:15:52,169 INFO blockmanagement.BlockManager: The block deletion 
will start around 2024 Sep 13 12:15:52
-2024-09-13 12:15:52,171 INFO util.GSet: Computing capacity for map BlocksMap
-2024-09-13 12:15:52,171 INFO util.GSet: VM type       = 64-bit
-2024-09-13 12:15:52,172 INFO util.GSet: 2.0% max memory 1.7 GB = 34.8 MB
-2024-09-13 12:15:52,172 INFO util.GSet: capacity      = 2^22 = 4194304 entries
-2024-09-13 12:15:52,180 INFO blockmanagement.BlockManager: Storage policy 
satisfier is disabled
-2024-09-13 12:15:52,180 INFO blockmanagement.BlockManager: 
dfs.block.access.token.enable = false
-2024-09-13 12:15:52,186 INFO blockmanagement.BlockManagerSafeMode: 
dfs.namenode.safemode.threshold-pct = 0.9990000128746033
-2024-09-13 12:15:52,186 INFO blockmanagement.BlockManagerSafeMode: 
dfs.namenode.safemode.min.datanodes = 0
-2024-09-13 12:15:52,186 INFO blockmanagement.BlockManagerSafeMode: 
dfs.namenode.safemode.extension = 30000
-2024-09-13 12:15:52,187 INFO blockmanagement.BlockManager: defaultReplication  
       = 1
-2024-09-13 12:15:52,187 INFO blockmanagement.BlockManager: maxReplication      
       = 512
-2024-09-13 12:15:52,187 INFO blockmanagement.BlockManager: minReplication      
       = 1
-2024-09-13 12:15:52,187 INFO blockmanagement.BlockManager: 
maxReplicationStreams      = 2
-2024-09-13 12:15:52,187 INFO blockmanagement.BlockManager: 
redundancyRecheckInterval  = 3000ms
-2024-09-13 12:15:52,187 INFO blockmanagement.BlockManager: encryptDataTransfer 
       = false
-2024-09-13 12:15:52,188 INFO blockmanagement.BlockManager: maxNumBlocksToLog   
       = 1000
-2024-09-13 12:15:52,238 INFO namenode.FSDirectory: GLOBAL serial map: bits=29 
maxEntries=536870911
-2024-09-13 12:15:52,238 INFO namenode.FSDirectory: USER serial map: bits=24 
maxEntries=16777215
-2024-09-13 12:15:52,238 INFO namenode.FSDirectory: GROUP serial map: bits=24 
maxEntries=16777215
-2024-09-13 12:15:52,238 INFO namenode.FSDirectory: XATTR serial map: bits=24 
maxEntries=16777215
-2024-09-13 12:15:52,259 INFO util.GSet: Computing capacity for map INodeMap
-2024-09-13 12:15:52,259 INFO util.GSet: VM type       = 64-bit
-2024-09-13 12:15:52,259 INFO util.GSet: 1.0% max memory 1.7 GB = 17.4 MB
-2024-09-13 12:15:52,259 INFO util.GSet: capacity      = 2^21 = 2097152 entries
-2024-09-13 12:15:52,260 INFO namenode.FSDirectory: ACLs enabled? false
-2024-09-13 12:15:52,260 INFO namenode.FSDirectory: POSIX ACL inheritance 
enabled? true
-2024-09-13 12:15:52,260 INFO namenode.FSDirectory: XAttrs enabled? true
-2024-09-13 12:15:52,261 INFO namenode.NameNode: Caching file names occurring 
more than 10 times
-2024-09-13 12:15:52,265 INFO snapshot.SnapshotManager: Loaded config 
captureOpenFiles: false, skipCaptureAccessTimeOnlyChange: false, 
snapshotDiffAllowSnapRootDescendant: true, maxSnapshotLimit: 65536
-2024-09-13 12:15:52,267 INFO snapshot.SnapshotManager: SkipList is disabled
-2024-09-13 12:15:52,272 INFO util.GSet: Computing capacity for map cachedBlocks
-2024-09-13 12:15:52,272 INFO util.GSet: VM type       = 64-bit
-2024-09-13 12:15:52,272 INFO util.GSet: 0.25% max memory 1.7 GB = 4.4 MB
-2024-09-13 12:15:52,272 INFO util.GSet: capacity      = 2^19 = 524288 entries
-2024-09-13 12:15:52,284 INFO metrics.TopMetrics: NNTop conf: 
dfs.namenode.top.window.num.buckets = 10
-2024-09-13 12:15:52,284 INFO metrics.TopMetrics: NNTop conf: 
dfs.namenode.top.num.users = 10
-2024-09-13 12:15:52,284 INFO metrics.TopMetrics: NNTop conf: 
dfs.namenode.top.windows.minutes = 1,5,25
-2024-09-13 12:15:52,288 INFO namenode.FSNamesystem: Retry cache on namenode is 
enabled
-2024-09-13 12:15:52,288 INFO namenode.FSNamesystem: Retry cache will use 0.03 
of total heap and retry cache entry expiry time is 600000 millis
-2024-09-13 12:15:52,291 INFO util.GSet: Computing capacity for map 
NameNodeRetryCache
-2024-09-13 12:15:52,291 INFO util.GSet: VM type       = 64-bit
-2024-09-13 12:15:52,292 INFO util.GSet: 0.029999999329447746% max memory 1.7 
GB = 535.3 KB
-2024-09-13 12:15:52,292 INFO util.GSet: capacity      = 2^16 = 65536 entries
-2024-09-13 12:15:52,314 INFO namenode.FSImage: Allocated new BlockPoolId: 
BP-1031271309-127.0.0.1-1726229752306
-2024-09-13 12:15:52,328 INFO common.Storage: Storage directory 
/data/hadoop/dfs/name has been successfully formatted.
-2024-09-13 12:15:52,352 INFO namenode.FSImageFormatProtobuf: Saving image file 
/data/hadoop/dfs/name/current/fsimage.ckpt_0000000000000000000 using no 
compression
-2024-09-13 12:15:52,435 INFO namenode.FSImageFormatProtobuf: Image file 
/data/hadoop/dfs/name/current/fsimage.ckpt_0000000000000000000 of size 396 
bytes saved in 0 seconds .
-2024-09-13 12:15:52,447 INFO namenode.NNStorageRetentionManager: Going to 
retain 1 images with txid >= 0
-2024-09-13 12:15:52,470 INFO namenode.FSNamesystem: Stopping services started 
for active state
-2024-09-13 12:15:52,470 INFO namenode.FSNamesystem: Stopping services started 
for standby state
-2024-09-13 12:15:52,474 INFO namenode.FSImage: FSImageSaver clean checkpoint: 
txid=0 when meet shutdown.
-2024-09-13 12:15:52,475 INFO namenode.NameNode: SHUTDOWN_MSG: 
-/************************************************************
-SHUTDOWN_MSG: Shutting down NameNode at localhost/127.0.0.1
-************************************************************/
-[Format HDFS] succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Start Zookeeper at Fri Sep 13 12:15:52 UTC 2024
-|   Command: /opt/apache-zookeeper-3.7.2-bin/bin/zkServer.sh start
-|
-ZooKeeper JMX enabled by default
-Using config: /opt/apache-zookeeper-3.7.2-bin/bin/../conf/zoo.cfg
-Starting zookeeper ... STARTED
-[Start Zookeeper] succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Start Hadoop at Fri Sep 13 12:15:53 UTC 2024
-|   Command: /opt/hadoop-3.2.4/sbin/start-all.sh
-|
-Starting namenodes on [localhost]
-localhost: Warning: Permanently added 'localhost' (ED25519) to the list of 
known hosts.
-Starting datanodes
-Starting secondary namenodes [localhost]
-Starting resourcemanager
-Starting nodemanagers
-[Start Hadoop] succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Start History Server at Fri Sep 13 12:16:09 UTC 2024
-|   Command: /opt/hadoop-3.2.4/sbin/start-historyserver.sh
-|
-WARNING: Use of this script to start the MR JobHistory daemon is deprecated.
-WARNING: Attempting to execute replacement "mapred --daemon start" instead.
-[Start History Server] succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Start Hive metastore at Fri Sep 13 12:16:11 UTC 2024
-|   Command: /opt/apache-hive-3.1.3-bin/bin/start-hivemetastore.sh
-|
-[Start Hive metastore] succeed.
-Checking Check Hive metastore's status...
-+
-Check Check Hive metastore succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Start Hive server at Fri Sep 13 12:16:22 UTC 2024
-|   Command: /opt/apache-hive-3.1.3-bin/bin/start-hiveserver2.sh
-|
-[Start Hive server] succeed.
-Checking Check Hive server's status...
-+
-Check Check Hive server succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Prepare sample data at Fri Sep 13 12:16:45 UTC 2024
-|   Command: /home/kylin/apache-kylin-5.0.0-GA-bin/bin/sample.sh
-|
-Loading sample data into HDFS tmp path: /tmp/kylin/sample_cube/data
-WARNING: log4j.properties is not found. HADOOP_CONF_DIR may be incomplete.
-WARNING: log4j.properties is not found. HADOOP_CONF_DIR may be incomplete.
-Going to create sample tables in hive to database SSB by hive
-WARNING: log4j.properties is not found. HADOOP_CONF_DIR may be incomplete.
-SLF4J: Class path contains multiple SLF4J bindings.
-SLF4J: Found binding in 
[jar:file:/opt/apache-hive-3.1.3-bin/lib/log4j-slf4j-impl-2.17.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
-SLF4J: Found binding in 
[jar:file:/opt/hadoop-3.2.4/share/hadoop/common/lib/slf4j-reload4j-1.7.35.jar!/org/slf4j/impl/StaticLoggerBinder.class]
-SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an 
explanation.
-SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
-Hive Session ID = 2e70c349-7575-4a00-84c8-08b24f1a38cb
-
-Logging initialized using configuration in 
jar:file:/opt/apache-hive-3.1.3-bin/lib/hive-common-3.1.3.jar!/hive-log4j2.properties
 Async: true
-Hive Session ID = bc35a6b2-1846-4a03-837f-271679ac6185
-OK
-Time taken: 1.136 seconds
-WARNING: log4j.properties is not found. HADOOP_CONF_DIR may be incomplete.
-SLF4J: Class path contains multiple SLF4J bindings.
-SLF4J: Found binding in 
[jar:file:/opt/apache-hive-3.1.3-bin/lib/log4j-slf4j-impl-2.17.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
-SLF4J: Found binding in 
[jar:file:/opt/hadoop-3.2.4/share/hadoop/common/lib/slf4j-reload4j-1.7.35.jar!/org/slf4j/impl/StaticLoggerBinder.class]
-SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an 
explanation.
-SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
-Hive Session ID = 2e1f6b0e-4017-4e44-871d-239ab6dadc29
-
-Logging initialized using configuration in 
jar:file:/opt/apache-hive-3.1.3-bin/lib/hive-common-3.1.3.jar!/hive-log4j2.properties
 Async: true
-Hive Session ID = 965d433f-9635-4bfb-9aa6-93705ab733a4
-...
-Time taken: 1.946 seconds
-Loading data to table ssb.customer
-OK
-Time taken: 0.517 seconds
-Loading data to table ssb.dates
-OK
-Time taken: 0.256 seconds
-Loading data to table ssb.lineorder
-OK
-Time taken: 0.248 seconds
-Loading data to table ssb.part
-OK
-Time taken: 0.254 seconds
-Loading data to table ssb.supplier
-OK
-Time taken: 0.243 seconds
-Sample hive tables are created successfully; Going to create sample project...
-kylin version is 5.0.0.0
-The metadata backup path is 
hdfs://localhost:9000/kylin/kylin/_backup/2024-09-13-12-17-29_backup/core_meta.
-Sample model is created successfully in project 'learn_kylin'. Detailed 
Message is at "logs/shell.stderr".
-[Prepare sample data] succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Kylin ENV bypass at Fri Sep 13 12:17:29 UTC 2024
-|   Command: touch /home/kylin/apache-kylin-5.0.0-GA-bin/bin/check-env-bypass
-|
-[Kylin ENV bypass] succeed.
-
-===============================================================================
-*******************************************************************************
-|
-|   Start Kylin Instance at Fri Sep 13 12:17:29 UTC 2024
-|   Command: /home/kylin/apache-kylin-5.0.0-GA-bin/bin/kylin.sh -v start
-|
-java is /usr/lib/jvm/java-8-openjdk-amd64/bin/java
-Starting Kylin...
-This user don't have permission to run crontab.
-KYLIN_HOME is:/home/kylin/apache-kylin-5.0.0-GA-bin
-KYLIN_CONFIG_FILE 
is:/home/kylin/apache-kylin-5.0.0-GA-bin/conf/kylin.properties
-SPARK_HOME is:/home/kylin/apache-kylin-5.0.0-GA-bin/spark
-Retrieving hadoop config dir...
-KYLIN_JVM_SETTINGS is -server -Xms1g -Xmx8g -XX:+UseG1GC 
-XX:MaxGCPauseMillis=200 -XX:G1HeapRegionSize=16m -XX:+PrintFlagsFinal 
-XX:+PrintReferenceGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps 
-XX:+PrintGCDateStamps -XX:+PrintAdaptiveSizePolicy 
-XX:+UnlockDiagnosticVMOptions -XX:+G1SummarizeConcMark  
-Xloggc:/home/kylin/apache-kylin-5.0.0-GA-bin/logs/kylin.gc.%p  
-XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M 
-XX:-OmitStackTraceInFastThrow -Dlog4j2. [...]
-KYLIN_DEBUG_SETTINGS is not set, will not enable remote debuging
-KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify 
your own native path
-SPARK_HDP_VERSION is set to 'hadoop'
-Export SPARK_HOME to /home/kylin/apache-kylin-5.0.0-GA-bin/spark
-LD_PRELOAD= is:/home/kylin/apache-kylin-5.0.0-GA-bin/server/libch.so
-Checking Zookeeper role...
-Checking Spark directory...
-Kylin is starting. It may take a while. For status, please visit 
http://localhost:7070/kylin/index.html.
-You may also check status via: PID:4746, or Log: 
/home/kylin/apache-kylin-5.0.0-GA-bin/logs/kylin.log.
-[Start Kylin Instance] succeed.
-Checking Check Env Script's status...
-/home/kylin/apache-kylin-5.0.0-GA-bin/bin/check-env-bypass
-+
-Check Check Env Script succeed.
-Checking Kylin Instance's status...
-...
-Check Kylin Instance succeed.
-Kylin service is already available for you to preview.
-```
-
-Finally, the following message indicates that the Kylin is ready :
+The following message indicates that Kylin is ready:
 
 ```
 Kylin service is already available for you to preview.
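
If you would rather script the readiness check than watch the log, a small sketch (the container name, log message, and URL are taken from the run command and startup output shown in this README):

```
# Block until the standalone container reports readiness, then open the UI.
docker logs --follow Kylin5-Machine 2>&1 \
  | grep -m1 "Kylin service is already available for you to preview."
echo "Kylin UI: http://localhost:7070/kylin/index.html"
```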
diff --git a/dev-support/release-manager/standalone-docker/all-in-one/build.sh b/dev-support/release-manager/standalone-docker/all-in-one/build.sh
index ca8638ab16..3129a2f039 100755
--- a/dev-support/release-manager/standalone-docker/all-in-one/build.sh
+++ b/dev-support/release-manager/standalone-docker/all-in-one/build.sh
@@ -18,7 +18,7 @@
 #
 
 
-TAG=5.0.0-GA
+TAG=5.0.2-GA
 
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 cd ${DIR} || exit
@@ -34,12 +34,12 @@ fi
 # 2. Execute sbin/download-spark-user.sh and should have a new spark folder at the root of kylin dir
 # 3. Re-compress kylin folder and put it to the package dir for Dockerfile use
 #
-# wget https://archive.apache.org/dist/kylin/apache-kylin-5.0.0-GA/apache-kylin-5.0.0-GA-bin.tar.gz -P ${DIR}/package/
-# tar zxf apache-kylin-5.0.0-GA-bin.tar.gz
-# cd apache-kylin-5.0.0-GA-bin
+# wget https://github.com/apache/kylin/releases/download/kylin-5.0.2/apache-kylin-5.0.2-bin.tar.gz -P ${DIR}/package/
+# tar zxf apache-kylin-5.0.2-bin.tar.gz
+# cd apache-kylin-5.0.2-bin
 # bash sbin/download-spark-user.sh
-# tar -czf apache-kylin-5.0.0-GA-bin.tar.gz apache-kylin-5.0.0-GA-bin
-# Notice - For mac tar command use: tar czf apache-kylin-5.0.0-GA-bin.tar.gz --no-mac-metadata apache-kylin-5.0.0-GA-bin
+# tar -czf apache-kylin-5.0.2-bin.tar.gz apache-kylin-5.0.2-bin
+# Notice - For mac tar command use: tar czf apache-kylin-5.0.2-bin.tar.gz --no-mac-metadata apache-kylin-5.0.2-bin
 # to avoid AppleDouble format hidden files inside the compressed file
 
 echo "start to build kylin standalone docker image"
diff --git a/dev-support/release-manager/standalone-docker/all-in-one/run.sh b/dev-support/release-manager/standalone-docker/all-in-one/run.sh
index 8b890ffedc..2ca410b825 100755
--- a/dev-support/release-manager/standalone-docker/all-in-one/run.sh
+++ b/dev-support/release-manager/standalone-docker/all-in-one/run.sh
@@ -17,7 +17,7 @@
 # limitations under the License.
 #
 
-TAG=5.0.0-GA
+TAG=5.0.2-GA
 
 docker run -d \
     --name Kylin5-Machine \
diff --git a/kystudio/src/components/monitor/batchJobs/locales.js b/kystudio/src/components/monitor/batchJobs/locales.js
index 5e24c81e91..ca79aa96ac 100644
--- a/kystudio/src/components/monitor/batchJobs/locales.js
+++ b/kystudio/src/components/monitor/batchJobs/locales.js
@@ -19,6 +19,7 @@ export default {
     tip_jobResume: 'Resume the Job',
     tip_jobPause: 'Pause the Job',
     tip_jobDiscard: 'Discard the Job',
+    cubeName: 'Cube Name',
     NEW: 'NEW',
     PENDING: 'PENDING',
     RUNNING: 'RUNNING',
@@ -46,8 +47,9 @@ export default {
     discardJob: 'Are you sure you want to discard the following job(s)? Please note that it couldn\'t be recovered.',
     discardJobWarning: 'Are you sure you want to discard the following job(s)? Discarding the highlighted job(s) might result in gaps between segments. The query results would be empty for those data ranges. Please note that the discarded jobs couldn’t be recovered.',
     discardJobTitle: 'Discard Job',
+    discardChJobTip: 'Some of the loaded data can still serve queries. Please check the segment and complete the data; query performance may be degraded until the data is complete.',
     jobName: 'Job Name',
-    duration: 'Excuting',
+    duration: 'Executing',
     totalDuration: 'Total Duration',
     waiting: 'Waiting',
     noSelectJobs: 'Please select at least one job.',
@@ -76,6 +78,13 @@ export default {
     SUB_PARTITION_REFRESH: 'Refresh Sub-partitions Data',
     SUB_PARTITION_BUILD: 'Build Sub-partitions Data',
     SNAPSHOT_BUILD: 'Build Snapshot',
+    SNAPSHOT_REFRESH: 'Refresh Snapshot',
+    EXPORT_TO_SECOND_STORAGE: 'Load Data to Tiered Storage',
+    SECOND_STORAGE_MODEL_CLEAN: 'Delete Tiered Storage (Model)',
+    SECOND_STORAGE_NODE_CLEAN: 'Delete Tiered Storage (Project)',
+    SECOND_STORAGE_SEGMENT_CLEAN: 'Delete Tiered Storage (Segment)',
+    SECOND_STORAGE_INDEX_CLEAN: 'Delete Tiered Storage (Index)',
+    SECOND_STORAGE_REFRESH_SECONDARY_INDEXES: 'Refresh skipping index',
     INTERNAL_TABLE_BUILD: 'Load internal table',
     INTERNAL_TABLE_REFRESH: 'Refresh internal table',
     LAYOUT_DATA_OPTIMIZE: 'Storage Optimization',
@@ -101,18 +110,12 @@ export default {
     buildSnapshot: 'Build Snapshot',
     increamLoad: 'Incremental Load',
     filteredTotalSize: '{totalSize} result(s)',
-    secondaryStorage: 'Tiered Storage',
-    exportSecondaryStorage: 'Load Data to Tiered Storage',
-    mergeSecondaryStorage: 'Merge Data to Tiered Storage',
-    refreshSecondaryStorage: 'Refresh Tiered Storage',
-    delSecondaryStorage: 'Delete Tiered Storage',
     waitingYarnResource: 'Waiting for resources',
     buildOrRefreshSnapshot: 'Build or refresh snapshot',
     materializeFactTableView: 'Materialize fact table view',
     generateGlobalDict: 'Generate global dictionary',
     generateFlatTable: 'Generate flat table',
     saveFlatTable: 'Save flat table',
-    costBasedPlanner: 'Cost based planner',
     getFlatTableStatistics: 'Get flat table statistics',
     generateDictOfCC: 'Generate global dictionary of computed columns',
     mergeFlatTable: 'Merge flat table',
@@ -130,8 +133,10 @@ export default {
     jobParams: 'Job Parameters',
     paramKey: 'Key',
     paramValue: 'Value',
-    buildSegmentTips: 'The current step has {segments} segments in parallel, where {successLen} success, {pendingLen} pending, and {runningLen} executing.',
+    buildSegmentTips: 'The current step has {segments} segments in parallel, where {successLen} success, {warningLen} not built due to data inconsistency, {pendingLen} pending, and {runningLen} executing.',
     viewDetails: 'View Details',
+    DATA_INCONSISTENT: 'this segment wasn\'t built due to data inconsistency',
+    dataInconsistentInfo: 'The count() number in the Hive table corresponding to the segment differs from the existing indexes, or the count() numbers of the existing indexes differ from each other. As a result, some indexes in the segment are not built.<br/> If you still want to build this part of the index, please try one of the following methods:<br/>1) Refresh all indexes in the segment based on Hive.<br/>2) Update the Hive table to resolve the data inconsistency with existing in [...]
     segmentDetail: 'Segment Details',
     errorDetail: 'Error Details',
     resolveErrorBtn: 'View Solution',
@@ -144,11 +149,13 @@ export default {
     step_discarded: 'Discarded',
     step_stopped: 'Paused',
     step_skip: 'Skipped',
+    step_warning: 'A data issue is found',
     errorStepTips: 'Job interrupted, "{name}" step error',
     noErrorMsg: 'No error message',
     chRestartTips: 'Can\'t restart the current job where some data has been loaded.',
     fullOptimization: 'Full Optimization',
     loadInternalTableStep: 'Load Internal Table',
-    loadGlutenCacheStep: 'Load Internal Table Cache'
+    loadGlutenCacheStep: 'Load Gluten Cache',
+    partitions: 'Partitions'
   }
 }
diff --git a/pom.xml b/pom.xml
index 9a3be5aff9..e793703bd9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -120,10 +120,6 @@
         <libthrift.version>0.14.0</libthrift.version>
         <libfb303.version>0.9.3</libfb303.version>
 
-        <!-- soft affinity -->
-        <alluxio.version>2.7.4</alluxio.version>
-        <libthrift.version>0.12.0</libthrift.version>
-
         <!-- Kafka versions -->
         <kafka.version>2.8.2</kafka.version>
 
diff --git a/src/core-common/src/main/java/org/apache/kylin/common/KylinVersion.java b/src/core-common/src/main/java/org/apache/kylin/common/KylinVersion.java
index 1e8e7ff913..c0b7f03fd2 100644
--- a/src/core-common/src/main/java/org/apache/kylin/common/KylinVersion.java
+++ b/src/core-common/src/main/java/org/apache/kylin/common/KylinVersion.java
@@ -91,25 +91,25 @@ public class KylinVersion implements Comparable {
     /**
      * Require MANUAL updating kylin version per ANY upgrading.
      */
-    private static final KylinVersion CURRENT_KYLIN_VERSION = new KylinVersion("5.0-SNAPSHOT");
+    private static final KylinVersion CURRENT_KYLIN_VERSION = new KylinVersion("5.0.2");
 
     private static final KylinVersion VERSION_200 = new KylinVersion("2.0.0");
 
     private static final Set<KylinVersion> SIGNATURE_INCOMPATIBLE_REVISIONS = new HashSet<KylinVersion>();
 
     /*
-     * 1.5.1 is actually compatible with 1.5.0's cube. However, 
+     * 1.5.1 is actually compatible with 1.5.0's cube. However,
      * the "calculate signature" method in 1.5.1 code base somehow
-     * gives different signature values for 1.5.0 cubes. 
+     * gives different signature values for 1.5.0 cubes.
      * To prevent from users having to take care of this mess, as people
      * usually won't expect to take lots of efforts for small upgrade (from 1.5.0 to 1.5.1), a special list of
      * SIGNATURE_INCOMPATIBLE_REVISIONS is introduced to silently take care of such legacy cubes.
      *
-     * We should NEVER add new stuff to SIGNATURE_INCOMPATIBLE_REVISIONS. 
+     * We should NEVER add new stuff to SIGNATURE_INCOMPATIBLE_REVISIONS.
      * "calculate signature" should always return consistent values
-     * to compatible versions. If it's impossible to maintain consistent signatures between upgrade, 
+     * to compatible versions. If it's impossible to maintain consistent signatures between upgrade,
      * we should increase the minor version,
-     * e.g. it's better to skip 1.5.1 and use 1.6.0 as the next release version to 1.5.0, 
+     * e.g. it's better to skip 1.5.1 and use 1.6.0 as the next release version to 1.5.0,
      * or even to use 2.0.0, as people tends to accept
      * doing more (e.g. Having to use sth like a metastore upgrade tool when upgrading Kylin)
      */
diff --git a/src/core-common/src/main/resources/kylin-defaults0.properties b/src/core-common/src/main/resources/kylin-defaults0.properties
index bd4f910e79..dc1b57b9ed 100644
--- a/src/core-common/src/main/resources/kylin-defaults0.properties
+++ b/src/core-common/src/main/resources/kylin-defaults0.properties
@@ -98,6 +98,7 @@ kylin.engine.spark-conf.spark.sql.hive.metastore.jars=${KYLIN_HOME}/spark/hive_1
 
 # for V3 Dictionary
 kylin.engine.spark-conf.spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension
+kylin.engine.spark-conf.spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog
 kylin.engine.spark-conf.spark.sql.catalog.delta=org.apache.spark.sql.delta.catalog.DeltaCatalog
 kylin.engine.spark-conf.spark.databricks.delta.retentionDurationCheck.enabled=false
 kylin.engine.spark-conf.spark.databricks.delta.vacuum.parallelDelete.enabled=true
@@ -176,6 +177,7 @@ kylin.storage.columnar.spark-conf.spark.sql.hive.metastore.version=1.2.2
 kylin.storage.columnar.spark-conf.spark.sql.hive.metastore.jars=${KYLIN_HOME}/spark/hive_1_2_2/*
 
 kylin.storage.columnar.spark-conf.spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension
+kylin.storage.columnar.spark-conf.spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog
 kylin.storage.columnar.spark-conf.spark.sql.catalog.delta=org.apache.spark.sql.delta.catalog.DeltaCatalog
 
 # to avoid cartesian partition oom, set to -1 or empty to turn off
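
For reference, the two `spark.sql.catalog.spark_catalog` lines added above follow the standard Delta Lake session configuration, which pairs the `DeltaSparkSessionExtension` with a `spark_catalog` override; Kylin simply forwards them to Spark through its `kylin.engine.spark-conf.*` and `kylin.storage.columnar.spark-conf.*` prefixes. The plain Spark equivalent would look roughly like this (`your-app.jar` is a placeholder):

```
spark-submit \
  --conf spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension \
  --conf spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog \
  your-app.jar
```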
diff --git a/src/core-job/src/main/java/org/apache/kylin/job/constant/ExecutableConstants.java b/src/core-job/src/main/java/org/apache/kylin/job/constant/ExecutableConstants.java
index 4cdcfdb101..639dc200e0 100644
--- a/src/core-job/src/main/java/org/apache/kylin/job/constant/ExecutableConstants.java
+++ b/src/core-job/src/main/java/org/apache/kylin/job/constant/ExecutableConstants.java
@@ -83,20 +83,20 @@ public final class ExecutableConstants {
     public static final String STEP_NAME_INDEX_PLAN_OPT = "IndexPlan Optimize";
     public static final String STAGE_NAME_COST_BASED_PLANNER = "Cost based planner";
 
-    public static final String STAGE_NAME_WAIT_FOR_RESOURCE = "Wait for yarn resource";
-    public static final String STAGE_NAME_REFRESH_SNAPSHOT = "Refresh snapshot";
-    public static final String STAGE_NAME_MATERIALIZE_FACT_VIEW = "Materialize fact view";
-    public static final String STAGE_NAME_BUILD_GLOBAL_DICT = "Build global dictionary";
-    public static final String STAGE_NAME_MATERIALIZE_FLAT_TABLE = "Materialize flat table";
+    public static final String STAGE_NAME_WAIT_FOR_RESOURCE = "Waiting for yarn resources";
+    public static final String STAGE_NAME_REFRESH_SNAPSHOT = "Build or refresh snapshot";
+    public static final String STAGE_NAME_MATERIALIZED_FACT_TABLE = "Materialize fact table view";
+    public static final String STAGE_NAME_BUILD_GLOBAL_DICT = "Generate global dictionary";
+    public static final String STAGE_NAME_GENERATE_FLAT_TABLE = "Generate flat table";
     public static final String STAGE_NAME_OPTIMIZE_INDEX_PLAN = "Optimize index plan";
-    public static final String STAGE_NAME_BUILD_FLAT_TABLE_STATS = "Build flat table statistics";
+    public static final String STAGE_NAME_GATHER_FLAT_TABLE_STATS = "Get flat table statistics";
     public static final String STAGE_NAME_BUILD_LAYER = "Build indexes by layer";
     public static final String STAGE_NAME_REFRESH_COLUMN_BYTES = "Update flat table statistics";
     public static final String STAGE_NAME_MERGE_FLAT_TABLE = "Merge flat table";
     public static final String STAGE_NAME_MERGE_INDICES = "Merge indexes";
     public static final String STAGE_NAME_MERGE_COLUMN_BYTES = "Merge flat table statistics";
     public static final String STAGE_NAME_TABLE_SAMPLING = "Sample Table Data";
-    public static final String STAGE_NAME_BUILD_SNAPSHOT = "Build snapshot";
+    public static final String STAGE_NAME_BUILD_SNAPSHOT = "Build Snapshot";
     public static final String STAGE_NAME_DELETE_USELESS_LAYOUT_DATA = "delete useless layout data";
     public static final String STAGE_NAME_OPTIMIZE_LAYOUT_DATA_REPARTITION = "Optimize layout data by repartition";
     public static final String STAGE_NAME_OPTIMIZE_LAYOUT_DATA_ZORDER = "Optimize layout data by zorder";
diff --git a/src/spark-project/engine-spark/src/main/java/org/apache/kylin/engine/spark/job/StageEnum.java b/src/spark-project/engine-spark/src/main/java/org/apache/kylin/engine/spark/job/StageEnum.java
index 4855e2476b..dfbd676243 100644
--- a/src/spark-project/engine-spark/src/main/java/org/apache/kylin/engine/spark/job/StageEnum.java
+++ b/src/spark-project/engine-spark/src/main/java/org/apache/kylin/engine/spark/job/StageEnum.java
@@ -94,7 +94,7 @@ public enum StageEnum {
 
         @Override
         public StageExecutable createExecutable() {
-            return new StageExecutable(ExecutableConstants.STAGE_NAME_MATERIALIZE_FACT_VIEW);
+            return new StageExecutable(ExecutableConstants.STAGE_NAME_MATERIALIZED_FACT_TABLE);
         }
     },
 
@@ -125,7 +125,7 @@ public enum StageEnum {
 
         @Override
         public StageExecutable createExecutable() {
-            return new StageExecutable(ExecutableConstants.STAGE_NAME_MATERIALIZE_FLAT_TABLE);
+            return new StageExecutable(ExecutableConstants.STAGE_NAME_GENERATE_FLAT_TABLE);
         }
     },
 
@@ -156,7 +156,7 @@ public enum StageEnum {
 
         @Override
         public StageExecutable createExecutable() {
-            return new StageExecutable(ExecutableConstants.STAGE_NAME_BUILD_FLAT_TABLE_STATS);
+            return new StageExecutable(ExecutableConstants.STAGE_NAME_GATHER_FLAT_TABLE_STATS);
         }
     },
 
diff --git a/src/spark-project/sparder/src/main/scala/org/apache/spark/sql/KylinSession.scala b/src/spark-project/sparder/src/main/scala/org/apache/spark/sql/KylinSession.scala
index 48d41b403c..e9c86b5394 100644
--- a/src/spark-project/sparder/src/main/scala/org/apache/spark/sql/KylinSession.scala
+++ b/src/spark-project/sparder/src/main/scala/org/apache/spark/sql/KylinSession.scala
@@ -318,25 +318,6 @@ object KylinSession extends Logging {
           sparkConf.set("spark.driver.host", 
AddressUtil.getLocalHostExactAddress)
         }
 
-        var extraJars = Paths.get(KylinConfig.getInstanceFromEnv.getKylinJobJarPath).getFileName.toString
-        if (KylinConfig.getInstanceFromEnv.queryUseGlutenEnabled) {
-          if (sparkConf.get(SPARK_MASTER).startsWith("yarn")) {
-            val distFiles = sparkConf.get(SPARK_YARN_DIST_FILE)
-            if (distFiles.isEmpty) {
-              sparkConf.set(SPARK_YARN_DIST_FILE,
-                sparkConf.get(SPARK_EXECUTOR_JAR_PATH))
-            } else {
-              sparkConf.set(SPARK_YARN_DIST_FILE,
-                sparkConf.get(SPARK_EXECUTOR_JAR_PATH) + "," + distFiles)
-            }
-            extraJars = "gluten.jar" + File.pathSeparator + extraJars
-          } else {
-            extraJars = sparkConf.get(SPARK_EXECUTOR_JAR_PATH) +
-              File.pathSeparator + extraJars
-          }
-        }
-        sparkConf.set("spark.executor.extraClassPath", extraJars)
-
         val krb5conf = " 
-Djava.security.krb5.conf=./__spark_conf__/__hadoop_conf__/krb5.conf"
         val executorExtraJavaOptions =
           sparkConf.get("spark.executor.extraJavaOptions", "")

