This is an automated email from the ASF dual-hosted git repository.

chengpan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/zeppelin.git

The following commit(s) were added to refs/heads/master by this push:
new ad79848a90 [ZEPPELIN-6091] Drop support for Spark 3.2
ad79848a90 is described below

commit ad79848a9035d1e5ea596632dac523b757aaaf8a
Author: Cheng Pan <cheng...@apache.org>
AuthorDate: Thu Sep 19 14:49:13 2024 +0800

[ZEPPELIN-6091] Drop support for Spark 3.2

### What is this PR for?
Following the discussion on the mailing list, drop support for Spark 3.2:
https://www.mail-archive.com/dev@zeppelin.apache.org/msg37311.html

### What type of PR is it?
Breaking change.

### What is the Jira issue?
ZEPPELIN-6091

### How should this be tested?
Pass CI

### Screenshots (if appropriate)

### Questions:
* Do the license files need to be updated? No.
* Are there breaking changes for older versions? Yes.
* Does this need documentation? Yes, updated.

Closes #4834 from pan3793/ZEPPELIN-6091.

Signed-off-by: Cheng Pan <cheng...@apache.org>
---
 .github/workflows/core.yml | 8 ---
 Dockerfile | 4 +-
 conf/zeppelin-env.cmd.template | 2 +-
 conf/zeppelin-env.sh.template | 2 +-
 docs/interpreter/spark.md | 2 +-
 docs/setup/basics/how_to_build.md | 3 +-
 docs/setup/deployment/flink_and_spark_cluster.md | 63 +++++++++-------------
 spark/interpreter/pom.xml | 35 ------------
 .../src/main/resources/interpreter-setting.json | 2 +-
 .../org/apache/zeppelin/spark/SparkShimsTest.java | 2 +-
 .../zeppelin/spark/SparkSqlInterpreterTest.java | 9 +---
 .../apache/zeppelin/spark/SparkVersionTest.java | 10 ++--
 spark/pom.xml | 5 --
 .../org/apache/zeppelin/spark/SparkVersion.java | 4 +-
 .../integration/SparkIntegrationTest32.java | 36 -------------
 .../integration/ZeppelinSparkClusterTest32.java | 35 ------------
 16 files changed, 40 insertions(+), 182 deletions(-)

diff --git a/.github/workflows/core.yml b/.github/workflows/core.yml
index e4a4bd1264..b7004b15da 100644
--- a/.github/workflows/core.yml
+++ b/.github/workflows/core.yml
@@ -397,14 +397,6 @@ jobs:
 - name: Make IRkernel available to Jupyter
 run: |
 R -e "IRkernel::installspec()"
- - name: run spark-3.2 tests with scala-2.12 and python-${{ matrix.python }}
- run: |
- rm -rf spark/interpreter/metastore_db
- ./mvnw verify -pl spark-submit,spark/interpreter -am -Dtest=org/apache/zeppelin/spark/* -Pspark-3.2 -Pspark-scala-2.12 -Phadoop3 -Pintegration -DfailIfNoTests=false ${MAVEN_ARGS}
- - name: run spark-3.2 tests with scala-2.13 and python-${{ matrix.python }}
- run: |
- rm -rf spark/interpreter/metastore_db
- ./mvnw verify -pl spark-submit,spark/interpreter -am -Dtest=org/apache/zeppelin/spark/* -Pspark-3.2 -Pspark-scala-2.13 -Phadoop3 -Pintegration -DfailIfNoTests=false ${MAVEN_ARGS}
 - name: run spark-3.3 tests with scala-2.12 and python-${{ matrix.python }}
 run: |
 rm -rf spark/interpreter/metastore_db
diff --git a/Dockerfile b/Dockerfile
index 4f4fef399b..6f1777e086 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -21,9 +21,9 @@ ENV MAVEN_OPTS="-Xms1024M -Xmx2048M -XX:MaxMetaspaceSize=1024m -XX:-UseGCOverhea
 # Allow npm and bower to run with root privileges
 RUN echo "unsafe-perm=true" > ~/.npmrc && \
 echo '{ "allow_root": true }' > ~/.bowerrc && \
- ./mvnw -B package -DskipTests -Pbuild-distr -Pspark-3.3 -Pinclude-hadoop -Phadoop3 -Pspark-scala-2.12 -Pweb-classic -Pweb-dist && \
+ ./mvnw -B package -DskipTests -Pbuild-distr -Pspark-3.4 -Pinclude-hadoop -Phadoop3 -Pspark-scala-2.12 -Pweb-classic -Pweb-dist && \
 # Example with doesn't compile all interpreters
- # ./mvnw -B package -DskipTests -Pbuild-distr -Pspark-3.2 -Pinclude-hadoop -Phadoop3 -Pspark-scala-2.12 -Pweb-classic -Pweb-dist -pl 
'!groovy,!livy,!hbase,!file,!flink' && \ + # ./mvnw -B package -DskipTests -Pbuild-distr -Pspark-3.4 -Pinclude-hadoop -Phadoop3 -Pspark-scala-2.12 -Pweb-classic -Pweb-dist -pl '!groovy,!livy,!hbase,!file,!flink' && \ mv /workspace/zeppelin/zeppelin-distribution/target/zeppelin-*-bin/zeppelin-*-bin /opt/zeppelin/ && \ # Removing stuff saves time, because docker creates a temporary layer rm -rf ~/.m2 && \ diff --git a/conf/zeppelin-env.cmd.template b/conf/zeppelin-env.cmd.template index 2d5bb40dbb..15c88fd4ca 100644 --- a/conf/zeppelin-env.cmd.template +++ b/conf/zeppelin-env.cmd.template @@ -64,7 +64,7 @@ REM however, it is not encouraged when you can define SPARK_HOME REM REM Options read in YARN client mode REM set HADOOP_CONF_DIR REM yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR. -REM Pyspark (supported with Spark 1.2.1 and above) +REM Pyspark (supported with Spark 3.3 and above) REM To configure pyspark, you need to set spark distribution's path to 'spark.home' property in Interpreter setting screen in Zeppelin GUI REM set PYSPARK_PYTHON REM path to the python command. must be the same path on the driver(Zeppelin) and all workers. REM set PYTHONPATH diff --git a/conf/zeppelin-env.sh.template b/conf/zeppelin-env.sh.template index 9c228cbadb..e27a688bec 100644 --- a/conf/zeppelin-env.sh.template +++ b/conf/zeppelin-env.sh.template @@ -87,7 +87,7 @@ ## # Options read in YARN client mode # export HADOOP_CONF_DIR # yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR. -# Pyspark (supported with Spark 1.2.1 and above) +# Pyspark (supported with Spark 3.3 and above) # To configure pyspark, you need to set spark distribution's path to 'spark.home' property in Interpreter setting screen in Zeppelin GUI # export PYSPARK_PYTHON # path to the python command. must be the same path on the driver(Zeppelin) and all workers. # export PYTHONPATH diff --git a/docs/interpreter/spark.md b/docs/interpreter/spark.md index 1fa02b5b2f..680ca054b3 100644 --- a/docs/interpreter/spark.md +++ b/docs/interpreter/spark.md @@ -385,7 +385,7 @@ You can also choose `scoped` mode. For `scoped` per note mode, Zeppelin creates ## SparkContext, SQLContext, SparkSession, ZeppelinContext -SparkContext, SQLContext, SparkSession (for spark 2.x, 3.x) and ZeppelinContext are automatically created and exposed as variable names `sc`, `sqlContext`, `spark` and `z` respectively, in Scala, Python and R environments. +SparkContext, SparkSession and ZeppelinContext are automatically created and exposed as variable names `sc`, `spark` and `z` respectively, in Scala, Python and R environments. > Note that Scala/Python/R environment shares the same SparkContext, > SQLContext, SparkSession and ZeppelinContext instance. diff --git a/docs/setup/basics/how_to_build.md b/docs/setup/basics/how_to_build.md index 8c8cd947f8..99951a9353 100644 --- a/docs/setup/basics/how_to_build.md +++ b/docs/setup/basics/how_to_build.md @@ -83,7 +83,7 @@ You can directly start Zeppelin by running the following command after successfu To be noticed, the spark profiles here only affect the unit test (no need to specify `SPARK_HOME`) of spark interpreter. Zeppelin doesn't require you to build with different spark to make different versions of spark work in Zeppelin. -You can run different versions of Spark in Zeppelin as long as you specify `SPARK_HOME`. Actually Zeppelin supports all the versions of Spark from 3.2 to 3.5. +You can run different versions of Spark in Zeppelin as long as you specify `SPARK_HOME`. 
Actually Zeppelin supports all the versions of Spark from 3.3 to 3.5. To build with a specific Spark version or scala versions, define one or more of the following profiles and options: @@ -97,7 +97,6 @@ Available profiles are -Pspark-3.5 -Pspark-3.4 -Pspark-3.3 --Pspark-3.2 ``` minor version can be adjusted by `-Dspark.version=x.x.x` diff --git a/docs/setup/deployment/flink_and_spark_cluster.md b/docs/setup/deployment/flink_and_spark_cluster.md index 76f9063cf1..df5df80d9a 100644 --- a/docs/setup/deployment/flink_and_spark_cluster.md +++ b/docs/setup/deployment/flink_and_spark_cluster.md @@ -42,8 +42,8 @@ Assuming the minimal install, there are several programs that we will need to in - git - openssh-server -- OpenJDK 7 -- Maven 3.1+ +- OpenJDK 11 +- Maven For git, openssh-server, and OpenJDK 7 we will be using the apt package manager. @@ -60,17 +60,10 @@ sudo apt-get install git sudo apt-get install openssh-server ``` -##### OpenJDK 7 +##### OpenJDK 11 ```bash -sudo apt-get install openjdk-7-jdk openjdk-7-jre-lib -``` -*A note for those using Ubuntu 16.04*: To install `openjdk-7` on Ubuntu 16.04, one must add a repository. [Source](http://askubuntu.com/questions/761127/ubuntu-16-04-and-openjdk-7) - -```bash -sudo add-apt-repository ppa:openjdk-r/ppa -sudo apt-get update -sudo apt-get install openjdk-7-jdk openjdk-7-jre-lib +sudo apt-get install openjdk-11-jdk ``` ### Installing Zeppelin @@ -92,26 +85,23 @@ cd zeppelin Package Zeppelin. ```bash -./mvnw clean package -DskipTests -Pspark-3.2 -Dflink.version=1.1.3 -Pscala-2.11 +./mvnw clean package -DskipTests -Pspark-3.5 -Pflink-1.17 ``` `-DskipTests` skips build tests- you're not developing (yet), so you don't need to do tests, the clone version *should* build. -`-Pspark-3.2` tells maven to build a Zeppelin with Spark 3.2. This is important because Zeppelin has its own Spark interpreter and the versions must be the same. +`-Pspark-3.5` tells maven to build a Zeppelin with Spark 3.5. This is important because Zeppelin has its own Spark interpreter and the versions must be the same. -`-Dflink.version=1.1.3` tells maven specifically to build Zeppelin with Flink version 1.1.3. +`-Pflink-1.17` tells maven to build a Zeppelin with Flink 1.17. --`-Pscala-2.11` tells maven to build with Scala v2.11. - - -**Note:** You can build against any version of Spark that has a Zeppelin build profile available. The key is to make sure you check out the matching version of Spark to build. At the time of this writing, Spark 3.2 was the most recent Spark version available. +**Note:** You can build against any version of Spark that has a Zeppelin build profile available. The key is to make sure you check out the matching version of Spark to build. At the time of this writing, Spark 3.5 was the most recent Spark version available. **Note:** On build failures. Having installed Zeppelin close to 30 times now, I will tell you that sometimes the build fails for seemingly no reason. As long as you didn't edit any code, it is unlikely the build is failing because of something you did. What does tend to happen, is some dependency that maven is trying to download is unreachable. If your build fails on this step here are some tips: - Don't get discouraged. - Scroll up and read through the logs. There will be clues there. 
-- Retry (that is, run the `./mvnw clean package -DskipTests -Pspark-3.2` again) +- Retry (that is, run the `./mvnw clean package -DskipTests -Pspark-3.5` again) - If there were clues that a dependency couldn't be downloaded wait a few hours or even days and retry again. Open source software when compiling is trying to download all of the dependencies it needs, if a server is off-line there is nothing you can do but wait for it to come back. - Make sure you followed all of the steps carefully. - Ask the community to help you. Go [here](http://zeppelin.apache.org/community.html) and join the user mailing list. People are there to help you. Make sure to copy and paste the build output (everything that happened in the console) and include that in your message. @@ -225,16 +215,16 @@ Building from source is recommended where possible, for simplicity in this tuto To download the Flink Binary use `wget` ```bash -wget "http://mirror.cogentco.com/pub/apache/flink/flink-1.16.2/flink-1.16.2-bin-scala_2.12.tgz" -tar -xzvf flink-1.16.2-bin-scala_2.12.tgz +wget "https://archive.apache.org/dist/flink/flink-1.17.1/flink-1.17.1-bin-scala_2.12.tgz" +tar -xzvf flink-1.17.1-bin-scala_2.12.tgz ``` -This will download Flink 1.16.2. +This will download Flink 1.17.1. Start the Flink Cluster. ```bash -flink-1.16.2/bin/start-cluster.sh +flink-1.17.1/bin/start-cluster.sh ``` ###### Building From source @@ -243,13 +233,13 @@ If you wish to build Flink from source, the following will be instructive. Note See the [Flink Installation guide](https://github.com/apache/flink/blob/master/README.md) for more detailed instructions. -Return to the directory where you have been downloading, this tutorial assumes that is `$HOME`. Clone Flink, check out release-1.1.3-rc2, and build. +Return to the directory where you have been downloading, this tutorial assumes that is `$HOME`. Clone Flink, check out release-1.17.1, and build. ```bash cd $HOME git clone https://github.com/apache/flink.git cd flink -git checkout release-1.1.3-rc2 +git checkout release-1.17.1 mvn clean install -DskipTests ``` @@ -271,8 +261,8 @@ If no task managers are present, restart the Flink cluster with the following co (if binaries) ```bash -flink-1.1.3/bin/stop-cluster.sh -flink-1.1.3/bin/start-cluster.sh +flink-1.17.1/bin/stop-cluster.sh +flink-1.17.1/bin/start-cluster.sh ``` @@ -284,7 +274,7 @@ build-target/bin/start-cluster.sh ``` -##### Spark 1.6 Cluster +##### Spark Cluster ###### Download Binaries @@ -295,12 +285,12 @@ Using binaries is also To download the Spark Binary use `wget` ```bash -wget "https://dlcdn.apache.org/spark/spark-3.4.1/spark-3.4.1-bin-hadoop3.tgz" -tar -xzvf spark-3.4.1-bin-hadoop3.tgz -mv spark-3.4.1-bin-hadoop3 spark +wget "https://archive.apache.org/dist/spark/spark-3.5.2/spark-3.5.2-bin-hadoop3.tgz" +tar -xzvf spark-3.5.2-bin-hadoop3.tgz +mv spark-3.5.2-bin-hadoop3 spark ``` -This will download Spark 3.4.1, compatible with Hadoop 3. You do not have to install Hadoop for this binary to work, but if you are using Hadoop, please change `3` to your appropriate version. +This will download Spark 3.5.2, compatible with Hadoop 3. You do not have to install Hadoop for this binary to work, but if you are using Hadoop, please change `3` to your appropriate version. ###### Building From source @@ -308,21 +298,18 @@ Spark is an extraordinarily large project, which takes considerable time to down See the [Spark Installation](https://github.com/apache/spark/blob/master/README.md) guide for more detailed instructions. 
-Return to the directory where you have been downloading, this tutorial assumes that is $HOME. Clone Spark, check out branch-1.6, and build. -**Note:** Recall, we're only checking out 1.6 because it is the most recent Spark for which a Zeppelin profile exists at - the time of writing. You are free to check out other version, just make sure you build Zeppelin against the correct version of Spark. However if you use Spark 2.0, the word count example will need to be changed as Spark 2.0 is not compatible with the following examples. - +Return to the directory where you have been downloading, this tutorial assumes that is $HOME. Clone Spark, check out branch-3.5, and build. ```bash cd $HOME ``` -Clone, check out, and build Spark version 1.6.x. +Clone, check out, and build Spark version 3.5.x. ```bash git clone https://github.com/apache/spark.git cd spark -git checkout branch-1.6 +git checkout branch-3.5 mvn clean package -DskipTests ``` diff --git a/spark/interpreter/pom.xml b/spark/interpreter/pom.xml index 3156a17053..81e79dcf37 100644 --- a/spark/interpreter/pom.xml +++ b/spark/interpreter/pom.xml @@ -40,10 +40,6 @@ <maven.aeither.provider.version>3.0.3</maven.aeither.provider.version> <wagon.version>2.7</wagon.version> - <datanucleus.rdbms.version>4.1.19</datanucleus.rdbms.version> - <datanucleus.apijdo.version>4.2.4</datanucleus.apijdo.version> - <datanucleus.core.version>4.1.17</datanucleus.core.version> - <!-- spark versions --> <spark.version>3.4.1</spark.version> <protobuf.version>3.21.12</protobuf.version> @@ -222,27 +218,6 @@ </dependency> <!--test libraries--> - <dependency> - <groupId>org.datanucleus</groupId> - <artifactId>datanucleus-core</artifactId> - <version>${datanucleus.core.version}</version> - <scope>test</scope> - </dependency> - - <dependency> - <groupId>org.datanucleus</groupId> - <artifactId>datanucleus-api-jdo</artifactId> - <version>${datanucleus.apijdo.version}</version> - <scope>test</scope> - </dependency> - - <dependency> - <groupId>org.datanucleus</groupId> - <artifactId>datanucleus-rdbms</artifactId> - <version>${datanucleus.rdbms.version}</version> - <scope>test</scope> - </dependency> - <dependency> <groupId>org.mockito</groupId> <artifactId>mockito-core</artifactId> @@ -589,16 +564,6 @@ <py4j.version>0.10.9.5</py4j.version> </properties> </profile> - - <profile> - <id>spark-3.2</id> - <properties> - <spark.version>3.2.4</spark.version> - <protobuf.version>2.5.0</protobuf.version> - <py4j.version>0.10.9.5</py4j.version> - </properties> - </profile> - </profiles> </project> diff --git a/spark/interpreter/src/main/resources/interpreter-setting.json b/spark/interpreter/src/main/resources/interpreter-setting.json index eb3a4ef65f..70c00dc977 100644 --- a/spark/interpreter/src/main/resources/interpreter-setting.json +++ b/spark/interpreter/src/main/resources/interpreter-setting.json @@ -159,7 +159,7 @@ "envName": null, "propertyName": "zeppelin.spark.deprecatedMsg.show", "defaultValue": true, - "description": "Whether show the spark deprecated message, spark 2.2 and before are deprecated. Zeppelin will display warning message by default", + "description": "Whether show the spark deprecated message, prior Spark 3.3 are deprecated. 
Zeppelin will display warning message by default", "type": "checkbox" } }, diff --git a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java index b8720f86e5..5bd9cbba69 100644 --- a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java +++ b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java @@ -97,7 +97,7 @@ class SparkShimsTest { when(mockContext.getIntpEventClient()).thenReturn(mockIntpEventClient); try { - sparkShims = SparkShims.getInstance(SparkVersion.SPARK_3_2_0.toString(), new Properties(), null); + sparkShims = SparkShims.getInstance(SparkVersion.SPARK_3_3_0.toString(), new Properties(), null); } catch (Throwable e1) { throw new RuntimeException("All SparkShims are tried, but no one can be created."); } diff --git a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java index 20594c4bde..05556ba4e0 100644 --- a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java +++ b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java @@ -292,14 +292,7 @@ public class SparkSqlInterpreterTest { InterpreterContext context = getInterpreterContext(); InterpreterResult ret = sqlInterpreter.interpret("create table t1(id int, name string)", context); assertEquals(InterpreterResult.Code.SUCCESS, ret.code(), context.out.toString()); - // spark 1.x will still return DataFrame with non-empty columns. - // org.apache.spark.sql.DataFrame = [result: string] - if (!sparkInterpreter.getSparkContext().version().startsWith("1.")) { - assertTrue(ret.message().isEmpty()); - } else { - assertEquals(Type.TABLE, ret.message().get(0).getType()); - assertEquals("result\n", ret.message().get(0).getData()); - } + assertTrue(ret.message().isEmpty()); // create the same table again ret = sqlInterpreter.interpret("create table t1(id int, name string)", context); diff --git a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkVersionTest.java b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkVersionTest.java index a454854a7f..06aa392e4c 100644 --- a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkVersionTest.java +++ b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkVersionTest.java @@ -48,14 +48,14 @@ class SparkVersionTest { assertEquals(SparkVersion.SPARK_3_5_0, SparkVersion.fromVersionString("3.5.0.2.5.0.0-1245")); // test newer than - assertTrue(SparkVersion.SPARK_3_5_0.newerThan(SparkVersion.SPARK_3_2_0)); + assertTrue(SparkVersion.SPARK_3_5_0.newerThan(SparkVersion.SPARK_3_3_0)); assertTrue(SparkVersion.SPARK_3_5_0.newerThanEquals(SparkVersion.SPARK_3_5_0)); - assertFalse(SparkVersion.SPARK_3_2_0.newerThan(SparkVersion.SPARK_3_5_0)); + assertFalse(SparkVersion.SPARK_3_3_0.newerThan(SparkVersion.SPARK_3_5_0)); // test older than - assertTrue(SparkVersion.SPARK_3_2_0.olderThan(SparkVersion.SPARK_3_5_0)); - assertTrue(SparkVersion.SPARK_3_2_0.olderThanEquals(SparkVersion.SPARK_3_2_0)); - assertFalse(SparkVersion.SPARK_3_5_0.olderThan(SparkVersion.SPARK_3_2_0)); + assertTrue(SparkVersion.SPARK_3_3_0.olderThan(SparkVersion.SPARK_3_5_0)); + assertTrue(SparkVersion.SPARK_3_5_0.olderThanEquals(SparkVersion.SPARK_3_5_0)); + assertFalse(SparkVersion.SPARK_3_5_0.olderThan(SparkVersion.SPARK_3_3_0)); // test 
newerThanEqualsPatchVersion assertTrue(SparkVersion.fromVersionString("2.3.1") diff --git a/spark/pom.xml b/spark/pom.xml index f189c6c23f..f3eb50d00b 100644 --- a/spark/pom.xml +++ b/spark/pom.xml @@ -33,11 +33,6 @@ <description>Zeppelin Spark Support</description> <properties> - <datanucleus.rdbms.version>3.2.9</datanucleus.rdbms.version> - <datanucleus.apijdo.version>3.2.6</datanucleus.apijdo.version> - <datanucleus.core.version>3.2.10</datanucleus.core.version> - - <!-- spark versions --> <spark.version>3.4.1</spark.version> <protobuf.version>2.5.0</protobuf.version> <py4j.version>0.10.9.7</py4j.version> diff --git a/spark/spark-shims/src/main/java/org/apache/zeppelin/spark/SparkVersion.java b/spark/spark-shims/src/main/java/org/apache/zeppelin/spark/SparkVersion.java index 27e0378f53..ff018c0369 100644 --- a/spark/spark-shims/src/main/java/org/apache/zeppelin/spark/SparkVersion.java +++ b/spark/spark-shims/src/main/java/org/apache/zeppelin/spark/SparkVersion.java @@ -25,15 +25,13 @@ import org.slf4j.LoggerFactory; public class SparkVersion { private static final Logger LOGGER = LoggerFactory.getLogger(SparkVersion.class); - public static final SparkVersion SPARK_3_2_0 = SparkVersion.fromVersionString("3.2.0"); - public static final SparkVersion SPARK_3_3_0 = SparkVersion.fromVersionString("3.3.0"); public static final SparkVersion SPARK_3_5_0 = SparkVersion.fromVersionString("3.5.0"); public static final SparkVersion SPARK_4_0_0 = SparkVersion.fromVersionString("4.0.0"); - public static final SparkVersion MIN_SUPPORTED_VERSION = SPARK_3_2_0; + public static final SparkVersion MIN_SUPPORTED_VERSION = SPARK_3_3_0; public static final SparkVersion UNSUPPORTED_FUTURE_VERSION = SPARK_4_0_0; private int version; diff --git a/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/SparkIntegrationTest32.java b/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/SparkIntegrationTest32.java deleted file mode 100644 index 27c511e64e..0000000000 --- a/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/SparkIntegrationTest32.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.zeppelin.integration; - -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.DisplayName; -import org.junit.jupiter.api.Nested; -import java.io.IOException; - -public class SparkIntegrationTest32 { - - @Nested - @DisplayName("Hadoop3") - public class Hadoop3 extends SparkIntegrationTest { - - @BeforeEach - public void downloadSpark() throws IOException { - prepareSpark("3.2.0", "3.2"); - } - } -} diff --git a/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/ZeppelinSparkClusterTest32.java b/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/ZeppelinSparkClusterTest32.java deleted file mode 100644 index 1f1b769245..0000000000 --- a/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/ZeppelinSparkClusterTest32.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.zeppelin.integration; - -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.DisplayName; -import org.junit.jupiter.api.Nested; - -public class ZeppelinSparkClusterTest32 { - - @Nested - @DisplayName("Hadoop3") - public class Hadoop3 extends ZeppelinSparkClusterTest { - - @BeforeEach - public void downloadSpark() throws Exception { - prepareSpark("3.2.0", "3.2"); - } - } -}
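
For readers gauging the practical effect of the SparkVersion.java change above (MIN_SUPPORTED_VERSION moves from SPARK_3_2_0 to SPARK_3_3_0, with SPARK_4_0_0 still treated as the unsupported future version), the sketch below shows the kind of version gate this implies: Spark releases older than 3.3.0, or at 4.0.0 and above, are rejected. It is a minimal, self-contained illustration under that assumption, not the actual org.apache.zeppelin.spark.SparkVersion class; the class and method names are invented for the example.

// Minimal illustration of the support window implied by this commit.
// Not the real org.apache.zeppelin.spark.SparkVersion -- names are invented for the example.
public final class SparkVersionGateExample {

    static final int MIN_SUPPORTED = versionNumber("3.3.0");      // raised from 3.2.0 by this commit
    static final int UNSUPPORTED_FUTURE = versionNumber("4.0.0"); // upper bound, unchanged

    // Packs "major.minor.patch" into one comparable int, e.g. "3.3.0" -> 30300.
    // Vendor suffixes such as "3.5.0.2.5.0.0-1245" are ignored beyond the first three parts.
    static int versionNumber(String versionString) {
        String[] parts = versionString.split("[.\\-]");
        int major = Integer.parseInt(parts[0]);
        int minor = Integer.parseInt(parts[1]);
        int patch = Integer.parseInt(parts[2]);
        return major * 10000 + minor * 100 + patch;
    }

    static boolean isSupported(String sparkVersion) {
        int v = versionNumber(sparkVersion);
        return v >= MIN_SUPPORTED && v < UNSUPPORTED_FUTURE;
    }

    public static void main(String[] args) {
        System.out.println("3.2.4 supported? " + isSupported("3.2.4")); // false -- dropped by this commit
        System.out.println("3.3.0 supported? " + isSupported("3.3.0")); // true
        System.out.println("3.5.2 supported? " + isSupported("3.5.2")); // true
        System.out.println("4.0.0 supported? " + isSupported("4.0.0")); // false -- unsupported future version
    }
}

Compiled and run as-is, this prints false/true/true/false for 3.2.4, 3.3.0, 3.5.2, and 4.0.0, matching the support window after this commit.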