paulpaul1076 opened a new issue, #8721:
URL: https://github.com/apache/iceberg/issues/8721
### Apache Iceberg version
1.3.1 (latest release)
### Query engine
Spark
### Please describe the bug 🐞
Spark fails to write a dataframe with the new schema after updating the schema
of a table:
```
import org.apache.iceberg.{CatalogUtil, Schema}
import org.apache.iceberg.catalog.{Catalog, TableIdentifier}
import org.apache.iceberg.types.Types
import org.apache.spark.sql.SparkSession
// https://iceberg.apache.org/docs/latest/nessie/

// Case classes backing the test dataframes; the field names must match the
// Iceberg schema (age, name, lastName) so that unionByNameWith lines up.
case class SiblingInfo(age: Int, name: String)
case class SiblingInfo2(age: Int, name: String, lastName: String)

object IcebergJobNessie extends App {
  val spark = SparkSession.builder()
    .master("local[*]")
    .appName("iceberg test")
    .config("spark.sql.catalog.nessie", "org.apache.iceberg.spark.SparkCatalog")
    .config("spark.sql.extensions",
      "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions,org.projectnessie.spark.extensions.NessieSparkSessionExtensions")
    .config("spark.sql.catalog.nessie.catalog-impl", "org.apache.iceberg.nessie.NessieCatalog")
    .config("spark.sql.catalog.nessie.ref", "main")
    .config("spark.sql.catalog.nessie.uri", "http://localhost:19120/api/v1")
    .config("spark.sql.catalog.nessie.s3.endpoint", "***")
    .config("spark.sql.catalog.nessie.s3.access.key", "***")
    .config("spark.sql.catalog.nessie.s3.secret.key", "***")
    .config("spark.sql.defaultCatalog", "nessie")
    .config("spark.hadoop.fs.s3a.endpoint", "***")
    .config("spark.hadoop.fs.s3a.access.key", "***")
    .config("spark.hadoop.fs.s3a.secret.key", "***")
    .config("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
    .config("spark.hadoop.fs.s3.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
    .config("spark.sql.catalog.nessie.s3a.path-style-access", "true")
    .config("spark.sql.catalog.nessie.warehouse", "s3://hdp-temp/iceberg_catalog")
    .getOrCreate()
  import spark.implicits._

  // Load the Nessie catalog directly through the Iceberg catalog API.
  val options = new java.util.HashMap[String, String]()
  options.put("warehouse", "s3://hdp-temp/iceberg_catalog")
  options.put("ref", "main")
  options.put("uri", "http://localhost:19120/api/v1")

  val nessieCatalog: Catalog = CatalogUtil.loadCatalog(
    "org.apache.iceberg.nessie.NessieCatalog", "nessie",
    options, spark.sparkContext.hadoopConfiguration)
  // ---------------------PART 1---------------------------------------
  val name = TableIdentifier.of("db_nessie", "schema_evolution15")
  val schema = new Schema(
    Types.NestedField.required(1, "age", Types.IntegerType.get()),
    Types.NestedField.optional(2, "sibling_info",
      Types.ListType.ofOptional(3, Types.StructType.of(
        Types.NestedField.required(4, "age", Types.IntegerType.get()),
        Types.NestedField.optional(5, "name", Types.StringType.get())
      ))
    )
  )
  nessieCatalog.createTable(name, schema)

  val df = List(
    (1, List(
      SiblingInfo(1, "John"),
      SiblingInfo(2, "Sean"),
      SiblingInfo(3, "Peter"))),
    (12, List(
      SiblingInfo(13, "Ivan"),
      SiblingInfo(11, "Sean")))
  ).toDF("age", "sibling_info")

  df.writeTo("db_nessie.schema_evolution15").append()
  spark.sql("select * from db_nessie.schema_evolution15").show(false)

  // Evolve the table schema: add an optional lastName field to the
  // element struct of sibling_info.
  val table = nessieCatalog.loadTable(name)
  val newIcebergSchema = new Schema(
    Types.NestedField.required(1, "age", Types.IntegerType.get()),
    Types.NestedField.optional(2, "sibling_info",
      Types.ListType.ofOptional(3, Types.StructType.of(
        Types.NestedField.required(4, "age", Types.IntegerType.get()),
        Types.NestedField.optional(5, "name", Types.StringType.get()),
        Types.NestedField.optional(6, "lastName", Types.StringType.get())
      ))
    )
  )
  table.updateSchema()
    .unionByNameWith(newIcebergSchema)
    .commit()
  // ---------------------PART 2---------------------------------------
  val df2 = List(
    (1, List(
      SiblingInfo2(1, "John", "Johnson"),
      SiblingInfo2(2, "Sean", "Johnson"),
      SiblingInfo2(3, "Peter", "Johnson"))),
    (12, List(
      SiblingInfo2(13, "Ivan", "Johnson"),
      SiblingInfo2(11, "Test", "Johnson")))
  ).toDF("age", "sibling_info")

  // Fails with AnalysisException when run in the same session as PART 1:
  df2.writeTo("db_nessie.schema_evolution15").append()
  spark.sql("select * from db_nessie.schema_evolution15").show(false)
}
```
The exception is:
```
Exception in thread "main" org.apache.spark.sql.AnalysisException: Cannot
write incompatible data to table 'spark_catalog1.db.schema_evolution15':
- Cannot write nullable values to non-null column 'sibling_info.x.age'.
at
org.apache.spark.sql.errors.QueryCompilationErrors$.cannotWriteIncompatibleDataToTableError(QueryCompilationErrors.scala:2072)
at
org.apache.spark.sql.catalyst.analysis.TableOutputResolver$.resolveOutputColumns(TableOutputResolver.scala:64)
at
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveOutputRelation$$anonfun$apply$50.applyOrElse(Analyzer.scala:3326)
```
As you can see, 1) I do an insert, then 2) I update the schema by adding the
field `lastName` to the element type of the field `sibling_info`, and then 3) I
do another insert, which fails.
But if I execute these inserts (see the PART 1 and PART 2 comments) in two
separate application runs, they both work fine. What is wrong here?
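For reference, a minimal diagnostic sketch of what I would try next, under the assumption (not confirmed) that the second append is validated against table metadata the Spark session cached during PART 1 rather than the schema just committed through the Iceberg API:
```
// Assumption: Spark may still hold the pre-evolution table metadata cached
// from the PART 1 write, so the second append is checked against a stale
// schema instead of the one committed via the Iceberg API.

// Show how Spark infers nullability for the nested struct fields:
df2.printSchema()

// Drop any cached metadata for the table before retrying the append:
spark.catalog.refreshTable("db_nessie.schema_evolution15")
df2.writeTo("db_nessie.schema_evolution15").append()
```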