nastra commented on code in PR #9332: URL: https://github.com/apache/iceberg/pull/9332#discussion_r1431103608
########## spark/v3.5/spark-extensions/src/main/scala/org/apache/spark/sql/catalyst/parser/extensions/IcebergSparkSqlExtensionsParser.scala: ##########
@@ -122,37 +147,132 @@ class IcebergSparkSqlExtensionsParser(delegate: ParserInterface) extends ParserI
     if (isIcebergCommand(sqlTextAfterSubstitution)) {
       parse(sqlTextAfterSubstitution) { parser => astBuilder.visit(parser.singleStatement()) }.asInstanceOf[LogicalPlan]
     } else {
-      delegate.parsePlan(sqlText)
+      ViewSubstitutionExecutor.execute(delegate.parsePlan(sqlText))
     }
   }

-  object UnresolvedIcebergTable {
+  private object ViewSubstitutionExecutor extends RuleExecutor[LogicalPlan] {
+    private val fixedPoint = FixedPoint(
+      maxIterations,
+      errorOnExceed = true,
+      maxIterationsSetting = SQLConf.ANALYZER_MAX_ITERATIONS.key)

-    def unapply(plan: LogicalPlan): Option[LogicalPlan] = {
-      EliminateSubqueryAliases(plan) match {
-        case UnresolvedRelation(multipartIdentifier, _, _) if isIcebergTable(multipartIdentifier) =>
-          Some(plan)
-        case _ =>
+    override protected def batches: Seq[Batch] = Seq(Batch("pre-substitution", fixedPoint, V2ViewSubstitution))
+  }
+
+  private object V2ViewSubstitution extends Rule[LogicalPlan] {
+    import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
+
+    // the reason for handling these cases here is because ResolveSessionCatalog exits early for v2 commands
+    override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
+      case u@UnresolvedView(identifier, _, _, _) =>
+        lookupTableOrView(identifier, viewOnly = true).getOrElse(u)
+
+      case u@UnresolvedTableOrView(identifier, _, _) =>
+        lookupTableOrView(identifier).getOrElse(u)
+
+      case CreateView(UnresolvedIdentifier(nameParts, allowTemp), userSpecifiedColumns,
+          comment, properties, originalText, query, allowExisting, replace) =>
+        CreateIcebergView(UnresolvedIdentifier(nameParts, allowTemp), userSpecifiedColumns,
+          comment, properties, originalText, query, allowExisting, replace)
+
+      case ShowViews(UnresolvedNamespace(multipartIdentifier), pattern, output) =>
+        ShowIcebergViews(UnresolvedNamespace(multipartIdentifier), pattern, output)
+
+      case DropView(UnresolvedIdentifier(nameParts, allowTemp), ifExists) =>
+        DropIcebergView(UnresolvedIdentifier(nameParts, allowTemp), ifExists)
+    }
+
+    private def expandIdentifier(nameParts: Seq[String]): Seq[String] = {
+      if (!isResolvingView || isReferredTempViewName(nameParts)) return nameParts
+
+      if (nameParts.length == 1) {
+        AnalysisContext.get.catalogAndNamespace :+ nameParts.head

Review Comment:
   this code was copied 1:1 from Spark's `Analyzer` [here](https://github.com/apache/spark/blob/0c061702af42cb8b8646c3efdbe8fe58c749ff47/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala#L1198-L1217) so that we can plug in the lookup of views.
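   For readers of this thread, here is a minimal, self-contained sketch of the identifier expansion the copied snippet performs. This is not the PR's code: `currentCatalogAndNamespace` and `registeredCatalogs` are hypothetical stand-ins for `AnalysisContext.get.catalogAndNamespace` and the catalog manager's registered-catalog check, and the branches after the single-part case are assumed to mirror the Analyzer logic at the linked lines.

   ```scala
   // Minimal sketch, not the PR's code: a plain-Scala approximation of the
   // identifier expansion copied from Spark's Analyzer. The two values below
   // are hypothetical stand-ins for AnalysisContext.get.catalogAndNamespace
   // and the CatalogManager's registered-catalog check.
   object IdentifierExpansionSketch {

     // current catalog + namespace captured while resolving a view (assumed values)
     val currentCatalogAndNamespace: Seq[String] = Seq("spark_catalog", "default")
     // catalogs known to the session (assumed values)
     val registeredCatalogs: Set[String] = Set("spark_catalog", "iceberg")

     def expandIdentifier(nameParts: Seq[String]): Seq[String] = {
       if (nameParts.length == 1) {
         // bare name: qualify it with the current catalog and namespace
         currentCatalogAndNamespace :+ nameParts.head
       } else if (registeredCatalogs.contains(nameParts.head)) {
         // already starts with a known catalog: leave it untouched
         nameParts
       } else {
         // namespace-qualified but missing the catalog: prepend the current catalog
         currentCatalogAndNamespace.head +: nameParts
       }
     }

     def main(args: Array[String]): Unit = {
       println(expandIdentifier(Seq("v")))                  // List(spark_catalog, default, v)
       println(expandIdentifier(Seq("iceberg", "db", "v"))) // List(iceberg, db, v)
       println(expandIdentifier(Seq("db", "v")))            // List(spark_catalog, db, v)
     }
   }
   ```

   In Spark's Analyzer this expansion only kicks in while a view is being resolved (the `isResolvingView` guard), so names referenced inside a view body resolve against the catalog and namespace that were current when the view was defined, which is what the plugged-in view lookup above relies on.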