This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch branch-3.5
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-3.5 by this push:
new 9e492b71c4aa [SPARK-45963][SQL][DOCS][3.5] Restore documentation for DSv2 API
9e492b71c4aa is described below
commit 9e492b71c4aaa070bc36bfae120e1c6ca05e4a7a
Author: Hyukjin Kwon <[email protected]>
AuthorDate: Fri Nov 17 13:04:19 2023 -0800
[SPARK-45963][SQL][DOCS][3.5] Restore documentation for DSv2 API
This PR cherry-picks https://github.com/apache/spark/pull/43855 to branch-3.5.
---
### What changes were proposed in this pull request?
This PR restores the DSv2 documentation.
https://github.com/apache/spark/pull/38392 mistakenly added `org/apache/spark/sql/connect` to Unidoc's private-package exclusion list. Because the filter is a plain substring match on the canonical path, the entry also excluded `org/apache/spark/sql/connector`, the DSv2 package.
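A minimal sketch of the failure mode, using hypothetical path strings in place of the `File` objects that `SparkBuild.scala` actually filters:
```scala
// Hypothetical source paths; only the package segments matter here.
object UnidocFilterSketch extends App {
  val paths = Seq(
    "src/main/scala/org/apache/spark/sql/connect/ClientStub.scala",
    "src/main/java/org/apache/spark/sql/connector/catalog/Table.java"
  )

  // Before the fix: "connect" is a prefix of "connector", so the
  // substring check drops the DSv2 file from the docs as well.
  println(paths.filterNot(_.contains("org/apache/spark/sql/connect")))
  // List()

  // After the fix: the trailing slash matches only the connect package
  // directory itself, so org/apache/spark/sql/connector stays documented.
  println(paths.filterNot(_.contains("org/apache/spark/sql/connect/")))
  // List(src/main/java/org/apache/spark/sql/connector/catalog/Table.java)
}
```
The trailing slash is the entire fix, as the one-character diff to `project/SparkBuild.scala` below shows.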
### Why are the changes needed?
So that end users can read the DSv2 documentation.
### Does this PR introduce _any_ user-facing change?
Yes, it restores the DSv2 API documentation that used to be available at https://spark.apache.org/docs/3.3.0/api/scala/org/apache/spark/sql/connector/catalog/index.html
### How was this patch tested?
Manually tested via:
```
SKIP_PYTHONDOC=1 SKIP_RDOC=1 SKIP_SQLDOC=1 bundle exec jekyll build
```
### Was this patch authored or co-authored using generative AI tooling?
No.
Closes #43865 from HyukjinKwon/SPARK-45963-3.5.
Authored-by: Hyukjin Kwon <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
---
project/SparkBuild.scala | 2 +-
.../apache/spark/sql/connector/catalog/SupportsMetadataColumns.java | 4 ++--
.../org/apache/spark/sql/connector/catalog/InMemoryBaseTable.scala | 4 ++--
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 718f2bb28cec..60d52368de45 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -1401,7 +1401,7 @@ object Unidoc {
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/util/io")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/util/kvstore")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/catalyst")))
-      .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/connect")))
+      .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/connect/")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/execution")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/internal")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/hive")))
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsMetadataColumns.java b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsMetadataColumns.java
index 894184dbcc82..e42424268b44 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsMetadataColumns.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsMetadataColumns.java
@@ -58,8 +58,8 @@ public interface SupportsMetadataColumns extends Table {
* Determines how this data source handles name conflicts between metadata and data columns.
* <p>
* If true, spark will automatically rename the metadata column to resolve the conflict. End users
- * can reliably select metadata columns (renamed or not) with {@link Dataset.metadataColumn}, and
- * internal code can use {@link MetadataAttributeWithLogicalName} to extract the logical name from
+ * can reliably select metadata columns (renamed or not) with {@code Dataset.metadataColumn}, and
+ * internal code can use {@code MetadataAttributeWithLogicalName} to extract the logical name from
* a metadata attribute.
* <p>
* If false, the data column will hide the metadata column. It is recommended that Table
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryBaseTable.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryBaseTable.scala
index a0a4d8bdee9f..a309db341d8e 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryBaseTable.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryBaseTable.scala
@@ -619,9 +619,9 @@ class BufferedRows(val key: Seq[Any] = Seq.empty) extends WriterCommitMessage
}
/**
- * Theoretically, [[InternalRow]] returned by [[HasPartitionKey#partitionKey()]]
+ * Theoretically, `InternalRow` returned by `HasPartitionKey#partitionKey()`
* does not need to implement equal and hashcode methods.
- * But [[GenericInternalRow]] implements equals and hashcode methods already. Here we override it
+ * But `GenericInternalRow` implements equals and hashcode methods already. Here we override it
* to simulate that it has not been implemented to verify codes correctness.
*/
case class PartitionInternalRow(keys: Array[Any])
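The two doc-comment hunks above share one motivation, though the commit message does not spell it out: now that Unidoc processes the `connector` sources, `{@link ...}` in javadoc and `[[...]]` in scaladoc become live cross-references that the doc tools must resolve, and presumably those particular symbols are not visible from these files, so the hunks downgrade them to plain code formatting. A self-contained scaladoc sketch of the two styles (the `Key` type is hypothetical):
```scala
// Sketch only: Key stands in for any type a doc comment might reference.
object DocLinkSketch {
  class Key

  /** Returns the [[Key]] for this partition. */ // live link: scaladoc must resolve Key (unresolved links warn or fail the doc build)
  def linkedKey(): Key = new Key

  /** Returns the `Key` for this partition. */ // backticks: rendered in code font, nothing to resolve
  def plainKey(): Key = new Key
}
```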
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]