This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch branch-3.4
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-3.4 by this push:
new c674ac3c8909 [SPARK-45963][SQL][DOCS][3.4] Restore documentation for DSv2 API
c674ac3c8909 is described below
commit c674ac3c890918e550e09863a89429840e4811da
Author: Hyukjin Kwon <[email protected]>
AuthorDate: Fri Nov 17 21:34:33 2023 -0800
[SPARK-45963][SQL][DOCS][3.4] Restore documentation for DSv2 API
This PR cherry-picks https://github.com/apache/spark/pull/43855 to branch-3.4
---
### What changes were proposed in this pull request?
This PR restores the DSv2 documentation.
https://github.com/apache/spark/pull/38392 mistakenly added
`org/apache/spark/sql/connect` to the private (excluded) package list; because
the match is a plain substring check, it also excluded
`org/apache/spark/sql/connector`.
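For illustration only (not part of this commit), a minimal Scala sketch of why the unanchored substring also drops `connector` sources; the paths below are hypothetical examples:
```scala
// Hypothetical canonical source paths, for illustration only.
val paths = Seq(
  "/spark/connector/connect/src/main/scala/org/apache/spark/sql/connect/SomeConnectClass.scala",
  "/spark/sql/catalyst/src/main/scala/org/apache/spark/sql/connector/catalog/TableCatalog.scala"
)

// Before the fix: "connect" is a prefix of "connector", so both paths are filtered out
// and the DSv2 (connector) API docs disappear.
paths.filterNot(_.contains("org/apache/spark/sql/connect"))   // => Seq()

// After the fix: the trailing slash only matches the Spark Connect package,
// so the connector path is kept and its docs are generated.
paths.filterNot(_.contains("org/apache/spark/sql/connect/"))  // => Seq(".../connector/catalog/TableCatalog.scala")
```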
### Why are the changes needed?
For end users to read DSv2 documentation.
### Does this PR introduce _any_ user-facing change?
Yes, it restores the DSv2 API documentation that used to be published, e.g.,
https://spark.apache.org/docs/3.3.0/api/scala/org/apache/spark/sql/connector/catalog/index.html
### How was this patch tested?
Manually tested via:
```
SKIP_PYTHONDOC=1 SKIP_RDOC=1 SKIP_SQLDOC=1 bundle exec jekyll build
```
### Was this patch authored or co-authored using generative AI tooling?
No.
Closes #43866 from HyukjinKwon/SPARK-45963-3.4.
Lead-authored-by: Hyukjin Kwon <[email protected]>
Co-authored-by: Hyukjin Kwon <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
---
project/SparkBuild.scala | 2 +-
.../org/apache/spark/sql/connector/catalog/InMemoryBaseTable.scala | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index c71df993ae2a..91b31856a28a 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -1388,7 +1388,7 @@ object Unidoc {
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/util/io")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/util/kvstore")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/catalyst")))
- .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/connect")))
+ .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/connect/")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/execution")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/internal")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/hive")))
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryBaseTable.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryBaseTable.scala
index 236fb7a6dbca..a6cf1fb93de5 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryBaseTable.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryBaseTable.scala
@@ -573,9 +573,9 @@ class BufferedRows(val key: Seq[Any] = Seq.empty) extends WriterCommitMessage
}
/**
- * Theoretically, [[InternalRow]] returned by [[HasPartitionKey#partitionKey()]]
+ * Theoretically, `InternalRow` returned by `HasPartitionKey#partitionKey()`
* does not need to implement equal and hashcode methods.
- * But [[GenericInternalRow]] implements equals and hashcode methods already. Here we override it
+ * But `GenericInternalRow` implements equals and hashcode methods already. Here we override it
* to simulate that it has not been implemented to verify codes correctness.
*/
case class PartitionInternalRow(keys: Array[Any])
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]