This is an automated email from the ASF dual-hosted git repository.

weichenxu123 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 7019d5e63b72 [SPARK-51867][ML][FOLLOW-UP] Use private[ml] to avoid 
exposing Data class
7019d5e63b72 is described below

commit 7019d5e63b7218049bacf3392716bf6faf8f82a1
Author: Weichen Xu <weichen...@databricks.com>
AuthorDate: Thu May 1 12:58:19 2025 +0800

    [SPARK-51867][ML][FOLLOW-UP] Use private[ml] to avoid exposing Data class
    
    ### What changes were proposed in this pull request?
    
    Use private[ml] to avoid exposing Data class
    
    ### Why are the changes needed?
    
    To avoid exposing the Data class in the public API.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    UT.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #50763 from WeichenXu123/save-to-local-follow-up.
    
    Authored-by: Weichen Xu <weichen...@databricks.com>
    Signed-off-by: Weichen Xu <weichen...@databricks.com>
---
 .../scala/org/apache/spark/ml/classification/FMClassifier.scala     | 2 +-
 .../main/scala/org/apache/spark/ml/classification/LinearSVC.scala   | 2 +-
 .../org/apache/spark/ml/classification/LogisticRegression.scala     | 2 +-
 .../spark/ml/classification/MultilayerPerceptronClassifier.scala    | 2 +-
 .../main/scala/org/apache/spark/ml/classification/NaiveBayes.scala  | 2 +-
 .../main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala | 6 +++++-
 mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala    | 4 ++--
 mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala       | 2 +-
 .../org/apache/spark/ml/feature/BucketedRandomProjectionLSH.scala   | 2 +-
 .../src/main/scala/org/apache/spark/ml/feature/ChiSqSelector.scala  | 2 +-
 .../main/scala/org/apache/spark/ml/feature/CountVectorizer.scala    | 2 +-
 mllib/src/main/scala/org/apache/spark/ml/feature/IDF.scala          | 2 +-
 mllib/src/main/scala/org/apache/spark/ml/feature/MaxAbsScaler.scala | 2 +-
 mllib/src/main/scala/org/apache/spark/ml/feature/MinHashLSH.scala   | 2 +-
 mllib/src/main/scala/org/apache/spark/ml/feature/MinMaxScaler.scala | 2 +-
 .../src/main/scala/org/apache/spark/ml/feature/OneHotEncoder.scala  | 2 +-
 mllib/src/main/scala/org/apache/spark/ml/feature/PCA.scala          | 2 +-
 mllib/src/main/scala/org/apache/spark/ml/feature/RFormula.scala     | 4 ++--
 mllib/src/main/scala/org/apache/spark/ml/feature/RobustScaler.scala | 2 +-
 .../src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala | 2 +-
 .../src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala  | 2 +-
 .../src/main/scala/org/apache/spark/ml/feature/TargetEncoder.scala  | 2 +-
 .../org/apache/spark/ml/feature/UnivariateFeatureSelector.scala     | 2 +-
 .../org/apache/spark/ml/feature/VarianceThresholdSelector.scala     | 2 +-
 .../src/main/scala/org/apache/spark/ml/feature/VectorIndexer.scala  | 2 +-
 mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala   | 2 +-
 .../org/apache/spark/ml/regression/AFTSurvivalRegression.scala      | 2 +-
 .../src/main/scala/org/apache/spark/ml/regression/FMRegressor.scala | 2 +-
 .../apache/spark/ml/regression/GeneralizedLinearRegression.scala    | 2 +-
 .../scala/org/apache/spark/ml/regression/IsotonicRegression.scala   | 2 +-
 .../scala/org/apache/spark/ml/regression/LinearRegression.scala     | 4 ++--
 31 files changed, 38 insertions(+), 34 deletions(-)

diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/FMClassifier.scala 
b/mllib/src/main/scala/org/apache/spark/ml/classification/FMClassifier.scala
index b0dba4e3cf9d..222cfbb80c3d 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/FMClassifier.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/FMClassifier.scala
@@ -345,7 +345,7 @@ class FMClassificationModel private[classification] (
 
 @Since("3.0.0")
 object FMClassificationModel extends MLReadable[FMClassificationModel] {
-  private case class Data(
+  private[ml] case class Data(
     intercept: Double,
     linear: Vector,
     factors: Matrix
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/LinearSVC.scala 
b/mllib/src/main/scala/org/apache/spark/ml/classification/LinearSVC.scala
index e67e7b0daed1..c5d1170318f7 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/LinearSVC.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/LinearSVC.scala
@@ -447,7 +447,7 @@ class LinearSVCModel private[classification] (
 
 @Since("2.2.0")
 object LinearSVCModel extends MLReadable[LinearSVCModel] {
-  private case class Data(coefficients: Vector, intercept: Double)
+  private[ml] case class Data(coefficients: Vector, intercept: Double)
 
   @Since("2.2.0")
   override def read: MLReader[LinearSVCModel] = new LinearSVCReader
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
index 093f3efba2dd..d09cacf3fb5b 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
@@ -1318,7 +1318,7 @@ class LogisticRegressionModel private[spark] (
 
 @Since("1.6.0")
 object LogisticRegressionModel extends MLReadable[LogisticRegressionModel] {
-  case class Data(
+  private[ml] case class Data(
     numClasses: Int,
     numFeatures: Int,
     interceptVector: Vector,
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala
index f8f41a6a6bec..2359749f8b48 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala
@@ -368,7 +368,7 @@ class MultilayerPerceptronClassificationModel private[ml] (
 @Since("2.0.0")
 object MultilayerPerceptronClassificationModel
   extends MLReadable[MultilayerPerceptronClassificationModel] {
-  private case class Data(weights: Vector)
+  private[ml] case class Data(weights: Vector)
 
   @Since("2.0.0")
   override def read: MLReader[MultilayerPerceptronClassificationModel] =
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala 
b/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala
index c07e3289f653..ce26478c625c 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala
@@ -598,7 +598,7 @@ class NaiveBayesModel private[ml] (
 
 @Since("1.6.0")
 object NaiveBayesModel extends MLReadable[NaiveBayesModel] {
-  private case class Data(pi: Vector, theta: Matrix, sigma: Matrix)
+  private[ml] case class Data(pi: Vector, theta: Matrix, sigma: Matrix)
 
   @Since("1.6.0")
   override def read: MLReader[NaiveBayesModel] = new NaiveBayesModelReader
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala 
b/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala
index 5924a9976c9b..ee0b19f8129d 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala
@@ -223,7 +223,11 @@ class GaussianMixtureModel private[ml] (
 
 @Since("2.0.0")
 object GaussianMixtureModel extends MLReadable[GaussianMixtureModel] {
-  private case class Data(weights: Array[Double], mus: Array[OldVector], 
sigmas: Array[OldMatrix])
+  private[ml] case class Data(
+      weights: Array[Double],
+      mus: Array[OldVector],
+      sigmas: Array[OldMatrix]
+  )
 
   @Since("2.0.0")
   override def read: MLReader[GaussianMixtureModel] = new 
GaussianMixtureModelReader
diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala 
b/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala
index ca90097eb01d..e87dc9eb040b 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala
@@ -213,7 +213,7 @@ class KMeansModel private[ml] (
 }
 
 /** Helper class for storing model data */
-private case class ClusterData(clusterIdx: Int, clusterCenter: Vector)
+private[ml] case class ClusterData(clusterIdx: Int, clusterCenter: Vector)
 
 
 /** A writer for KMeans that handles the "internal" (or default) format */
@@ -265,7 +265,7 @@ object KMeansModel extends MLReadable[KMeansModel] {
    * We store all cluster centers in a single row and use this class to store 
model data by
    * Spark 1.6 and earlier. A model can be loaded from such older data for 
backward compatibility.
    */
-  private case class OldData(clusterCenters: Array[OldVector])
+  private[ml] case class OldData(clusterCenters: Array[OldVector])
 
   private class KMeansModelReader extends MLReader[KMeansModel] {
 
diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala 
b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
index 9fde28502973..4db66ca9325c 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
@@ -643,7 +643,7 @@ class LocalLDAModel private[ml] (
 
 @Since("1.6.0")
 object LocalLDAModel extends MLReadable[LocalLDAModel] {
-  private case class LocalModelData(
+  private[ml] case class LocalModelData(
     vocabSize: Int,
     topicsMatrix: Matrix,
     docConcentration: Vector,
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/BucketedRandomProjectionLSH.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/feature/BucketedRandomProjectionLSH.scala
index aee51e4be519..ef7ff1be69a6 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/feature/BucketedRandomProjectionLSH.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/feature/BucketedRandomProjectionLSH.scala
@@ -213,7 +213,7 @@ object BucketedRandomProjectionLSH extends 
DefaultParamsReadable[BucketedRandomP
 @Since("2.1.0")
 object BucketedRandomProjectionLSHModel extends 
MLReadable[BucketedRandomProjectionLSHModel] {
   // TODO: Save using the existing format of Array[Vector] once SPARK-12878 is 
resolved.
-  private case class Data(randUnitVectors: Matrix)
+  private[ml] case class Data(randUnitVectors: Matrix)
 
   @Since("2.1.0")
   override def read: MLReader[BucketedRandomProjectionLSHModel] = {
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/ChiSqSelector.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/ChiSqSelector.scala
index 5205e3965bbc..545bac693a93 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/ChiSqSelector.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/ChiSqSelector.scala
@@ -170,7 +170,7 @@ final class ChiSqSelectorModel private[ml] (
 
 @Since("1.6.0")
 object ChiSqSelectorModel extends MLReadable[ChiSqSelectorModel] {
-  private case class Data(selectedFeatures: Seq[Int])
+  private[ml] case class Data(selectedFeatures: Seq[Int])
 
   class ChiSqSelectorModelWriter(instance: ChiSqSelectorModel) extends 
MLWriter {
 
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/CountVectorizer.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/CountVectorizer.scala
index 55e03781ad27..92b2a09f85b5 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/CountVectorizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/CountVectorizer.scala
@@ -368,7 +368,7 @@ class CountVectorizerModel(
 
 @Since("1.6.0")
 object CountVectorizerModel extends MLReadable[CountVectorizerModel] {
-  private case class Data(vocabulary: Seq[String])
+  private[ml] case class Data(vocabulary: Seq[String])
 
   private[CountVectorizerModel]
   class CountVectorizerModelWriter(instance: CountVectorizerModel) extends 
MLWriter {
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/IDF.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/IDF.scala
index e4ba7a0adec2..11ef88ac1fb8 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/IDF.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/IDF.scala
@@ -195,7 +195,7 @@ class IDFModel private[ml] (
 
 @Since("1.6.0")
 object IDFModel extends MLReadable[IDFModel] {
-  private case class Data(idf: Vector, docFreq: Array[Long], numDocs: Long)
+  private[ml] case class Data(idf: Vector, docFreq: Array[Long], numDocs: Long)
 
   private[IDFModel] class IDFModelWriter(instance: IDFModel) extends MLWriter {
 
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/MaxAbsScaler.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/MaxAbsScaler.scala
index a15578ae3185..db60ee879afb 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/MaxAbsScaler.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/MaxAbsScaler.scala
@@ -158,7 +158,7 @@ class MaxAbsScalerModel private[ml] (
 
 @Since("2.0.0")
 object MaxAbsScalerModel extends MLReadable[MaxAbsScalerModel] {
-  private case class Data(maxAbs: Vector)
+  private[ml] case class Data(maxAbs: Vector)
 
   private[MaxAbsScalerModel]
   class MaxAbsScalerModelWriter(instance: MaxAbsScalerModel) extends MLWriter {
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/MinHashLSH.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/MinHashLSH.scala
index 1bddc67f8f81..8faadcc7db49 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/MinHashLSH.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/MinHashLSH.scala
@@ -210,7 +210,7 @@ object MinHashLSH extends DefaultParamsReadable[MinHashLSH] 
{
 
 @Since("2.1.0")
 object MinHashLSHModel extends MLReadable[MinHashLSHModel] {
-  private case class Data(randCoefficients: Array[Int])
+  private[ml] case class Data(randCoefficients: Array[Int])
 
   @Since("2.1.0")
   override def read: MLReader[MinHashLSHModel] = new MinHashLSHModelReader
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/MinMaxScaler.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/MinMaxScaler.scala
index e806d4a29d33..e02a25bf7b8d 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/MinMaxScaler.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/MinMaxScaler.scala
@@ -242,7 +242,7 @@ class MinMaxScalerModel private[ml] (
 
 @Since("1.6.0")
 object MinMaxScalerModel extends MLReadable[MinMaxScalerModel] {
-  private case class Data(originalMin: Vector, originalMax: Vector)
+  private[ml] case class Data(originalMin: Vector, originalMax: Vector)
 
   private[MinMaxScalerModel]
   class MinMaxScalerModelWriter(instance: MinMaxScalerModel) extends MLWriter {
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/OneHotEncoder.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/OneHotEncoder.scala
index d34ffbfc202f..0a9b6c46feae 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/OneHotEncoder.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/OneHotEncoder.scala
@@ -401,7 +401,7 @@ class OneHotEncoderModel private[ml] (
 
 @Since("3.0.0")
 object OneHotEncoderModel extends MLReadable[OneHotEncoderModel] {
-  private case class Data(categorySizes: Array[Int])
+  private[ml] case class Data(categorySizes: Array[Int])
 
   private[OneHotEncoderModel]
   class OneHotEncoderModelWriter(instance: OneHotEncoderModel) extends 
MLWriter {
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/PCA.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/PCA.scala
index 0c80d442114c..e5fd96671b20 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/PCA.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/PCA.scala
@@ -181,7 +181,7 @@ class PCAModel private[ml] (
 
 @Since("1.6.0")
 object PCAModel extends MLReadable[PCAModel] {
-  private case class Data(pc: Matrix, explainedVariance: Vector)
+  private[ml] case class Data(pc: Matrix, explainedVariance: Vector)
 
   private[PCAModel] class PCAModelWriter(instance: PCAModel) extends MLWriter {
 
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/RFormula.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/RFormula.scala
index abb69d7e873d..b482d08b2fac 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/RFormula.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/RFormula.scala
@@ -496,7 +496,7 @@ private class ColumnPruner(override val uid: String, val 
columnsToPrune: Set[Str
 }
 
 private object ColumnPruner extends MLReadable[ColumnPruner] {
-  private case class Data(columnsToPrune: Seq[String])
+  private[ml] case class Data(columnsToPrune: Seq[String])
 
   override def read: MLReader[ColumnPruner] = new ColumnPrunerReader
 
@@ -588,7 +588,7 @@ private class VectorAttributeRewriter(
 }
 
 private object VectorAttributeRewriter extends 
MLReadable[VectorAttributeRewriter] {
-  private case class Data(vectorCol: String, prefixesToRewrite: Map[String, 
String])
+  private[ml] case class Data(vectorCol: String, prefixesToRewrite: 
Map[String, String])
 
   override def read: MLReader[VectorAttributeRewriter] = new 
VectorAttributeRewriterReader
 
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/RobustScaler.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/RobustScaler.scala
index bb0179613b7b..246e553b3add 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/RobustScaler.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/RobustScaler.scala
@@ -279,7 +279,7 @@ class RobustScalerModel private[ml] (
 
 @Since("3.0.0")
 object RobustScalerModel extends MLReadable[RobustScalerModel] {
-  private case class Data(range: Vector, median: Vector)
+  private[ml] case class Data(range: Vector, median: Vector)
 
   private[RobustScalerModel]
   class RobustScalerModelWriter(instance: RobustScalerModel) extends MLWriter {
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala
index 19c3e4ca25cc..87e2557eb484 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala
@@ -200,7 +200,7 @@ class StandardScalerModel private[ml] (
 
 @Since("1.6.0")
 object StandardScalerModel extends MLReadable[StandardScalerModel] {
-  private case class Data(std: Vector, mean: Vector)
+  private[ml] case class Data(std: Vector, mean: Vector)
 
   private[StandardScalerModel]
   class StandardScalerModelWriter(instance: StandardScalerModel) extends 
MLWriter {
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala
index 30b8c813188f..243333f9f0de 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala
@@ -469,7 +469,7 @@ class StringIndexerModel (
 
 @Since("1.6.0")
 object StringIndexerModel extends MLReadable[StringIndexerModel] {
-  private case class Data(labelsArray: Seq[Seq[String]])
+  private[ml] case class Data(labelsArray: Seq[Seq[String]])
 
   private[StringIndexerModel]
   class StringIndexModelWriter(instance: StringIndexerModel) extends MLWriter {
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/TargetEncoder.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/TargetEncoder.scala
index 8634779b0bc9..aa11a139b022 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/TargetEncoder.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/TargetEncoder.scala
@@ -402,7 +402,7 @@ class TargetEncoderModel private[ml] (
 
 @Since("4.0.0")
 object TargetEncoderModel extends MLReadable[TargetEncoderModel] {
-  private case class Data(
+  private[ml] case class Data(
     index: Int, categories: Array[Double],
     counts: Array[Double], stats: Array[Double])
 
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/UnivariateFeatureSelector.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/feature/UnivariateFeatureSelector.scala
index 75ff263d61b3..c394f121a215 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/feature/UnivariateFeatureSelector.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/feature/UnivariateFeatureSelector.scala
@@ -338,7 +338,7 @@ class UnivariateFeatureSelectorModel private[ml](
 
 @Since("3.1.1")
 object UnivariateFeatureSelectorModel extends 
MLReadable[UnivariateFeatureSelectorModel] {
-  private case class Data(selectedFeatures: Seq[Int])
+  private[ml] case class Data(selectedFeatures: Seq[Int])
 
   @Since("3.1.1")
   override def read: MLReader[UnivariateFeatureSelectorModel] =
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/VarianceThresholdSelector.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/feature/VarianceThresholdSelector.scala
index 08ba51b413d2..0549434e2429 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/feature/VarianceThresholdSelector.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/feature/VarianceThresholdSelector.scala
@@ -176,7 +176,7 @@ class VarianceThresholdSelectorModel private[ml](
 
 @Since("3.1.0")
 object VarianceThresholdSelectorModel extends 
MLReadable[VarianceThresholdSelectorModel] {
-  private case class Data(selectedFeatures: Seq[Int])
+  private[ml] case class Data(selectedFeatures: Seq[Int])
 
   @Since("3.1.0")
   override def read: MLReader[VarianceThresholdSelectorModel] =
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/VectorIndexer.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/VectorIndexer.scala
index 48ad67af0934..8d98153a8a14 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/VectorIndexer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/VectorIndexer.scala
@@ -528,7 +528,7 @@ class VectorIndexerModel private[ml] (
 
 @Since("1.6.0")
 object VectorIndexerModel extends MLReadable[VectorIndexerModel] {
-  private case class Data(numFeatures: Int, categoryMaps: Map[Int, Map[Double, 
Int]])
+  private[ml] case class Data(numFeatures: Int, categoryMaps: Map[Int, 
Map[Double, Int]])
 
   private[VectorIndexerModel]
   class VectorIndexerModelWriter(instance: VectorIndexerModel) extends 
MLWriter {
diff --git a/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala 
b/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
index 0dd10691c5d2..039361aea08e 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
@@ -547,7 +547,7 @@ class ALSModel private[ml] (
   }
 }
 
-private case class FeatureData(id: Int, features: Array[Float])
+private[ml] case class FeatureData(id: Int, features: Array[Float])
 
 @Since("1.6.0")
 object ALSModel extends MLReadable[ALSModel] {
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala
index 1b77c1d4d51a..de9d016edea6 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala
@@ -497,7 +497,7 @@ class AFTSurvivalRegressionModel private[ml] (
 
 @Since("1.6.0")
 object AFTSurvivalRegressionModel extends 
MLReadable[AFTSurvivalRegressionModel] {
-  private case class Data(coefficients: Vector, intercept: Double, scale: 
Double)
+  private[ml] case class Data(coefficients: Vector, intercept: Double, scale: 
Double)
 
   @Since("1.6.0")
   override def read: MLReader[AFTSurvivalRegressionModel] = new 
AFTSurvivalRegressionModelReader
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/regression/FMRegressor.scala 
b/mllib/src/main/scala/org/apache/spark/ml/regression/FMRegressor.scala
index 09df9295d618..0bb89354c47a 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/FMRegressor.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/FMRegressor.scala
@@ -510,7 +510,7 @@ class FMRegressionModel private[regression] (
 
 @Since("3.0.0")
 object FMRegressionModel extends MLReadable[FMRegressionModel] {
-  private case class Data(
+  private[ml] case class Data(
      intercept: Double,
      linear: Vector,
      factors: Matrix)
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
index 0584a21d25fc..777b70e7d021 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
@@ -1143,7 +1143,7 @@ class GeneralizedLinearRegressionModel private[ml] (
 
 @Since("2.0.0")
 object GeneralizedLinearRegressionModel extends 
MLReadable[GeneralizedLinearRegressionModel] {
-  private case class Data(intercept: Double, coefficients: Vector)
+  private[ml] case class Data(intercept: Double, coefficients: Vector)
 
   @Since("2.0.0")
   override def read: MLReader[GeneralizedLinearRegressionModel] =
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/regression/IsotonicRegression.scala 
b/mllib/src/main/scala/org/apache/spark/ml/regression/IsotonicRegression.scala
index 5d93541ab245..131fbcd4d167 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/regression/IsotonicRegression.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/regression/IsotonicRegression.scala
@@ -285,7 +285,7 @@ class IsotonicRegressionModel private[ml] (
 
 @Since("1.6.0")
 object IsotonicRegressionModel extends MLReadable[IsotonicRegressionModel] {
-  private case class Data(
+  private[ml] case class Data(
     boundaries: Array[Double],
     predictions: Array[Double],
     isotonic: Boolean)
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala 
b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
index ea27afa75551..919cec4e16e4 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
@@ -786,7 +786,7 @@ class LinearRegressionModel private[ml] (
   }
 }
 
-private case class LinearModelData(intercept: Double, coefficients: Vector, 
scale: Double)
+private[ml] case class LinearModelData(intercept: Double, coefficients: 
Vector, scale: Double)
 
 /** A writer for LinearRegression that handles the "internal" (or default) 
format */
 private class InternalLinearRegressionModelWriter
@@ -816,7 +816,7 @@ private class PMMLLinearRegressionModelWriter
 
   override def stageName(): String = 
"org.apache.spark.ml.regression.LinearRegressionModel"
 
-  private case class Data(intercept: Double, coefficients: Vector)
+  private[ml] case class Data(intercept: Double, coefficients: Vector)
 
   override def write(path: String, sparkSession: SparkSession,
     optionMap: mutable.Map[String, String], stage: PipelineStage): Unit = {


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to