Your message dated Mon, 02 Dec 2024 16:50:44 +0000
with message-id <e1ti9de-001etc...@fasolo.debian.org>
and subject line Bug#1082291: fixed in scikit-learn 1.4.2+dfsg-7
has caused the Debian Bug report #1082291,
regarding scikit-learn: tests fail with scipy 1.14
to be marked as done.
This means that you claim that the problem has been dealt with.
If this is not the case it is now your responsibility to reopen the
Bug report if necessary, and/or fix the problem forthwith.
(NB: If you are a system administrator and have no idea what this
message is talking about, this may indicate a serious mail system
misconfiguration somewhere. Please contact ow...@bugs.debian.org
immediately.)
--
1082291: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1082291
Debian Bug Tracking System
Contact ow...@bugs.debian.org with problems
--- Begin Message ---
Source: scikit-learn
Version: 1.4.2+dfsg-6
Severity: normal
scikit-learn tests fail with scipy 1.14 from experimental.
Perhaps it's been fixed in the latest upstream release; I don't know.
https://ci.debian.net/packages/s/scikit-learn/unstable/amd64/51759247/
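If it helps to reproduce locally, something like the following should run
just the failing tests (my own sketch, not from the CI setup; the path is
where Debian installs the sklearn test suite, adjust as needed):

import pytest

# Run only the StandardScaler.partial_fit numerical-stability tests
# from the installed Debian package.
pytest.main([
    "/usr/lib/python3/dist-packages/sklearn/preprocessing/tests/test_data.py",
    "-k", "standard_scaler_partial_fit_numerical_stability",
])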
648s _______ test_standard_scaler_partial_fit_numerical_stability[csc_array] ________
648s 
648s sparse_container = <class 'scipy.sparse._csc.csc_array'>
648s 
648s     @pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
648s     def test_standard_scaler_partial_fit_numerical_stability(sparse_container):
648s         # Test if the incremental computation introduces significant errors
648s         # for large datasets with values of large magnitude
648s         rng = np.random.RandomState(0)
648s         n_features = 2
648s         n_samples = 100
648s         offsets = rng.uniform(-1e15, 1e15, size=n_features)
648s         scales = rng.uniform(1e3, 1e6, size=n_features)
648s         X = rng.randn(n_samples, n_features) * scales + offsets
648s 
648s         scaler_batch = StandardScaler().fit(X)
648s         scaler_incr = StandardScaler()
648s         for chunk in X:
648s             scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
648s 
648s         # Regardless of abs values, they must not differ by more than 6 significant digits
648s         tol = 10 ** (-6)
648s         assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
648s         assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
648s         assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
648s         # NOTE Be aware that for much larger offsets std is very unstable (last
648s         # assert) while mean is OK.
648s 
648s         # Sparse input
648s         size = (100, 3)
648s         scale = 1e20
648s         X = sparse_container(rng.randint(0, 2, size).astype(np.float64) * scale)
648s 
648s         # with_mean=False is required with sparse input
648s         scaler = StandardScaler(with_mean=False).fit(X)
648s         scaler_incr = StandardScaler(with_mean=False)
648s 
648s         for chunk in X:
648s >           scaler_incr = scaler_incr.partial_fit(chunk)
648s 
648s /usr/lib/python3/dist-packages/sklearn/preprocessing/tests/test_data.py:598: 
648s _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
648s /usr/lib/python3/dist-packages/sklearn/base.py:1474: in wrapper
648s     return fit_method(estimator, *args, **kwargs)
648s _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
648s 
648s self = StandardScaler(with_mean=False)
648s X = <Compressed Sparse Row sparse array of dtype 'float64'
648s     with 0 stored elements and shape (3,)>
648s y = None, sample_weight = None
648s 
648s     @_fit_context(prefer_skip_nested_validation=True)
648s     def partial_fit(self, X, y=None, sample_weight=None):
648s         """Online computation of mean and std on X for later scaling.
648s 
648s         All of X is processed as a single batch. This is intended for cases
648s         when :meth:`fit` is not feasible due to very large number of
648s         `n_samples` or because X is read from a continuous stream.
648s 
648s         The algorithm for incremental mean and std is given in Equation 1.5a,b
648s         in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
648s         for computing the sample variance: Analysis and recommendations."
648s         The American Statistician 37.3 (1983): 242-247:
648s 
648s         Parameters
648s         ----------
648s         X : {array-like, sparse matrix} of shape (n_samples, n_features)
648s             The data used to compute the mean and standard deviation
648s             used for later scaling along the features axis.
648s 
648s         y : None
648s             Ignored.
648s 
648s         sample_weight : array-like of shape (n_samples,), default=None
648s             Individual weights for each sample.
648s 
648s             .. versionadded:: 0.24
648s                parameter *sample_weight* support to StandardScaler.
648s 
648s         Returns
648s         -------
648s         self : object
648s             Fitted scaler.
648s         """
648s         first_call = not hasattr(self, "n_samples_seen_")
648s         X = self._validate_data(
648s             X,
648s             accept_sparse=("csr", "csc"),
648s             dtype=FLOAT_DTYPES,
648s             force_all_finite="allow-nan",
648s             reset=first_call,
648s         )
648s >       n_features = X.shape[1]
648s E       IndexError: tuple index out of range
648s 
648s /usr/lib/python3/dist-packages/sklearn/preprocessing/_data.py:919: IndexError
648s _______ test_standard_scaler_partial_fit_numerical_stability[csr_array] ________
648s 
648s sparse_container = <class 'scipy.sparse._csr.csr_array'>
648s 
648s [traceback identical to the csc_array failure above, ending in the same
648s  IndexError at /usr/lib/python3/dist-packages/sklearn/preprocessing/_data.py:919]
...
650s FAILED ../../../../usr/lib/python3/dist-packages/sklearn/preprocessing/tests/test_data.py::test_standard_scaler_partial_fit_numerical_stability[csc_array]
650s FAILED ../../../../usr/lib/python3/dist-packages/sklearn/preprocessing/tests/test_data.py::test_standard_scaler_partial_fit_numerical_stability[csr_array]
650s = 2 failed, 29267 passed, 3384 skipped, 2 deselected, 88 xfailed, 45 xpassed, 3276 warnings in 600.51s (0:10:00) =
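If I read the traceback right, the IndexError comes from a behaviour
difference between scipy's legacy sparse matrices and the newer sparse
arrays: iterating over a 2-D csr_matrix yields 2-D rows of shape
(1, n_features), while under scipy 1.14 iterating over a csr_array
apparently yields 1-D rows of shape (n_features,), so partial_fit's
X.shape[1] has nothing to index. A minimal sketch of the difference
(variable names are mine, not from the test):

import numpy as np
from scipy import sparse

rng = np.random.RandomState(0)
dense = rng.randint(0, 2, (100, 3)).astype(np.float64)

# Legacy sparse matrix: rows iterate as 2-D, so row.shape[1] works.
mat_row = next(iter(sparse.csr_matrix(dense)))
print(mat_row.shape)  # (1, 3)

# Sparse array under scipy 1.14: rows iterate as 1-D, so row.shape[1]
# raises IndexError -- seemingly the failure in the log above.
arr_row = next(iter(sparse.csr_array(dense)))
print(arr_row.shape)  # (3,)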
We've only recently upgraded to scipy 1.13 but we'll want to upgrade
further to scipy 1.14 before too long.
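In case it's useful, here is an untested sketch of how the test's sparse
loop could stay 2-D under both APIs, by fancy-indexing one row at a time
instead of iterating:

# Untested: X[[i]] selects a single row but keeps two dimensions,
# i.e. shape (1, n_features), for sparse matrices and sparse arrays alike.
for i in range(X.shape[0]):
    scaler_incr = scaler_incr.partial_fit(X[[i]])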
--- End Message ---
--- Begin Message ---
Source: scikit-learn
Source-Version: 1.4.2+dfsg-7
Done: Thomas Goirand <z...@debian.org>
We believe that the bug you reported is fixed in the latest version of
scikit-learn, which is due to be installed in the Debian FTP archive.
A summary of the changes between this version and the previous one is
attached.
Thank you for reporting the bug, which will now be closed. If you
have further comments please address them to 1082...@bugs.debian.org,
and the maintainer will reopen the bug report if appropriate.
Debian distribution maintenance software
pp.
Thomas Goirand <z...@debian.org> (supplier of updated scikit-learn package)
(This message was generated automatically at their request; if you
believe that there is a problem with it please contact the archive
administrators by mailing ftpmas...@ftp-master.debian.org)
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA256
Format: 1.8
Date: Fri, 29 Nov 2024 13:24:48 +0100
Source: scikit-learn
Architecture: source
Version: 1.4.2+dfsg-7
Distribution: unstable
Urgency: medium
Maintainer: Debian Science Maintainers <debian-science-maintain...@lists.alioth.debian.org>
Changed-By: Thomas Goirand <z...@debian.org>
Closes: 1082291
Changes:
 scikit-learn (1.4.2+dfsg-7) unstable; urgency=medium
 .
   * Team upload.
   * Use pytest-xdist to run tests in parallel.
   * Exclude 3 more tests from autopkgtest (Closes: #1082291).
Checksums-Sha1:
 adea92e2961828d2cb81d9a3680648b7d6c0d7f3 3169 scikit-learn_1.4.2+dfsg-7.dsc
 47d4fee6795230c173449dab220473159c54bf63 24280 scikit-learn_1.4.2+dfsg-7.debian.tar.xz
 e583d5bd46d82332f630caaaa145744b50ca451a 15017 scikit-learn_1.4.2+dfsg-7_amd64.buildinfo
Checksums-Sha256:
 60934671b1c5b09ddeadb7c52b873013b627204f41f17fdd2fcd2366ca5e95a7 3169 scikit-learn_1.4.2+dfsg-7.dsc
 b251b15fa674c8659607663ccedd08ec7ac6c299eb55a30058432f03d8c473ac 24280 scikit-learn_1.4.2+dfsg-7.debian.tar.xz
 5c5d4a1bb4e735d219f19b79fc9d2f31f5092f973d1274a4a0aeac1e96b3c2fd 15017 scikit-learn_1.4.2+dfsg-7_amd64.buildinfo
Files:
 0bebb0f997b323d4e89b41ab2efe3980 3169 python optional scikit-learn_1.4.2+dfsg-7.dsc
 cb1192e9390a5f2ea6431162f3265774 24280 python optional scikit-learn_1.4.2+dfsg-7.debian.tar.xz
 ea79d815ef01f29d6d9ba1702414f3ac 15017 python optional scikit-learn_1.4.2+dfsg-7_amd64.buildinfo
-----BEGIN PGP SIGNATURE-----
iQIzBAEBCAAdFiEEoLGp81CJVhMOekJc1BatFaxrQ/4FAmdN4LMACgkQ1BatFaxr
Q/4+rQ/9Hkg7fLvGP9sHI+zb6TtS0K99KtWlgWP1BuXmbU5YDCKV/bl5IHP0bA5U
T59CoNxz+lFtl5XNwim6l4YSm0LWUZnKUuIIcNCOopojIiUuRtNvYHn5CKW8XPmc
scxF+5vRBFCuph3xoPCcbjo/IVfQS7JV3q92Vud2F+a4LvHAZ6ilCH5RRPBJE1hi
C7Em6wJeRhEFEmyX+k0U+MAR824utdWolGk8+jgA3ZeGwJGt3VPCuFAJb0WWqdjZ
4mY8B9iG8W8htVt7BoHO7raG8KWzMHhwX0rWLxC5Tvz6NXKx/N/HwnfRnPwgJdTV
c9uudsXV1BRA9F5XAq7beQ1Htiy+G7eRqQ25xMxbnTUFxhaKwWe7VgnT5rQPRjCS
7DF/JADghDPHeuHli4nMgTuIq2wbAiH/jd3/Ia6H+n1y39W+FK0Lkf3vQ/e1mnNL
ZR17M237g/srQxQY8lxYZl2/nkmR25KaajJIpIogpuPXHfNYxmxnLAtm9qHp4Y/9
uzaO2n8Y5WRmX4LQ1c04TrbwDJlz1gl6hGpNC41zMwBZcujBtSbQipGjJJD7vEjt
bYr3hd3cagrbV7LiFWTvf2L44DJuXYlE74bFxCHaxyHVvmetdDpd4FlY5xolTHki
rB7dvt54sT/hhyPzScrI4Jkf6tlrVCxpaFeqEQHAP9zyv2NjMa8=
=mDY3
-----END PGP SIGNATURE-----
--- End Message ---