commit:     12a265407c6996b16a2f54e6ee32448fcfe74ea6
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Mon Dec 25 09:39:47 2023 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Mon Dec 25 09:40:29 2023 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=12a26540

sci-libs/datasets: 2.14.7 bump, remove old

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/datasets/Manifest                         |   5 +-
 sci-libs/datasets/datasets-2.11.0-r2.ebuild        |  52 -----
 sci-libs/datasets/datasets-2.12.0.ebuild           |  53 -----
 sci-libs/datasets/datasets-2.13.1.ebuild           |  59 -----
 ...tasets-2.14.4.ebuild => datasets-2.14.7.ebuild} |  13 +-
 .../datasets/files/datasets-2.11.0-tests.patch     | 242 ---------------------
 .../datasets/files/datasets-2.12.0-tests.patch     | 242 ---------------------
 7 files changed, 11 insertions(+), 655 deletions(-)
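
A bump like this is typically prepared by copying the previous ebuild and
regenerating the Manifest. A minimal sketch of that workflow, assuming a local
checkout of ::gentoo (standard Portage tooling, not part of this commit):

    cd sci-libs/datasets
    cp datasets-2.14.4.ebuild datasets-2.14.7.ebuild    # start from the old version
    ebuild datasets-2.14.7.ebuild manifest              # fetch tarball, record BLAKE2B/SHA512
    pkgcheck scan                                       # QA checks before committing
    FEATURES=test emerge -1 "=sci-libs/datasets-2.14.7" # build and run the test suite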

diff --git a/sci-libs/datasets/Manifest b/sci-libs/datasets/Manifest
index ac1dc508ab82..aedfb1428ab5 100644
--- a/sci-libs/datasets/Manifest
+++ b/sci-libs/datasets/Manifest
@@ -1,4 +1 @@
-DIST datasets-2.11.0.gh.tar.gz 2141289 BLAKE2B 0fb471dd6ee5de3831eb6586c4a15e67381262470b72d5ab02ee87dfc7977cb4d40e04da6507049d1e47cb8948cad11988bb7627293b48231e1cd413d2cfb885 SHA512 9ec2274d7978e3dde1b2f8ce78dd65bdf66742bbfee7b8672af46216aeaae3ef5c4604a8a5ea0bdee808f1c362cca9a122c16d2e9a161678148e581e4cd5c863
-DIST datasets-2.12.0.gh.tar.gz 2149274 BLAKE2B 8f188901dfe293ac2b673f37e0d135e01a8f131adf9030ef1815ce2faa7ba0b36faf64a002cae1ced2d3ed5b7f50f43ba5cda90ab9254fd5f66bbfaed6085f3f SHA512 7389a1c6ee8ff4cda39a2c3f52218aa6f4b1cd6b45f48f83bfa2191359a8999d54153120d968b3cf7e5e932f88822783578e3d859dcb20f38fb0d915d88220c9
-DIST datasets-2.13.1.gh.tar.gz 2166516 BLAKE2B 2269434b94145837e491ec6784218f6972df94a558b9067020076fb44dd937a103e3c57dd3761bb0a4cb3c3b6248299ec2a6c3f03c5bd016daaa8957591bf7b6 SHA512 3d2d1aad86b6a472cd6d0e6c661d4730cc0ed1a0fff55c739fc6a0ba68a8f53ae8789029553abd713d0b30648dd020f1880b2d8110c72b5c89a320c2b24f7752
-DIST datasets-2.14.4.gh.tar.gz 2142214 BLAKE2B d4c98a9f29ca748c3c20f32b9a89f053cf6327f56353341ba0073d3b5561ed9aea372d2fa74cadfa8b0f2ba0f6c2e9b3181cca9724719cfe3969f36bbb893f11 SHA512 c3a0701dd83474f4a0d839fe4ef56cfccc9f1d45b6506d44d0f9100bc9dbc90014d16c8e0090dc13f3b2d963bd96af45281bde6e3d7af230467ec7dd26204aa3
+DIST datasets-2.14.7.gh.tar.gz 2145270 BLAKE2B b3196f75bd52432091052e63ccfc538072b30bead213c7ddc549724c8efedacdf6bb8934574220ee62e27a48240a769ad5e79c4e39cad92538dc6947f7f9bd2b SHA512 87ecaec34670af5b4879aaa85e730fc4ba376028e7ca033a556aec9ac55156f11252dd130c12dc160d5c3d5618fa8888072e46c7dcc01eed9c0e2e07657b0b74
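
The new DIST entry can be cross-checked by hand; a short sketch, using the
SRC_URI pattern from the ebuilds below (coreutils b2sum emits the BLAKE2b-512
digest that Gentoo records as BLAKE2B):

    wget -O datasets-2.14.7.gh.tar.gz \
        https://github.com/huggingface/datasets/archive/refs/tags/2.14.7.tar.gz
    b2sum datasets-2.14.7.gh.tar.gz      # should match the BLAKE2B field above
    sha512sum datasets-2.14.7.gh.tar.gz  # should match the SHA512 field above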

diff --git a/sci-libs/datasets/datasets-2.11.0-r2.ebuild b/sci-libs/datasets/datasets-2.11.0-r2.ebuild
deleted file mode 100644
index a2f4ad26e65b..000000000000
--- a/sci-libs/datasets/datasets-2.11.0-r2.ebuild
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2023 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-DISTUTILS_USE_PEP517=setuptools
-PYTHON_COMPAT=( python3_{9..11} )
-DISTUTILS_SINGLE_IMPL=1
-inherit distutils-r1
-
-DESCRIPTION="Access and share datasets for Audio, Computer Vision, and NLP 
tasks"
-HOMEPAGE="
-       https://pypi.org/project/datasets/
-"
-SRC_URI="https://github.com/huggingface/${PN}/archive/refs/tags/${PV}.tar.gz
-       -> ${P}.gh.tar.gz"
-IUSE="test"
-
-LICENSE="Apache-2.0"
-SLOT="0"
-KEYWORDS="~amd64"
-
-RDEPEND="
-       ${PYTHON_DEPS}
-       sci-libs/pytorch[${PYTHON_SINGLE_USEDEP}]
-       $(python_gen_cond_dep '
-               dev-python/absl-py[${PYTHON_USEDEP}]
-               dev-python/aiohttp[${PYTHON_USEDEP}]
-               dev-python/fsspec[${PYTHON_USEDEP}]
-               dev-python/multiprocess[${PYTHON_USEDEP}]
-               dev-python/pandas[${PYTHON_USEDEP}]
-               dev-python/pyarrow[${PYTHON_USEDEP},parquet,snappy]
-               dev-python/tqdm[${PYTHON_USEDEP}]
-               dev-python/xxhash[${PYTHON_USEDEP}]
-               dev-python/zstandard[${PYTHON_USEDEP}]
-               sci-libs/huggingface_hub[${PYTHON_USEDEP}]
-               sci-libs/scikit-learn[${PYTHON_USEDEP}]
-       ')
-"
-DEPEND="${RDEPEND}"
-BDEPEND="test? (
-       $(python_gen_cond_dep '
-               dev-python/pytest-datadir[${PYTHON_USEDEP}]
-               dev-python/decorator[${PYTHON_USEDEP}]
-               sci-libs/jiwer[${PYTHON_USEDEP}]
-               sci-libs/seqeval[${PYTHON_USEDEP}]
-       ')
-)"
-
-PATCHES=( "${FILESDIR}"/${P}-tests.patch )
-
-distutils_enable_tests pytest

diff --git a/sci-libs/datasets/datasets-2.12.0.ebuild b/sci-libs/datasets/datasets-2.12.0.ebuild
deleted file mode 100644
index 66b609fd2b57..000000000000
--- a/sci-libs/datasets/datasets-2.12.0.ebuild
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2023 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-DISTUTILS_USE_PEP517=setuptools
-PYTHON_COMPAT=( python3_{9..11} )
-DISTUTILS_SINGLE_IMPL=1
-inherit distutils-r1
-
-DESCRIPTION="Access and share datasets for Audio, Computer Vision, and NLP 
tasks"
-HOMEPAGE="
-       https://pypi.org/project/datasets/
-"
-SRC_URI="https://github.com/huggingface/${PN}/archive/refs/tags/${PV}.tar.gz
-       -> ${P}.gh.tar.gz"
-IUSE="test"
-
-LICENSE="Apache-2.0"
-SLOT="0"
-KEYWORDS="~amd64"
-
-RDEPEND="
-       ${PYTHON_DEPS}
-       sci-libs/pytorch[${PYTHON_SINGLE_USEDEP}]
-       $(python_gen_cond_dep '
-               dev-python/absl-py[${PYTHON_USEDEP}]
-               dev-python/aiohttp[${PYTHON_USEDEP}]
-               dev-python/fsspec[${PYTHON_USEDEP}]
-               dev-python/multiprocess[${PYTHON_USEDEP}]
-               dev-python/pandas[${PYTHON_USEDEP}]
-               dev-python/pyarrow[${PYTHON_USEDEP},parquet,snappy]
-               dev-python/tqdm[${PYTHON_USEDEP}]
-               dev-python/xxhash[${PYTHON_USEDEP}]
-               dev-python/zstandard[${PYTHON_USEDEP}]
-               sci-libs/huggingface_hub[${PYTHON_USEDEP}]
-               sci-libs/scikit-learn[${PYTHON_USEDEP}]
-       ')
-"
-DEPEND="${RDEPEND}"
-BDEPEND="test? (
-       $(python_gen_cond_dep '
-               dev-python/pytest-datadir[${PYTHON_USEDEP}]
-               dev-python/decorator[${PYTHON_USEDEP}]
-               =dev-python/sqlalchemy-1*[${PYTHON_USEDEP}]
-               sci-libs/jiwer[${PYTHON_USEDEP}]
-               sci-libs/seqeval[${PYTHON_USEDEP}]
-       ')
-)"
-
-PATCHES=( "${FILESDIR}"/${P}-tests.patch )
-
-distutils_enable_tests pytest

diff --git a/sci-libs/datasets/datasets-2.13.1.ebuild b/sci-libs/datasets/datasets-2.13.1.ebuild
deleted file mode 100644
index 60a16a43e361..000000000000
--- a/sci-libs/datasets/datasets-2.13.1.ebuild
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2023 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-DISTUTILS_USE_PEP517=setuptools
-PYTHON_COMPAT=( python3_{9..11} )
-DISTUTILS_SINGLE_IMPL=1
-inherit distutils-r1
-
-DESCRIPTION="Access and share datasets for Audio, Computer Vision, and NLP 
tasks"
-HOMEPAGE="
-       https://pypi.org/project/datasets/
-"
-SRC_URI="https://github.com/huggingface/${PN}/archive/refs/tags/${PV}.tar.gz
-       -> ${P}.gh.tar.gz"
-IUSE="test"
-
-LICENSE="Apache-2.0"
-SLOT="0"
-KEYWORDS="~amd64"
-
-RDEPEND="
-       ${PYTHON_DEPS}
-       sci-libs/pytorch[${PYTHON_SINGLE_USEDEP}]
-       $(python_gen_cond_dep '
-               dev-python/absl-py[${PYTHON_USEDEP}]
-               dev-python/aiohttp[${PYTHON_USEDEP}]
-               dev-python/fsspec[${PYTHON_USEDEP}]
-               dev-python/multiprocess[${PYTHON_USEDEP}]
-               dev-python/pandas[${PYTHON_USEDEP}]
-               dev-python/pyarrow[${PYTHON_USEDEP},parquet,snappy]
-               dev-python/tqdm[${PYTHON_USEDEP}]
-               dev-python/xxhash[${PYTHON_USEDEP}]
-               dev-python/zstandard[${PYTHON_USEDEP}]
-               sci-libs/huggingface_hub[${PYTHON_USEDEP}]
-               sci-libs/scikit-learn[${PYTHON_USEDEP}]
-       ')
-"
-DEPEND="${RDEPEND}"
-BDEPEND="test? (
-       $(python_gen_cond_dep '
-               dev-python/pytest-datadir[${PYTHON_USEDEP}]
-               dev-python/decorator[${PYTHON_USEDEP}]
-               =dev-python/sqlalchemy-1*[${PYTHON_USEDEP}]
-               sci-libs/jiwer[${PYTHON_USEDEP}]
-               sci-libs/seqeval[${PYTHON_USEDEP}]
-       ')
-)"
-
-PATCHES=( "${FILESDIR}"/${PN}-2.12.0-tests.patch )
-
-distutils_enable_tests pytest
-
-src_prepare() {
-       distutils-r1_src_prepare
-       rm tests/packaged_modules/test_spark.py || die
-       rm tests/test_upstream_hub.py || die
-}

diff --git a/sci-libs/datasets/datasets-2.14.4.ebuild b/sci-libs/datasets/datasets-2.14.7.ebuild
similarity index 76%
rename from sci-libs/datasets/datasets-2.14.4.ebuild
rename to sci-libs/datasets/datasets-2.14.7.ebuild
index 08ed796e9c2d..0fab7cd550c4 100644
--- a/sci-libs/datasets/datasets-2.14.4.ebuild
+++ b/sci-libs/datasets/datasets-2.14.7.ebuild
@@ -20,26 +20,30 @@ LICENSE="Apache-2.0"
 SLOT="0"
 KEYWORDS="~amd64"
 
+# For pin on fsspec see https://github.com/huggingface/datasets/issues/6333
 RDEPEND="
        ${PYTHON_DEPS}
        sci-libs/pytorch[${PYTHON_SINGLE_USEDEP}]
        $(python_gen_cond_dep '
                dev-python/absl-py[${PYTHON_USEDEP}]
                dev-python/aiohttp[${PYTHON_USEDEP}]
-               dev-python/fsspec[${PYTHON_USEDEP}]
+               <=dev-python/fsspec-2023.10.0[${PYTHON_USEDEP}]
                dev-python/multiprocess[${PYTHON_USEDEP}]
+               dev-python/packaging[${PYTHON_USEDEP}]
                dev-python/pandas[${PYTHON_USEDEP}]
                dev-python/pyarrow[${PYTHON_USEDEP},parquet,snappy]
+               dev-python/pyyaml[${PYTHON_USEDEP}]
                dev-python/tqdm[${PYTHON_USEDEP}]
                dev-python/xxhash[${PYTHON_USEDEP}]
                dev-python/zstandard[${PYTHON_USEDEP}]
-               sci-libs/huggingface_hub[${PYTHON_USEDEP}]
+               >=sci-libs/huggingface_hub-0.14.0[${PYTHON_USEDEP}]
                sci-libs/scikit-learn[${PYTHON_USEDEP}]
        ')
 "
 DEPEND="${RDEPEND}"
 BDEPEND="test? (
        $(python_gen_cond_dep '
+               dev-python/absl-py[${PYTHON_USEDEP}]
                dev-python/pytest-datadir[${PYTHON_USEDEP}]
                dev-python/decorator[${PYTHON_USEDEP}]
                =dev-python/sqlalchemy-1*[${PYTHON_USEDEP}]
@@ -48,7 +52,7 @@ BDEPEND="test? (
        ')
 )"
 
-PATCHES=( "${FILESDIR}"/${P}-tests.patch )
+PATCHES=( "${FILESDIR}"/${PN}-2.14.4-tests.patch )
 
 distutils_enable_tests pytest
 
@@ -56,4 +60,7 @@ src_prepare() {
        distutils-r1_src_prepare
        rm tests/packaged_modules/test_spark.py || die
        rm tests/test_upstream_hub.py || die
+       sed -i -e \
+               "/pyarrow_hotfix/d" \
+               src/datasets/features/features.py || die
 }
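
The new sed call in src_prepare() strips upstream's pyarrow_hotfix usage,
presumably because that module is not packaged in ::gentoo. A sketch of the
equivalent standalone command (the quoted upstream line is an assumption
inferred from the sed pattern, not taken from this diff):

    # features.py in datasets 2.14.7 gained a line along the lines of:
    #     import pyarrow_hotfix
    # The d command deletes every line matching /pyarrow_hotfix/; -i edits
    # the file in place, and "|| die" aborts the phase if sed fails.
    sed -i -e "/pyarrow_hotfix/d" src/datasets/features/features.py || die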

diff --git a/sci-libs/datasets/files/datasets-2.11.0-tests.patch b/sci-libs/datasets/files/datasets-2.11.0-tests.patch
deleted file mode 100644
index e105c01bc63b..000000000000
--- a/sci-libs/datasets/files/datasets-2.11.0-tests.patch
+++ /dev/null
@@ -1,242 +0,0 @@
---- a/tests/test_metric_common.py      2023-05-04 18:48:48.550861318 +0200
-+++ b/tests/test_metric_common.py      2023-05-04 18:50:25.787364577 +0200
-@@ -93,6 +93,7 @@
-     INTENSIVE_CALLS_PATCHER = {}
-     metric_name = None
- 
-+    @pytest.mark.skip(reason="disabling, depends on bert_score, bleurt, 
math_equivalence, coval, nltk, faiss, mauve, rouge_score, sacrebleu, sacremoses 
...")
-     def test_load_metric(self, metric_name):
-         doctest.ELLIPSIS_MARKER = "[...]"
-         metric_module = importlib.import_module(
---- a/tests/test_hf_gcp.py     2023-05-04 19:33:31.150825303 +0200
-+++ b/tests/test_hf_gcp.py     2023-05-04 19:40:08.401759538 +0200
-@@ -69,6 +69,7 @@
-             self.assertTrue(os.path.exists(datset_info_path))
- 
- 
-+@pytest.mark.skip(reason="require apache_beam")
- @pytest.mark.integration
- def test_wikipedia_frr(tmp_path_factory):
-     tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
---- a/tests/test_distributed.py        2023-05-04 19:43:09.861275030 +0200
-+++ b/tests/test_distributed.py        2023-05-04 19:44:17.608326722 +0200
-@@ -55,6 +55,7 @@
-     assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
- 
- 
-+@pytest.mark.skip(reason="require distributed torch")
- @pytest.mark.parametrize("streaming", [False, True])
- @require_torch
- @pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
-@@ -76,6 +77,7 @@
-     execute_subprocess_async(cmd, env=os.environ.copy())
- 
- 
-+@pytest.mark.skip(reason="require distributed torch")
- @pytest.mark.parametrize(
-     "nproc_per_node, num_workers",
-     [
---- a/tests/utils.py   2023-05-06 08:43:16.251987543 +0200
-+++ b/tests/utils.py   2023-05-06 08:44:24.467952870 +0200
-@@ -54,8 +54,8 @@
- # Audio
- require_sndfile = pytest.mark.skipif(
-     # On Windows and OS X, soundfile installs sndfile
--    find_spec("soundfile") is None or 
version.parse(importlib_metadata.version("soundfile")) < 
version.parse("0.12.0"),
--    reason="test requires sndfile>=0.12.1: 'pip install 
\"soundfile>=0.12.1\"'; ",
-+    True,
-+    reason="test requires librosa",
- )
- 
- # Beam
---- a/tests/features/test_audio.py     2023-05-06 09:03:58.680108142 +0200
-+++ a/tests/features/test_audio.py     2023-05-06 09:05:50.463407967 +0200
-@@ -57,6 +57,7 @@
-     assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
-     "build_example",
-     [
-@@ -81,6 +82,7 @@
-     assert decoded_example.keys() == {"path", "array", "sampling_rate"}
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
-     "build_example",
-     [
-@@ -148,6 +149,7 @@
-     assert decoded_example["sampling_rate"] == 48000
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
- def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
-     audio_path = str(shared_datadir / "test_audio_16000.pcm")
-@@ -414,6 +417,7 @@
-     assert column[0]["sampling_rate"] == 16000
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
-     "build_data",
-     [
-@@ -438,6 +442,7 @@
-     assert item["audio"].keys() == {"path", "array", "sampling_rate"}
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- def test_dataset_concatenate_audio_features(shared_datadir):
-     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-@@ -451,6 +456,7 @@
-     assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- def test_dataset_concatenate_nested_audio_features(shared_datadir):
-     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-@@ -610,6 +616,7 @@
-     assert isinstance(ds, Dataset)
- 
- 
-+@require_sndfile
- def test_dataset_with_audio_feature_undecoded(shared_datadir):
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-     data = {"audio": [audio_path]}
-@@ -627,6 +634,7 @@
-     assert column[0] == {"path": audio_path, "bytes": None}
- 
- 
-+@require_sndfile
- def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-     data = {"audio": [audio_path]}
-@@ -658,6 +666,7 @@
-         assert column[0] == {"path": audio_path, "bytes": None}
- 
- 
-+@require_sndfile
- def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-     data = {"audio": [audio_path]}
---- a/tests/packaged_modules/test_audiofolder.py       2023-05-06 14:00:39.560876163 +0200
-+++ b/tests/packaged_modules/test_audiofolder.py       2023-05-06 14:01:26.005212423 +0200
-@@ -1,10 +1,8 @@
- import shutil
- import textwrap
- 
--import librosa
- import numpy as np
- import pytest
--import soundfile as sf
- 
- from datasets import Audio, ClassLabel, Features, Value
- from datasets.data_files import DataFilesDict, get_data_patterns_locally
-@@ -192,8 +190,11 @@
-     return data_files_with_two_splits_and_metadata
- 
- 
-+@pytest.mark.skip(reason="require soundfile")
- @pytest.fixture
- def data_files_with_zip_archives(tmp_path, audio_file):
-+    import soundfile as sf
-+    import librosa
-     data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives"
-     data_dir.mkdir(parents=True, exist_ok=True)
-     archive_dir = data_dir / "archive"
---- a/tests/test_arrow_dataset.py      2023-05-06 15:36:11.080459079 +0200
-+++ b/tests/test_arrow_dataset.py      2023-05-06 15:38:07.452828528 +0200
-@@ -3928,6 +3928,7 @@
-                 )
-                 self.assertDictEqual(features_after_cast, dset.features)
- 
-+    @pytest.mark.skip(reason="require soundfile")
-     def test_task_automatic_speech_recognition(self):
-         # Include a dummy extra column `dummy` to test we drop it correctly
-         features_before_cast = Features(
---- a/tests/test_streaming_download_manager.py 2023-05-15 23:06:59.146379973 +0200
-+++ b/tests/test_streaming_download_manager.py 2023-05-15 23:11:32.441363757 +0200
-@@ -217,6 +217,7 @@
-     assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix())
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, exists",
-     [
-@@ -299,6 +300,7 @@
-         assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True)
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, expected_paths",
-     [
-@@ -328,6 +330,7 @@
-         xlistdir(root_url, use_auth_token=hf_token)
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, isdir",
-     [
-@@ -355,6 +358,7 @@
-         xisdir(root_url, use_auth_token=hf_token)
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, isfile",
-     [
-@@ -378,6 +382,7 @@
-     assert xisfile(root_url + "qwertyuiop", use_auth_token=hf_token) is False
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, size",
-     [
-@@ -402,6 +407,7 @@
-         xgetsize(root_url + "qwertyuiop", use_auth_token=hf_token)
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, expected_paths",
-     [
-@@ -444,6 +450,7 @@
-     assert len(xglob("zip://qwertyuiop/*::" + root_url, use_auth_token=hf_token)) == 0
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, expected_outputs",
-     [
-@@ -533,6 +540,7 @@
-     def test_xpath_as_posix(self, input_path, expected_path):
-         assert xPath(input_path).as_posix() == expected_path
- 
-+    @pytest.mark.skip(reason="not working in sandbox")
-     @pytest.mark.parametrize(
-         "input_path, exists",
-         [
-@@ -548,6 +556,7 @@
-             (tmp_path / "file.txt").touch()
-         assert xexists(input_path) is exists
- 
-+    @pytest.mark.skip(reason="not working in sandbox")
-     @pytest.mark.parametrize(
-         "input_path, pattern, expected_paths",
-         [
-@@ -586,6 +595,7 @@
-         output_paths = sorted(xPath(input_path).glob(pattern))
-         assert output_paths == expected_paths
- 
-+    @pytest.mark.skip(reason="not working in sandbox")
-     @pytest.mark.parametrize(
-         "input_path, pattern, expected_paths",
-         [

diff --git a/sci-libs/datasets/files/datasets-2.12.0-tests.patch b/sci-libs/datasets/files/datasets-2.12.0-tests.patch
deleted file mode 100644
index 6be3156bb70d..000000000000
--- a/sci-libs/datasets/files/datasets-2.12.0-tests.patch
+++ /dev/null
@@ -1,242 +0,0 @@
---- a/tests/test_metric_common.py      2023-05-04 18:48:48.550861318 +0200
-+++ b/tests/test_metric_common.py      2023-05-04 18:50:25.787364577 +0200
-@@ -93,6 +93,7 @@
-     INTENSIVE_CALLS_PATCHER = {}
-     metric_name = None
- 
-+    @pytest.mark.skip(reason="disabling, depends on bert_score, bleurt, 
math_equivalence, coval, nltk, faiss, mauve, rouge_score, sacrebleu, sacremoses 
...")
-     @pytest.mark.filterwarnings("ignore:metric_module_factory is 
deprecated:FutureWarning")
-     @pytest.mark.filterwarnings("ignore:load_metric is 
deprecated:FutureWarning")
-     def test_load_metric(self, metric_name):
---- a/tests/test_hf_gcp.py     2023-05-04 19:33:31.150825303 +0200
-+++ b/tests/test_hf_gcp.py     2023-05-04 19:40:08.401759538 +0200
-@@ -75,6 +75,7 @@
-             self.assertTrue(os.path.exists(datset_info_path))
- 
- 
-+@pytest.mark.skip(reason="require apache_beam")
- @pytest.mark.integration
- def test_as_dataset_from_hf_gcs(tmp_path_factory):
-     tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
---- a/tests/test_distributed.py        2023-05-04 19:43:09.861275030 +0200
-+++ b/tests/test_distributed.py        2023-05-04 19:44:17.608326722 +0200
-@@ -74,6 +74,7 @@
-         split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
- 
- 
-+@pytest.mark.skip(reason="require distributed torch")
- @pytest.mark.parametrize("streaming", [False, True])
- @require_torch
- @pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
-@@ -95,6 +96,7 @@
-     execute_subprocess_async(cmd, env=os.environ.copy())
- 
- 
-+@pytest.mark.skip(reason="require distributed torch")
- @pytest.mark.parametrize(
-     "nproc_per_node, num_workers",
-     [
---- a/tests/utils.py   2023-05-06 08:43:16.251987543 +0200
-+++ b/tests/utils.py   2023-05-06 08:44:24.467952870 +0200
-@@ -55,8 +55,8 @@
- # Audio
- require_sndfile = pytest.mark.skipif(
-     # On Windows and OS X, soundfile installs sndfile
--    find_spec("soundfile") is None or 
version.parse(importlib_metadata.version("soundfile")) < 
version.parse("0.12.0"),
--    reason="test requires sndfile>=0.12.1: 'pip install 
\"soundfile>=0.12.1\"'; ",
-+    True,
-+    reason="test requires librosa",
- )
- 
- # Beam
---- a/tests/features/test_audio.py     2023-05-06 09:03:58.680108142 +0200
-+++ a/tests/features/test_audio.py     2023-05-06 09:05:50.463407967 +0200
-@@ -57,6 +57,7 @@
-     assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
-     "build_example",
-     [
-@@ -81,6 +82,7 @@
-     assert decoded_example.keys() == {"path", "array", "sampling_rate"}
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
-     "build_example",
-     [
-@@ -148,6 +149,7 @@
-     assert decoded_example["sampling_rate"] == 48000
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
- def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
-     audio_path = str(shared_datadir / "test_audio_16000.pcm")
-@@ -414,6 +417,7 @@
-     assert column[0]["sampling_rate"] == 16000
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
-     "build_data",
-     [
-@@ -438,6 +442,7 @@
-     assert item["audio"].keys() == {"path", "array", "sampling_rate"}
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- def test_dataset_concatenate_audio_features(shared_datadir):
-     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-@@ -451,6 +456,7 @@
-     assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- def test_dataset_concatenate_nested_audio_features(shared_datadir):
-     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-@@ -610,6 +616,7 @@
-     assert isinstance(ds, Dataset)
- 
- 
-+@require_sndfile
- def test_dataset_with_audio_feature_undecoded(shared_datadir):
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-     data = {"audio": [audio_path]}
-@@ -627,6 +634,7 @@
-     assert column[0] == {"path": audio_path, "bytes": None}
- 
- 
-+@require_sndfile
- def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-     data = {"audio": [audio_path]}
-@@ -658,6 +666,7 @@
-         assert column[0] == {"path": audio_path, "bytes": None}
- 
- 
-+@require_sndfile
- def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-     data = {"audio": [audio_path]}
---- a/tests/packaged_modules/test_audiofolder.py       2023-05-06 14:00:39.560876163 +0200
-+++ b/tests/packaged_modules/test_audiofolder.py       2023-05-06 14:01:26.005212423 +0200
-@@ -1,10 +1,8 @@
- import shutil
- import textwrap
- 
--import librosa
- import numpy as np
- import pytest
--import soundfile as sf
- 
- from datasets import Audio, ClassLabel, Features, Value
- from datasets.data_files import DataFilesDict, get_data_patterns_locally
-@@ -192,8 +190,11 @@
-     return data_files_with_two_splits_and_metadata
- 
- 
-+@pytest.mark.skip(reason="require soundfile")
- @pytest.fixture
- def data_files_with_zip_archives(tmp_path, audio_file):
-+    import soundfile as sf
-+    import librosa
-     data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives"
-     data_dir.mkdir(parents=True, exist_ok=True)
-     archive_dir = data_dir / "archive"
---- a/tests/test_arrow_dataset.py      2023-05-06 15:36:11.080459079 +0200
-+++ b/tests/test_arrow_dataset.py      2023-05-06 15:38:07.452828528 +0200
-@@ -3983,6 +3983,7 @@
-                 )
-                 self.assertDictEqual(features_after_cast, dset.features)
- 
-+    @pytest.mark.skip(reason="require soundfile")
-     def test_task_automatic_speech_recognition(self):
-         # Include a dummy extra column `dummy` to test we drop it correctly
-         features_before_cast = Features(
---- a/tests/test_streaming_download_manager.py 2023-05-15 23:06:59.146379973 +0200
-+++ b/tests/test_streaming_download_manager.py 2023-05-15 23:11:32.441363757 +0200
-@@ -217,6 +217,7 @@
-     assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix())
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, exists",
-     [
-@@ -299,6 +300,7 @@
-         assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True)
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, expected_paths",
-     [
-@@ -328,6 +330,7 @@
-         xlistdir(root_url, use_auth_token=hf_token)
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, isdir",
-     [
-@@ -355,6 +358,7 @@
-         xisdir(root_url, use_auth_token=hf_token)
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, isfile",
-     [
-@@ -378,6 +382,7 @@
-     assert xisfile(root_url + "qwertyuiop", use_auth_token=hf_token) is False
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, size",
-     [
-@@ -402,6 +407,7 @@
-         xgetsize(root_url + "qwertyuiop", use_auth_token=hf_token)
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, expected_paths",
-     [
-@@ -444,6 +450,7 @@
-     assert len(xglob("zip://qwertyuiop/*::" + root_url, use_auth_token=hf_token)) == 0
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, expected_outputs",
-     [
-@@ -533,6 +540,7 @@
-     def test_xpath_as_posix(self, input_path, expected_path):
-         assert xPath(input_path).as_posix() == expected_path
- 
-+    @pytest.mark.skip(reason="not working in sandbox")
-     @pytest.mark.parametrize(
-         "input_path, exists",
-         [
-@@ -548,6 +556,7 @@
-             (tmp_path / "file.txt").touch()
-         assert xexists(input_path) is exists
- 
-+    @pytest.mark.skip(reason="not working in sandbox")
-     @pytest.mark.parametrize(
-         "input_path, pattern, expected_paths",
-         [
-@@ -586,6 +595,7 @@
-         output_paths = sorted(xPath(input_path).glob(pattern))
-         assert output_paths == expected_paths
- 
-+    @pytest.mark.skip(reason="not working in sandbox")
-     @pytest.mark.parametrize(
-         "input_path, pattern, expected_paths",
-         [
