Your message dated Wed, 26 Feb 2025 17:23:49 +0000
with message-id <e1tnl8p-00ewil...@fasolo.debian.org>
and subject line Bug#1071777: fixed in python-array-api-compat 1.10-1
has caused the Debian Bug report #1071777,
regarding python-array-api-compat: FTBFS: ImportError: Failed to load PyTorch C 
extensions
to be marked as done.

This means that you claim that the problem has been dealt with.
If this is not the case it is now your responsibility to reopen the
Bug report if necessary, and/or fix the problem forthwith.

(NB: If you are a system administrator and have no idea what this
message is talking about, this may indicate a serious mail system
misconfiguration somewhere. Please contact ow...@bugs.debian.org
immediately.)


-- 
1071777: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1071777
Debian Bug Tracking System
Contact ow...@bugs.debian.org with problems
--- Begin Message ---
Package: src:python-array-api-compat
Version: 1.4-1
Severity: serious
Tags: ftbfs

Dear maintainer:

During a rebuild of all packages in unstable, your package failed to build:

--------------------------------------------------------------------------------
[...]
 debian/rules binary
dh binary --with python3 --buildsystem=pybuild
   dh_update_autotools_config -O--buildsystem=pybuild
   dh_autoreconf -O--buildsystem=pybuild
   dh_auto_configure -O--buildsystem=pybuild
I: pybuild base:311: python3.12 setup.py config
running config
I: pybuild base:311: python3.11 setup.py config
running config
   dh_auto_build -O--buildsystem=pybuild
I: pybuild base:311: /usr/bin/python3.12 setup.py build
running build
running build_py
creating 
/<<PKGBUILDDIR>>/.pybuild/cpython3_3.12_array_api_compat/build/array_api_compat

[... snipped ...]

    # 
https://github.com/pytorch/multipy/blob/d60f34ad38c371e441fe7ffdb77a3c3dda5a5d19/multipy/runtime/interpreter/interpreter_impl.cpp#L134-L137
    def _running_with_deploy():
        return sys.modules.get("torch._meta_registrations", None) is object
from ._utils import _import_dotted_name, classproperty
    from ._utils_internal import get_file_path, 
prepare_multiprocessing_environment, \
        USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
# TODO(torch_deploy) figure out how to freeze version.py in fbcode build
    if _running_with_deploy():
        __version__ = "torch-deploy-1.8"
    else:
        from .torch_version import __version__ as __version__
from typing import Any, Callable, Dict, Optional, Set, Tuple, Type, TYPE_CHECKING, Union, List
    import builtins
__all__ = [
        'typename', 'is_tensor', 'is_storage',
        'set_default_tensor_type', 'set_default_device',
        'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
        'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
        'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
        'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
        'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
        'TypedStorage', 'UntypedStorage',
        'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
        'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
        'lobpcg', 'use_deterministic_algorithms',
        'are_deterministic_algorithms_enabled',
        'is_deterministic_algorithms_warn_only_enabled',
        'set_deterministic_debug_mode', 'get_deterministic_debug_mode',
        'set_float32_matmul_precision', 'get_float32_matmul_precision',
        'set_warn_always', 'is_warn_always_enabled', 'SymInt', 'SymFloat',
        'SymBool', 'sym_not',
        'sym_int', 'sym_float', 'sym_max', 'sym_min', 'compile', 'vmap',
        'export',
    ]
################################################################################
    # Load the extension module
    
################################################################################
if sys.platform == 'win32':
        pfiles_path = os.getenv('ProgramFiles', 'C:\\Program Files')
        py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin')
        th_dll_path = os.path.join(os.path.dirname(__file__), 'lib')
# When users create a virtualenv that inherits the base environment,
        # we will need to add the corresponding library directory into
        # DLL search directories. Otherwise, it will rely on `PATH` which
        # is dependent on user settings.
        if sys.exec_prefix != sys.base_exec_prefix:
            base_py_dll_path = os.path.join(sys.base_exec_prefix, 'Library', 
'bin')
        else:
            base_py_dll_path = ''
dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, base_py_dll_path])) if all(not os.path.exists(os.path.join(p, 'nvToolsExt64_1.dll')) for p in dll_paths):
            nvtoolsext_dll_path = os.path.join(
                os.getenv('NVTOOLSEXT_PATH', os.path.join(pfiles_path, 'NVIDIA 
Corporation', 'NvToolsExt')), 'bin', 'x64')
        else:
            nvtoolsext_dll_path = ''
from .version import cuda as cuda_version
        import glob
        if cuda_version and all(not glob.glob(os.path.join(p, 'cudart64*.dll')) 
for p in dll_paths):
            cuda_version_1 = cuda_version.replace('.', '_')
            cuda_path_var = 'CUDA_PATH_V' + cuda_version_1
            default_path = os.path.join(pfiles_path, 'NVIDIA GPU Computing 
Toolkit', 'CUDA', 'v' + cuda_version)
            cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 
'bin')
        else:
            cuda_path = ''
dll_paths.extend(filter(os.path.exists, [nvtoolsext_dll_path, cuda_path])) kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
        with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
        prev_error_mode = kernel32.SetErrorMode(0x0001)
kernel32.LoadLibraryW.restype = ctypes.c_void_p
        if with_load_library_flags:
            kernel32.LoadLibraryExW.restype = ctypes.c_void_p
for dll_path in dll_paths:
            os.add_dll_directory(dll_path)
try:
            ctypes.CDLL('vcruntime140.dll')
            ctypes.CDLL('msvcp140.dll')
            ctypes.CDLL('vcruntime140_1.dll')
        except OSError:
            print('''Microsoft Visual C++ Redistributable is not installed, 
this may lead to the DLL load failure.
                     It can be downloaded at 
https://aka.ms/vs/16/release/vc_redist.x64.exe''')
dlls = glob.glob(os.path.join(th_dll_path, '*.dll'))
        path_patched = False
        for dll in dlls:
            is_loaded = False
            if with_load_library_flags:
                res = kernel32.LoadLibraryExW(dll, None, 0x00001100)
                last_error = ctypes.get_last_error()
                if res is None and last_error != 126:
                    err = ctypes.WinError(last_error)
                    err.strerror += f' Error loading "{dll}" or one of its 
dependencies.'
                    raise err
                elif res is not None:
                    is_loaded = True
            if not is_loaded:
                if not path_patched:
                    os.environ['PATH'] = ';'.join(dll_paths + 
[os.environ['PATH']])
                    path_patched = True
                res = kernel32.LoadLibraryW(dll)
                if res is None:
                    err = ctypes.WinError(ctypes.get_last_error())
                    err.strerror += f' Error loading "{dll}" or one of its 
dependencies.'
                    raise err
kernel32.SetErrorMode(prev_error_mode) def _preload_cuda_deps(lib_folder, lib_name):
        """Preloads cuda deps if they could not be found otherwise."""
        # Should only be called on Linux if default path resolution have failed
        assert platform.system() == 'Linux', 'Should only be called on Linux'
        import glob
        lib_path = None
        for path in sys.path:
            nvidia_path = os.path.join(path, 'nvidia')
            if not os.path.exists(nvidia_path):
                continue
            candidate_lib_paths = glob.glob(os.path.join(nvidia_path, 
lib_folder, 'lib', lib_name))
            if candidate_lib_paths and not lib_path:
                lib_path = candidate_lib_paths[0]
            if lib_path:
                break
        if not lib_path:
            raise ValueError(f"{lib_name} not found in the system path 
{sys.path}")
        ctypes.CDLL(lib_path)
# See Note [Global dependencies]
    def _load_global_deps() -> None:
        if _running_with_deploy() or platform.system() == 'Windows':
            return
lib_name = 'libtorch_global_deps' + ('.dylib' if platform.system() == 'Darwin' else '.so')
        here = os.path.abspath(__file__)
        lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name)
try:
            ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
        except OSError as err:
            # Can only happen for wheel with cuda libs as PYPI deps
            # As PyTorch is not purelib, but nvidia-*-cu12 is
            cuda_libs: Dict[str, str] = {
                'cublas': 'libcublas.so.*[0-9]',
                'cudnn': 'libcudnn.so.*[0-9]',
                'cuda_nvrtc': 'libnvrtc.so.*[0-9]',
                'cuda_runtime': 'libcudart.so.*[0-9]',
                'cuda_cupti': 'libcupti.so.*[0-9]',
                'cufft': 'libcufft.so.*[0-9]',
                'curand': 'libcurand.so.*[0-9]',
                'cusolver': 'libcusolver.so.*[0-9]',
                'cusparse': 'libcusparse.so.*[0-9]',
                'nccl': 'libnccl.so.*[0-9]',
                'nvtx': 'libnvToolsExt.so.*[0-9]',
            }
            is_cuda_lib_err = [lib for lib in cuda_libs.values() 
if(lib.split('.')[0] in err.args[0])]
            if not is_cuda_lib_err:
                raise err
            for lib_folder, lib_name in cuda_libs.items():
                _preload_cuda_deps(lib_folder, lib_name)
            ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
            (_running_with_deploy() or platform.system() != 'Windows'):
        # Do it the hard way.  You might want to load libtorch with RTLD_GLOBAL 
in a
        # few circumstances:
        #
        #   1. You're in a build environment (e.g., fbcode) where
        #      libtorch_global_deps is not available, but you still need
        #      to get mkl to link in with RTLD_GLOBAL or it will just
        #      not work.
        #
        #   2. You're trying to run PyTorch under UBSAN and you need
        #      to ensure that only one copy of libtorch is loaded, so
        #      vptr checks work properly
        #
        # If you're using this setting, you must verify that all the libraries
        # you load consistently use the same libstdc++, or you may have
        # mysterious segfaults.
        #
        old_flags = sys.getdlopenflags()
        sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY)
        from torch._C import *  # noqa: F403
        sys.setdlopenflags(old_flags)
        del old_flags
else:
        # Easy way.  You want this most of the time, because it will prevent
        # C++ symbols from libtorch clobbering C++ symbols from other
        # libraries, leading to mysterious segfaults.
        #
        # If building in an environment where libtorch_global_deps isn't 
available
        # like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you 
will
        # want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
        #
        # See Note [Global dependencies]
        if USE_GLOBAL_DEPS:
            _load_global_deps()
        from torch._C import *  # noqa: F403
# Appease the type checker; ordinarily this binding is inserted by the
    # torch._C module initialization code in C
    if TYPE_CHECKING:
        import torch._C as _C
class SymInt:
        """
        Like an int (including magic methods), but redirects all operations on 
the
        wrapped node. This is used in particular to symbolically record 
operations
        in the symbolic shape workflow.
        """
def __init__(self, node):
            # This field MUST be named node; C++ binding code assumes that this
            # class has a field named node that stores SymNode
            self.node = node
def __bool__(self):
            return builtins.bool(self != 0)
def __int__(self):
            return self.node.int_()
def __index__(self):
            return self.node.int_()
# Magic methods installed by torch.fx.experimental.symbolic_shapes def __eq__(self, other: object) -> builtins.bool:
            raise AssertionError("type stub not overridden")
def __lt__(self, other) -> builtins.bool:
            raise AssertionError("type stub not overridden")
def __gt__(self, other) -> builtins.bool:
            raise AssertionError("type stub not overridden")
def __le__(self, other) -> builtins.bool:
            raise AssertionError("type stub not overridden")
def __ge__(self, other) -> builtins.bool:
            raise AssertionError("type stub not overridden")
def __sym_max__(self, other):
            raise AssertionError("type stub not overridden")
def __sym_min__(self, other):
            raise AssertionError("type stub not overridden")
def __sym_float__(self):
            raise AssertionError("type stub not overridden")
def __repr__(self):
            return str(self.node)
class SymFloat:
        """
        Like a float (including magic methods), but redirects all operations 
on the
        wrapped node. This is used in particular to symbolically record 
operations
        in the symbolic shape workflow.
        """
def __init__(self, node):
            # This field MUST be named node; C++ binding code assumes that this
            # class has a field named node that stores SymNode
            self.node = node
def __bool__(self):
            return self.node.bool_()
# Magic methods installed by torch.fx.experimental.symbolic_shapes def __eq__(self, other: object) -> builtins.bool:
            raise AssertionError("type stub not overridden")
def __lt__(self, other) -> builtins.bool:
            raise AssertionError("type stub not overridden")
def __gt__(self, other) -> builtins.bool:
            raise AssertionError("type stub not overridden")
def __le__(self, other) -> builtins.bool:
            raise AssertionError("type stub not overridden")
def __ge__(self, other) -> builtins.bool:
            raise AssertionError("type stub not overridden")
def __sym_max__(self, other):
            raise AssertionError("type stub not overridden")
def __sym_min__(self, other):
            raise AssertionError("type stub not overridden")
def __sym_int__(self):
            raise AssertionError("type stub not overridden")
def __repr__(self):
            return self.node.str()
class SymBool:
        """
        Like a bool (including magic methods), but redirects all operations on 
the
        wrapped node. This is used in particular to symbolically record 
operations
        in the symbolic shape workflow.
Unlike regular bools, regular boolean operators will force extra guards instead
        of symbolically evaluate.  Use the bitwise operators instead to handle 
this.
        """
def __init__(self, node):
            # This field MUST be named node; C++ binding code assumes that this
            # class has a field named node that stores SymNode
            self.node = node
def __bool__(self):
            return self.node.bool_()
def __int__(self):
            return builtins.int(self.node.bool_())
# Magic methods installed by torch.fx.experimental.symbolic_shapes
        def __and__(self, other) -> "SymBool":
            raise AssertionError("type stub not overridden")
def __or__(self, other) -> "SymBool":
            raise AssertionError("type stub not overridden")
# We very carefully define __sym_not__, and not a number of other
        # plausible alternatives:
        #
        #   - We do not override __not__ because this is not a real magic
        #     method; you cannot override the meaning of the not builtin in
        #     Python.  We use the name 'sym_not' to clarify that in user code 
you
        #     cannot use the builtin not or operator.not_ or operator.__not__ 
and
        #     hit this magic method; you must use our custom sym_not operator.
        #
        #   - We do not override the __invert__ method because SymBool is
        #     meant to be usable in situations where bool is expected.  However,
        #     bitwise negation ~a does the wrong thing with booleans (because
        #     bool is a subclass of int, so ~1 = -2 which is not falseish.)
        #     This would be a giant footgun, so we get around it by defining
        #     our own operator.  Note that bitwise and/or do the right thing,
        #     so we reuse the conventional operators there for readability.
        #
        def __sym_not__(self) -> "SymBool":
            raise AssertionError("type stub not overridden")
def __repr__(self):
            return self.node.str()
def sym_not(a):
        r""" SymInt-aware utility for logical negation.
Args:
            a (SymBool or bool): Object to negate
        """
        if hasattr(a, '__sym_not__'):
            return a.__sym_not__()
        return not a
def sym_float(a):
        r""" SymInt-aware utility for float casting.
Args:
            a (SymInt, SymFloat, or object): Object to cast
        """
        if isinstance(a, SymFloat):
            return a
        elif hasattr(a, '__sym_float__'):
            return a.__sym_float__()
        return py_float(a)  # type: ignore[operator]
def sym_int(a):
        r""" SymInt-aware utility for int casting.
Args:
            a (SymInt, SymFloat, or object): Object to cast
        """
        if isinstance(a, SymInt):
            return a
        elif isinstance(a, SymFloat):
            return math.floor(a) if a >= 0 else math.ceil(a)  # type: 
ignore[arg-type, call-overload]
        return py_int(a)  # type: ignore[operator]
def sym_max(a, b):
        """ SymInt-aware utility for max()."""
        if isinstance(a, (SymInt, SymFloat)):
            return a.__sym_max__(b)
        elif isinstance(b, (SymInt, SymFloat)):
            # NB: If you actually care about preserving output type exactly
            # if you do something like max(0, 0.0), it is NOT sound to treat
            # min/max as commutative
            return b.__sym_max__(a)
        return builtins.max(a, b)  # type: ignore[operator]
def sym_min(a, b):
        """ SymInt-aware utility for min()."""
        if isinstance(a, (SymInt, SymFloat)):
            return a.__sym_min__(b)
        elif isinstance(b, (SymInt, SymFloat)):
            return b.__sym_min__(a)
        return builtins.min(a, b)  # type: ignore[operator]
# Check to see if we can load C extensions, and if not provide some guidance
    # on what the problem might be.
    try:
        # _initExtension is chosen (arbitrarily) as a sentinel.
        from torch._C import _initExtension
    except ImportError:
        import torch._C as _C_for_compiled_check
# The __file__ check only works for Python 3.7 and above.
        if _C_for_compiled_check.__file__ is None:
          raise ImportError(textwrap.dedent('''
                Failed to load PyTorch C extensions:
                    It appears that PyTorch has loaded the `torch/_C` folder
                    of the PyTorch repository rather than the C extensions which
                    are expected in the `torch._C` namespace. This can occur 
when
                    using the `install` workflow. e.g.
                        $ python setup.py install && python -c "import torch"
This error can generally be solved using the `develop` workflow
                        $ python setup.py develop && python -c "import torch"  
# This should succeed
                    or by running Python from a different directory.
                ''').strip()) from None
E           ImportError: Failed to load PyTorch C extensions:
E               It appears that PyTorch has loaded the `torch/_C` folder
E               of the PyTorch repository rather than the C extensions which
E               are expected in the `torch._C` namespace. This can occur when
E               using the `install` workflow. e.g.
E                   $ python setup.py install && python -c "import torch"
E
E               This error can generally be solved using the `develop` workflow
E                   $ python setup.py develop && python -c "import torch"  # 
This should succeed
E               or by running Python from a different directory.

/usr/lib/python3/dist-packages/torch/__init__.py:451: ImportError
=========================== short test summary info ============================
FAILED tests/test_array_namespace.py::test_array_namespace[None-torch] - Impo...
FAILED tests/test_array_namespace.py::test_array_namespace[2021.12-torch] - I...
FAILED tests/test_array_namespace.py::test_array_namespace_errors - ImportErr...
FAILED tests/test_common.py::test_to_device_host[torch] - ImportError: Failed...
FAILED tests/test_isdtype.py::test_isdtype_spec_dtypes[torch] - ImportError: ...
FAILED tests/test_isdtype.py::test_isdtype_additional_dtypes[float16-torch]
FAILED tests/test_isdtype.py::test_isdtype_additional_dtypes[float128-torch]
FAILED tests/test_isdtype.py::test_isdtype_additional_dtypes[complex256-torch]
FAILED tests/test_isdtype.py::test_isdtype_additional_dtypes[bfloat16-torch]
=================== 9 failed, 10 passed, 8 skipped in 0.48s ====================
E: pybuild pybuild:389: test: plugin distutils failed with: exit code=1: cd 
/<<PKGBUILDDIR>>/.pybuild/cpython3_3.12_array_api_compat/build; python3.12 -m 
pytest --ignore tests/test_vendoring.py
I: pybuild base:311: cd 
/<<PKGBUILDDIR>>/.pybuild/cpython3_3.11_array_api_compat/build; python3.11 -m 
pytest --ignore tests/test_vendoring.py
============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.1.2, pluggy-1.5.0
rootdir: /<<PKGBUILDDIR>>
plugins: hypothesis-6.102.4
collected 27 items

tests/test_array_namespace.py s..s.....                                  [ 33%]
tests/test_common.py s..                                                 [ 44%]
tests/test_isdtype.py s..s..s..s..s..                                    [100%]

======================== 19 passed, 8 skipped in 1.52s =========================
dh_auto_test: error: pybuild --test --test-pytest -i python{version} -p "3.12 
3.11" returned exit code 13
make: *** [debian/rules:9: binary] Error 25
dpkg-buildpackage: error: debian/rules binary subprocess returned exit status 2
--------------------------------------------------------------------------------

The above is just how the build ends and not necessarily the most relevant part.
If required, the full build log is available here:

https://people.debian.org/~sanvila/build-logs/202405/

About the archive rebuild: The build was made on virtual machines
of type m6a.large and r6a.large from AWS, using sbuild and a
reduced chroot with only build-essential packages.

If you could not reproduce the bug, please contact me privately, as I
am willing to provide ssh access to a virtual machine where the bug is
fully reproducible.

If this is really a bug in one of the build-depends, please use
reassign and affects, so that this is still visible in the BTS web
page for this package.

Thanks.

--- End Message ---
--- Begin Message ---
Source: python-array-api-compat
Source-Version: 1.10-1
Done: Shengqi Chen <ha...@debian.org>

We believe that the bug you reported is fixed in the latest version of
python-array-api-compat, which is due to be installed in the Debian FTP archive.

A summary of the changes between this version and the previous one is
attached.

Thank you for reporting the bug, which will now be closed.  If you
have further comments please address them to 1071...@bugs.debian.org,
and the maintainer will reopen the bug report if appropriate.

Debian distribution maintenance software
pp.
Shengqi Chen <ha...@debian.org> (supplier of updated python-array-api-compat 
package)

(This message was generated automatically at their request; if you
believe that there is a problem with it please contact the archive
administrators by mailing ftpmas...@ftp-master.debian.org)


-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512

Format: 1.8
Date: Thu, 27 Feb 2025 00:19:56 +0800
Source: python-array-api-compat
Architecture: source
Version: 1.10-1
Distribution: unstable
Urgency: medium
Maintainer: Debian Python Team <team+pyt...@tracker.debian.org>
Changed-By: Shengqi Chen <ha...@debian.org>
Closes: 1071777
Changes:
 python-array-api-compat (1.10-1) unstable; urgency=medium
 .
   [ Andreas Tille ]
   * Testsuite: autopkgtest-pkg-python (routine-update)
   * Build-Depends: s/dh-python/dh-sequence-python3/ (routine-update)
   * Set upstream metadata fields: Repository.
 .
   [ Shengqi Chen ]
   * New upstream version 1.10
   * d/control:
    + build only for default Python version (closes: #1071777)
    + bump std-ver to 4.7.1 (no changes required)
    + add python3-dask as B-D for dh_auto_test
    + change uploader to me (thanks @tille for his work!)
   * d/patches: add patch to disable tests depending on libraries not in
     archive (e.g. jax, cupy).
Checksums-Sha1:
 32a1e301fb3e74b20b684b426f9076db97d3f69e 2242 
python-array-api-compat_1.10-1.dsc
 8e3362e9e4f6593f273a1e75c1c2392359696985 76976 
python-array-api-compat_1.10.orig.tar.gz
 1b31417922b28af738011e9cc83ac450706243f7 3724 
python-array-api-compat_1.10-1.debian.tar.xz
 370ae822835ef724d5ff8d7eacbd987b8e89731a 9778 
python-array-api-compat_1.10-1_amd64.buildinfo
Checksums-Sha256:
 635916254b0abfda05296bdc46a1a962eb9a54319cfe5c2cf45a50f8d8ba574e 2242 
python-array-api-compat_1.10-1.dsc
 03e7c68b9bfc2fbf54cb8de6ce52f000c1b3229d52e8be4e2150daf3bcd52a5d 76976 
python-array-api-compat_1.10.orig.tar.gz
 158215b480b0efc3f689e7fd7487dd9b2fe495de22783f9d131d1bf5ad2916cc 3724 
python-array-api-compat_1.10-1.debian.tar.xz
 a4b806992df562da82e5c879799a608ae1e4a3f79405ce7c7a561905645b5a47 9778 
python-array-api-compat_1.10-1_amd64.buildinfo
Files:
 f87b721808ef75cfac787861f1ba1869 2242 python optional 
python-array-api-compat_1.10-1.dsc
 abf9896a16777a2eaa0213d568254b6b 76976 python optional 
python-array-api-compat_1.10.orig.tar.gz
 7768784c0e0c37697b1e8e67859c5f2d 3724 python optional 
python-array-api-compat_1.10-1.debian.tar.xz
 4507844def798ec8e2fe3d0220747d27 9778 python optional 
python-array-api-compat_1.10-1_amd64.buildinfo

-----BEGIN PGP SIGNATURE-----

iQIzBAEBCgAdFiEE+Fg++qmpHzqjSzySzhGnsHs6uUYFAme/QckACgkQzhGnsHs6
uUauBg//aAuaZ36auIfd5gS98dHUloqm/rmb7CII2pCDUH5bo0a4T0XSv8+azEkq
6N9EXhAHcwrhg5/58AddcwM9Bk0C7Ldo0jvq84jfLBkxyM5oYQJruzWwkt9Hujbp
QL4/BPTlr/+XS4Oyzqb3oIgwCZtlPM1UJ/GxsasWL2gFCPDtSUDSc2MnlgbkOcI8
UDPnS80mQt/KEaflwLbwcCXceMH7Ng6UzHOTUnt4k6ZMnvSRmZIWfjTKmDBotMt6
QMfPKC1THRAoEUmy5RWzVz6lA4z2yP7RqR8/6Jh/1HfR1lFmjXq9ozZg465mGNV1
N6UGpPBmaWStzKzZa5LGU4B1+0BWjZd7J4ChBdYCfbvFnHTHvT7Tkd4sh/Ge/F5w
Pc2E+2ITVi6srOCSPdLpJgX76GD9lIBJ21/DrFPIr9KCLnGi45u883QIfKr47704
3bKAlMj0FVBT2LMgvxtslUPEfwk1QHlLxoEDhmF7q/bduBKnilAGHuWJ06jVR54d
9ZeuQCeCoI2QxmaaY6E8sBqliIKixbE7MUZBfn/3oRzRUhXJJSDC1KUCM5dQMMnV
e7KtfcvvBfsio+Iulvo0rrEP1pdYGSPGydo1A/bxR3ofOnXI8b3kiRHjCCtVcCD4
t8oIg5kAgNmdfdE29x6hsVGayYOj746lHKgN7O11IPQr+/AUDtU=
=YBdd
-----END PGP SIGNATURE-----

Attachment: pgpudDYN892hl.pgp
Description: PGP signature


--- End Message ---

Reply via email to