"""Test the numpy pickler as a replacement of the standard pickler.""" import copy import os import random import re import io import sys import warnings import gzip import zlib import bz2 import pickle import socket from contextlib import closing import mmap from pathlib import Path try: import lzma except ImportError: lzma = None import pytest from joblib.test.common import np, with_numpy, with_lz4, without_lz4 from joblib.test.common import with_memory_profiler, memory_used from joblib.testing import parametrize, raises, warns # numpy_pickle is not a drop-in replacement of pickle, as it takes # filenames instead of open files as arguments. from joblib import numpy_pickle, register_compressor from joblib.test import data from joblib.numpy_pickle_utils import _IO_BUFFER_SIZE from joblib.numpy_pickle_utils import _detect_compressor from joblib.numpy_pickle_utils import _is_numpy_array_byte_order_mismatch from joblib.numpy_pickle_utils import _ensure_native_byte_order from joblib.compressor import (_COMPRESSORS, _LZ4_PREFIX, CompressorWrapper, LZ4_NOT_INSTALLED_ERROR, BinaryZlibFile) ############################################################################### # Define a list of standard types. # Borrowed from dill, initial author: Micheal McKerns: # http://dev.danse.us/trac/pathos/browser/dill/dill_test2.py typelist = [] # testing types _none = None typelist.append(_none) _type = type typelist.append(_type) _bool = bool(1) typelist.append(_bool) _int = int(1) typelist.append(_int) _float = float(1) typelist.append(_float) _complex = complex(1) typelist.append(_complex) _string = str(1) typelist.append(_string) _tuple = () typelist.append(_tuple) _list = [] typelist.append(_list) _dict = {} typelist.append(_dict) _builtin = len typelist.append(_builtin) def _function(x): yield x class _class: def _method(self): pass class _newclass(object): def _method(self): pass typelist.append(_function) typelist.append(_class) typelist.append(_newclass) # _instance = _class() typelist.append(_instance) _object = _newclass() typelist.append(_object) # ############################################################################### # Tests @parametrize('compress', [0, 1]) @parametrize('member', typelist) def test_standard_types(tmpdir, compress, member): # Test pickling and saving with standard types. filename = tmpdir.join('test.pkl').strpath numpy_pickle.dump(member, filename, compress=compress) _member = numpy_pickle.load(filename) # We compare the pickled instance to the reloaded one only if it # can be compared to a copied one if member == copy.deepcopy(member): assert member == _member def test_value_error(): # Test inverting the input arguments to dump with raises(ValueError): numpy_pickle.dump('foo', dict()) @parametrize('wrong_compress', [-1, 10, dict()]) def test_compress_level_error(wrong_compress): # Verify that passing an invalid compress argument raises an error. exception_msg = ('Non valid compress level given: ' '"{0}"'.format(wrong_compress)) with raises(ValueError) as excinfo: numpy_pickle.dump('dummy', 'foo', compress=wrong_compress) excinfo.match(exception_msg) @with_numpy @parametrize('compress', [False, True, 0, 3, 'zlib']) def test_numpy_persistence(tmpdir, compress): filename = tmpdir.join('test.pkl').strpath rnd = np.random.RandomState(0) a = rnd.random_sample((10, 2)) # We use 'a.T' to have a non C-contiguous array. 
@with_numpy
@parametrize('compress', [False, True, 0, 3, 'zlib'])
def test_numpy_persistence(tmpdir, compress):
    filename = tmpdir.join('test.pkl').strpath
    rnd = np.random.RandomState(0)
    a = rnd.random_sample((10, 2))
    # We use 'a.T' to have a non C-contiguous array.
    for index, obj in enumerate(((a,), (a.T,), (a, a), [a, a, a])):
        filenames = numpy_pickle.dump(obj, filename, compress=compress)

        # All is cached in one file
        assert len(filenames) == 1
        # Check that only one file was created
        assert filenames[0] == filename
        # Check that this file does exist
        assert os.path.exists(filenames[0])

        # Unpickle the object
        obj_ = numpy_pickle.load(filename)
        # Check that the items are indeed arrays
        for item in obj_:
            assert isinstance(item, np.ndarray)
        # And finally, check that all the values are equal.
        np.testing.assert_array_equal(np.array(obj), np.array(obj_))

    # Now test with an array subclass
    obj = np.memmap(filename + 'mmap', mode='w+', shape=4, dtype=np.float64)
    filenames = numpy_pickle.dump(obj, filename, compress=compress)
    # All is cached in one file
    assert len(filenames) == 1

    obj_ = numpy_pickle.load(filename)
    if (type(obj) is not np.memmap and
            hasattr(obj, '__array_prepare__')):
        # We don't reconstruct memmaps
        assert isinstance(obj_, type(obj))

    np.testing.assert_array_equal(obj_, obj)

    # Test with an object containing multiple numpy arrays
    obj = ComplexTestObject()
    filenames = numpy_pickle.dump(obj, filename, compress=compress)
    # All is cached in one file
    assert len(filenames) == 1

    obj_loaded = numpy_pickle.load(filename)
    assert isinstance(obj_loaded, type(obj))
    np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float)
    np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int)
    np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj)


@with_numpy
def test_numpy_persistence_bufferred_array_compression(tmpdir):
    big_array = np.ones((_IO_BUFFER_SIZE + 100), dtype=np.uint8)
    filename = tmpdir.join('test.pkl').strpath
    numpy_pickle.dump(big_array, filename, compress=True)
    arr_reloaded = numpy_pickle.load(filename)

    np.testing.assert_array_equal(big_array, arr_reloaded)


@with_numpy
def test_memmap_persistence(tmpdir):
    rnd = np.random.RandomState(0)
    a = rnd.random_sample(10)
    filename = tmpdir.join('test1.pkl').strpath
    numpy_pickle.dump(a, filename)
    b = numpy_pickle.load(filename, mmap_mode='r')

    assert isinstance(b, np.memmap)

    # Test with an object containing multiple numpy arrays
    filename = tmpdir.join('test2.pkl').strpath
    obj = ComplexTestObject()
    numpy_pickle.dump(obj, filename)
    obj_loaded = numpy_pickle.load(filename, mmap_mode='r')
    assert isinstance(obj_loaded, type(obj))
    assert isinstance(obj_loaded.array_float, np.memmap)
    assert not obj_loaded.array_float.flags.writeable
    assert isinstance(obj_loaded.array_int, np.memmap)
    assert not obj_loaded.array_int.flags.writeable
    # Memory map not allowed for numpy object arrays
    assert not isinstance(obj_loaded.array_obj, np.memmap)
    np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float)
    np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int)
    np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj)

    # Test we can write in memmapped arrays
    obj_loaded = numpy_pickle.load(filename, mmap_mode='r+')
    assert obj_loaded.array_float.flags.writeable
    obj_loaded.array_float[0:10] = 10.0
    assert obj_loaded.array_int.flags.writeable
    obj_loaded.array_int[0:10] = 10

    obj_reloaded = numpy_pickle.load(filename, mmap_mode='r')
    np.testing.assert_array_equal(obj_reloaded.array_float,
                                  obj_loaded.array_float)
    np.testing.assert_array_equal(obj_reloaded.array_int,
                                  obj_loaded.array_int)

    # Test w+ mode is caught and the mode has switched to r+
    obj_loaded = numpy_pickle.load(filename, mmap_mode='w+')
    assert obj_loaded.array_int.flags.writeable
    assert obj_loaded.array_int.mode == 'r+'
    assert obj_loaded.array_float.flags.writeable
    assert obj_loaded.array_float.mode == 'r+'
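# Not part of the original section: a short sketch of the copy-on-write
# path. It assumes joblib forwards mmap_mode='c' to numpy, in which case
# writes must stay in the in-memory copy and never reach the file on disk.
@with_numpy
def test_memmap_persistence_copy_on_write(tmpdir):
    a = np.arange(10, dtype=np.float64)
    filename = tmpdir.join('test_cow.pkl').strpath
    numpy_pickle.dump(a, filename)

    b = numpy_pickle.load(filename, mmap_mode='c')
    assert isinstance(b, np.memmap)
    b[0] = -1.0  # modifies the in-memory copy only

    c = numpy_pickle.load(filename, mmap_mode='r')
    assert c[0] == a[0]  # the data on disk is unchanged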
@with_numpy
def test_memmap_persistence_mixed_dtypes(tmpdir):
    # Loading data structures that have sub-arrays with dtype=object
    # should not prevent memmapping on fixed size dtype sub-arrays.
    rnd = np.random.RandomState(0)
    a = rnd.random_sample(10)
    b = np.array([1, 'b'], dtype=object)
    construct = (a, b)
    filename = tmpdir.join('test.pkl').strpath
    numpy_pickle.dump(construct, filename)
    a_clone, b_clone = numpy_pickle.load(filename, mmap_mode='r')

    # the floating point array has been memory mapped
    assert isinstance(a_clone, np.memmap)

    # the object-dtype array has been loaded in memory
    assert not isinstance(b_clone, np.memmap)


@with_numpy
def test_masked_array_persistence(tmpdir):
    # The special-case pickler fails because saving masked_array is not
    # implemented, but it just delegates to the standard pickler.
    rnd = np.random.RandomState(0)
    a = rnd.random_sample(10)
    a = np.ma.masked_greater(a, 0.5)
    filename = tmpdir.join('test.pkl').strpath
    numpy_pickle.dump(a, filename)
    b = numpy_pickle.load(filename, mmap_mode='r')
    assert isinstance(b, np.ma.masked_array)


@with_numpy
def test_compress_mmap_mode_warning(tmpdir):
    # Test the warning in case of compress + mmap_mode
    rnd = np.random.RandomState(0)
    a = rnd.random_sample(10)
    this_filename = tmpdir.join('test.pkl').strpath
    numpy_pickle.dump(a, this_filename, compress=1)
    with warns(UserWarning) as warninfo:
        numpy_pickle.load(this_filename, mmap_mode='r+')
    debug_msg = "\n".join([str(w) for w in warninfo])
    warninfo = [w.message for w in warninfo]
    assert len(warninfo) == 1, debug_msg
    assert (
        str(warninfo[0]) ==
        'mmap_mode "r+" is not compatible with compressed '
        f'file {this_filename}. "r+" flag will be ignored.'
    )


@with_numpy
@parametrize('cache_size', [None, 0, 10])
def test_cache_size_warning(tmpdir, cache_size):
    # Check deprecation warning raised when cache size is not None
    filename = tmpdir.join('test.pkl').strpath
    rnd = np.random.RandomState(0)
    a = rnd.random_sample((10, 2))

    warnings.simplefilter("always")
    with warnings.catch_warnings(record=True) as warninfo:
        numpy_pickle.dump(a, filename, cache_size=cache_size)
    expected_nb_warnings = 1 if cache_size is not None else 0
    assert len(warninfo) == expected_nb_warnings
    for w in warninfo:
        assert w.category == DeprecationWarning
        assert (str(w.message) ==
                "Please do not set 'cache_size' in joblib.dump, this "
                "parameter has no effect and will be removed. You "
                "used 'cache_size={0}'".format(cache_size))


@with_numpy
@with_memory_profiler
@parametrize('compress', [True, False])
def test_memory_usage(tmpdir, compress):
    # Verify memory stays within expected bounds.
    filename = tmpdir.join('test.pkl').strpath
    small_array = np.ones((10, 10))
    big_array = np.ones(shape=100 * int(1e6), dtype=np.uint8)

    for obj in (small_array, big_array):
        size = obj.nbytes / 1e6
        obj_filename = filename + str(np.random.randint(0, 1000))
        mem_used = memory_used(numpy_pickle.dump,
                               obj, obj_filename, compress=compress)

        # The memory used to dump the object shouldn't exceed the buffer
        # size used to write array chunks (16MB).
        write_buf_size = _IO_BUFFER_SIZE + 16 * 1024 ** 2 / 1e6
        assert mem_used <= write_buf_size

        mem_used = memory_used(numpy_pickle.load, obj_filename)
        # Memory used should be less than the array size plus the buffer
        # size used to read the array chunk by chunk.
        read_buf_size = 32 + _IO_BUFFER_SIZE  # MiB
        assert mem_used < size + read_buf_size


@with_numpy
def test_compressed_pickle_dump_and_load(tmpdir):
    expected_list = [np.arange(5, dtype=np.dtype('<i8')),
                     np.arange(5, dtype=np.dtype('<f8')),
                     np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'),
                     np.arange(256, dtype=np.uint8).tobytes(),
                     u"C'est l'\xe9t\xe9 !"]

    fname = tmpdir.join('temp.pkl.gz').strpath

    dumped_filenames = numpy_pickle.dump(expected_list, fname, compress=1)
    assert len(dumped_filenames) == 1
    result_list = numpy_pickle.load(fname)
    for result, expected in zip(result_list, expected_list):
        if isinstance(expected, np.ndarray):
            expected = _ensure_native_byte_order(expected)
            assert result.dtype == expected.dtype
            np.testing.assert_equal(result, expected)
        else:
            assert result == expected


def _check_pickle(filename, expected_list, mmap_mode=None):
    """Helper function to test joblib pickle content.

    Note: currently only pickles containing an iterable are supported
    by this function.
    """
    version_match = re.match(r'.+py(\d)(\d).+', filename)
    py_version_used_for_writing = int(version_match.group(1))

    py_version_to_default_pickle_protocol = {2: 2, 3: 3}
    pickle_reading_protocol = py_version_to_default_pickle_protocol.get(3, 4)
    pickle_writing_protocol = py_version_to_default_pickle_protocol.get(
        py_version_used_for_writing, 4)
    if pickle_reading_protocol >= pickle_writing_protocol:
        try:
            with warnings.catch_warnings(record=True) as warninfo:
                warnings.simplefilter('always')
                warnings.filterwarnings(
                    'ignore', module='numpy',
                    message='The compiler package is deprecated')
                result_list = numpy_pickle.load(filename, mmap_mode=mmap_mode)
            filename_base = os.path.basename(filename)
            expected_nb_deprecation_warnings = 1 if (
                "_0.9" in filename_base or "_0.8.4" in filename_base) else 0
            expected_nb_user_warnings = 3 if (
                re.search("_0.1.+.pkl$", filename_base) and
                mmap_mode is not None) else 0
            expected_nb_warnings = \
                expected_nb_deprecation_warnings + expected_nb_user_warnings
            assert len(warninfo) == expected_nb_warnings

            deprecation_warnings = [
                w for w in warninfo
                if issubclass(w.category, DeprecationWarning)]
            user_warnings = [
                w for w in warninfo
                if issubclass(w.category, UserWarning)]

            for w in deprecation_warnings:
                assert (str(w.message) ==
                        "The file '{0}' has been generated with a joblib "
                        "version less than 0.10. Please regenerate this "
                        "pickle file.".format(filename))

            for w in user_warnings:
                escaped_filename = re.escape(filename)
                assert re.search(
                    f"memmapped.+{escaped_filename}.+segmentation fault",
                    str(w.message))

            for result, expected in zip(result_list, expected_list):
                if isinstance(expected, np.ndarray):
                    expected = _ensure_native_byte_order(expected)
                    assert result.dtype == expected.dtype
                    np.testing.assert_equal(result, expected)
                else:
                    assert result == expected
        except Exception as exc:
            # When trying to read with python 3 a pickle generated
            # with python 2, we expect a user-friendly error.
            if py_version_used_for_writing == 2:
                assert isinstance(exc, ValueError)
                message = ('You may be trying to read with '
                           'python 3 a joblib pickle generated with '
                           'python 2.')
                assert message in str(exc)
            elif filename.endswith('.lz4') and with_lz4.args[0]:
                assert isinstance(exc, ValueError)
                assert LZ4_NOT_INSTALLED_ERROR in str(exc)
            else:
                raise
    else:
        # The pickle protocol used for writing is too high. We expect an
        # "unsupported pickle protocol" error message on loading.
        try:
            numpy_pickle.load(filename)
            raise AssertionError('Numpy pickle loading should '
                                 'have raised a ValueError exception')
        except ValueError as e:
            message = 'unsupported pickle protocol: {0}'.format(
                pickle_writing_protocol)
            assert message in str(e.args)


@with_numpy
def test_joblib_pickle_across_python_versions():
    # We need to be specific about dtypes, in particular endianness,
    # because the pickles can be generated on one architecture and
    # the tests run on another one. See
    # https://github.com/joblib/joblib/issues/279.
    expected_list = [np.arange(5, dtype=np.dtype('<i8')),
                     np.arange(5, dtype=np.dtype('<f8')),
                     np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'),
                     np.arange(256, dtype=np.uint8).tobytes(),
                     u"C'est l'\xe9t\xe9 !"]

    # Test all the compressed and non compressed pickles shipped in
    # joblib/test/data.
    test_data_dir = os.path.dirname(os.path.abspath(data.__file__))

    pickle_extensions = ('.pkl', '.gz', '.gzip', '.bz2', '.xz', '.lzma',
                         '.lz4')
    pickle_filenames = [os.path.join(test_data_dir, fn)
                        for fn in os.listdir(test_data_dir)
                        if any(fn.endswith(ext)
                               for ext in pickle_extensions)]

    for fname in pickle_filenames:
        _check_pickle(fname, expected_list)


@with_numpy
def test_numpy_array_byte_order_mismatch_detection():
    # List of numpy arrays with big endian byteorder.
    be_arrays = [np.array([(1, 2.0), (3, 4.0)],
                          dtype=[('', '>i8'), ('', '>f8')]),
                 np.arange(3, dtype=np.dtype('>i8')),
                 np.arange(3, dtype=np.dtype('>f8'))]

    # Verify the byteorder mismatch is correctly detected.
    for array in be_arrays:
        if sys.byteorder == 'big':
            assert not _is_numpy_array_byte_order_mismatch(array)
        else:
            assert _is_numpy_array_byte_order_mismatch(array)
        converted = _ensure_native_byte_order(array)
        if converted.dtype.fields:
            for f in converted.dtype.fields.values():
                assert f[0].byteorder == '='
        else:
            assert converted.dtype.byteorder == "="

    # List of numpy arrays with little endian byteorder.
    le_arrays = [np.array([(1, 2.0), (3, 4.0)],
                          dtype=[('', '<i8'), ('', '<f8')]),
                 np.arange(3, dtype=np.dtype('<i8')),
                 np.arange(3, dtype=np.dtype('<f8'))]

    # Verify the byteorder mismatch is correctly detected the other
    # way around.
    for array in le_arrays:
        if sys.byteorder == 'little':
            assert not _is_numpy_array_byte_order_mismatch(array)
        else:
            assert _is_numpy_array_byte_order_mismatch(array)
        converted = _ensure_native_byte_order(array)
        if converted.dtype.fields:
            for f in converted.dtype.fields.values():
                assert f[0].byteorder == '='
        else:
            assert converted.dtype.byteorder == "="


@with_numpy
def test_load_memmap_with_big_offset(tmpdir):
    # Test that the numpy memmap offset is set correctly if greater than
    # mmap.ALLOCATIONGRANULARITY, see
    # https://github.com/joblib/joblib/issues/451.
    fname = tmpdir.join('test.mmap').strpath
    size = mmap.ALLOCATIONGRANULARITY
    obj = [np.zeros(size, dtype='uint8'), np.ones(size, dtype='uint8')]
    numpy_pickle.dump(obj, fname)
    memmaps = numpy_pickle.load(fname, mmap_mode='r')
    assert isinstance(memmaps[1], np.memmap)
    assert memmaps[1].offset > size
    np.testing.assert_array_equal(obj, memmaps)


def test_register_compressor(tmpdir):
    # Check that registering a compressor file object works.
    compressor_name = 'test-name'
    compressor_prefix = 'test-prefix'

    class BinaryCompressorTestFile(io.BufferedIOBase):
        pass

    class BinaryCompressorTestWrapper(CompressorWrapper):

        def __init__(self):
            CompressorWrapper.__init__(self, obj=BinaryCompressorTestFile,
                                       prefix=compressor_prefix)

    register_compressor(compressor_name, BinaryCompressorTestWrapper())

    assert (_COMPRESSORS[compressor_name].fileobj_factory ==
            BinaryCompressorTestFile)
    assert _COMPRESSORS[compressor_name].prefix == compressor_prefix

    # Remove this dummy compressor from the registry so that it cannot
    # make other tests fail.
    _COMPRESSORS.pop(compressor_name)


@parametrize('invalid_name', [1, (), {}])
def test_register_compressor_invalid_name(invalid_name):
    # Test that registering an invalid compressor name is not allowed.
    with raises(ValueError) as excinfo:
        register_compressor(invalid_name, None)
    excinfo.match("Compressor name should be a string")


def test_register_compressor_invalid_fileobj():
    # Test that registering an invalid file object is not allowed.

    class InvalidFileObject():
        pass

    class InvalidFileObjectWrapper(CompressorWrapper):

        def __init__(self):
            CompressorWrapper.__init__(self, obj=InvalidFileObject,
                                       prefix=b'prefix')

    with raises(ValueError) as excinfo:
        register_compressor('invalid', InvalidFileObjectWrapper())

    excinfo.match("Compressor 'fileobj_factory' attribute should implement "
                  "the file object interface")
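# Not part of the original section: a small sketch exercising the private
# helper _detect_compressor imported above. It assumes the helper accepts
# any binary file object exposing peek()/read() and returns the registered
# compressor name matching the file's magic bytes.
def test_detect_compressor_on_zlib_file(tmpdir):
    fname = tmpdir.join('test_detect.pkl').strpath
    numpy_pickle.dump([1, 2, 3], fname, compress=('zlib', 3))
    with open(fname, 'rb') as f:
        # zlib streams start with the 0x78 magic byte.
        assert _detect_compressor(f) == 'zlib'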
class AnotherZlibCompressorWrapper(CompressorWrapper):

    def __init__(self):
        CompressorWrapper.__init__(self, obj=BinaryZlibFile,
                                   prefix=b'prefix')


class StandardLibGzipCompressorWrapper(CompressorWrapper):

    def __init__(self):
        CompressorWrapper.__init__(self, obj=gzip.GzipFile,
                                   prefix=b'prefix')


def test_register_compressor_already_registered():
    # Test registration of existing compressor files.
    compressor_name = 'test-name'

    # Register a test compressor.
    register_compressor(compressor_name, AnotherZlibCompressorWrapper())

    with raises(ValueError) as excinfo:
        register_compressor(compressor_name,
                            StandardLibGzipCompressorWrapper())
    excinfo.match("Compressor '{}' already registered."
                  .format(compressor_name))

    register_compressor(compressor_name, StandardLibGzipCompressorWrapper(),
                        force=True)

    assert compressor_name in _COMPRESSORS
    assert _COMPRESSORS[compressor_name].fileobj_factory == gzip.GzipFile

    # Remove this dummy compressor from the registry so that it cannot
    # make other tests fail.
    _COMPRESSORS.pop(compressor_name)


@with_lz4
def test_lz4_compression(tmpdir):
    # Check that lz4 can be used when the dependency is available.
    import lz4.frame
    compressor = 'lz4'
    assert compressor in _COMPRESSORS
    assert _COMPRESSORS[compressor].fileobj_factory == lz4.frame.LZ4FrameFile

    fname = tmpdir.join('test.pkl').strpath
    data = 'test data'
    numpy_pickle.dump(data, fname, compress=compressor)
    with open(fname, 'rb') as f:
        assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
    assert numpy_pickle.load(fname) == data

    # Test that LZ4 is also selected based on the '.lz4' file extension.
    numpy_pickle.dump(data, fname + '.lz4')
    with open(fname + '.lz4', 'rb') as f:
        assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
    assert numpy_pickle.load(fname + '.lz4') == data


@without_lz4
def test_lz4_compression_without_lz4(tmpdir):
    # Check that lz4 cannot be used when the dependency is not available.
    fname = tmpdir.join('test.nolz4').strpath
    data = 'test data'
    msg = LZ4_NOT_INSTALLED_ERROR
    with raises(ValueError) as excinfo:
        numpy_pickle.dump(data, fname, compress='lz4')
    excinfo.match(msg)

    with raises(ValueError) as excinfo:
        numpy_pickle.dump(data, fname + '.lz4')
    excinfo.match(msg)


protocols = [pickle.DEFAULT_PROTOCOL]
if pickle.HIGHEST_PROTOCOL != pickle.DEFAULT_PROTOCOL:
    protocols.append(pickle.HIGHEST_PROTOCOL)


@with_numpy
@parametrize('protocol', protocols)
def test_memmap_alignment_padding(tmpdir, protocol):
    # Test that memmapped arrays returned by numpy_pickle.load are
    # correctly aligned.
    fname = tmpdir.join('test.mmap').strpath

    a = np.random.randn(2)
    numpy_pickle.dump(a, fname, protocol=protocol)
    memmap = numpy_pickle.load(fname, mmap_mode='r')
    assert isinstance(memmap, np.memmap)
    np.testing.assert_array_equal(a, memmap)
    assert (
        memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0)
    assert memmap.flags.aligned

    array_list = [
        np.random.randn(2), np.random.randn(2),
        np.random.randn(2), np.random.randn(2)
    ]

    # On Windows, OSError 22 can occur if the same path is reused for a
    # memmap, hence a fresh filename.
    fname = tmpdir.join('test1.mmap').strpath
    numpy_pickle.dump(array_list, fname, protocol=protocol)
    l_reloaded = numpy_pickle.load(fname, mmap_mode='r')

    for idx, memmap in enumerate(l_reloaded):
        assert isinstance(memmap, np.memmap)
        np.testing.assert_array_equal(array_list[idx], memmap)
        assert (
            memmap.ctypes.data %
            numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0)
        assert memmap.flags.aligned

    array_dict = {
        'a0': np.arange(2, dtype=np.uint8),
        'a1': np.arange(3, dtype=np.uint8),
        'a2': np.arange(5, dtype=np.uint8),
        'a3': np.arange(7, dtype=np.uint8),
        'a4': np.arange(11, dtype=np.uint8),
        'a5': np.arange(13, dtype=np.uint8),
        'a6': np.arange(17, dtype=np.uint8),
        'a7': np.arange(19, dtype=np.uint8),
        'a8': np.arange(23, dtype=np.uint8),
    }

    # On Windows, OSError 22 can occur if the same path is reused for a
    # memmap, hence a fresh filename.
    fname = tmpdir.join('test2.mmap').strpath
    numpy_pickle.dump(array_dict, fname, protocol=protocol)
    d_reloaded = numpy_pickle.load(fname, mmap_mode='r')

    for key, memmap in d_reloaded.items():
        assert isinstance(memmap, np.memmap)
        np.testing.assert_array_equal(array_dict[key], memmap)
        assert (
            memmap.ctypes.data %
            numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0)
        assert memmap.flags.aligned
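# Not part of the original section: a minimal end-to-end sketch of the
# compressor registration API exercised above. After registering a custom
# CompressorWrapper, its name should be accepted by the `compress` argument
# of dump. The wrapper reuses BinaryZlibFile, so the file carries the
# standard zlib magic and load transparently decompresses it; the
# compressor name below is an arbitrary test value.
def test_register_compressor_dump_load_roundtrip(tmpdir):
    compressor_name = 'test-zlib-roundtrip'
    register_compressor(compressor_name, AnotherZlibCompressorWrapper())
    try:
        fname = tmpdir.join('test.pkl').strpath
        obj = {'a': [1, 2, 3], 'b': 'some text'}
        numpy_pickle.dump(obj, fname, compress=compressor_name)
        assert numpy_pickle.load(fname) == obj
    finally:
        # Clean up the registry so other tests are unaffected.
        _COMPRESSORS.pop(compressor_name)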