From ac8816b4ecded7f2613afc052da46db33f52bc05 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 06:48:37 -0700 Subject: [PATCH 01/26] REVERT: Restore dfd113c531ad0dbb301db919b8573ad12ae0bb56 for testing --- .travis.yml | 10 +- COPYING | 52 ++- Changelog | 47 +-- appveyor.yml | 5 +- bin/nib-diff | 17 - doc/source/conf.py | 2 +- doc/source/coordinate_systems.rst | 6 +- doc/source/devel/modified_images.rst | 6 +- doc/source/gettingstarted.rst | 2 +- doc/source/images_and_memory.rst | 62 ++-- doc/source/index.rst | 7 - doc/source/neuro_radio_conventions.rst | 2 +- doc/source/nibabel_images.rst | 40 +-- doc/source/old/examples.txt | 14 +- doc/source/old/orientation.txt | 2 +- doc/source/scripts/make_coord_examples.py | 6 +- nibabel/arrayproxy.py | 113 ++---- nibabel/casting.py | 3 - nibabel/cifti2/parse_cifti2.py | 8 +- nibabel/cmdline/diff.py | 365 -------------------- nibabel/cmdline/tests/test_utils.py | 170 +-------- nibabel/dataobj_images.py | 10 +- nibabel/deprecated.py | 4 +- nibabel/ecat.py | 4 - nibabel/externals/oset.py | 85 ----- nibabel/freesurfer/io.py | 44 +-- nibabel/freesurfer/mghformat.py | 9 +- nibabel/freesurfer/tests/test_io.py | 10 +- nibabel/freesurfer/tests/test_mghformat.py | 2 - nibabel/info.py | 2 +- nibabel/minc1.py | 4 - nibabel/nifti1.py | 13 +- nibabel/parrec.py | 4 - nibabel/tests/test_arrayproxy.py | 250 +++++++------- nibabel/tests/test_diff.py | 74 ---- nibabel/tests/test_floating.py | 51 ++- nibabel/tests/test_image_api.py | 378 +++++++++------------ nibabel/tests/test_nifti1.py | 23 +- nibabel/tests/test_openers.py | 2 +- nibabel/tests/test_proxy_api.py | 8 - nibabel/tests/test_scripts.py | 39 --- nibabel/tests/test_testing.py | 7 +- nibabel/volumeutils.py | 14 +- setup.py | 1 - 44 files changed, 500 insertions(+), 1477 deletions(-) delete mode 100755 bin/nib-diff delete mode 100755 nibabel/cmdline/diff.py delete mode 100644 nibabel/externals/oset.py delete mode 100644 nibabel/tests/test_diff.py diff --git a/.travis.yml b/.travis.yml index b6e69d09ba..28ac4fa5f4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,10 +4,11 @@ # for it to be on multiple physical lines, so long as you remember: - There # can't be any leading "-"s - All newlines will be removed, so use ";"s -dist: xenial -sudo: true language: python +# Run jobs on container-based infrastructure, can be overridden per job +sudo: false + cache: directories: - $HOME/.cache/pip @@ -21,14 +22,11 @@ env: - EXTRA_PIP_FLAGS="--find-links=$EXTRA_WHEELS" - PRE_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS" python: + - 3.4 - 3.5 - 3.6 - - 3.7 matrix: include: - - python: 3.4 - dist: trusty - sudo: false - python: 2.7 env: - COVERAGE=1 diff --git a/COPYING b/COPYING index 6f03ba5ccd..5827950a17 100644 --- a/COPYING +++ b/COPYING @@ -121,40 +121,36 @@ Sphinx 0.6 doesn't work properly. OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -OrderedSet +Ordereddict ----------- -In ``nibabel/externals/oset.py`` +In ``nibabel/externals/ordereddict.py`` -Copied from: https://files.pythonhosted.org/packages/d6/b1/a49498c699a3fda5d635cc1fa222ffc686ea3b5d04b84a3166c4cab0c57b/oset-0.1.3.tar.gz +Copied from: https://pypi.python.org/packages/source/o/ordereddict/ordereddict-1.1.tar.gz#md5=a0ed854ee442051b249bfad0f638bbec :: - Copyright (c) 2009, Raymond Hettinger, and others All rights reserved. 
- - Package structured based on the one developed to odict Copyright (c) 2010, BlueDynamics Alliance, Austria - - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. - - - Neither the name of the BlueDynamics Alliance nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY BlueDynamics Alliance AS IS AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - SHALL BlueDynamics Alliance BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY - OF SUCH DAMAGE. + Copyright (c) 2009 Raymond Hettinger + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation files + (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. mni_icbm152_t1_tal_nlin_asym_09a -------------------------------- diff --git a/Changelog b/Changelog index 153d522629..b96ddd40a7 100644 --- a/Changelog +++ b/Changelog @@ -24,68 +24,25 @@ Gerhard (SG) and Eric Larson (EL). References like "pr/298" refer to github pull request numbers. 
-2.3 (Tuesday 12 June 2018) -========================== +Upcoming Release +================ New features ------------ -* TRK <=> TCK streamlines conversion CLI tools (pr/606) (MC, reviewed by CM) * Image slicing for SpatialImages (pr/550) (CM) Enhancements ------------ * Simplfiy MGHImage and add footer fields (pr/569) (CM, reviewed by MB) -* Force sform/qform codes to be ints, rather than numpy types (pr/575) (Paul - McCarthy, reviewed by MB, CM) -* Auto-fill color table in FreeSurfer annotation file (pr/592) (Paul McCarthy, - reviewed by CM, MB) -* Set default intent code for CIFTI2 images (pr/604) (Mathias Goncalves, - reviewed by CM, Satra Ghosh, MB, Tim Coalson) -* Raise informative error on empty files (pr/611) (Pradeep Raamana, reviewed - by CM, MB) -* Accept degenerate filenames such as ``.nii`` (pr/621) (Dimitri - Papadopoulos-Orfanos, reviewed by Yaroslav Halchenko) -* Take advantage of ``IndexedGzipFile`` ``drop_handles`` flag to release - filehandles by default (pr/614) (Paul McCarthy, reviewed by CM, MB) Bug fixes --------- -* Preserve first point of `LazyTractogram` (pr/588) (MC, reviewed by Nil - Goyette, CM, MB) -* Stop adding extraneous metadata padding (pr/593) (Jon Stutters, reviewed by - CM, MB) -* Accept lower-case orientation codes in TRK files (pr/600) (Kesshi Jordan, - MB, reviewed by MB, MC, CM) -* Annotation file reading (pr/592) (Paul McCarthy, reviewed by CM, MB) -* Fix buffer size calculation in ArraySequence (pr/597) (Serge Koudoro, - reviewed by MC, MB, Eleftherios Garyfallidis, CM) -* Resolve ``UnboundLocalError`` in Python 3 (pr/607) (Jakub Kaczmarzyk, - reviewed by MB, CM) -* Do not crash on non-``ImportError`` failures in optional imports (pr/618) - (Yaroslav Halchenko, reviewed by CM) -* Return original array from ``get_fdata`` for array image, if no cast - required (pr/638, MB, reviewed by CM) Maintenance ----------- -* Use SSH address to use key-based auth (pr/587) (CM, reviewed by MB) -* Fix doctests for numpy 1.14 array printing (pr/591) (MB, reviewed by CM) -* Refactor for pydicom 1.0 API changes (pr/599) (MB, reviewed by CM) -* Increase test coverage, remove unreachable code (pr/602) (CM, reviewed by - Yaroslav Halchenko, MB) -* Move ``nib-ls`` and other programs to a new cmdline module (pr/601, pr/615) - (Chris Cheng, reviewed by MB, Yaroslav Halchenko) -* Remove deprecated numpy indexing (EL, reviewed by CM) -* Update documentation to encourage ``get_fdata`` over ``get_data`` (pr/637, - MB, reviewed by CM) API changes and deprecations ---------------------------- -* Support for ``keep_file_open = 'auto'`` as a parameter to ``Opener()`` will - be deprecated in 2.4, for removal in 3.0. Accordingly, support for - ``openers.KEEP_FILE_OPEN_DEFAULT = 'auto'`` will be dropped on the same - schedule. -* Drop-in support for ``indexed_gzip < 0.7`` has been removed. 2.2.1 (Wednesday 22 November 2017) diff --git a/appveyor.yml b/appveyor.yml index 772bfa142d..e41aee90c8 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -12,8 +12,6 @@ environment: - PYTHON: C:\Python35-x64 - PYTHON: C:\Python36 - PYTHON: C:\Python36-x64 - - PYTHON: C:\Python37 - - PYTHON: C:\Python37-x64 install: # Prepend newly installed Python to the PATH of this build (this cannot be @@ -22,7 +20,8 @@ install: - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH% # Install the dependencies of the project. - - pip install numpy scipy matplotlib nose h5py mock pydicom + - pip install numpy scipy matplotlib nose h5py mock + - pip install pydicom - pip install . 
- SET NIBABEL_DATA_DIR=%CD%\nibabel-data diff --git a/bin/nib-diff b/bin/nib-diff deleted file mode 100755 index 2ae66dda9d..0000000000 --- a/bin/nib-diff +++ /dev/null @@ -1,17 +0,0 @@ -#!python -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. -# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" -Quick diff summary for a set of neuroimaging files -""" - -from nibabel.cmdline.diff import main - -if __name__ == '__main__': - main() diff --git a/doc/source/conf.py b/doc/source/conf.py index ac95cc1dd9..cb4bb8cb49 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -87,7 +87,7 @@ # General information about the project. project = u'NiBabel' -copyright = u'2006-2018, %(MAINTAINER)s <%(AUTHOR_EMAIL)s>' % rel +copyright = u'2006-2017, %(MAINTAINER)s <%(AUTHOR_EMAIL)s>' % rel # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/doc/source/coordinate_systems.rst b/doc/source/coordinate_systems.rst index ffb24a2e78..a45488012d 100644 --- a/doc/source/coordinate_systems.rst +++ b/doc/source/coordinate_systems.rst @@ -34,7 +34,7 @@ We can load up the EPI image to get the image data array: >>> import nibabel as nib >>> epi_img = nib.load('downloads/someones_epi.nii.gz') - >>> epi_img_data = epi_img.get_fdata() + >>> epi_img_data = epi_img.get_data() >>> epi_img_data.shape (53, 61, 33) @@ -64,7 +64,7 @@ and look at slices in the three axes: :context: >>> anat_img = nib.load('downloads/someones_anatomy.nii.gz') - >>> anat_img_data = anat_img.get_fdata() + >>> anat_img_data = anat_img.get_data() >>> anat_img_data.shape (57, 67, 56) >>> show_slices([anat_img_data[28, :, :], @@ -255,7 +255,7 @@ axes *starts* on the right, anterior, superior of the subject, rather than *ending* on the right, anterior, superior. In other words, they would use "RAS" to refer to a coordinate system we would call "LPI". To be safe, we'll call our interpretation of the RAS convention "RAS+", meaning that Right, -Anterior, Superior are all positive values on these axes. +Anterior, Posterior are all positive values on these axes. Some people also use "right" to mean the right hand side when an observer looks at the front of the scanner, from the foot the scanner bed. diff --git a/doc/source/devel/modified_images.rst b/doc/source/devel/modified_images.rst index 5b6e203a42..6230f5bb11 100644 --- a/doc/source/devel/modified_images.rst +++ b/doc/source/devel/modified_images.rst @@ -77,10 +77,10 @@ flag when anyone asks for the data, on the basis that the user may then do something to the data and you can't know if they have:: img = nibabel.load('some_image.nii') - data = img.get_fdata() + data = img.get_data() data[:] = 0 img2 = nibabel.load('some_image.nii') - assert not np.all(img2.get_fdata() == img.get_fdata()) + assert not np.all(img2.get_data() == img.get_data()) The image consists of the data, the affine and a header. 
In order to keep track of the header and affine, we could cache them when loading @@ -96,7 +96,7 @@ When we need to know whether the image object and image file correspond, we could check the current header and current affine (the header may be separate from the affine for an SPM Analyze image) against their cached copies, if they are the same and the 'dirty' flag has not been set by a previous call to -``get_fdata()``, we know that the image file does correspond to the image +``get_data()``, we know that the image file does correspond to the image object. This may be OK for small bits of memory like the affine and the header, diff --git a/doc/source/gettingstarted.rst b/doc/source/gettingstarted.rst index 3e328a5209..9502c09d7c 100644 --- a/doc/source/gettingstarted.rst +++ b/doc/source/gettingstarted.rst @@ -66,7 +66,7 @@ This information is available without the need to load anything of the main image data into the memory. Of course there is also access to the image data as a NumPy_ array ->>> data = img.get_fdata() +>>> data = img.get_data() >>> data.shape (128, 96, 24, 2) >>> type(data) diff --git a/doc/source/images_and_memory.rst b/doc/source/images_and_memory.rst index 2ff0de14c5..02688156e0 100644 --- a/doc/source/images_and_memory.rst +++ b/doc/source/images_and_memory.rst @@ -19,17 +19,17 @@ disk. Nibabel does not load the image array from the proxy when you ``load`` the image. It waits until you ask for the array data. The standard way to ask -for the array data is to call the ``get_fdata()`` method: +for the array data is to call the ``get_data()`` method: ->>> data = img.get_fdata() +>>> data = img.get_data() >>> data.shape (128, 96, 24, 2) -We also saw in :ref:`proxies-caching` that this call to ``get_fdata()`` will +We also saw in :ref:`proxies-caching` that this call to ``get_data()`` will (by default) load the array data into an internal image cache. The image -returns the cached copy on the next call to ``get_fdata()``: +returns the cached copy on the next call to ``get_data()``: ->>> data_again = img.get_fdata() +>>> data_again = img.get_data() >>> data is data_again True @@ -64,7 +64,7 @@ in cache, and True when it is in cache: >>> img = nib.load(example_file) >>> img.in_memory False ->>> data = img.get_fdata() +>>> data = img.get_data() >>> img.in_memory True @@ -73,10 +73,10 @@ True Using ``uncache`` ***************** -As y'all know, the proxy image has the array in cache, ``get_fdata()`` returns +As y'all know, the proxy image has the array in cache, ``get_data()`` returns the cached array: ->>> data_again = img.get_fdata() +>>> data_again = img.get_data() >>> data_again is data # same array returned from cache True @@ -85,34 +85,34 @@ You can uncache a proxy image with the ``uncache()`` method: >>> img.uncache() >>> img.in_memory False ->>> data_once_more = img.get_fdata() +>>> data_once_more = img.get_data() >>> data_once_more is data # a new copy read from disk False ``uncache()`` has no effect if the image is an array image, or if the cache is already empty. 
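The pattern this enables is worth seeing end to end. A minimal sketch, assuming a
list of image paths ``fnames`` (hypothetical) whose volumes share a shape::

    import nibabel as nib

    def mean_image_data(fnames):
        total = None
        for fname in fnames:
            img = nib.load(fname)
            data = img.get_data()   # proxy image: reads from disk, fills cache
            total = data if total is None else total + data
            img.uncache()           # drop the cached copy before the next file
        return total / len(fnames)

Only one volume's cache is held by an image at any time; ``uncache()`` releases
the image's reference so the array can be freed once ``data`` is rebound.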
-You need to be careful when you modify arrays returned by ``get_fdata()`` on +You need to be careful when you modify arrays returned by ``get_data()`` on proxy images, because ``uncache`` will then change the result you get back -from ``get_fdata()``: +from ``get_data()``: >>> proxy_img = nib.load(example_file) ->>> data = proxy_img.get_fdata() # array cached and returned +>>> data = proxy_img.get_data() # array cached and returned >>> data[0, 0, 0, 0] -0.0 +0 >>> data[0, 0, 0, 0] = 99 # modify returned array ->>> data_again = proxy_img.get_fdata() # return cached array +>>> data_again = proxy_img.get_data() # return cached array >>> data_again[0, 0, 0, 0] # cached array modified -99.0 +99 So far the proxy image behaves the same as an array image. ``uncache()`` has no effect on an array image, but it does have an effect on the returned array of a proxy image: >>> proxy_img.uncache() # cached array discarded from proxy image ->>> data_once_more = proxy_img.get_fdata() # new copy of array loaded +>>> data_once_more = proxy_img.get_data() # new copy of array loaded >>> data_once_more[0, 0, 0, 0] # array modifications discarded -0.0 +0 ************* Saving memory @@ -126,8 +126,8 @@ use the ``uncache()`` method: >>> img.uncache() -Use the array proxy instead of ``get_fdata()`` -============================================== +Use the array proxy instead of ``get_data()`` +============================================= The ``dataobj`` property of a proxy image is an array proxy. We can ask the proxy to return the array directly by passing ``dataobj`` to the numpy @@ -145,25 +145,25 @@ This also works for array images, because ``np.asarray`` returns the array: >>> type(data_array) <... 'numpy.ndarray'> -If you want to avoid caching you can avoid ``get_fdata()`` and always use +If you want to avoid caching you can avoid ``get_data()`` and always use ``np.asarray(img.dataobj)``. -Use the ``caching`` keyword to ``get_fdata()`` -============================================== +Use the ``caching`` keyword to ``get_data()`` +============================================= -The default behavior of the ``get_fdata()`` function is to always fill the +The default behavior of the ``get_data()`` function is to always fill the cache, if it is empty. This corresponds to the default ``'fill'`` value to the ``caching`` keyword. So, this: >>> proxy_img = nib.load(example_file) ->>> data = proxy_img.get_fdata() # default caching='fill' +>>> data = proxy_img.get_data() # default caching='fill' >>> proxy_img.in_memory True is the same as this: >>> proxy_img = nib.load(example_file) ->>> data = proxy_img.get_fdata(caching='fill') +>>> data = proxy_img.get_data(caching='fill') >>> proxy_img.in_memory True @@ -171,21 +171,21 @@ Sometimes you may want to avoid filling the cache, if it is empty. In this case, you can use ``caching='unchanged'``: >>> proxy_img = nib.load(example_file) ->>> data = proxy_img.get_fdata(caching='unchanged') +>>> data = proxy_img.get_data(caching='unchanged') >>> proxy_img.in_memory False ``caching='unchanged'`` will leave the cache full if it is already full. ->>> data = proxy_img.get_fdata(caching='fill') +>>> data = proxy_img.get_data(caching='fill') >>> proxy_img.in_memory True ->>> data = proxy_img.get_fdata(caching='unchanged') +>>> data = proxy_img.get_data(caching='unchanged') >>> proxy_img.in_memory True -See the :meth:`get_fdata() docstring -` for more detail. +See the :meth:`get_data() docstring +` for more detail. 
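To make the trade-off concrete, here is a small sketch of reading the array
without touching the image cache at all (the helper name ``peek_data`` is ours,
not part of nibabel)::

    import numpy as np
    import nibabel as nib

    def peek_data(img):
        # Reads through the array proxy; equivalent in effect to
        # img.get_data(caching='unchanged') when the cache is empty.
        return np.asarray(img.dataobj)

    img = nib.load(example_file)
    data = peek_data(img)
    img.in_memory   # False - nothing was cached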
********************** Saving time and memory @@ -202,7 +202,7 @@ For example, let us say you only wanted the second volume from the example dataset. You could do this: >>> proxy_img = nib.load(example_file) ->>> data = proxy_img.get_fdata() +>>> data = proxy_img.get_data() >>> data.shape (128, 96, 24, 2) >>> vol1 = data[..., 1] diff --git a/doc/source/index.rst b/doc/source/index.rst index b4f2ebd596..f623c931d2 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -80,13 +80,6 @@ contributed code and discussion (in rough order of appearance): * Mark Hymers * Jasper J.F. van den Bosch * Bennet Fauber -* Kesshi Jordan -* Jon Stutters -* Serge Koudoro -* Christopher P. Cheng -* Mathias Goncalves -* Jakub Kaczmarzyk -* Dimitri Papadopoulos Orfanos License reprise =============== diff --git a/doc/source/neuro_radio_conventions.rst b/doc/source/neuro_radio_conventions.rst index a9a51ab2c2..f88c31ddf8 100644 --- a/doc/source/neuro_radio_conventions.rst +++ b/doc/source/neuro_radio_conventions.rst @@ -101,7 +101,7 @@ showing the middle slice of :download:`an image [ 0. , 2.75, 0. , -91. ], [ 0. , 0. , 2.75, -91. ], [ 0. , 0. , 0. , 1. ]]) - >>> img_data = img.get_fdata() + >>> img_data = img.get_data() >>> a_slice = img_data[:, :, 28] >>> # Need transpose to put first axis left-right, second bottom-top >>> plt.imshow(a_slice.T, cmap="gray", origin="lower") # doctest: +SKIP diff --git a/doc/source/nibabel_images.rst b/doc/source/nibabel_images.rst index f14debcc93..ffdef7fbdd 100644 --- a/doc/source/nibabel_images.rst +++ b/doc/source/nibabel_images.rst @@ -220,39 +220,21 @@ False Getting the image data the easy way =================================== -For either type of image (array or proxy) you can always get the data with the -:meth:`get_fdata() ` method. +For either type of image (array or proxy) you can always get the data with +the :meth:`get_data() ` method. -For the array image, ``get_fdata()`` just returns the data array, if it's already the required floating point type (default 64-bit float). If it isn't that type, ``get_fdata()`` casts it to one: +For the array image, ``get_data()`` just returns the data array: ->>> image_data = array_img.get_fdata() +>>> image_data = array_img.get_data() >>> image_data.shape (2, 3, 4) ->>> image_data.dtype == np.dtype(np.float64) +>>> image_data is array_data True -The cast to floating point means the array is not the one attached to the image: - ->>> image_data is array_img.dataobj -False - -Here's an image backed by a floating point array: - ->>> farray_img = nib.Nifti1Image(image_data.astype(np.float64), affine) ->>> farray_data = farray_img.get_fdata() ->>> farray_data.dtype == np.dtype(np.float64) -True - -There was no cast, so the array returned is exactly the array attached to the -image: - ->>> farray_data is farray_img.dataobj -True - -For the proxy image, the ``get_fdata()`` method fetches the array data from +For the proxy image, the ``get_data()`` method fetches the array data from disk using the proxy, and returns the array. ->>> image_data = img.get_fdata() +>>> image_data = img.get_data() >>> image_data.shape (128, 96, 24, 2) @@ -267,12 +249,12 @@ Proxies and caching =================== You may not want to keep loading the image data off disk every time -you call ``get_fdata()`` on a proxy image. By default, when you call -``get_fdata()`` the first time on a proxy image, the image object keeps a -cached copy of the loaded array. The next time you call ``img.get_fdata()``, +you call ``get_data()`` on a proxy image. 
By default, when you call +``get_data()`` the first time on a proxy image, the image object keeps a +cached copy of the loaded array. The next time you call ``img.get_data()``, the image returns the array from cache rather than loading it from disk again. ->>> data_again = img.get_fdata() +>>> data_again = img.get_data() The returned data is the same (cached) copy we returned before: diff --git a/doc/source/old/examples.txt b/doc/source/old/examples.txt index 19a44d9cb0..b84f5441bf 100644 --- a/doc/source/old/examples.txt +++ b/doc/source/old/examples.txt @@ -108,7 +108,7 @@ previously created in a separate file. First, we open the file: Now we select the first ten volumes and store them to another file, while preserving as much header information as possible - >>> nim2 = nib.Nifti1Image(nim.get_fdata()[..., :10], + >>> nim2 = nib.Nifti1Image(nim.get_data()[..., :10], ... nim.get_affine(), ... nim.header) >>> print nim2.header['dim'] @@ -127,7 +127,7 @@ Linear detrending of timeseries (SciPy module is required for this example) =========================================================================== Let's load another 4d NIfTI file and perform a linear detrending, by fitting -a straight line to the timeseries of each voxel and subtract that fit from +a straight line to the timeseries of each voxel and substract that fit from the data. Although this might sound complicated at first, thanks to the excellent SciPy module it is just a few lines of code. For this example we will first create a NIfTI image with just a single voxel and 50 timepoints @@ -139,11 +139,15 @@ will first create a NIfTI image with just a single voxel and 50 timepoints >>> print nim.header['dim'] [ 4 1 1 1 50 1 1 1] -Remember that the array has the time axis as its first dimension (in contrast -to the NIfTI file where it is the 4th). +Depending on the datatype of the input image the detrending process might +change the datatype from integer to float. As operations that change the +(binary) size of the NIfTI image are not supported, we need to make a copy +of the data and later create a new NIfTI image. Remember that the array has the +time axis as its first dimension (in contrast to the NIfTI file where it is +the 4th). >>> from scipy import signal - >>> data_detrended = signal.detrend(nim.get_fdata(), axis=0) + >>> data_detrended = signal.detrend(nim.get_data(), axis=0) Finally, create a new NIfTI image using header information from the original source image. 
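One plausible final step (a sketch, not part of the original example) reuses the
affine and header of the source image:

    >>> nim_detrended = nib.Nifti1Image(data_detrended, nim.get_affine(),
    ...                                 nim.header)
    >>> nib.save(nim_detrended, 'detrended.nii.gz')

The new image inherits the affine and header metadata of the source, while its
data comes from the detrended array.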
diff --git a/doc/source/old/orientation.txt b/doc/source/old/orientation.txt index ef231f7e95..4efbe73db1 100644 --- a/doc/source/old/orientation.txt +++ b/doc/source/old/orientation.txt @@ -85,7 +85,7 @@ the affine after loading, as in:: img = nibabel.load('some_image.img') aff = img.get_affine() x_flipper = np.diag([-1,1,1,1]) - lr_img = nibabel.Nifti1Image(img.get_fdata(), np.dot(x_flipper, aff), img.header) + lr_img = nibabel.Nifti1Image(img.get_data, np.dot(x_flipper, aff), img.header) Affines for Analyze, SPM analyze, and NIFTI ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/scripts/make_coord_examples.py b/doc/source/scripts/make_coord_examples.py index 9079cea141..790c8b7671 100644 --- a/doc/source/scripts/make_coord_examples.py +++ b/doc/source/scripts/make_coord_examples.py @@ -50,7 +50,7 @@ t1_img, t2_img = imgs # Make fake localizer -data = t1_img.get_fdata() +data = t1_img.get_data() n_x, n_y, n_z = img.shape mid_x = round(n_x / 2) @@ -171,7 +171,7 @@ def vx2mm(pts): # resample, preserving affine epi_cmap = nca.vox2mni(epi_vox2mm) epi = rsm.resample(t2_img, epi_cmap, np.eye(4), epi_vox_shape) -epi_data = epi.get_fdata() +epi_data = epi.get_data() # Do the same kind of thing for the anatomical scan anat_vox_sizes = [2.75, 2.75, 2.75] anat_scale = npl.inv(np.diag(anat_vox_sizes + [1])) @@ -183,7 +183,7 @@ def vx2mm(pts): [data.shape[0], anat_x_len, anat_y_len], anat_vox_sizes)) anat_cmap = nca.vox2mni(anat_vox2mm) anat = rsm.resample(t1_img, anat_cmap, np.eye(4), anat_vox_shape) -anat_data = anat.get_fdata() +anat_data = anat.get_data() save_plot() nipy.save_image(epi, 'someones_epi.nii.gz', dtype_from='uint8') diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index c74386b0ac..b3faa21a1f 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -42,14 +42,11 @@ used for the lifetime of the ``ArrayProxy``. It should be set to one of ``True``, ``False``, or ``'auto'``. -Management of file handles will be performed either by ``ArrayProxy`` objects, -or by the ``indexed_gzip`` package if it is used. - -If this flag is set to ``True``, a single file handle is created and used. If -``False``, a new file handle is created every time the image is accessed. For -gzip files, if ``'auto'``, and the optional ``indexed_gzip`` dependency is -present, a single file handle is created and persisted. If ``indexed_gzip`` is -not available, behaviour is the same as if ``keep_file_open is False``. +If ``True``, a single file handle is created and used. If ``False``, a new +file handle is created every time the image is accessed. For gzip files, if +``'auto'``, and the optional ``indexed_gzip`` dependency is present, a single +file handle is created and persisted. If ``indexed_gzip`` is not available, +behaviour is the same as if ``keep_file_open is False``. If this is set to any other value, attempts to create an ``ArrayProxy`` without specifying the ``keep_file_open`` flag will result in a ``ValueError`` being @@ -163,10 +160,8 @@ def __init__(self, file_like, spec, mmap=True, keep_file_open=None): # Permit any specifier that can be interpreted as a numpy dtype self._dtype = np.dtype(self._dtype) self._mmap = mmap - # Flags to keep track of whether a single ImageOpener is created, and - # whether a single underlying file handle is created. 
- self._keep_file_open, self._persist_opener = \ - self._should_keep_file_open(file_like, keep_file_open) + self._keep_file_open = self._should_keep_file_open(file_like, + keep_file_open) self._lock = RLock() def __del__(self): @@ -189,64 +184,16 @@ def __setstate__(self, state): self._lock = RLock() def _should_keep_file_open(self, file_like, keep_file_open): - """Called by ``__init__``. - - This method determines how to manage ``ImageOpener`` instances, - and the underlying file handles - the behaviour depends on: - - - whether ``file_like`` is an an open file handle, or a path to a - ``'.gz'`` file, or a path to a non-gzip file. - - whether ``indexed_gzip`` is present (see - :attr:`.openers.HAVE_INDEXED_GZIP`). - - An ``ArrayProxy`` object uses two internal flags to manage - ``ImageOpener`` instances and underlying file handles. - - - The ``_persist_opener`` flag controls whether a single - ``ImageOpener`` should be created and used for the lifetime of - this ``ArrayProxy``, or whether separate ``ImageOpener`` instances - should be created on each file access. - - - The ``_keep_file_open`` flag controls qwhether the underlying file - handle should be kept open for the lifetime of this - ``ArrayProxy``, or whether the file handle should be (re-)opened - and closed on each file access. - - The internal ``_keep_file_open`` flag is only relevant if - ``file_like`` is a ``'.gz'`` file, and the ``indexed_gzip`` library is - present. - - This method returns the values to be used for the internal - ``_persist_opener`` and ``_keep_file_open`` flags; these values are - derived according to the following rules: + """Called by ``__init__``, and used to determine the final value of + ``keep_file_open``. - 1. If ``file_like`` is a file(-like) object, both flags are set to - ``False``. + The return value is derived from these rules: - 2. If ``keep_file_open`` (as passed to :meth:``__init__``) is - ``True``, both internal flags are set to ``True``. - - 3. If ``keep_file_open`` is ``False``, but ``file_like`` is not a path - to a ``.gz`` file or ``indexed_gzip`` is not present, both flags - are set to ``False``. - - 4. If ``keep_file_open`` is ``False``, ``file_like`` is a path to a - ``.gz`` file, and ``indexed_gzip`` is present, ``_persist_opener`` - is set to ``True``, and ``_keep_file_open`` is set to ``False``. - In this case, file handle management is delegated to the - ``indexed_gzip`` library. - - 5. If ``keep_file_open`` is ``'auto'``, ``file_like`` is a path to a - ``.gz`` file, and ``indexed_gzip`` is present, both internal flags - are set to ``True``. - - 6. If ``keep_file_open`` is ``'auto'``, and ``file_like`` is not a - path to a ``.gz`` file, or ``indexed_gzip`` is not present, both - internal flags are set to ``False``. - - Note that a value of ``'auto'`` for ``keep_file_open`` will become - deprecated behaviour in version 2.4.0, and support for ``'auto'`` will - be removed in version 3.0.0. + - If ``file_like`` is a file(-like) object, ``False`` is returned. + Otherwise, ``file_like`` is assumed to be a file name. + - If ``keep_file_open`` is ``auto``, and ``indexed_gzip`` is + not available, ``False`` is returned. + - Otherwise, the value of ``keep_file_open`` is returned unchanged. Parameters ---------- @@ -259,10 +206,8 @@ def _should_keep_file_open(self, file_like, keep_file_open): Returns ------- - A tuple containing: - - ``keep_file_open`` flag to control persistence of file handles - - ``persist_opener`` flag to control persistence of ``ImageOpener`` - objects. 
+ The value of ``keep_file_open`` that will be used by this + ``ArrayProxy``, and passed through to ``ImageOpener`` instances. """ if keep_file_open is None: keep_file_open = KEEP_FILE_OPEN_DEFAULT @@ -271,15 +216,12 @@ def _should_keep_file_open(self, file_like, keep_file_open): '\'auto\', True, False}') # file_like is a handle - keep_file_open is irrelevant if hasattr(file_like, 'read') and hasattr(file_like, 'seek'): - return False, False - # if the file is a gzip file, and we have_indexed_gzip, - have_igzip = openers.HAVE_INDEXED_GZIP and file_like.endswith('.gz') - if keep_file_open == 'auto': - return have_igzip, have_igzip - elif keep_file_open: - return True, True - else: - return False, have_igzip + return False + # don't have indexed_gzip - auto -> False + if keep_file_open == 'auto' and not (openers.HAVE_INDEXED_GZIP and + file_like.endswith('.gz')): + return False + return keep_file_open @property @deprecate_with_version('ArrayProxy.header deprecated', '2.2', '3.0') @@ -290,10 +232,6 @@ def header(self): def shape(self): return self._shape - @property - def ndim(self): - return len(self.shape) - @property def dtype(self): return self._dtype @@ -327,14 +265,13 @@ def _get_fileobj(self): A newly created ``ImageOpener`` instance, or an existing one, which provides access to the file. """ - if self._persist_opener: + if self._keep_file_open: if not hasattr(self, '_opener'): self._opener = openers.ImageOpener( self.file_like, keep_open=self._keep_file_open) yield self._opener else: - with openers.ImageOpener( - self.file_like, keep_open=False) as opener: + with openers.ImageOpener(self.file_like) as opener: yield opener def get_unscaled(self): diff --git a/nibabel/casting.py b/nibabel/casting.py index ebdd96d550..0ad0d5a5ca 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -268,9 +268,6 @@ def type_info(np_type): # 80) but in calculations nexp in fact appears to be 11 as for float64 ret.update(dict(width=width)) return ret - if vals == (105, 11, 16): # correctly detected double double - ret.update(dict(nmant=nmant, nexp=nexp, width=width)) - return ret # Oh dear, we don't recognize the type information. Try some known types # and then give up. At this stage we're expecting exotic longdouble or # their complex equivalent. 
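Seen from the user side, the ``keep_file_open`` behaviour reverted above is
exercised at load time. A minimal sketch, assuming a gzipped NIfTI file at the
hypothetical path ``vol.nii.gz``, and assuming that ``nibabel.load`` forwards
the flag to the array proxy (as it does for NIfTI and other array-proxy
formats)::

    import nibabel as nib

    # One file handle, opened lazily and kept for the proxy's lifetime
    img = nib.load('vol.nii.gz', keep_file_open=True)

    # 'auto' keeps a single handle only when indexed_gzip is available to
    # make repeated seeks into the gzip stream cheap; otherwise it behaves
    # like keep_file_open=False, opening a new handle on each access
    img2 = nib.load('vol.nii.gz', keep_file_open='auto')

    data = img.get_data()   # reads through the managed handle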
diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index b9919eb2e1..4b3d5fa267 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -517,28 +517,28 @@ def flush_chardata(self): # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) vertices = self.struct_state[-1] - vertices.extend(np.loadtxt(c, dtype=np.int)) + vertices.extend(np.genfromtxt(c, dtype=np.int)) c.close() elif self.write_to == 'VoxelIndices': # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) parent = self.struct_state[-1] - parent.voxel_indices_ijk.extend(np.loadtxt(c, dtype=np.int).reshape(-1, 3)) + parent.voxel_indices_ijk.extend(np.genfromtxt(c, dtype=np.int).reshape(-1, 3)) c.close() elif self.write_to == 'VertexIndices': # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) index = self.struct_state[-1] - index.extend(np.loadtxt(c, dtype=np.int)) + index.extend(np.genfromtxt(c, dtype=np.int)) c.close() elif self.write_to == 'TransformMatrix': # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) transform = self.struct_state[-1] - transform.matrix = np.loadtxt(c, dtype=np.float) + transform.matrix = np.genfromtxt(c, dtype=np.float) c.close() elif self.write_to == 'Label': diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py deleted file mode 100755 index 4b8b69381c..0000000000 --- a/nibabel/cmdline/diff.py +++ /dev/null @@ -1,365 +0,0 @@ -#!python -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. -# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" -Quick summary of the differences among a set of neuroimaging files -""" -from __future__ import division, print_function, absolute_import - -import re -import sys -from collections import OrderedDict -from optparse import OptionParser, Option - -import numpy as np - -import nibabel as nib -import nibabel.cmdline.utils -import hashlib -import os - - -def get_opt_parser(): - # use module docstring for help output - p = OptionParser( - usage="%s [OPTIONS] [FILE ...]\n\n" % sys.argv[0] + __doc__, - version="%prog " + nib.__version__) - - p.add_options([ - Option("-v", "--verbose", action="count", - dest="verbose", default=0, - help="Make more noise. Could be specified multiple times"), - - Option("-H", "--header-fields", - dest="header_fields", default='all', - help="Header fields (comma separated) to be printed as well" - " (if present)"), - - Option("--ma", "--data-max-abs-diff", - dest="data_max_abs_diff", - type=float, - default=0.0, - help="Maximal absolute difference in data between files" - " to tolerate."), - - Option("--mr", "--data-max-rel-diff", - dest="data_max_rel_diff", - type=float, - default=0.0, - help="Maximal relative difference in data between files to" - " tolerate. 
If --data-max-abs-diff is also specified," - " only the data points with absolute difference greater" - " than that value would be considered for relative" - " difference check."), - Option("--dt", "--datatype", - dest="dtype", - default=np.float64, - help="Enter a numpy datatype such as 'float32'.") - ]) - - return p - - -def are_values_different(*values): - """Generically compare values, return True if different - - Note that comparison is targetting reporting of comparison of the headers - so has following specifics: - - even a difference in data types is considered a difference, i.e. 1 != 1.0 - - nans are considered to be the "same", although generally nan != nan - """ - value0 = values[0] - - # to not recompute over again - if isinstance(value0, np.ndarray): - try: - # np.asarray for elderly numpys, e.g. 1.7.1 where for - # degenerate arrays (shape ()) it would return a pure scalar - value0_nans = np.asanyarray(np.isnan(value0)) - value0_nonnans = np.asanyarray(np.logical_not(value0_nans)) - # if value0_nans.size == 1: - # import pdb; pdb.set_trace() - if not np.any(value0_nans): - value0_nans = None - except TypeError as exc: - str_exc = str(exc) - # Not implemented in numpy 1.7.1 - if "not supported" in str_exc or "ot implemented" in str_exc: - value0_nans = None - else: - raise - - for value in values[1:]: - if type(value0) != type(value): # if types are different, then we consider them different - return True - elif isinstance(value0, np.ndarray): - if value0.dtype != value.dtype or \ - value0.shape != value.shape: - return True - # there might be nans and they need special treatment - if value0_nans is not None: - value_nans = np.isnan(value) - if np.any(value0_nans != value_nans): - return True - if np.any(value0[value0_nonnans] != value[value0_nonnans]): - return True - elif np.any(value0 != value): - return True - elif value0 is np.nan: - if value is not np.nan: - return True - elif value0 != value: - return True - - return False - - -def get_headers_diff(file_headers, names=None): - """Get difference between headers - - Parameters - ---------- - file_headers: list of actual headers (dicts) from files - names: list of header fields to test - - Returns - ------- - dict - str: list for each header field which differs, return list of - values per each file - """ - difference = OrderedDict() - fields = names - - if names is None: - fields = file_headers[0].keys() - - # for each header field - for field in fields: - values = [header.get(field) for header in file_headers] # get corresponding value - - # if these values are different, store them in a dictionary - if are_values_different(*values): - difference[field] = values - - return difference - - -def get_data_hash_diff(files, dtype=np.float64): - """Get difference between md5 values of data - - Parameters - ---------- - files: list of actual files - - Returns - ------- - list - np.array: md5 values of respective files - """ - - md5sums = [ - hashlib.md5(np.ascontiguousarray(nib.load(f).get_fdata(dtype=dtype))).hexdigest() - for f in files - ] - - if len(set(md5sums)) == 1: - return [] - - return md5sums - - -def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): - """Get difference between data - - Parameters - ---------- - files: list of (str or ndarray) - If list of strings is provided -- they must be existing file names - max_abs: float, optional - Maximal absolute difference to tolerate. - max_rel: float, optional - Maximal relative (`abs(diff)/mean(diff)`) difference to tolerate. 
- If `max_abs` is specified, then those data points with lesser than that - absolute difference, are not considered for relative difference testing - dtype: np, optional - Datatype to be used when extracting data from files - - Returns - ------- - diffs: OrderedDict - An ordered dict with a record per each file which has differences - with other files subsequent detected. Each record is a list of - difference records, one per each file pair. - Each difference record is an Ordered Dict with possible keys - 'abs' or 'rel' showing maximal absolute or relative differences - in the file or the record ('CMP': 'incompat') if file shapes - are incompatible. - """ - - # we are doomed to keep them in RAM now - data = [f if isinstance(f, np.ndarray) else nib.load(f).get_fdata(dtype=dtype) - for f in files] - diffs = OrderedDict() - for i, d1 in enumerate(data[:-1]): - # populate empty entries for non-compared - diffs1 = [None] * (i + 1) - - for j, d2 in enumerate(data[i + 1:], i + 1): - - if d1.shape == d2.shape: - abs_diff = np.abs(d1 - d2) - mean_abs = (np.abs(d1) + np.abs(d2)) * 0.5 - candidates = np.logical_or(mean_abs != 0, abs_diff != 0) - - if max_abs: - candidates[abs_diff <= max_abs] = False - - max_abs_diff = np.max(abs_diff) - if np.any(candidates): - rel_diff = abs_diff[candidates] / mean_abs[candidates] - if max_rel: - sub_thr = rel_diff <= max_rel - # Since we operated on sub-selected values already, we need - # to plug them back in - candidates[ - tuple((indexes[sub_thr] for indexes in np.where(candidates))) - ] = False - max_rel_diff = np.max(rel_diff) - else: - max_rel_diff = 0 - - if np.any(candidates): - - diff_rec = OrderedDict() # so that abs goes before relative - - diff_rec['abs'] = max_abs_diff.astype(dtype) - diff_rec['rel'] = max_rel_diff.astype(dtype) - diffs1.append(diff_rec) - else: - diffs1.append(None) - - else: - diffs1.append({'CMP': "incompat"}) - - if any(diffs1): - - diffs['DATA(diff %d:)' % (i + 1)] = diffs1 - - return diffs - - -def display_diff(files, diff): - """Format header differences into a nice string - - Parameters - ---------- - files: list of files that were compared so we can print their names - diff: dict of different valued header fields - - Returns - ------- - str - string-formatted table of differences - """ - output = "" - field_width = "{:<15}" - filename_width = "{:<53}" - value_width = "{:<55}" - - output += "These files are different.\n" - output += field_width.format('Field/File') - - for i, f in enumerate(files, 1): - output += "%d:%s" % (i, filename_width.format(os.path.basename(f))) - - output += "\n" - - for key, value in diff.items(): - output += field_width.format(key) - - for item in value: - if isinstance(item, dict): - item_str = ', '.join('%s: %s' % i for i in item.items()) - elif item is None: - item_str = '-' - else: - item_str = str(item) - # Value might start/end with some invisible spacing characters so we - # would "condition" it on both ends a bit - item_str = re.sub('^[ \t]+', '<', item_str) - item_str = re.sub('[ \t]+$', '>', item_str) - # and also replace some other invisible symbols with a question - # mark - item_str = re.sub('[\x00]', '?', item_str) - output += value_width.format(item_str) - - output += "\n" - - return output - - -def diff(files, header_fields='all', data_max_abs_diff=None, - data_max_rel_diff=None, dtype=np.float64): - assert len(files) >= 2, "Please enter at least two files" - - file_headers = [nib.load(f).header for f in files] - - # signals "all fields" - if header_fields == 'all': - # TODO: header 
fields might vary across file types, - # thus prior sensing would be needed - header_fields = file_headers[0].keys() - else: - header_fields = header_fields.split(',') - - diff = get_headers_diff(file_headers, header_fields) - - data_md5_diffs = get_data_hash_diff(files, dtype) - if data_md5_diffs: - # provide details, possibly triggering the ignore of the difference - # in data - data_diffs = get_data_diff(files, - max_abs=data_max_abs_diff, - max_rel=data_max_rel_diff, - dtype=dtype) - if data_diffs: - diff['DATA(md5)'] = data_md5_diffs - diff.update(data_diffs) - - return diff - - -def main(args=None, out=None): - """Getting the show on the road""" - - out = out or sys.stdout - parser = get_opt_parser() - (opts, files) = parser.parse_args(args) - - nibabel.cmdline.utils.verbose_level = opts.verbose - - if nibabel.cmdline.utils.verbose_level < 3: - # suppress nibabel format-compliance warnings - nib.imageglobals.logger.level = 50 - - files_diff = diff( - files, - header_fields=opts.header_fields, - data_max_abs_diff=opts.data_max_abs_diff, - data_max_rel_diff=opts.data_max_rel_diff, - dtype=opts.dtype - ) - - if files_diff: - out.write(display_diff(files, files_diff)) - raise SystemExit(1) - else: - out.write("These files are identical.\n") - raise SystemExit(0) diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index e701925870..8dcd09e261 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -5,17 +5,13 @@ Test running scripts """ -from nose.tools import assert_equal -from numpy.testing import assert_raises +from numpy.testing import (assert_almost_equal, + assert_array_equal) + +from nose.tools import (assert_true, assert_false, assert_raises, + assert_equal, assert_not_equal) -import nibabel as nib -import numpy as np from nibabel.cmdline.utils import * -from nibabel.cmdline.diff import * -from os.path import (join as pjoin) -from nibabel.testing import data_path -from collections import OrderedDict -from six import StringIO def test_table2string(): @@ -46,159 +42,3 @@ def get_test(self): assert_equal(safe_get(test, "test"), 2) assert_equal(safe_get(test, "failtest"), "-") - - -def test_get_headers_diff(): - fnames = [pjoin(data_path, f) - for f in ('standard.nii.gz', 'example4d.nii.gz')] - actual_difference = get_headers_diff([nib.load(f).header for f in fnames]) - expected_difference = OrderedDict([ - ("regular", [np.asarray("".encode("utf-8")), np.asarray("r".encode("utf-8"))]), - ("dim_info", [np.asarray(0).astype(dtype="uint8"), np.asarray(57).astype(dtype="uint8")]), - ("dim", [np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype="int16"), - np.array([ 4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype="int16")]), - ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), - ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]), - ("pixdim", [np.array([ 1., 1., 3., 2., 1., 1., 1., 1.]).astype(dtype="float32"), np.array( - [ -1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.19999909e+00, 2.00000000e+03, 1.00000000e+00, - 1.00000000e+00, 1.00000000e+00]).astype(dtype="float32")]), - ("slice_end", [np.array(0).astype(dtype="uint8"), np.array(23).astype(dtype="uint8")]), - ("xyzt_units", [np.array(0).astype(dtype="uint8"), np.array(10).astype(dtype="uint8")]), - ("cal_max", [np.array(0.0).astype(dtype="float32"), np.asarray(1162.0).astype(dtype="float32")]), - ("descrip", [np.array("".encode("utf-8")).astype(dtype="S80"), - np.array("FSL3.3\x00 v2.25 NIfTI-1 
Single file format".encode("utf-8")).astype(dtype="S80")]), - ("qform_code", [np.array(0).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("sform_code", [np.array(2).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("quatern_b", [np.array(0.0).astype(dtype="float32"), - np.array(-1.9451068140294884e-26).astype(dtype="float32")]), - ("quatern_c", [np.array(0.0).astype(dtype="float32"), np.array(-0.9967085123062134).astype(dtype="float32")]), - ("quatern_d", [np.array(0.0).astype(dtype="float32"), np.array(-0.0810687392950058).astype(dtype="float32")]), - ("qoffset_x", [np.array(0.0).astype(dtype="float32"), np.array(117.8551025390625).astype(dtype="float32")]), - ("qoffset_y", [np.array(0.0).astype(dtype="float32"), np.array(-35.72294235229492).astype(dtype="float32")]), - ("qoffset_z", [np.array(0.0).astype(dtype="float32"), np.array(-7.248798370361328).astype(dtype="float32")]), - ("srow_x", [np.array([ 1., 0., 0., 0.]).astype(dtype="float32"), - np.array([ -2.00000000e+00, 6.71471565e-19, 9.08102451e-18, - 1.17855103e+02]).astype(dtype="float32")]), - ("srow_y", [np.array([ 0., 3., 0., 0.]).astype(dtype="float32"), - np.array([ -6.71471565e-19, 1.97371149e+00, -3.55528235e-01, -3.57229424e+01]).astype(dtype="float32")]), - ("srow_z", [np.array([ 0., 0., 2., 0.]).astype(dtype="float32"), - np.array([ 8.25548089e-18, 3.23207617e-01, 2.17108178e+00, - -7.24879837e+00]).astype(dtype="float32")])]) - - np.testing.assert_equal(actual_difference, expected_difference) - - -def test_display_diff(): - bogus_names = ["hellokitty.nii.gz", "privettovarish.nii.gz"] - - dict_values = OrderedDict([ - ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), - ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]) - ]) - - expected_output = "These files are different.\n" + "Field/File 1:hellokitty.nii.gz" \ - " " \ - "2:privettovarish.nii.gz \n" \ - "datatype " \ - "2 " \ - "4 \n" \ - "bitpix " \ - "8 16" \ - " " \ - "\n" - - assert_equal(display_diff(bogus_names, dict_values), expected_output) - - -def test_get_data_diff(): - # testing for identical files specifically as md5 may vary by computer - test_names = [pjoin(data_path, f) - for f in ('standard.nii.gz', 'standard.nii.gz')] - assert_equal(get_data_hash_diff(test_names), []) - - # testing the maximum relative and absolute differences' different use cases - test_array = np.arange(16).reshape(4, 4) - test_array_2 = np.arange(1, 17).reshape(4, 4) - test_array_3 = np.arange(2, 18).reshape(4, 4) - test_array_4 = np.arange(100).reshape(10, 10) - test_array_5 = np.arange(64).reshape(8, 8) - - # same shape, 2 files - assert_equal(get_data_diff([test_array, test_array_2]), - OrderedDict([('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)])])])) - - # same shape, 3 files - assert_equal(get_data_diff([test_array, test_array_2, test_array_3]), - OrderedDict([('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)]), - OrderedDict([('abs', 2), ('rel', 2.0)])]), - ('DATA(diff 2:)', [None, None, - OrderedDict([('abs', 1), ('rel', 0.66666666666666663)])])])) - - # same shape, 2 files, modified maximum abs/rel - assert_equal(get_data_diff([test_array, test_array_2], max_abs=2, max_rel=2), OrderedDict()) - - # different shape, 2 files - assert_equal(get_data_diff([test_array_2, test_array_4]), - OrderedDict([('DATA(diff 1:)', [None, {'CMP': 'incompat'}])])) - - # different shape, 3 files - assert_equal(get_data_diff([test_array_4, test_array_5, test_array_2]), - 
OrderedDict([('DATA(diff 1:)', [None, {'CMP': 'incompat'}, {'CMP': 'incompat'}]), - ('DATA(diff 2:)', [None, None, {'CMP': 'incompat'}])])) - - test_return = get_data_diff([test_array, test_array_2], dtype=np.float32) - assert_equal(type(test_return['DATA(diff 1:)'][1]['abs']), np.float32) - assert_equal(type(test_return['DATA(diff 1:)'][1]['rel']), np.float32) - - test_return_2 = get_data_diff([test_array, test_array_2, test_array_3]) - assert_equal(type(test_return_2['DATA(diff 1:)'][1]['abs']), np.float64) - assert_equal(type(test_return_2['DATA(diff 1:)'][1]['rel']), np.float64) - assert_equal(type(test_return_2['DATA(diff 2:)'][2]['abs']), np.float64) - assert_equal(type(test_return_2['DATA(diff 2:)'][2]['rel']), np.float64) - - -def test_main(): - test_names = [pjoin(data_path, f) - for f in ('standard.nii.gz', 'example4d.nii.gz')] - expected_difference = OrderedDict([ - ("regular", [np.asarray("".encode("utf-8")), np.asarray("r".encode("utf-8"))]), - ("dim_info", [np.asarray(0).astype(dtype="uint8"), np.asarray(57).astype(dtype="uint8")]), - ("dim", [np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype="int16"), - np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype="int16")]), - ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), - ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]), - ("pixdim", [np.array([1., 1., 3., 2., 1., 1., 1., 1.]).astype(dtype="float32"), np.array( - [-1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.19999909e+00, 2.00000000e+03, 1.00000000e+00, - 1.00000000e+00, 1.00000000e+00]).astype(dtype="float32")]), - ("slice_end", [np.array(0).astype(dtype="uint8"), np.array(23).astype(dtype="uint8")]), - ("xyzt_units", [np.array(0).astype(dtype="uint8"), np.array(10).astype(dtype="uint8")]), - ("cal_max", [np.array(0.0).astype(dtype="float32"), np.asarray(1162.0).astype(dtype="float32")]), - ("descrip", [np.array("".encode("utf-8")).astype(dtype="S80"), - np.array("FSL3.3\x00 v2.25 NIfTI-1 Single file format".encode("utf-8")).astype(dtype="S80")]), - ("qform_code", [np.array(0).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("sform_code", [np.array(2).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("quatern_b", [np.array(0.0).astype(dtype="float32"), - np.array(-1.9451068140294884e-26).astype(dtype="float32")]), - ("quatern_c", [np.array(0.0).astype(dtype="float32"), np.array(-0.9967085123062134).astype(dtype="float32")]), - ("quatern_d", [np.array(0.0).astype(dtype="float32"), np.array(-0.0810687392950058).astype(dtype="float32")]), - ("qoffset_x", [np.array(0.0).astype(dtype="float32"), np.array(117.8551025390625).astype(dtype="float32")]), - ("qoffset_y", [np.array(0.0).astype(dtype="float32"), np.array(-35.72294235229492).astype(dtype="float32")]), - ("qoffset_z", [np.array(0.0).astype(dtype="float32"), np.array(-7.248798370361328).astype(dtype="float32")]), - ("srow_x", [np.array([1., 0., 0., 0.]).astype(dtype="float32"), - np.array([-2.00000000e+00, 6.71471565e-19, 9.08102451e-18, - 1.17855103e+02]).astype(dtype="float32")]), - ("srow_y", [np.array([0., 3., 0., 0.]).astype(dtype="float32"), - np.array([-6.71471565e-19, 1.97371149e+00, -3.55528235e-01, -3.57229424e+01]).astype( - dtype="float32")]), - ("srow_z", [np.array([0., 0., 2., 0.]).astype(dtype="float32"), - np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e+00, - -7.24879837e+00]).astype(dtype="float32")]), - ('DATA(md5)', ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'])]) - - with 
assert_raises(SystemExit): - np.testing.assert_equal(main(test_names, StringIO()), expected_difference) - - test_names_2 = [pjoin(data_path, f) for f in ('standard.nii.gz', 'standard.nii.gz')] - - with assert_raises(SystemExit): - assert_equal(main(test_names_2, StringIO()), "These files are identical.") diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index 86185a7aef..b88f02dd21 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -28,8 +28,8 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): ---------- dataobj : object Object containg image data. It should be some object that retuns an - array from ``np.asanyarray``. It should have ``shape`` and ``ndim`` - attributes or properties + array from ``np.asanyarray``. It should have a ``shape`` attribute + or property header : None or mapping or header instance, optional metadata for this image format extra : None or mapping, optional @@ -344,7 +344,7 @@ def get_fdata(self, caching='fill', dtype=np.float64): if self._fdata_cache is not None: if self._fdata_cache.dtype.type == dtype.type: return self._fdata_cache - data = np.asanyarray(self._dataobj).astype(dtype, copy=False) + data = np.asanyarray(self._dataobj).astype(dtype) if caching == 'fill': self._fdata_cache = data return data @@ -392,10 +392,6 @@ def uncache(self): def shape(self): return self._dataobj.shape - @property - def ndim(self): - return self._dataobj.ndim - @deprecate_with_version('get_shape method is deprecated.\n' 'Please use the ``img.shape`` property ' 'instead.', diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index c8abee91a0..814e7b85cd 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -52,8 +52,8 @@ class FutureWarningMixin(object): >>> with warnings.catch_warnings(record=True) as warns: ... d = D() - ... warns[0].message.args[0] - "Please, don't use this class" + ... warns[0].message + FutureWarning("Please, don't use this class",) """ warn_message = 'This class will be removed in future versions' diff --git a/nibabel/ecat.py b/nibabel/ecat.py index c2d343f739..3c0957e11d 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -680,10 +680,6 @@ def __init__(self, subheader): def shape(self): return self._shape - @property - def ndim(self): - return len(self.shape) - @property def is_proxy(self): return True diff --git a/nibabel/externals/oset.py b/nibabel/externals/oset.py deleted file mode 100644 index 6bc6ed67a3..0000000000 --- a/nibabel/externals/oset.py +++ /dev/null @@ -1,85 +0,0 @@ -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. -# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""OrderedSet implementation - -Borrowed from https://pypi.org/project/oset/ -Copyright (c) 2009, Raymond Hettinger, and others All rights reserved. 
-License: BSD-3 -""" - -from __future__ import absolute_import - -from collections import MutableSet - -KEY, PREV, NEXT = range(3) - - -class OrderedSet(MutableSet): - - def __init__(self, iterable=None): - self.end = end = [] - end += [None, end, end] # sentinel node for doubly linked list - self.map = {} # key --> [key, prev, next] - if iterable is not None: - self |= iterable - - def __len__(self): - return len(self.map) - - def __contains__(self, key): - return key in self.map - - def __getitem__(self, key): - return list(self)[key] - - def add(self, key): - if key not in self.map: - end = self.end - curr = end[PREV] - curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end] - - def discard(self, key): - if key in self.map: - key, prev, next = self.map.pop(key) - prev[NEXT] = next - next[PREV] = prev - - def __iter__(self): - end = self.end - curr = end[NEXT] - while curr is not end: - yield curr[KEY] - curr = curr[NEXT] - - def __reversed__(self): - end = self.end - curr = end[PREV] - while curr is not end: - yield curr[KEY] - curr = curr[PREV] - - def pop(self, last=True): - if not self: - raise KeyError('set is empty') - key = next(reversed(self)) if last else next(iter(self)) - self.discard(key) - return key - - def __repr__(self): - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, list(self)) - - def __eq__(self, other): - if isinstance(other, OrderedSet): - return len(self) == len(other) and list(self) == list(other) - return set(self) == set(other) - - def __del__(self): - self.clear() # remove circular references \ No newline at end of file diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index edce19c6cd..4212574ef9 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -81,24 +81,24 @@ def _read_volume_info(fobj): return volume_info -def _pack_rgb(rgb): - """Pack an RGB sequence into a single integer. +def _pack_rgba(rgba): + """Pack an RGBA sequence into a single integer. Used by :func:`read_annot` and :func:`write_annot` to generate "annotation values" for a Freesurfer ``.annot`` file. Parameters ---------- - rgb : ndarray, shape (n, 3) - RGB colors + rgba : ndarray, shape (n, 4) + RGBA colors Returns ------- out : ndarray, shape (n, 1) Annotation values for each color. """ - bitshifts = 2 ** np.array([[0], [8], [16]], dtype=rgb.dtype) - return rgb.dot(bitshifts) + bitshifts = 2 ** np.array([[0], [8], [16], [24]], dtype=rgba.dtype) + return rgba.dot(bitshifts) def read_geometry(filepath, read_metadata=False, read_stamp=False): @@ -333,13 +333,9 @@ def read_annot(filepath, orig_ids=False): Annotation file format versions 1 and 2 are supported, corresponding to the "old-style" and "new-style" color table layout. - Note that the output color table ``ctab`` is in RGBT form, where T - (transparency) is 255 - alpha. - See: * https://surfer.nmr.mgh.harvard.edu/fswiki/LabelsClutsAnnotationFiles#Annotation * https://github.com/freesurfer/freesurfer/blob/dev/matlab/read_annotation.m - * https://github.com/freesurfer/freesurfer/blob/8b88b34/utils/colortab.c Parameters ---------- @@ -356,7 +352,7 @@ def read_annot(filepath, orig_ids=False): Annotation id at each vertex. If a vertex does not belong to any label and orig_ids=False, its id will be set to -1. ctab : ndarray, shape (n_labels, 5) - RGBT + label id colortable array. + RGBA + label id colortable array. names : list of str (python 2), list of bytes (python 3) The names of the labels. The length of the list is n_labels. 
""" @@ -388,7 +384,7 @@ def read_annot(filepath, orig_ids=False): ctab, names = _read_annot_ctab_new_format(fobj, -n_entries) # generate annotation values for each LUT entry - ctab[:, [4]] = _pack_rgb(ctab[:, :3]) + ctab[:, [4]] = _pack_rgba(ctab[:, :4]) if not orig_ids: ord = np.argsort(ctab[:, -1]) @@ -401,9 +397,6 @@ def read_annot(filepath, orig_ids=False): def _read_annot_ctab_old_format(fobj, n_entries): """Read in an old-style Freesurfer color table from `fobj`. - Note that the output color table ``ctab`` is in RGBT form, where T - (transparency) is 255 - alpha. - This function is used by :func:`read_annot`. Parameters @@ -419,7 +412,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): ------- ctab : ndarray, shape (n_entries, 5) - RGBT colortable array - the last column contains all zeros. + RGBA colortable array - the last column contains all zeros. names : list of str The names of the labels. The length of the list is n_entries. """ @@ -437,7 +430,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): name_length = np.fromfile(fobj, dt, 1)[0] name = np.fromfile(fobj, "|S%d" % name_length, 1)[0] names.append(name) - # read RGBT for this entry + # read RGBA for this entry ctab[i, :4] = np.fromfile(fobj, dt, 4) return ctab, names @@ -446,9 +439,6 @@ def _read_annot_ctab_old_format(fobj, n_entries): def _read_annot_ctab_new_format(fobj, ctab_version): """Read in a new-style Freesurfer color table from `fobj`. - Note that the output color table ``ctab`` is in RGBT form, where T - (transparency) is 255 - alpha. - This function is used by :func:`read_annot`. Parameters @@ -464,7 +454,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): ------- ctab : ndarray, shape (n_labels, 5) - RGBT colortable array - the last column contains all zeros. + RGBA colortable array - the last column contains all zeros. names : list of str The names of the labels. The length of the list is n_labels. """ @@ -490,7 +480,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): name_length = np.fromfile(fobj, dt, 1)[0] name = np.fromfile(fobj, "|S%d" % name_length, 1)[0] names.append(name) - # RGBT + # RGBA ctab[idx, :4] = np.fromfile(fobj, dt, 4) return ctab, names @@ -499,13 +489,9 @@ def _read_annot_ctab_new_format(fobj, ctab_version): def write_annot(filepath, labels, ctab, names, fill_ctab=True): """Write out a "new-style" Freesurfer annotation file. - Note that the color table ``ctab`` is in RGBT form, where T (transparency) - is 255 - alpha. - See: * https://surfer.nmr.mgh.harvard.edu/fswiki/LabelsClutsAnnotationFiles#Annotation * https://github.com/freesurfer/freesurfer/blob/dev/matlab/write_annotation.m - * https://github.com/freesurfer/freesurfer/blob/8b88b34/utils/colortab.c Parameters ---------- @@ -514,7 +500,7 @@ def write_annot(filepath, labels, ctab, names, fill_ctab=True): labels : ndarray, shape (n_vertices,) Annotation id at each vertex. ctab : ndarray, shape (n_labels, 5) - RGBT + label id colortable array. + RGBA + label id colortable array. names : list of str The names of the labels. The length of the list is n_labels. 
fill_ctab : {True, False} optional @@ -537,8 +523,8 @@ def write_string(s): # Generate annotation values for each ctab entry if fill_ctab: - ctab = np.hstack((ctab[:, :4], _pack_rgb(ctab[:, :3]))) - elif not np.array_equal(ctab[:, [4]], _pack_rgb(ctab[:, :3])): + ctab = np.hstack((ctab[:, :4], _pack_rgba(ctab[:, :4]))) + elif not np.array_equal(ctab[:, [4]], _pack_rgba(ctab[:, :4])): warnings.warn('Annotation values in {} will be incorrect'.format( filepath)) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index bf92bd962c..927d6126c0 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -276,15 +276,10 @@ def set_zooms(self, zooms): ndims = self._ndims() if len(zooms) > ndims: raise HeaderDataError('Expecting %d zoom values' % ndims) - if np.any(zooms[:3] <= 0): - raise HeaderDataError('Spatial (first three) zooms must be ' - 'positive; got {!r}' - ''.format(tuple(zooms[:3]))) + if np.any(zooms <= 0): + raise HeaderDataError('zooms must be positive') hdr['delta'] = zooms[:3] if len(zooms) == 4: - if zooms[3] < 0: - raise HeaderDataError('TR must be non-negative; got {!r}' - ''.format(zooms[3])) hdr['tr'] = zooms[3] def get_data_shape(self): diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 1b6065f351..83da38ed20 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -16,7 +16,7 @@ from .. import (read_geometry, read_morph_data, read_annot, read_label, write_geometry, write_morph_data, write_annot) -from ..io import _pack_rgb +from ..io import _pack_rgba from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data from ...fileslice import strided_scalar @@ -236,7 +236,8 @@ def test_read_write_annot(): # Generate the annotation values for each LUT entry rgbal[:, 4] = (rgbal[:, 0] + rgbal[:, 1] * (2 ** 8) + - rgbal[:, 2] * (2 ** 16)) + rgbal[:, 2] * (2 ** 16) + + rgbal[:, 3] * (2 ** 24)) annot_path = 'c.annot' with InTemporaryDirectory(): write_annot(annot_path, labels, rgbal, names, fill_ctab=False) @@ -286,7 +287,8 @@ def test_write_annot_fill_ctab(): rgbal = np.hstack((rgba, np.zeros((nlabels, 1), dtype=np.int32))) rgbal[:, 4] = (rgbal[:, 0] + rgbal[:, 1] * (2 ** 8) + - rgbal[:, 2] * (2 ** 16)) + rgbal[:, 2] * (2 ** 16) + + rgbal[:, 3] * (2 ** 24)) with clear_and_catch_warnings() as w: write_annot(annot_path, labels, rgbal, names, fill_ctab=False) assert_true( @@ -305,7 +307,7 @@ def gen_old_annot_file(fpath, nverts, labels, rgba, names): dt = '>i' vdata = np.zeros((nverts, 2), dtype=dt) vdata[:, 0] = np.arange(nverts) - vdata[:, [1]] = _pack_rgb(rgba[labels, :3]) + vdata[:, [1]] = _pack_rgba(rgba[labels, :]) fbytes = b'' # number of vertices fbytes += struct.pack(dt, nverts) diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 47e54080c3..776c461e18 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -159,8 +159,6 @@ def test_set_zooms(): (1, 1, 1, 1, 5)): with assert_raises(HeaderDataError): h.set_zooms(zooms) - # smoke test for tr=0 - h.set_zooms((1, 1, 1, 0)) def bad_dtype_mgh(): diff --git a/nibabel/info.py b/nibabel/info.py index fd9c955246..83982dd8f2 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -17,7 +17,7 @@ # We usually use `dev` as `_version_extra` to label this as a development # (pre-release) version. 
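For orientation, these fields combine into the package version string; a minimal sketch, assuming the usual ``major.minor.micro`` plus suffix layout::

    _version_major = 2
    _version_minor = 3
    _version_micro = 0
    _version_extra = 'dev'   # '' for a release

    # -> '2.3.0dev' for a development tree, '2.3.0' for a release
    __version__ = "%s.%s.%s%s" % (_version_major, _version_minor,
                                  _version_micro, _version_extra)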
_version_major = 2 -_version_minor = 4 +_version_minor = 3 _version_micro = 0 _version_extra = 'dev' # _version_extra = '' diff --git a/nibabel/minc1.py b/nibabel/minc1.py index 57042f32f0..5eb077ada0 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -252,10 +252,6 @@ def __init__(self, minc_file): def shape(self): return self._shape - @property - def ndim(self): - return len(self.shape) - @property def is_proxy(self): return True diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 056d0dbee9..24c1808df5 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -1573,23 +1573,14 @@ def set_slice_times(self, slice_times): so_recoder = self._field_recoders['slice_code'] labels = so_recoder.value_set('label') labels.remove('unknown') - - matching_labels = [] for label in labels: if np.all(st_order == self._slice_time_order( label, n_timed)): - matching_labels.append(label) - - if not matching_labels: + break + else: raise HeaderDataError('slice ordering of %s fits ' 'with no known scheme' % st_order) - if len(matching_labels) > 1: - warnings.warn( - 'Multiple slice orders satisfy: %s. Choosing the first one' - % ', '.join(matching_labels) - ) - label = matching_labels[0] # Set values into header hdr['slice_start'] = slice_start hdr['slice_end'] = slice_end diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 87e1ac81e6..5fd460b4e1 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -622,10 +622,6 @@ def __init__(self, file_like, header, mmap=True, scaling='dv'): def shape(self): return self._shape - @property - def ndim(self): - return len(self.shape) - @property def dtype(self): return self._dtype diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index 187d5940df..fd6cfd5a44 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -349,135 +349,52 @@ def __init__(self, *args, **kwargs): def _count_ImageOpeners(proxy, data, voxels): CountingImageOpener.num_openers = 0 - # expected data is defined in the test_keep_file_open_* tests for i in range(voxels.shape[0]): x, y, z = [int(c) for c in voxels[i, :]] assert proxy[x, y, z] == x * 100 + y * 10 + z return CountingImageOpener.num_openers -@contextlib.contextmanager -def patch_keep_file_open_default(value): - # Patch arrayproxy.KEEP_FILE_OPEN_DEFAULT with the given value - with mock.patch('nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT', value): - yield - - def test_keep_file_open_true_false_invalid(): # Test the behaviour of the keep_file_open __init__ flag, when it is set to - # True or False. 
Expected behaviour is as follows: - # keep_open | igzip present | persist ImageOpener | igzip.drop_handles - # | and is gzip file | | - # ----------|------------------|---------------------|------------------- - # False | False | False | n/a - # False | True | True | True - # True | False | True | n/a - # True | True | True | False - # 'auto' | False | False | n/a - # 'auto' | True | True | False - # - # Each test tuple contains: - # - file type - gzipped ('gz') or not ('bin'), or an open file handle - # ('open') - # - keep_file_open value passed to ArrayProxy - # - whether or not indexed_gzip is present - # - expected value for internal ArrayProxy._persist_opener flag - # - expected value for internal ArrayProxy._keep_file_open flag - tests = [ - # open file handle - kfo and have_igzip are both irrelevant - ('open', False, False, False, False), - ('open', False, True, False, False), - ('open', True, False, False, False), - ('open', True, True, False, False), - ('open', 'auto', False, False, False), - ('open', 'auto', True, False, False), - # non-gzip file - have_igzip is irrelevant, decision should be made - # solely from kfo flag - ('bin', False, False, False, False), - ('bin', False, True, False, False), - ('bin', True, False, True, True), - ('bin', True, True, True, True), - ('bin', 'auto', False, False, False), - ('bin', 'auto', True, False, False), - # gzip file. If igzip is present, we persist the ImageOpener. If kfo - # is 'auto': - # - if igzip is present, kfo -> True - # - otherwise, kfo -> False - ('gz', False, False, False, False), - ('gz', False, True, True, False), - ('gz', True, False, True, True), - ('gz', True, True, True, True), - ('gz', 'auto', False, False, False), - ('gz', 'auto', True, True, True)] - + # True or False. + CountingImageOpener.num_openers = 0 + fname = 'testdata' dtype = np.float32 data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) voxels = np.random.randint(0, 10, (10, 3)) - - for test in tests: - filetype, kfo, have_igzip, exp_persist, exp_kfo = test - with InTemporaryDirectory(), \ - mock.patch('nibabel.openers.ImageOpener', CountingImageOpener), \ - patch_indexed_gzip(have_igzip): - fname = 'testdata.{}'.format(filetype) - # create the test data file - if filetype == 'gz': - with gzip.open(fname, 'wb') as fobj: - fobj.write(data.tostring(order='F')) - else: - with open(fname, 'wb') as fobj: - fobj.write(data.tostring(order='F')) - # pass in a file name or open file handle. If the latter, we open - # two file handles, because we're going to create two proxies - # below. 
- if filetype == 'open': - fobj1 = open(fname, 'rb') - fobj2 = open(fname, 'rb') - else: - fobj1 = fname - fobj2 = fname - try: - proxy = ArrayProxy(fobj1, ((10, 10, 10), dtype), - keep_file_open=kfo) - # We also test that we get the same behaviour when the - # KEEP_FILE_OPEN_DEFAULT flag is changed - with patch_keep_file_open_default(kfo): - proxy_def = ArrayProxy(fobj2, ((10, 10, 10), dtype)) - # check internal flags - assert proxy._persist_opener == exp_persist - assert proxy._keep_file_open == exp_kfo - assert proxy_def._persist_opener == exp_persist - assert proxy_def._keep_file_open == exp_kfo - # check persist_opener behaviour - whether one imageopener is - # created for the lifetime of the ArrayProxy, or one is - # created on each access - if exp_persist: - assert _count_ImageOpeners(proxy, data, voxels) == 1 - assert _count_ImageOpeners(proxy_def, data, voxels) == 1 - else: - assert _count_ImageOpeners(proxy, data, voxels) == 10 - assert _count_ImageOpeners(proxy_def, data, voxels) == 10 - # if indexed_gzip is active, check that the file object was - # created correctly - the _opener.fobj will be a - # MockIndexedGzipFile, defined in test_openers.py - if filetype == 'gz' and have_igzip: - assert proxy._opener.fobj._drop_handles == (not exp_kfo) - # if we were using an open file handle, check that the proxy - # didn't close it - if filetype == 'open': - assert not fobj1.closed - assert not fobj2.closed - finally: - del proxy - del proxy_def - if filetype == 'open': - fobj1.close() - fobj2.close() - # Test invalid values of keep_file_open with InTemporaryDirectory(): - fname = 'testdata' with open(fname, 'wb') as fobj: fobj.write(data.tostring(order='F')) + # Test that ArrayProxy(keep_file_open=True) only creates one file + # handle, and that ArrayProxy(keep_file_open=False) creates a file + # handle on every data access. + with mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): + proxy_no_kfp = ArrayProxy(fname, ((10, 10, 10), dtype), + keep_file_open=False) + assert not proxy_no_kfp._keep_file_open + assert _count_ImageOpeners(proxy_no_kfp, data, voxels) == 10 + proxy_kfp = ArrayProxy(fname, ((10, 10, 10), dtype), + keep_file_open=True) + assert proxy_kfp._keep_file_open + assert _count_ImageOpeners(proxy_kfp, data, voxels) == 1 + del proxy_kfp + del proxy_no_kfp + # Test that the keep_file_open flag has no effect if an open file + # handle is passed in + with open(fname, 'rb') as fobj: + for kfo in (True, False, 'auto'): + proxy = ArrayProxy(fobj, ((10, 10, 10), dtype), + keep_file_open=kfo) + assert proxy._keep_file_open is False + for i in range(voxels.shape[0]): + x, y, z = [int(c) for c in voxels[i, :]] + assert proxy[x, y, z] == x * 100 + y * 10 + z + assert not fobj.closed + del proxy + assert not fobj.closed + assert fobj.closed + # Test invalid values of keep_file_open with assert_raises(ValueError): ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=55) with assert_raises(ValueError): @@ -486,6 +403,109 @@ def test_keep_file_open_true_false_invalid(): ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open='cauto') +def test_keep_file_open_auto(): + # Test the behaviour of the keep_file_open __init__ flag, when it is set to + # 'auto'. + # if indexed_gzip is present, the ArrayProxy should persist its ImageOpener. + # Otherwise the ArrayProxy should drop openers. 
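The behaviour this test and the True/False test above pin down reduces to a small predicate; a hedged sketch (hypothetical helper, simplified to the cases the tests cover, not the library's API)::

    def opener_is_persisted(keep_file_open, is_gzip, have_indexed_gzip):
        # One ImageOpener reused for the proxy's lifetime, or a fresh
        # one created on every data access?
        if keep_file_open == 'auto':
            return is_gzip and have_indexed_gzip
        return keep_file_open is True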
+ dtype = np.float32 + data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) + voxels = np.random.randint(0, 10, (10, 3)) + with InTemporaryDirectory(): + fname = 'testdata.gz' + with gzip.open(fname, 'wb') as fobj: + fobj.write(data.tostring(order='F')) + # If have_indexed_gzip, then the arrayproxy should create one + # ImageOpener + with patch_indexed_gzip(True), \ + mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): + CountingImageOpener.num_openers = 0 + proxy = ArrayProxy(fname, ((10, 10, 10), dtype), + keep_file_open='auto') + assert proxy._keep_file_open == 'auto' + assert _count_ImageOpeners(proxy, data, voxels) == 1 + # If no have_indexed_gzip, then keep_file_open should be False + with patch_indexed_gzip(False), \ + mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): + CountingImageOpener.num_openers = 0 + proxy = ArrayProxy(fname, ((10, 10, 10), dtype), + keep_file_open='auto') + assert proxy._keep_file_open is False + assert _count_ImageOpeners(proxy, data, voxels) == 10 + # If not a gzip file, keep_file_open should be False + fname = 'testdata' + with open(fname, 'wb') as fobj: + fobj.write(data.tostring(order='F')) + # regardless of whether indexed_gzip is present or not + with patch_indexed_gzip(True), \ + mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): + CountingImageOpener.num_openers = 0 + proxy = ArrayProxy(fname, ((10, 10, 10), dtype), + keep_file_open='auto') + assert proxy._keep_file_open is False + assert _count_ImageOpeners(proxy, data, voxels) == 10 + with patch_indexed_gzip(False), \ + mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): + CountingImageOpener.num_openers = 0 + proxy = ArrayProxy(fname, ((10, 10, 10), dtype), + keep_file_open='auto') + assert proxy._keep_file_open is False + assert _count_ImageOpeners(proxy, data, voxels) == 10 + + +@contextlib.contextmanager +def patch_keep_file_open_default(value): + # Patch arrayproxy.KEEP_FILE_OPEN_DEFAULT with the given value + with mock.patch('nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT', value): + yield + + +def test_keep_file_open_default(): + # Test the behaviour of the keep_file_open __init__ flag, when the + # arrayproxy.KEEP_FILE_OPEN_DEFAULT value is changed + dtype = np.float32 + data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) + with InTemporaryDirectory(): + fname = 'testdata.gz' + with gzip.open(fname, 'wb') as fobj: + fobj.write(data.tostring(order='F')) + # If KEEP_FILE_OPEN_DEFAULT is False, ArrayProxy instances should + # interpret keep_file_open as False + with patch_keep_file_open_default(False): + with patch_indexed_gzip(False): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert proxy._keep_file_open is False + with patch_indexed_gzip(True): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert proxy._keep_file_open is False + # If KEEP_FILE_OPEN_DEFAULT is True, ArrayProxy instances should + # interpret keep_file_open as True + with patch_keep_file_open_default(True): + with patch_indexed_gzip(False): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert proxy._keep_file_open is True + with patch_indexed_gzip(True): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert proxy._keep_file_open is True + # If KEEP_FILE_OPEN_DEFAULT is auto, ArrayProxy instances should + # interpret it as auto if indexed_gzip is present, False otherwise. 
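User code can flip the same module-level default that these tests patch; a minimal sketch, assuming ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` is the plain module attribute referenced above::

    import nibabel.arrayproxy as ap

    ap.KEEP_FILE_OPEN_DEFAULT = True   # new proxies keep their handles open
    try:
        pass  # load images / create ArrayProxy instances here
    finally:
        ap.KEEP_FILE_OPEN_DEFAULT = False  # assumed shipped default; restore it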
+ with patch_keep_file_open_default('auto'): + with patch_indexed_gzip(False): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert proxy._keep_file_open is False + with patch_indexed_gzip(True): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert proxy._keep_file_open == 'auto' + # KEEP_FILE_OPEN_DEFAULT=any other value should cuse an error to be + # raised + with patch_keep_file_open_default('badvalue'): + assert_raises(ValueError, ArrayProxy, fname, ((10, 10, 10), + dtype)) + with patch_keep_file_open_default(None): + assert_raises(ValueError, ArrayProxy, fname, ((10, 10, 10), + dtype)) + + def test_pickle_lock(): # Test that ArrayProxy can be pickled, and that thread lock is created diff --git a/nibabel/tests/test_diff.py b/nibabel/tests/test_diff.py deleted file mode 100644 index 4f99ca145f..0000000000 --- a/nibabel/tests/test_diff.py +++ /dev/null @@ -1,74 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" Test diff -""" -from __future__ import division, print_function, absolute_import - -from os.path import (dirname, join as pjoin, abspath) -import numpy as np - - -DATA_PATH = abspath(pjoin(dirname(__file__), 'data')) - -from nibabel.cmdline.diff import are_values_different - - -def test_diff_values_int(): - large = 10**30 - assert not are_values_different(0, 0) - assert not are_values_different(1, 1) - assert not are_values_different(large, large) - assert are_values_different(0, 1) - assert are_values_different(1, 2) - assert are_values_different(1, large) - - -def test_diff_values_float(): - assert not are_values_different(0., 0.) - assert not are_values_different(0., 0., 0.) # can take more - assert not are_values_different(1.1, 1.1) - assert are_values_different(0., 1.1) - assert are_values_different(0., 0, 1.1) - assert are_values_different(1., 2.) 
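Taken together, the removed tests in this file specify the comparison rule: values differ unless they agree exactly in type, dtype and shape, with NaNs counting as equal to NaNs. A simplified pairwise sketch of that rule (the shipped function is variadic; this is not it)::

    import numpy as np

    def values_differ(a, b):
        # NaN matches NaN; no broadcasting; type, dtype and shape must
        # agree exactly.
        if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
            if a.shape != b.shape or a.dtype != b.dtype:
                return True
            if a.dtype.kind in 'fc':
                nan_match = np.isnan(a) & np.isnan(b)
                return not np.all((a == b) | nan_match)
            return not np.array_equal(a, b)
        if type(a) is not type(b):
            return True
        if isinstance(a, float) and np.isnan(a) and np.isnan(b):
            return False
        return a != b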
- - -def test_diff_values_mixed(): - assert are_values_different(1.0, 1) - assert are_values_different(1.0, "1") - assert are_values_different(1, "1") - assert are_values_different(1, None) - assert are_values_different(np.ndarray([0]), 'hey') - assert not are_values_different(None, None) - - -def test_diff_values_array(): - from numpy import nan, array, inf - a_int = array([1, 2]) - a_float = a_int.astype(float) - - assert are_values_different(a_int, a_float) - assert are_values_different(a_int, a_int, a_float) - assert are_values_different(np.arange(3), np.arange(1, 4)) - assert are_values_different(np.arange(3), np.arange(4)) - assert are_values_different(np.arange(4), np.arange(4).reshape((2, 2))) - # no broadcasting should kick in - shape difference - assert are_values_different(array([1]), array([1, 1])) - assert not are_values_different(a_int, a_int) - assert not are_values_different(a_float, a_float) - - # nans - we consider them "the same" for the purpose of these comparisons - assert not are_values_different(nan, nan) - assert not are_values_different(nan, nan, nan) - assert are_values_different(nan, nan, 1) - assert are_values_different(1, nan, nan) - assert not are_values_different(array([nan, nan]), array([nan, nan])) - assert not are_values_different(array([nan, nan]), array([nan, nan]), array([nan, nan])) - assert not are_values_different(array([nan, 1]), array([nan, 1])) - assert are_values_different(array([nan, nan]), array([nan, 1])) - assert are_values_different(array([0, nan]), array([nan, 0])) - assert are_values_different(array([1, 2, 3, nan]), array([nan, 3, 5, 4])) - assert are_values_different(nan, 1.0) - assert are_values_different(array([1, 2, 3, nan]), array([3, 4, 5, nan])) - # and some inf should not be a problem - assert not are_values_different(array([0, inf]), array([0, inf])) - assert are_values_different(array([0, inf]), array([inf, 0])) diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 96376270b1..3022265df4 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -30,16 +30,6 @@ LD_INFO = type_info(np.longdouble) -def dtt2dict(dtt): - """ Create info dictionary from numpy type - """ - info = np.finfo(dtt) - return dict(min=info.min, max=info.max, - nexp=info.nexp, nmant=info.nmant, - minexp=info.minexp, maxexp=info.maxexp, - width=np.dtype(dtt).itemsize) - - def test_type_info(): # Test routine to get min, max, nmant, nexp for dtt in np.sctypes['int'] + np.sctypes['uint']: @@ -52,35 +42,42 @@ def test_type_info(): assert_equal(infod['min'].dtype.type, dtt) assert_equal(infod['max'].dtype.type, dtt) for dtt in IEEE_floats + [np.complex64, np.complex64]: + info = np.finfo(dtt) infod = type_info(dtt) - assert_equal(dtt2dict(dtt), infod) + assert_equal(dict(min=info.min, max=info.max, + nexp=info.nexp, nmant=info.nmant, + minexp=info.minexp, maxexp=info.maxexp, + width=np.dtype(dtt).itemsize), + infod) assert_equal(infod['min'].dtype.type, dtt) assert_equal(infod['max'].dtype.type, dtt) # What is longdouble? 
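Which of the branches below applies on a given platform can be probed directly with standard numpy calls::

    import numpy as np

    info = np.finfo(np.longdouble)
    print(info.nmant, info.nexp, np.dtype(np.longdouble).itemsize)
    # (52, 11, 8)        -> longdouble is plain float64
    # (63, 15, 12 or 16) -> 80-bit x87 extended, padded
    # (112, 15, 16)      -> true IEEE binary128
    # (106, 11, 16)      -> PPC head / tail double pair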
- ld_dict = dtt2dict(np.longdouble) - dbl_dict = dtt2dict(np.float64) + info = np.finfo(np.longdouble) + dbl_info = np.finfo(np.float64) infod = type_info(np.longdouble) - vals = tuple(ld_dict[k] for k in ('nmant', 'nexp', 'width')) + width = np.dtype(np.longdouble).itemsize + vals = (info.nmant, info.nexp, width) # Information for PPC head / tail doubles from: # https://developer.apple.com/library/mac/#documentation/Darwin/Reference/Manpages/man3/float.3.html if vals in ((52, 11, 8), # longdouble is same as double (63, 15, 12), (63, 15, 16), # intel 80 bit (112, 15, 16), # real float128 (106, 11, 16)): # PPC head, tail doubles, expected values - pass - elif vals == (105, 11, 16): # bust info for PPC head / tail longdoubles - # min and max broken, copy from infod - ld_dict.update({k: infod[k] for k in ('min', 'max')}) - elif vals == (1, 1, 16): # another bust info for PPC head / tail longdoubles - ld_dict = dbl_dict.copy() - ld_dict.update(dict(nmant=106, width=16)) + assert_equal(dict(min=info.min, max=info.max, + minexp=info.minexp, maxexp=info.maxexp, + nexp=info.nexp, nmant=info.nmant, width=width), + infod) + elif vals == (1, 1, 16): # bust info for PPC head / tail longdoubles + assert_equal(dict(min=dbl_info.min, max=dbl_info.max, + minexp=-1022, maxexp=1024, + nexp=11, nmant=106, width=16), + infod) elif vals == (52, 15, 12): - width = ld_dict['width'] - ld_dict = dbl_dict.copy() - ld_dict['width'] = width + exp_res = type_info(np.float64) + exp_res['width'] = width + assert_equal(exp_res, infod) else: - raise ValueError("Unexpected float type {} to test".format(np.longdouble)) - assert_equal(ld_dict, infod) + raise ValueError("Unexpected float type to test") def test_nmant(): @@ -106,7 +103,7 @@ def test_check_nmant_nexp(): # Check against type_info for t in ok_floats(): ti = type_info(t) - if ti['nmant'] not in (105, 106): # This check does not work for PPC double pair + if ti['nmant'] != 106: # This check does not work for PPC double pair assert_true(_check_nmant(t, ti['nmant'])) # Test fails for longdouble after blacklisting of OSX powl as of numpy # 1.12 - see https://github.com/numpy/numpy/issues/8307 diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index ba51878715..01b9ff4fdb 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -27,7 +27,6 @@ import warnings from functools import partial -from itertools import product from six import string_types import numpy as np @@ -196,23 +195,156 @@ class DataInterfaceMixin(GetSetDtypeMixin): Use this mixin if your image has a ``dataobj`` property that contains an array or an array-like thing. """ - meth_names = ('get_fdata', 'get_data') - def validate_data_interface(self, imaker, params): # Check get data returns array, and caches img = imaker() assert_equal(img.shape, img.dataobj.shape) - assert_equal(img.ndim, len(img.shape)) assert_data_similar(img.dataobj, params) - for meth_name in self.meth_names: + meth_names = ('get_fdata', 'get_data') + for meth_name in meth_names: if params['is_proxy']: - self._check_proxy_interface(imaker, meth_name) - else: # Array image - self._check_array_interface(imaker, meth_name) + # Parameters assert this is an array proxy + img = imaker() + # Does is_proxy agree? 
+ assert_true(is_proxy(img.dataobj)) + # Confirm it is not a numpy array + assert_false(isinstance(img.dataobj, np.ndarray)) + # Confirm it can be converted to a numpy array with asarray + proxy_data = np.asarray(img.dataobj) + proxy_copy = proxy_data.copy() + # Not yet cached, proxy image: in_memory is False + assert_false(img.in_memory) + # Load with caching='unchanged' + method = getattr(img, meth_name) + data = method(caching='unchanged') + # Still not cached + assert_false(img.in_memory) + # Default load, does caching + data = method() + # Data now cached. in_memory is True if either of the get_data + # or get_fdata caches are not-None + assert_true(img.in_memory) + # We previously got proxy_data from disk, but data, which we + # have just fetched, is a fresh copy. + assert_false(proxy_data is data) + # asarray on dataobj, applied above, returns same numerical + # values. This might not be true get_fdata operating on huge + # integers, but lets assume that's not true here. + assert_array_equal(proxy_data, data) + # Now caching='unchanged' does nothing, returns cached version + data_again = method(caching='unchanged') + assert_true(data is data_again) + # caching='fill' does nothing because the cache is already full + data_yet_again = method(caching='fill') + assert_true(data is data_yet_again) + # changing array data does not change proxy data, or reloaded + # data + data[:] = 42 + assert_array_equal(proxy_data, proxy_copy) + assert_array_equal(np.asarray(img.dataobj), proxy_copy) + # It does change the result of get_data + assert_array_equal(method(), 42) + # until we uncache + img.uncache() + # Which unsets in_memory + assert_false(img.in_memory) + assert_array_equal(method(), proxy_copy) + # Check caching='fill' does cache data + img = imaker() + method = getattr(img, meth_name) + assert_false(img.in_memory) + data = method(caching='fill') + assert_true(img.in_memory) + data_again = method() + assert_true(data is data_again) + # Check the interaction of caching with get_data, get_fdata. + # Caching for `get_data` should have no effect on caching for + # get_fdata, and vice versa. + # Modify the cached data + data[:] = 43 + # Load using the other data fetch method + other_name = set(meth_names).difference({meth_name}).pop() + other_method = getattr(img, other_name) + other_data = other_method() + # We get the original data, not the modified cache + assert_array_equal(proxy_data, other_data) + assert_false(np.all(data == other_data)) + # We can modify the other cache, without affecting the first + other_data[:] = 44 + assert_array_equal(other_method(), 44) + assert_false(np.all(method() == other_method())) + # Check that caching refreshes for new floating point type. 
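In user-facing terms, the dtype/caching contract exercised below; a short sketch, assuming ``img`` is a proxied image::

    data = img.get_fdata()                                # float64, cached
    d32 = img.get_fdata(caching='unchanged', dtype='f4')  # fresh read; cache untouched
    d32 = img.get_fdata(caching='fill', dtype='f4')       # float32 replaces the cache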
+ if meth_name == 'get_fdata': + img.uncache() + fdata = img.get_fdata() + assert_equal(fdata.dtype, np.float64) + fdata[:] = 42 + fdata_back = img.get_fdata() + assert_array_equal(fdata_back, 42) + assert_equal(fdata_back.dtype, np.float64) + # New data dtype, no caching, doesn't use or alter cache + fdata_new_dt = img.get_fdata(caching='unchanged', dtype='f4') + # We get back the original read, not the modified cache + assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) + assert_equal(fdata_new_dt.dtype, np.float32) + # The original cache stays in place, for default float64 + assert_array_equal(img.get_fdata(), 42) + # And for not-default float32, because we haven't cached + fdata_new_dt[:] = 43 + fdata_new_dt = img.get_fdata(caching='unchanged', dtype='f4') + assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) + # Until we reset with caching='fill', at which point we + # drop the original float64 cache, and have a float32 cache + fdata_new_dt = img.get_fdata(caching='fill', dtype='f4') + assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) + # We're using the cache, for dtype='f4' reads + fdata_new_dt[:] = 43 + assert_array_equal(img.get_fdata(dtype='f4'), 43) + # We've lost the cache for float64 reads (no longer 42) + assert_array_equal(img.get_fdata(), proxy_data) + else: # not proxy + for caching in (None, 'fill', 'unchanged'): + img = imaker() + method = getattr(img, meth_name) + get_data_func = (method if caching is None else + partial(method, caching=caching)) + assert_true(isinstance(img.dataobj, np.ndarray)) + assert_true(img.in_memory) + data = get_data_func() + # Returned data same object as underlying dataobj if using + # old ``get_data`` method, or using newer ``get_fdata`` + # method, where original array was float64. + dataobj_is_data = (img.dataobj.dtype == np.float64 + or method == img.get_data) + # Set something to the output array. + data[:] = 42 + get_result_changed = np.all(get_data_func() == 42) + assert_equal(get_result_changed, + dataobj_is_data or caching != 'unchanged') + if dataobj_is_data: + assert_true(data is img.dataobj) + # Changing array data changes + # data + assert_array_equal(np.asarray(img.dataobj), 42) + # Uncache has no effect + img.uncache() + assert_array_equal(get_data_func(), 42) + else: + assert_false(data is img.dataobj) + assert_false(np.all(np.asarray(img.dataobj) == 42)) + # Uncache does have an effect + img.uncache() + assert_false(np.all(get_data_func() == 42)) + # in_memory is always true for array images, regardless of + # cache state. + img.uncache() + assert_true(img.in_memory) + # Values to get_(f)data caching parameter must be 'fill' or + # 'unchanged' + assert_raises(ValueError, img.get_data, caching='something') + assert_raises(ValueError, img.get_fdata, caching='something') # Data shape is same as image shape - assert_equal(img.shape, getattr(img, meth_name)().shape) - # Data ndim is same as image ndim - assert_equal(img.ndim, getattr(img, meth_name)().ndim) + assert_equal(img.shape, method().shape) # Values to get_data caching parameter must be 'fill' or # 'unchanged' assert_raises(ValueError, img.get_data, caching='something') @@ -222,159 +354,6 @@ def validate_data_interface(self, imaker, params): # So is in_memory assert_raises(AttributeError, setattr, img, 'in_memory', False) - def _check_proxy_interface(self, imaker, meth_name): - # Parameters assert this is an array proxy - img = imaker() - # Does is_proxy agree? 
- assert_true(is_proxy(img.dataobj)) - # Confirm it is not a numpy array - assert_false(isinstance(img.dataobj, np.ndarray)) - # Confirm it can be converted to a numpy array with asarray - proxy_data = np.asarray(img.dataobj) - proxy_copy = proxy_data.copy() - # Not yet cached, proxy image: in_memory is False - assert_false(img.in_memory) - # Load with caching='unchanged' - method = getattr(img, meth_name) - data = method(caching='unchanged') - # Still not cached - assert_false(img.in_memory) - # Default load, does caching - data = method() - # Data now cached. in_memory is True if either of the get_data - # or get_fdata caches are not-None - assert_true(img.in_memory) - # We previously got proxy_data from disk, but data, which we - # have just fetched, is a fresh copy. - assert_false(proxy_data is data) - # asarray on dataobj, applied above, returns same numerical - # values. This might not be true get_fdata operating on huge - # integers, but lets assume that's not true here. - assert_array_equal(proxy_data, data) - # Now caching='unchanged' does nothing, returns cached version - data_again = method(caching='unchanged') - assert_true(data is data_again) - # caching='fill' does nothing because the cache is already full - data_yet_again = method(caching='fill') - assert_true(data is data_yet_again) - # changing array data does not change proxy data, or reloaded - # data - data[:] = 42 - assert_array_equal(proxy_data, proxy_copy) - assert_array_equal(np.asarray(img.dataobj), proxy_copy) - # It does change the result of get_data - assert_array_equal(method(), 42) - # until we uncache - img.uncache() - # Which unsets in_memory - assert_false(img.in_memory) - assert_array_equal(method(), proxy_copy) - # Check caching='fill' does cache data - img = imaker() - method = getattr(img, meth_name) - assert_false(img.in_memory) - data = method(caching='fill') - assert_true(img.in_memory) - data_again = method() - assert_true(data is data_again) - # Check the interaction of caching with get_data, get_fdata. - # Caching for `get_data` should have no effect on caching for - # get_fdata, and vice versa. - # Modify the cached data - data[:] = 43 - # Load using the other data fetch method - other_name = set(self.meth_names).difference({meth_name}).pop() - other_method = getattr(img, other_name) - other_data = other_method() - # We get the original data, not the modified cache - assert_array_equal(proxy_data, other_data) - assert_false(np.all(data == other_data)) - # We can modify the other cache, without affecting the first - other_data[:] = 44 - assert_array_equal(other_method(), 44) - assert_false(np.all(method() == other_method())) - if meth_name != 'get_fdata': - return - # Check that caching refreshes for new floating point type. 
- img.uncache() - fdata = img.get_fdata() - assert_equal(fdata.dtype, np.float64) - fdata[:] = 42 - fdata_back = img.get_fdata() - assert_array_equal(fdata_back, 42) - assert_equal(fdata_back.dtype, np.float64) - # New data dtype, no caching, doesn't use or alter cache - fdata_new_dt = img.get_fdata(caching='unchanged', dtype='f4') - # We get back the original read, not the modified cache - assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) - assert_equal(fdata_new_dt.dtype, np.float32) - # The original cache stays in place, for default float64 - assert_array_equal(img.get_fdata(), 42) - # And for not-default float32, because we haven't cached - fdata_new_dt[:] = 43 - fdata_new_dt = img.get_fdata(caching='unchanged', dtype='f4') - assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) - # Until we reset with caching='fill', at which point we - # drop the original float64 cache, and have a float32 cache - fdata_new_dt = img.get_fdata(caching='fill', dtype='f4') - assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) - # We're using the cache, for dtype='f4' reads - fdata_new_dt[:] = 43 - assert_array_equal(img.get_fdata(dtype='f4'), 43) - # We've lost the cache for float64 reads (no longer 42) - assert_array_equal(img.get_fdata(), proxy_data) - - def _check_array_interface(self, imaker, meth_name): - for caching in (None, 'fill', 'unchanged'): - self._check_array_caching(imaker, meth_name, caching) - - def _check_array_caching(self, imaker, meth_name, caching): - img = imaker() - method = getattr(img, meth_name) - get_data_func = (method if caching is None else - partial(method, caching=caching)) - assert_true(isinstance(img.dataobj, np.ndarray)) - assert_true(img.in_memory) - data = get_data_func() - # Returned data same object as underlying dataobj if using - # old ``get_data`` method, or using newer ``get_fdata`` - # method, where original array was float64. - arr_dtype = img.dataobj.dtype - dataobj_is_data = arr_dtype == np.float64 or method == img.get_data - # Set something to the output array. - data[:] = 42 - get_result_changed = np.all(get_data_func() == 42) - assert_equal(get_result_changed, - dataobj_is_data or caching != 'unchanged') - if dataobj_is_data: - assert_true(data is img.dataobj) - # Changing array data changes - # data - assert_array_equal(np.asarray(img.dataobj), 42) - # Uncache has no effect - img.uncache() - assert_array_equal(get_data_func(), 42) - else: - assert_false(data is img.dataobj) - assert_false(np.all(np.asarray(img.dataobj) == 42)) - # Uncache does have an effect - img.uncache() - assert_false(np.all(get_data_func() == 42)) - # in_memory is always true for array images, regardless of - # cache state. - img.uncache() - assert_true(img.in_memory) - if meth_name != 'get_fdata': - return - # Return original array from get_fdata only if the input array is the - # requested dtype. 
- float_types = np.sctypes['float'] - if arr_dtype not in float_types: - return - for float_type in float_types: - data = get_data_func(dtype=float_type) - assert_equal(data is img.dataobj, arr_dtype == float_type) - def validate_data_deprecated(self, imaker, params): # Check _data property still exists, but raises warning img = imaker() @@ -397,17 +376,6 @@ def validate_shape(self, imaker, params): # Read only assert_raises(AttributeError, setattr, img, 'shape', np.eye(4)) - def validate_ndim(self, imaker, params): - # Validate shape - img = imaker() - # Same as expected ndim - assert_equal(img.ndim, len(params['shape'])) - # Same as array ndim if passed - if 'data' in params: - assert_equal(img.ndim, params['data'].ndim) - # Read only - assert_raises(AttributeError, setattr, img, 'ndim', 5) - def validate_shape_deprecated(self, imaker, params): # Check deprecated get_shape API img = imaker() @@ -417,6 +385,7 @@ def validate_shape_deprecated(self, imaker, params): assert_equal(len(w), 1) + class HeaderShapeMixin(object): """ Tests that header shape can be set and got @@ -500,49 +469,40 @@ class MakeImageAPI(LoadImageAPI): header_maker = None # Example shapes for created images example_shapes = ((2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)) - # Supported dtypes for storing to disk - storable_dtypes = (np.uint8, np.int16, np.float32) def obj_params(self): # Return any obj_params from superclass for func, params in super(MakeImageAPI, self).obj_params(): yield func, params - # Create new images + # Create a new images aff = np.diag([1, 2, 3, 1]) def make_imaker(arr, aff, header=None): return lambda: self.image_maker(arr, aff, header) + for shape in self.example_shapes: + for dtype in (np.uint8, np.int16, np.float32): + arr = np.arange(np.prod(shape), dtype=np.float32).reshape(shape) + hdr = self.header_maker() + hdr.set_data_dtype(dtype) + func = make_imaker(arr.copy(), aff, hdr) + params = dict( + dtype=dtype, + affine=aff, + data=arr, + shape=shape, + is_proxy=False) + yield func, params + if not self.can_save: + return + # Add a proxy image + # We assume that loading from a fileobj creates a proxy image + params['is_proxy'] = True - def make_prox_imaker(arr, aff, hdr): - - def prox_imaker(): - img = self.image_maker(arr, aff, hdr) - rt_img = bytesio_round_trip(img) - return self.image_maker(rt_img.dataobj, aff, rt_img.header) - - return prox_imaker - - for shape, stored_dtype in product(self.example_shapes, - self.storable_dtypes): - # To make sure we do not trigger scaling, always use the - # stored_dtype for the input array. - arr = np.arange(np.prod(shape), dtype=stored_dtype).reshape(shape) - hdr = self.header_maker() - hdr.set_data_dtype(stored_dtype) - func = make_imaker(arr.copy(), aff, hdr) - params = dict( - dtype=stored_dtype, - affine=aff, - data=arr, - shape=shape, - is_proxy=False) - yield make_imaker(arr.copy(), aff, hdr), params - if not self.can_save: - continue - # Create proxy images from these array images, by storing via BytesIO. - # We assume that loading from a fileobj creates a proxy image. 
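For reference, ``bytesio_round_trip`` serialises an array image to in-memory files and reloads it, which is what yields a proxied ``dataobj``; a sketch of that pattern using nibabel's file-map machinery (array contents illustrative)::

    from io import BytesIO
    import numpy as np
    import nibabel as nib

    img = nib.Nifti1Image(np.arange(24, dtype=np.int16).reshape((2, 3, 4)),
                          np.eye(4))
    file_map = nib.Nifti1Image.make_file_map()
    for holder in file_map.values():
        holder.fileobj = BytesIO()     # in-memory stand-ins for files
    img.to_file_map(file_map)
    rt_img = nib.Nifti1Image.from_file_map(file_map)
    assert rt_img.dataobj.is_proxy     # lazy proxy, not an ndarray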
- params['is_proxy'] = True - yield make_prox_imaker(arr.copy(), aff, hdr), params + def prox_imaker(): + img = self.image_maker(arr, aff, hdr) + rt_img = bytesio_round_trip(img) + return self.image_maker(rt_img.dataobj, aff, rt_img.header) + yield prox_imaker, params class ImageHeaderAPI(MakeImageAPI): @@ -560,8 +520,6 @@ class TestAnalyzeAPI(ImageHeaderAPI): has_scaling = False can_save = True standard_extension = '.img' - # Supported dtypes for storing to disk - storable_dtypes = (np.uint8, np.int16, np.int32, np.float32, np.float64) class TestSpatialImageAPI(TestAnalyzeAPI): diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 78f876ec7d..1c6fb989b5 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -38,12 +38,7 @@ from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) -from ..testing import ( - clear_and_catch_warnings, - data_path, - runif_extra_has, - suppress_warnings, -) +from ..testing import data_path, suppress_warnings, runif_extra_has from . import test_analyze as tana from . import test_spm99analyze as tspm @@ -563,22 +558,6 @@ def test_slice_times(self): assert_equal(hdr['slice_end'], 5) assert_array_almost_equal(hdr['slice_duration'], 0.1) - # Ambiguous case - hdr2 = self.header_class() - hdr2.set_dim_info(slice=2) - hdr2.set_slice_duration(0.1) - hdr2.set_data_shape((1, 1, 2)) - with clear_and_catch_warnings() as w: - warnings.simplefilter("always") - hdr2.set_slice_times([0.1, 0]) - assert len(w) == 1 - # but always must be choosing sequential one first - assert_equal(hdr2.get_value_label('slice_code'), 'sequential decreasing') - # and the other direction - hdr2.set_slice_times([0, 0.1]) - assert_equal(hdr2.get_value_label('slice_code'), 'sequential increasing') - - def test_intents(self): ehdr = self.header_class() ehdr.set_intent('t test', (10,), name='some score') diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 6b5f231fc3..ca1654bf9a 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -107,7 +107,7 @@ def test_BinOpener(): class MockIndexedGzipFile(GzipFile): def __init__(self, *args, **kwargs): - self._drop_handles = kwargs.pop('drop_handles', False) + kwargs.pop('drop_handles', False) super(MockIndexedGzipFile, self).__init__(*args, **kwargs) diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index 7280c5552d..285674083b 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -108,14 +108,6 @@ def validate_shape(self, pmaker, params): # Read only assert_raises(AttributeError, setattr, prox, 'shape', params['shape']) - def validate_ndim(self, pmaker, params): - # Check shape - prox, fio, hdr = pmaker() - assert_equal(prox.ndim, len(params['shape'])) - # Read only - assert_raises(AttributeError, setattr, prox, - 'ndim', len(params['shape'])) - def validate_is_proxy(self, pmaker, params): # Check shape prox, fio, hdr = pmaker() diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 2c17c33fd1..9756a16747 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -67,40 +67,6 @@ def check_nib_ls_example4d(opts=[], hdrs_str="", other_str=""): assert_equal(fname, stdout[:len(fname)]) assert_re_in(expected_re, stdout[len(fname):]) - -def check_nib_diff_examples(): - fnames = [pjoin(DATA_PATH, f) - for f in ('standard.nii.gz', 'example4d.nii.gz')] - code, stdout, stderr = run_command(['nib-diff'] + fnames, check_code=False) - 
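The observable contract these checks encode, condensed (same ``run_command`` helper, data files as above)::

    code, out, err = run_command(['nib-diff', 'standard.nii.gz',
                                  'example4d.nii.gz'], check_code=False)
    # out holds one row per differing header field, plus DATA(md5) and
    # DATA(diff 1:) rows; for identical inputs out is exactly
    # "These files are identical."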
checked_fields = ["Field/File", "regular", "dim_info", "dim", "datatype", "bitpix", "pixdim", "slice_end", - "xyzt_units", "cal_max", "descrip", "qform_code", "sform_code", "quatern_b", - "quatern_c", "quatern_d", "qoffset_x", "qoffset_y", "qoffset_z", "srow_x", - "srow_y", "srow_z", "DATA(md5)", "DATA(diff 1:)"] - for item in checked_fields: - assert_true(item in stdout) - - fnames2 = [pjoin(DATA_PATH, f) - for f in ('example4d.nii.gz', 'example4d.nii.gz')] - code, stdout, stderr = run_command(['nib-diff'] + fnames2, check_code=False) - assert_equal(stdout, "These files are identical.") - - fnames3 = [pjoin(DATA_PATH, f) - for f in ('standard.nii.gz', 'example4d.nii.gz', 'example_nifti2.nii.gz')] - code, stdout, stderr = run_command(['nib-diff'] + fnames3, check_code=False) - for item in checked_fields: - assert_true(item in stdout) - - fnames4 = [pjoin(DATA_PATH, f) - for f in ('standard.nii.gz', 'standard.nii.gz', 'standard.nii.gz')] - code, stdout, stderr = run_command(['nib-diff'] + fnames4, check_code=False) - assert_equal(stdout, "These files are identical.") - - code, stdout, stderr = run_command(['nib-diff', '--dt', 'float64'] + fnames, check_code=False) - for item in checked_fields: - assert_true(item in stdout) - - - @script_test def test_nib_ls(): yield check_nib_ls_example4d @@ -184,11 +150,6 @@ def test_help(): assert_equal(stderr, '') -@script_test -def test_nib_diff(): - yield check_nib_diff_examples - - @script_test def test_nib_nifti_dx(): # Test nib-nifti-dx script diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index 40d5ebc41e..f528555d05 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -64,7 +64,7 @@ def test_assert_allclose_safely(): def assert_warn_len_equal(mod, n_in_context): mod_warns = mod.__warningregistry__ - # Python 3 appears to clear any pre-existing warnings of the same type, + # Python 3.4 appears to clear any pre-existing warnings of the same type, # when raising warnings inside a catch_warnings block. So, there is a # warning generated by the tests within the context manager, but no # previous warnings. 
@@ -84,15 +84,18 @@ def test_clear_and_catch_warnings(): assert_equal(my_mod.__warningregistry__, {}) # Without specified modules, don't clear warnings during context with clear_and_catch_warnings(): + warnings.simplefilter('ignore') warnings.warn('Some warning') assert_warn_len_equal(my_mod, 1) # Confirm that specifying module keeps old warning, does not add new with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') warnings.warn('Another warning') assert_warn_len_equal(my_mod, 1) # Another warning, no module spec does add to warnings dict, except on - # Python 3 (see comments in `assert_warn_len_equal`) + # Python 3.4 (see comments in `assert_warn_len_equal`) with clear_and_catch_warnings(): + warnings.simplefilter('ignore') warnings.warn('Another warning') assert_warn_len_equal(my_mod, 2) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index b7a510e337..e442b508d8 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -13,7 +13,6 @@ import warnings import gzip import bz2 -from collections import OrderedDict from os.path import exists, splitext from operator import mul from functools import reduce @@ -23,7 +22,6 @@ from .casting import (shared_range, type_info, OK_FLOATS) from .openers import Opener from .deprecated import deprecate_with_version -from .externals.oset import OrderedSet sys_is_le = sys.byteorder == 'little' native_code = sys_is_le and '<' or '>' @@ -80,7 +78,7 @@ class Recoder(object): 2 ''' - def __init__(self, codes, fields=('code',), map_maker=OrderedDict): + def __init__(self, codes, fields=('code',), map_maker=dict): ''' Create recoder object ``codes`` give a sequence of code, alias sequences @@ -99,7 +97,7 @@ def __init__(self, codes, fields=('code',), map_maker=OrderedDict): Parameters ---------- - codes : sequence of sequences + codes : seqence of sequences Each sequence defines values (codes) that are equivalent fields : {('code',) string sequence}, optional names by which elements in sequences can be accessed @@ -135,15 +133,13 @@ def add_codes(self, code_syn_seqs): Examples -------- - >>> code_syn_seqs = ((2, 'two'), (1, 'one')) + >>> code_syn_seqs = ((1, 'one'), (2, 'two')) >>> rc = Recoder(code_syn_seqs) >>> rc.value_set() == set((1,2)) True >>> rc.add_codes(((3, 'three'), (1, 'first'))) >>> rc.value_set() == set((1,2,3)) True - >>> print(rc.value_set()) # set is actually ordered - OrderedSet([2, 1, 3]) ''' for code_syns in code_syn_seqs: # Add all the aliases @@ -190,7 +186,7 @@ def keys(self): return self.field1.keys() def value_set(self, name=None): - ''' Return OrderedSet of possible returned values for column + ''' Return set of possible returned values for column By default, the column is the first column. 
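After this change the column values come back as a plain, unordered ``set``; for example, with the constructor form shown in the docstring above::

    from nibabel.volumeutils import Recoder

    rc = Recoder(((1, 'one'), (2, 'two')), fields=('code', 'label'))
    assert rc.value_set() == set((1, 2))              # first column by default
    assert rc.value_set('label') == set(('one', 'two'))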
@@ -216,7 +212,7 @@ def value_set(self, name=None): d = self.field1 else: d = self.__dict__[name] - return OrderedSet(d.values()) + return set(d.values()) # Endian code aliases diff --git a/setup.py b/setup.py index 27f85d3e99..b0f5bc093c 100755 --- a/setup.py +++ b/setup.py @@ -119,7 +119,6 @@ def main(**extra_args): pjoin('bin', 'nib-nifti-dx'), pjoin('bin', 'nib-tck2trk'), pjoin('bin', 'nib-trk2tck'), - pjoin('bin', 'nib-diff'), ], cmdclass = cmdclass, **extra_args From 73366d9fd22fc264b46a27770c913933084eed88 Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Fri, 5 Oct 2018 11:14:14 -0400 Subject: [PATCH 02/26] ENH: enable debug session for appveyor --- .travis.yml | 151 ------------------------------------ appveyor.yml | 18 +++-- nibabel/__init__.py | 7 ++ nibabel/tests/test_minc1.py | 13 +++- 4 files changed, 31 insertions(+), 158 deletions(-) delete mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 28ac4fa5f4..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,151 +0,0 @@ -# vim ft=yaml -# Multiple lines can be made a single "virtual line" because of how Travis -# munges each line before executing it to print out the exit status. It's okay -# for it to be on multiple physical lines, so long as you remember: - There -# can't be any leading "-"s - All newlines will be removed, so use ";"s - -language: python - -# Run jobs on container-based infrastructure, can be overridden per job -sudo: false - -cache: - directories: - - $HOME/.cache/pip -env: - global: - - DEPENDS="six numpy scipy matplotlib h5py pillow pydicom" - - OPTIONAL_DEPENDS="" - - INSTALL_TYPE="setup" - - EXTRA_WHEELS="https://5cf40426d9f06eb7461d-6fe47d9331aba7cd62fc36c7196769e4.ssl.cf2.rackcdn.com" - - PRE_WHEELS="https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" - - EXTRA_PIP_FLAGS="--find-links=$EXTRA_WHEELS" - - PRE_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS" -python: - - 3.4 - - 3.5 - - 3.6 -matrix: - include: - - python: 2.7 - env: - - COVERAGE=1 - # Absolute minimum dependencies - - python: 2.7 - env: - - DEPENDS="numpy==1.7.1" - # Absolute minimum dependencies plus oldest MPL - # Check these against: - # nibabel/info.py - # doc/source/installation.rst - # requirements.txt - - python: 2.7 - env: - - DEPENDS="numpy==1.7.1 matplotlib==1.3.1" - # Minimum pydicom dependency - - python: 2.7 - env: - - DEPENDS="numpy==1.7.1 pydicom==0.9.9 pillow==2.6" - # pydicom master branch - - python: 3.5 - env: - - DEPENDS="numpy git+https://github.com/pydicom/pydicom.git@master" - # test 2.7 against pre-release builds of everything - - python: 2.7 - env: - - EXTRA_PIP_FLAGS="$PRE_PIP_FLAGS" - # test 3.5 against pre-release builds of everything - - python: 3.5 - env: - - EXTRA_PIP_FLAGS="$PRE_PIP_FLAGS" - # Documentation doctests - - python: 2.7 - env: - - DOC_DOC_TEST=1 - - python: 2.7 - env: - - INSTALL_TYPE=sdist - - python: 2.7 - env: - - INSTALL_TYPE=wheel - - python: 2.7 - env: - - INSTALL_TYPE=requirements - - python: 2.7 - env: - - STYLE=1 - - python: 3.5 - env: - - STYLE=1 - - python: 3.5 - env: - - DOC_DOC_TEST=1 - # Run tests with indexed_gzip present - - python: 2.7 - env: - - OPTIONAL_DEPENDS="indexed_gzip" - - python: 3.5 - env: - - OPTIONAL_DEPENDS="indexed_gzip" -before_install: - - source tools/travis_tools.sh - - python -m pip install --upgrade pip - - pip install --upgrade virtualenv - - virtualenv --python=python venv - - source venv/bin/activate - - python --version # just to check - - pip install -U pip wheel # 
needed at one point - - retry pip install nose flake8 mock # always - - pip install $EXTRA_PIP_FLAGS $DEPENDS $OPTIONAL_DEPENDS - - if [ "${COVERAGE}" == "1" ]; then - pip install coverage; - pip install coveralls; - pip install codecov; - fi -# command to install dependencies -install: - - | - if [ "$INSTALL_TYPE" == "setup" ]; then - python setup.py install - elif [ "$INSTALL_TYPE" == "sdist" ]; then - python setup_egg.py egg_info # check egg_info while we're here - python setup_egg.py sdist - pip install $EXTRA_PIP_FLAGS dist/*.tar.gz - elif [ "$INSTALL_TYPE" == "wheel" ]; then - pip install wheel - python setup_egg.py bdist_wheel - pip install $EXTRA_PIP_FLAGS dist/*.whl - elif [ "$INSTALL_TYPE" == "requirements" ]; then - pip install $EXTRA_PIP_FLAGS -r requirements.txt - python setup.py install - fi - # Point to nibabel data directory - - export NIBABEL_DATA_DIR="$PWD/nibabel-data" -# command to run tests, e.g. python setup.py test -script: - - | - if [ "${STYLE}" == "1" ]; then - # Run styles only on core nibabel code. - flake8 nibabel - else - # Change into an innocuous directory and find tests from installation - mkdir for_testing - cd for_testing - if [ "${COVERAGE}" == "1" ]; then - cp ../.coveragerc .; - COVER_ARGS="--with-coverage --cover-package nibabel"; - fi - if [ "$DOC_DOC_TEST" == "1" ]; then - cd ../doc; - pip install -r ../doc-requirements.txt - make html; - make doctest; - else - nosetests --with-doctest $COVER_ARGS nibabel; - fi - fi -after_success: - - if [ "${COVERAGE}" == "1" ]; then coveralls; codecov; fi - -notifications: - webhooks: http://nipy.bic.berkeley.edu:54856/travis diff --git a/appveyor.yml b/appveyor.yml index e41aee90c8..3ccf6831fa 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -2,16 +2,12 @@ # CI on Windows via appveyor environment: - + appveyor_build_worker_cloud: gce matrix: - - PYTHON: C:\Python27 - - PYTHON: C:\Python27-x64 - PYTHON: C:\Python34 - PYTHON: C:\Python34-x64 - PYTHON: C:\Python35 - PYTHON: C:\Python35-x64 - - PYTHON: C:\Python36 - - PYTHON: C:\Python36-x64 install: # Prepend newly installed Python to the PATH of this build (this cannot be @@ -31,4 +27,14 @@ test_script: # Change into an innocuous directory and find tests from installation - mkdir for_testing - cd for_testing - - nosetests --with-doctest nibabel + # Print Python, numpy versions + - python -c "import sys, numpy; print('Python', sys.version); print('numpy', numpy.__version__)" + # Show all environment variables to ease possible future debugging + - set + - nosetests --with-doctest -s -v nibabel + + +on_failure: + # enable the next to let the build VM block for up to 60min to log in via RDP and debug + # - ps: $blockRdp = $true; iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1')) + diff --git a/nibabel/__init__.py b/nibabel/__init__.py index 4c3c7ae55c..201c480b94 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -86,3 +86,10 @@ def test(*args, **kwargs): def get_info(): return _get_pkg_info(os.path.dirname(__file__)) + +def print_numpy_info(): + import numpy as np + print("NUMPY: ID(numpy): %d ID(numpy.float64): %d" % (id(np), id(np.float64))) + +def setup_package(): + print_numpy_info() \ No newline at end of file diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index cb59d921eb..5d6fb4ea80 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 
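# A distilled form of the identity diagnostic added below (illustrative
# only): if two code paths print different ids here, the process holds
# two separate numpy import states, and dtype equality checks between
# them will fail even though both types look like ``numpy.float64``.
import sys
import numpy as np
t = np.float64
print(id(t), t.__module__, id(sys.modules[t.__module__]))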
from __future__ import division, print_function, absolute_import +import sys from os.path import join as pjoin @@ -156,7 +157,17 @@ def test_mincfile(self): for tp in self.test_files: mnc_obj = self.opener(tp['fname'], 'r') mnc = self.file_class(mnc_obj) - assert_equal(mnc.get_data_dtype().type, tp['dtype']) + try: + assert_equal(mnc.get_data_dtype().type, tp['dtype']) + except AssertionError: + from nibabel import print_numpy_info + print() + for l, t in (('mnc.get_data_dtype().type', mnc.get_data_dtype().type), + ("tp['dtype']", tp['dtype'])): + print("%30s ID: %s, __module__: %s, id(sys[__module__]): %s" + % (l, id(t), t.__module__, id(sys.modules[t.__module__]))) + print_numpy_info() + raise assert_equal(mnc.get_data_shape(), tp['shape']) assert_equal(mnc.get_zooms(), tp['zooms']) assert_array_equal(mnc.get_affine(), tp['affine']) From 44e8d4c3c052f278009c29da87c7ea17fa21a02c Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 06:49:55 -0700 Subject: [PATCH 03/26] MNT: Empty commit to trigger AppVeyor From 4511c194bd0864796a42e845fb0b17b79d075a8a Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 06:50:04 -0700 Subject: [PATCH 04/26] MNT: Empty commit to trigger AppVeyor From 9a8687735f342129f18c1d4fcdcfdb1f2f11041c Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 06:50:14 -0700 Subject: [PATCH 05/26] MNT: Empty commit to trigger AppVeyor From b71760364bfb26b0f8e4fe24d432e74082da04ce Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 06:52:44 -0700 Subject: [PATCH 06/26] REVERT: Restore 68cf6715cf7977f597f3d6b743467a46e8d397c0 for testing --- .gitignore | 1 - Changelog | 21 - bin/nib-dicomfs | 210 +++++- bin/nib-nifti-dx | 26 +- bin/nib-tck2trk | 18 - bin/nib-trk2tck | 18 - bin/parrec2nii | 2 +- nibabel/arrayproxy.py | 41 +- nibabel/benchmarks/bench_array_to_file.py | 1 - .../benchmarks/bench_arrayproxy_slicing.py | 18 +- nibabel/brikhead.py | 627 ------------------ nibabel/cmdline/dicomfs.py | 241 ------- nibabel/cmdline/ls.py | 7 +- nibabel/cmdline/nifti_dx.py | 38 -- nibabel/cmdline/tck2trk.py | 56 -- nibabel/cmdline/tests/__init__.py | 0 nibabel/cmdline/trk2tck.py | 39 -- nibabel/freesurfer/mghformat.py | 2 +- nibabel/imageclasses.py | 13 +- nibabel/openers.py | 24 +- .../parrec2nii.py => parrec2nii_cmd.py} | 0 nibabel/spatialimages.py | 127 +--- nibabel/tests/data/bad_attribute+orig.HEAD | 133 ---- nibabel/tests/data/bad_datatype+orig.HEAD | 133 ---- nibabel/tests/data/example4d+orig.HEAD | 133 ---- nibabel/tests/data/scaled+tlrc.HEAD | 116 ---- nibabel/tests/test_arrayproxy.py | 148 ++--- nibabel/tests/test_brikhead.py | 150 ----- nibabel/tests/test_image_api.py | 10 +- nibabel/tests/test_openers.py | 49 +- .../{cmdline => }/tests/test_parrec2nii.py | 12 +- nibabel/tests/test_scripts.py | 112 +--- nibabel/tests/test_spatialimages.py | 134 +--- setup.py | 2 - 34 files changed, 380 insertions(+), 2282 deletions(-) delete mode 100644 bin/nib-tck2trk delete mode 100644 bin/nib-trk2tck delete mode 100644 nibabel/brikhead.py delete mode 100644 nibabel/cmdline/dicomfs.py delete mode 100644 nibabel/cmdline/nifti_dx.py delete mode 100644 nibabel/cmdline/tck2trk.py delete mode 100644 nibabel/cmdline/tests/__init__.py delete mode 100644 nibabel/cmdline/trk2tck.py rename nibabel/{cmdline/parrec2nii.py => parrec2nii_cmd.py} (100%) delete mode 100644 nibabel/tests/data/bad_attribute+orig.HEAD delete mode 100644 nibabel/tests/data/bad_datatype+orig.HEAD delete mode 100644 
nibabel/tests/data/example4d+orig.HEAD delete mode 100644 nibabel/tests/data/scaled+tlrc.HEAD delete mode 100644 nibabel/tests/test_brikhead.py rename nibabel/{cmdline => }/tests/test_parrec2nii.py (91%) diff --git a/.gitignore b/.gitignore index df018f0ead..d6996550dc 100644 --- a/.gitignore +++ b/.gitignore @@ -16,7 +16,6 @@ .project .pydevproject *.py.orig -.DS_Store # Not sure what the next one is for *.kpf diff --git a/Changelog b/Changelog index b96ddd40a7..4c65cf0bfe 100644 --- a/Changelog +++ b/Changelog @@ -24,27 +24,6 @@ Gerhard (SG) and Eric Larson (EL). References like "pr/298" refer to github pull request numbers. -Upcoming Release -================ - -New features ------------- -* Image slicing for SpatialImages (pr/550) (CM) - -Enhancements ------------- -* Simplfiy MGHImage and add footer fields (pr/569) (CM, reviewed by MB) - -Bug fixes ---------- - -Maintenance ------------ - -API changes and deprecations ----------------------------- - - 2.2.1 (Wednesday 22 November 2017) ================================== diff --git a/bin/nib-dicomfs b/bin/nib-dicomfs index 05b6a50afc..115fd4e486 100755 --- a/bin/nib-dicomfs +++ b/bin/nib-dicomfs @@ -9,7 +9,213 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # Copyright (C) 2011 Christian Haselgrove -from nibabel.cmdline.dicomfs import main +import sys +import os +import stat +import errno +import time +import locale +import logging +import fuse +import nibabel as nib +import nibabel.dft as dft + +from optparse import OptionParser, Option + +uid = os.getuid() +gid = os.getgid() +encoding = locale.getdefaultlocale()[1] + +fuse.fuse_python_api = (0, 2) + +logger = logging.getLogger('nibabel.dft') + +class FileHandle: + + def __init__(self, fno): + self.fno = fno + self.keep_cache = False + self.direct_io = False + return + + def __str__(self): + return 'FileHandle(%d)' % self.fno + +class DICOMFS(fuse.Fuse): + + def __init__(self, *args, **kwargs): + self.followlinks = kwargs.pop('followlinks', False) + fuse.Fuse.__init__(self, *args, **kwargs) + self.fhs = {} + return + + def get_paths(self): + paths = {} + for study in dft.get_studies(self.dicom_path, self.followlinks): + pd = paths.setdefault(study.patient_name_or_uid(), {}) + patient_info = 'patient information\n' + patient_info = 'name: %s\n' % study.patient_name + patient_info += 'ID: %s\n' % study.patient_id + patient_info += 'birth date: %s\n' % study.patient_birth_date + patient_info += 'sex: %s\n' % study.patient_sex + pd['INFO'] = patient_info.encode('ascii', 'replace') + study_datetime = '%s_%s' % (study.date, study.time) + study_info = 'study info\n' + study_info += 'UID: %s\n' % study.uid + study_info += 'date: %s\n' % study.date + study_info += 'time: %s\n' % study.time + study_info += 'comments: %s\n' % study.comments + d = {'INFO': study_info.encode('ascii', 'replace')} + for series in study.series: + series_info = 'series info\n' + series_info += 'UID: %s\n' % series.uid + series_info += 'number: %s\n' % series.number + series_info += 'description: %s\n' % series.description + series_info += 'rows: %d\n' % series.rows + series_info += 'columns: %d\n' % series.columns + series_info += 'bits allocated: %d\n' % series.bits_allocated + series_info += 'bits stored: %d\n' % series.bits_stored + series_info += 'storage instances: %d\n' % len(series.storage_instances) + d[series.number] = {'INFO': series_info.encode('ascii', 'replace'), + '%s.nii' % series.number: (series.nifti_size, series.as_nifti), + '%s.png' % series.number: 
(series.png_size, series.as_png)} + pd[study_datetime] = d + return paths + + def match_path(self, path): + wd = self.get_paths() + if path == '/': + logger.debug('return root') + return wd + for part in path.lstrip('/').split('/'): + logger.debug("path:%s part:%s" % (path, part)) + if part not in wd: + return None + wd = wd[part] + logger.debug('return') + return wd + + def readdir(self, path, fh): + logger.info('readdir %s' % (path,)) + matched_path = self.match_path(path) + if matched_path is None: + return -errno.ENOENT + logger.debug('matched %s' % (matched_path,)) + fnames = [ k.encode('ascii', 'replace') for k in matched_path.keys() ] + fnames.append('.') + fnames.append('..') + return [ fuse.Direntry(f) for f in fnames ] + + def getattr(self, path): + logger.debug('getattr %s' % path) + matched_path = self.match_path(path) + logger.debug('matched: %s' % (matched_path,)) + now = time.time() + st = fuse.Stat() + if isinstance(matched_path, dict): + st.st_mode = stat.S_IFDIR | 0755 + st.st_ctime = now + st.st_mtime = now + st.st_atime = now + st.st_uid = uid + st.st_gid = gid + st.st_nlink = len(matched_path) + return st + if isinstance(matched_path, str): + st.st_mode = stat.S_IFREG | 0644 + st.st_ctime = now + st.st_mtime = now + st.st_atime = now + st.st_uid = uid + st.st_gid = gid + st.st_size = len(matched_path) + st.st_nlink = 1 + return st + if isinstance(matched_path, tuple): + st.st_mode = stat.S_IFREG | 0644 + st.st_ctime = now + st.st_mtime = now + st.st_atime = now + st.st_uid = uid + st.st_gid = gid + st.st_size = matched_path[0]() + st.st_nlink = 1 + return st + return -errno.ENOENT + + def open(self, path, flags): + logger.debug('open %s' % (path,)) + matched_path = self.match_path(path) + if matched_path is None: + return -errno.ENOENT + for i in range(1, 10): + if i not in self.fhs: + if isinstance(matched_path, str): + self.fhs[i] = matched_path + elif isinstance(matched_path, tuple): + self.fhs[i] = matched_path[1]() + else: + raise -errno.EFTYPE + return FileHandle(i) + raise -errno.ENFILE + + # not done + def read(self, path, size, offset, fh): + logger.debug('read') + logger.debug(path) + logger.debug(size) + logger.debug(offset) + logger.debug(fh) + return self.fhs[fh.fno][offset:offset+size] + + def release(self, path, flags, fh): + logger.debug('release') + logger.debug(path) + logger.debug(fh) + del self.fhs[fh.fno] + return + +def get_opt_parser(): + # use module docstring for help output + p = OptionParser( + usage="%s [OPTIONS] " + % os.path.basename(sys.argv[0]), + version="%prog " + nib.__version__) + + p.add_options([ + Option("-v", "--verbose", action="count", + dest="verbose", default=0, + help="make noise. 
Could be specified multiple times"), + ]) + + p.add_options([ + Option("-L", "--follow-links", action="store_true", + dest="followlinks", default=False, + help="Follow symbolic links in DICOM directory"), + ]) + return p if __name__ == '__main__': - main() \ No newline at end of file + parser = get_opt_parser() + (opts, files) = parser.parse_args() + + if opts.verbose: + logger.addHandler(logging.StreamHandler(sys.stdout)) + logger.setLevel(opts.verbose > 1 and logging.DEBUG or logging.INFO) + + if len(files) != 2: + sys.stderr.write("Please provide two arguments:\n%s\n" % parser.usage) + sys.exit(1) + + fs = DICOMFS(dash_s_do='setsingle', followlinks=opts.followlinks) + fs.parse(['-f', '-s', files[1]]) + fs.dicom_path = files[0].decode(encoding) + try: + fs.main() + except fuse.FuseError: + # fuse prints the error message + sys.exit(1) + + sys.exit(0) + +# eof diff --git a/bin/nib-nifti-dx b/bin/nib-nifti-dx index d317585286..40122acd16 100755 --- a/bin/nib-nifti-dx +++ b/bin/nib-nifti-dx @@ -8,8 +8,32 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Print nifti diagnostics for header files ''' +from __future__ import division, print_function, absolute_import + +import sys + +from optparse import OptionParser + +import nibabel as nib + + +def main(): + """ Go go team """ + parser = OptionParser( + usage="%s [FILE ...]\n\n" % sys.argv[0] + __doc__, + version="%prog " + nib.__version__) + (opts, files) = parser.parse_args() + + for fname in files: + with nib.openers.ImageOpener(fname) as fobj: + hdr = fobj.read(nib.nifti1.header_dtype.itemsize) + result = nib.Nifti1Header.diagnose_binaryblock(hdr) + if len(result): + print('Picky header check output for "%s"\n' % fname) + print(result + '\n') + else: + print('Header for "%s" is clean' % fname) -from nibabel.cmdline.nifti_dx import main if __name__ == '__main__': main() diff --git a/bin/nib-tck2trk b/bin/nib-tck2trk deleted file mode 100644 index 896e67a5d1..0000000000 --- a/bin/nib-tck2trk +++ /dev/null @@ -1,18 +0,0 @@ -#!python -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. -# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" -Convert tractograms (TCK -> TRK). -""" - -from nibabel.cmdline.tck2trk import main - - -if __name__ == '__main__': - main() diff --git a/bin/nib-trk2tck b/bin/nib-trk2tck deleted file mode 100644 index 85509e7447..0000000000 --- a/bin/nib-trk2tck +++ /dev/null @@ -1,18 +0,0 @@ -#!python -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. -# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" -Convert tractograms (TRK -> TCK). 
-""" - -from nibabel.cmdline.trk2tck import main - - -if __name__ == '__main__': - main() diff --git a/bin/parrec2nii b/bin/parrec2nii index 27a1abca05..4856af9986 100755 --- a/bin/parrec2nii +++ b/bin/parrec2nii @@ -2,7 +2,7 @@ """PAR/REC to NIfTI converter """ -from nibabel.cmdline.parrec2nii import main +from nibabel.parrec2nii_cmd import main if __name__ == '__main__': diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index b3faa21a1f..e95f519e02 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -34,7 +34,7 @@ from .volumeutils import array_from_file, apply_read_scaling from .fileslice import fileslice from .keywordonly import kw_only_meth -from . import openers +from .openers import ImageOpener, HAVE_INDEXED_GZIP """This flag controls whether a new file handle is created every time an image @@ -43,18 +43,14 @@ ``True``, ``False``, or ``'auto'``. If ``True``, a single file handle is created and used. If ``False``, a new -file handle is created every time the image is accessed. For gzip files, if -``'auto'``, and the optional ``indexed_gzip`` dependency is present, a single -file handle is created and persisted. If ``indexed_gzip`` is not available, -behaviour is the same as if ``keep_file_open is False``. +file handle is created every time the image is accessed. If ``'auto'``, and +the optional ``indexed_gzip`` dependency is present, a single file handle is +created and persisted. If ``indexed_gzip`` is not available, behaviour is the +same as if ``keep_file_open is False``. If this is set to any other value, attempts to create an ``ArrayProxy`` without specifying the ``keep_file_open`` flag will result in a ``ValueError`` being raised. - -.. warning:: Setting this flag to a value of ``'auto'`` will become deprecated - behaviour in version 2.4.0. Support for ``'auto'`` will be removed - in version 3.0.0. """ KEEP_FILE_OPEN_DEFAULT = False @@ -191,9 +187,9 @@ def _should_keep_file_open(self, file_like, keep_file_open): - If ``file_like`` is a file(-like) object, ``False`` is returned. Otherwise, ``file_like`` is assumed to be a file name. - - If ``keep_file_open`` is ``auto``, and ``indexed_gzip`` is - not available, ``False`` is returned. - - Otherwise, the value of ``keep_file_open`` is returned unchanged. + - if ``file_like`` ends with ``'gz'``, and the ``indexed_gzip`` + library is available, ``True`` is returned. + - Otherwise, ``False`` is returned. Parameters ---------- @@ -207,21 +203,23 @@ def _should_keep_file_open(self, file_like, keep_file_open): ------- The value of ``keep_file_open`` that will be used by this - ``ArrayProxy``, and passed through to ``ImageOpener`` instances. + ``ArrayProxy``. 
""" if keep_file_open is None: keep_file_open = KEEP_FILE_OPEN_DEFAULT - if keep_file_open not in ('auto', True, False): + # if keep_file_open is True/False, we do what the user wants us to do + if isinstance(keep_file_open, bool): + return keep_file_open + if keep_file_open != 'auto': raise ValueError('keep_file_open should be one of {None, ' '\'auto\', True, False}') + # file_like is a handle - keep_file_open is irrelevant if hasattr(file_like, 'read') and hasattr(file_like, 'seek'): return False - # don't have indexed_gzip - auto -> False - if keep_file_open == 'auto' and not (openers.HAVE_INDEXED_GZIP and - file_like.endswith('.gz')): - return False - return keep_file_open + # Otherwise, if file_like is gzipped, and we have_indexed_gzip, we set + # keep_file_open to True, else we set it to False + return HAVE_INDEXED_GZIP and file_like.endswith('gz') @property @deprecate_with_version('ArrayProxy.header deprecated', '2.2', '3.0') @@ -267,11 +265,10 @@ def _get_fileobj(self): """ if self._keep_file_open: if not hasattr(self, '_opener'): - self._opener = openers.ImageOpener( - self.file_like, keep_open=self._keep_file_open) + self._opener = ImageOpener(self.file_like, keep_open=True) yield self._opener else: - with openers.ImageOpener(self.file_like) as opener: + with ImageOpener(self.file_like, keep_open=False) as opener: yield opener def get_unscaled(self): diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py index 36921a106a..f55b8a2583 100644 --- a/nibabel/benchmarks/bench_array_to_file.py +++ b/nibabel/benchmarks/bench_array_to_file.py @@ -16,7 +16,6 @@ from __future__ import division, print_function import sys -from io import BytesIO # NOQA import numpy as np diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index c880aa0700..321a0779d5 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -15,6 +15,7 @@ """ from timeit import timeit +import contextlib import gc import itertools as it import numpy as np @@ -50,7 +51,7 @@ ('?', '?', '?', ':'), ] -KEEP_OPENS = [False, True, 'auto'] +KEEP_OPENS = [False, True] if HAVE_INDEXED_GZIP: HAVE_IGZIP = [False, True] @@ -58,6 +59,16 @@ HAVE_IGZIP = [False] +@contextlib.contextmanager +def patch_indexed_gzip(have_igzip): + + atts = ['nibabel.openers.HAVE_INDEXED_GZIP', + 'nibabel.arrayproxy.HAVE_INDEXED_GZIP'] + + with mock.patch(atts[0], have_igzip), mock.patch(atts[1], have_igzip): + yield + + def bench_arrayproxy_slicing(): print_git_title('\nArrayProxy gzip slicing') @@ -143,15 +154,14 @@ def fmt_sliceobj(sliceobj): # load uncompressed and compressed versions of the image img = nib.load(testfile, keep_file_open=keep_open) - with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', have_igzip): + with patch_indexed_gzip(have_igzip): imggz = nib.load(testfilegz, keep_file_open=keep_open) def basefunc(): img.dataobj[fix_sliceobj(sliceobj)] def testfunc(): - with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', - have_igzip): + with patch_indexed_gzip(have_igzip): imggz.dataobj[fix_sliceobj(sliceobj)] # make sure nothing is floating around from the previous test diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py deleted file mode 100644 index 9e521e61b6..0000000000 --- a/nibabel/brikhead.py +++ /dev/null @@ -1,627 +0,0 @@ -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### 
### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. -# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" -Class for reading AFNI BRIK/HEAD datasets - -See https://afni.nimh.nih.gov/pub/dist/doc/program_help/README.attributes.html -for information on what is required to have a valid BRIK/HEAD dataset. - -Unless otherwise noted, descriptions AFNI attributes in the code refer to this -document. - -Notes ------ - -In the AFNI HEAD file, the first two values of the attribute DATASET_RANK -determine the shape of the data array stored in the corresponding BRIK file. -The first value, DATASET_RANK[0], must be set to 3 denoting a 3D image. The -second value, DATASET_RANK[1], determines how many "sub-bricks" (in AFNI -parlance) / volumes there are along the fourth (traditionally, but not -exclusively) time axis. Thus, DATASET_RANK[1] will (at least as far as I (RM) -am aware) always be >= 1. This permits sub-brick indexing common in AFNI -programs (e.g., example4d+orig'[0]'). -""" -from __future__ import print_function, division - -from copy import deepcopy -import os -import re - -import numpy as np -from six import string_types - -from .arrayproxy import ArrayProxy -from .fileslice import strided_scalar -from .keywordonly import kw_only_meth -from .spatialimages import ( - SpatialImage, - SpatialHeader, - HeaderDataError, - ImageDataError -) -from .volumeutils import Recoder - -# used for doc-tests -filepath = os.path.dirname(os.path.realpath(__file__)) -datadir = os.path.realpath(os.path.join(filepath, 'tests/data')) - -_attr_dic = { - 'string': str, - 'integer': int, - 'float': float -} - -_endian_dict = { - 'LSB_FIRST': '<', - 'MSB_FIRST': '>', -} - -_dtype_dict = { - 0: 'B', - 1: 'h', - 3: 'f', - 5: 'D', -} - -space_codes = Recoder(( - (0, 'unknown', ''), - (1, 'scanner', 'ORIG'), - (3, 'talairach', 'TLRC'), - (4, 'mni', 'MNI')), fields=('code', 'label', 'space')) - - -class AFNIImageError(ImageDataError): - """Error when reading AFNI BRIK files""" - - -class AFNIHeaderError(HeaderDataError): - """Error when reading AFNI HEAD file""" - - -DATA_OFFSET = 0 -TYPE_RE = re.compile('type\s*=\s*(string|integer|float)-attribute\s*\n') -NAME_RE = re.compile('name\s*=\s*(\w+)\s*\n') - - -def _unpack_var(var): - """ - Parses key : value pair from `var` - - Parameters - ---------- - var : str - Entry from HEAD file - - Returns - ------- - name : str - Name of attribute - value : object - Value of attribute - - Examples - -------- - >>> var = "type = integer-attribute\\nname = BRICK_TYPES\\ncount = 1\\n1\\n" - >>> name, attr = _unpack_var(var) - >>> print(name, attr) - BRICK_TYPES 1 - >>> var = "type = string-attribute\\nname = TEMPLATE_SPACE\\ncount = 5\\n'ORIG~" - >>> name, attr = _unpack_var(var) - >>> print(name, attr) - TEMPLATE_SPACE ORIG - """ - - err_msg = ('Please check HEAD file to ensure it is AFNI compliant. ' - 'Offending attribute:\n%s' % var) - atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var) - if len(atype) != 1: - raise AFNIHeaderError('Invalid attribute type entry in HEAD file. ' - '%s' % err_msg) - if len(aname) != 1: - raise AFNIHeaderError('Invalid attribute name entry in HEAD file. 
' - '%s' % err_msg) - atype = _attr_dic.get(atype[0], str) - attr = ' '.join(var.strip().splitlines()[3:]) - if atype is not str: - try: - attr = [atype(f) for f in attr.split()] - except ValueError: - raise AFNIHeaderError('Failed to read variable from HEAD file due ' - 'to improper type casting. %s' % err_msg) - else: - # AFNI string attributes will always start with open single quote and - # end with a tilde (NUL). These attributes CANNOT contain tildes (so - # stripping is safe), but can contain single quotes (so we replace) - attr = attr.replace('\'', '', 1).rstrip('~') - - return aname[0], attr[0] if len(attr) == 1 else attr - - -def _get_datatype(info): - """ - Gets datatype of BRIK file associated with HEAD file yielding `info` - - Parameters - ---------- - info : dict - As obtained by :func:`parse_AFNI_header` - - Returns - ------- - dt : np.dtype - Datatype of BRIK file associated with HEAD - - Notes - ----- - ``BYTEORDER_STRING`` may be absent, signifying platform native byte order, - or contain one of "LSB_FIRST" or "MSB_FIRST". - - ``BRICK_TYPES`` gives the storage data type for each sub-brick, with - 0=uint, 1=int16, 3=float32, 5=complex64 (see ``_dtype_dict``). This should - generally be the same value for each sub-brick in the dataset. - """ - bo = info['BYTEORDER_STRING'] - bt = info['BRICK_TYPES'] - if isinstance(bt, list): - if np.unique(bt).size > 1: - raise AFNIImageError('Can\'t load file with multiple data types.') - bt = bt[0] - bo = _endian_dict.get(bo, '=') - bt = _dtype_dict.get(bt, None) - if bt is None: - raise AFNIImageError('Can\'t deduce image data type.') - return np.dtype(bo + bt) - - -def parse_AFNI_header(fobj): - """ - Parses `fobj` to extract information from HEAD file - - Parameters - ---------- - fobj : file-like object - AFNI HEAD file object or filename. If file object, should - implement at least ``read`` - - Returns - ------- - info : dict - Dictionary containing AFNI-style key:value pairs from HEAD file - - Examples - -------- - >>> fname = os.path.join(datadir, 'example4d+orig.HEAD') - >>> info = parse_AFNI_header(fname) - >>> print(info['BYTEORDER_STRING']) - LSB_FIRST - >>> print(info['BRICK_TYPES']) - [1, 1, 1] - """ - # edge case for being fed a filename instead of a file object - if isinstance(fobj, string_types): - with open(fobj, 'rt') as src: - return parse_AFNI_header(src) - # unpack variables in HEAD file - head = fobj.read().split('\n\n') - return {key: value for key, value in map(_unpack_var, head)} - - -class AFNIArrayProxy(ArrayProxy): - """ Proxy object for AFNI image array. - - Attributes - ---------- - scaling : np.ndarray - Scaling factor (one factor per volume/sub-brick) for data. Default is - None - """ - - @kw_only_meth(2) - def __init__(self, file_like, header, mmap=True, keep_file_open=None): - """ - Initialize AFNI array proxy - - Parameters - ---------- - file_like : file-like object - File-like object or filename. If file-like object, should implement - at least ``read`` and ``seek``. - header : ``AFNIHeader`` object - mmap : {True, False, 'c', 'r'}, optional, keyword only - `mmap` controls the use of numpy memory mapping for reading data. - If False, do not try numpy ``memmap`` for data array. If one of - {'c', 'r'}, try numpy memmap with ``mode=mmap``. A `mmap` value of - True gives the same behavior as ``mmap='c'``. If `file_like` - cannot be memory-mapped, ignore `mmap` value and read array from - file. 
- keep_file_open : { None, 'auto', True, False }, optional, keyword only - `keep_file_open` controls whether a new file handle is created - every time the image is accessed, or a single file handle is - created and used for the lifetime of this ``ArrayProxy``. If - ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. If - ``indexed_gzip`` is not available, behavior is the same as if - ``keep_file_open is False``. If ``file_like`` refers to an open - file handle, this setting has no effect. The default value - (``None``) will result in the value of - ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT` being used. - """ - super(AFNIArrayProxy, self).__init__(file_like, - header, - mmap=mmap, - keep_file_open=keep_file_open) - self._scaling = header.get_data_scaling() - - @property - def scaling(self): - return self._scaling - - def __array__(self): - raw_data = self.get_unscaled() - # datatype may change if applying self._scaling - return raw_data if self.scaling is None else raw_data * self.scaling - - def __getitem__(self, slicer): - raw_data = super(AFNIArrayProxy, self).__getitem__(slicer) - # apply volume specific scaling (may change datatype!) - if self.scaling is not None: - fake_data = strided_scalar(self._shape) - _, scaling = np.broadcast_arrays(fake_data, self.scaling) - raw_data = raw_data * scaling[slicer] - return raw_data - - -class AFNIHeader(SpatialHeader): - """Class for AFNI header""" - - def __init__(self, info): - """ - Initialize AFNI header object - - Parameters - ---------- - info : dict - Information from HEAD file as obtained by :func:`parse_AFNI_header` - - Examples - -------- - >>> fname = os.path.join(datadir, 'example4d+orig.HEAD') - >>> header = AFNIHeader(parse_AFNI_header(fname)) - >>> header.get_data_dtype() - dtype('int16') - >>> header.get_zooms() - (3.0, 3.0, 3.0, 3.0) - >>> header.get_data_shape() - (33, 41, 25, 3) - """ - self.info = info - dt = _get_datatype(self.info) - super(AFNIHeader, self).__init__(data_dtype=dt, - shape=self._calc_data_shape(), - zooms=self._calc_zooms()) - - @classmethod - def from_header(klass, header=None): - if header is None: - raise AFNIHeaderError('Cannot create AFNIHeader from nothing.') - if type(header) == klass: - return header.copy() - raise AFNIHeaderError('Cannot create AFNIHeader from non-AFNIHeader.') - - @classmethod - def from_fileobj(klass, fileobj): - info = parse_AFNI_header(fileobj) - return klass(info) - - def copy(self): - return AFNIHeader(deepcopy(self.info)) - - def _calc_data_shape(self): - """ - Calculate the output shape of the image data - - Returns length 3 tuple for 3D image, length 4 tuple for 4D. - - Returns - ------- - (x, y, z, t) : tuple of int - - Notes - ----- - ``DATASET_RANK[0]`` gives number of spatial dimensions (and apparently - must be 3). ``DATASET_RANK[1]`` gives the number of sub-bricks. - ``DATASET_DIMENSIONS`` is length 3, giving the number of voxels in i, - j, k. - """ - dset_rank = self.info['DATASET_RANK'] - shape = tuple(self.info['DATASET_DIMENSIONS'][:dset_rank[0]]) - n_vols = dset_rank[1] - return shape + (n_vols,) - - def _calc_zooms(self): - """ - Get image zooms from header data - - Spatial axes are first three indices, time axis is last index. If - dataset is not a time series the last value will be zero. 
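-        A compact illustration, using the two attributes described in the
-        Notes below (hypothetical values)::
-
-            info = {'DELTA': (3.0, 3.0, 3.0),             # (x, y, z) sizes
-                    'TAXIS_FLOATS': (0.0, 3.0, 0, 0, 0)}  # entry 1 is the TR
-            # -> zooms of (3.0, 3.0, 3.0, 3.0); without TAXIS_FLOATS the
-            #    trailing time value would be 0.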
- - Returns - ------- - zooms : tuple - - Notes - ----- - Gets zooms from attributes ``DELTA`` and ``TAXIS_FLOATS``. - - ``DELTA`` gives (x,y,z) voxel sizes. - - ``TAXIS_FLOATS`` should be length 5, with first entry giving "Time - origin", and second giving "Time step (TR)". - """ - xyz_step = tuple(np.abs(self.info['DELTA'])) - t_step = self.info.get('TAXIS_FLOATS', (0, 0,)) - if len(t_step) > 0: - t_step = (t_step[1],) - return xyz_step + t_step - - def get_space(self): - """ - Return label for anatomical space to which this dataset is aligned. - - Returns - ------- - space : str - AFNI "space" designation; one of [ORIG, ANAT, TLRC, MNI] - - Notes - ----- - There appears to be documentation for these spaces at - https://afni.nimh.nih.gov/pub/dist/atlases/elsedemo/AFNI_atlas_spaces.niml - """ - listed_space = self.info.get('TEMPLATE_SPACE', 0) - space = space_codes.space[listed_space] - return space - - def get_affine(self): - """ - Returns affine of dataset - - Examples - -------- - >>> fname = os.path.join(datadir, 'example4d+orig.HEAD') - >>> header = AFNIHeader(parse_AFNI_header(fname)) - >>> header.get_affine() - array([[ -3. , -0. , -0. , 49.5 ], - [ -0. , -3. , -0. , 82.312 ], - [ 0. , 0. , 3. , -52.3511], - [ 0. , 0. , 0. , 1. ]]) - """ - # AFNI default is RAI- == LPS+ == DICOM order. We need to flip RA sign - # to align with nibabel RAS+ system - affine = np.asarray(self.info['IJK_TO_DICOM_REAL']).reshape(3, 4) - affine = np.row_stack((affine * [[-1], [-1], [1]], - [0, 0, 0, 1])) - return affine - - def get_data_scaling(self): - """ - AFNI applies volume-specific data scaling - - Examples - -------- - >>> fname = os.path.join(datadir, 'scaled+tlrc.HEAD') - >>> header = AFNIHeader(parse_AFNI_header(fname)) - >>> header.get_data_scaling() - array([ 3.88336300e-08]) - """ - # BRICK_FLOAT_FACS has one value per sub-brick, such that the scaled - # values for sub-brick array [n] are the values read from disk * - # BRICK_FLOAT_FACS[n] - floatfacs = self.info.get('BRICK_FLOAT_FACS', None) - if floatfacs is None or not np.any(floatfacs): - return None - scale = np.ones(self.info['DATASET_RANK'][1]) - floatfacs = np.atleast_1d(floatfacs) - scale[floatfacs.nonzero()] = floatfacs[floatfacs.nonzero()] - return scale - - def get_slope_inter(self): - """ - Use `self.get_data_scaling()` instead - - Holdover because ``AFNIArrayProxy`` (inheriting from ``ArrayProxy``) - requires this functionality so as to not error. - """ - return None, None - - def get_data_offset(self): - """Data offset in BRIK file - - Offset is always 0. - """ - return DATA_OFFSET - - def get_volume_labels(self): - """ - Returns volume labels - - Returns - ------- - labels : list of str - Labels for volumes along fourth dimension - - Examples - -------- - >>> header = AFNIHeader(parse_AFNI_header(os.path.join(datadir, 'example4d+orig.HEAD'))) - >>> header.get_volume_labels() - ['#0', '#1', '#2'] - """ - labels = self.info.get('BRICK_LABS', None) - if labels is not None: - labels = labels.split('~') - return labels - - -class AFNIImage(SpatialImage): - """ - AFNI Image file - - Can be loaded from either the BRIK or HEAD file (but MUST specify one!) - - Examples - -------- - >>> import nibabel as nib - >>> brik = nib.load(os.path.join(datadir, 'example4d+orig.BRIK.gz')) - >>> brik.shape - (33, 41, 25, 3) - >>> brik.affine - array([[ -3. , -0. , -0. , 49.5 ], - [ -0. , -3. , -0. , 82.312 ], - [ 0. , 0. , 3. , -52.3511], - [ 0. , 0. , 0. , 1. 
]]) - >>> head = load(os.path.join(datadir, 'example4d+orig.HEAD')) - >>> np.array_equal(head.get_data(), brik.get_data()) - True - """ - - header_class = AFNIHeader - valid_exts = ('.brik', '.head') - files_types = (('image', '.brik'), ('header', '.head')) - _compressed_suffixes = ('.gz', '.bz2', '.Z') - makeable = False - rw = False - ImageArrayProxy = AFNIArrayProxy - - @classmethod - @kw_only_meth(1) - def from_file_map(klass, file_map, mmap=True, keep_file_open=None): - """ - Creates an AFNIImage instance from `file_map` - - Parameters - ---------- - file_map : dict - dict with keys ``image, header`` and values being fileholder - objects for the respective BRIK and HEAD files - mmap : {True, False, 'c', 'r'}, optional, keyword only - `mmap` controls the use of numpy memory mapping for reading image - array data. If False, do not try numpy ``memmap`` for data array. - If one of {'c', 'r'}, try numpy memmap with ``mode=mmap``. A - `mmap` value of True gives the same behavior as ``mmap='c'``. If - image data file cannot be memory-mapped, ignore `mmap` value and - read array from file. - keep_file_open : {None, 'auto', True, False}, optional, keyword only - `keep_file_open` controls whether a new file handle is created - every time the image is accessed, or a single file handle is - created and used for the lifetime of this ``ArrayProxy``. If - ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. If - ``indexed_gzip`` is not available, behavior is the same as if - ``keep_file_open is False``. If ``file_like`` refers to an open - file handle, this setting has no effect. The default value - (``None``) will result in the value of - ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT` being used. - """ - with file_map['header'].get_prepare_fileobj('rt') as hdr_fobj: - hdr = klass.header_class.from_fileobj(hdr_fobj) - imgf = file_map['image'].fileobj - imgf = file_map['image'].filename if imgf is None else imgf - data = klass.ImageArrayProxy(imgf, hdr.copy(), mmap=mmap, - keep_file_open=keep_file_open) - return klass(data, hdr.get_affine(), header=hdr, extra=None, - file_map=file_map) - - @classmethod - @kw_only_meth(1) - def from_filename(klass, filename, mmap=True, keep_file_open=None): - """ - Creates an AFNIImage instance from `filename` - - Parameters - ---------- - filename : str - Path to BRIK or HEAD file to be loaded - mmap : {True, False, 'c', 'r'}, optional, keyword only - `mmap` controls the use of numpy memory mapping for reading image - array data. If False, do not try numpy ``memmap`` for data array. - If one of {'c', 'r'}, try numpy memmap with ``mode=mmap``. A - `mmap` value of True gives the same behavior as ``mmap='c'``. If - image data file cannot be memory-mapped, ignore `mmap` value and - read array from file. - keep_file_open : {None, 'auto', True, False}, optional, keyword only - `keep_file_open` controls whether a new file handle is created - every time the image is accessed, or a single file handle is - created and used for the lifetime of this ``ArrayProxy``. If - ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. 
If - ``indexed_gzip`` is not available, behavior is the same as if - ``keep_file_open is False``. If ``file_like`` refers to an open - file handle, this setting has no effect. The default value - (``None``) will result in the value of - ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT` being used. - """ - file_map = klass.filespec_to_file_map(filename) - return klass.from_file_map(file_map, mmap=mmap, - keep_file_open=keep_file_open) - - @classmethod - def filespec_to_file_map(klass, filespec): - """ - Make `file_map` from filename `filespec` - - AFNI BRIK files can be compressed, but HEAD files cannot - see - afni.nimh.nih.gov/pub/dist/doc/program_help/README.compression.html. - Thus, if you have AFNI files my_image.HEAD and my_image.BRIK.gz and you - want to load the AFNI BRIK / HEAD pair, you can specify: - * The HEAD filename - e.g., my_image.HEAD - * The BRIK filename w/o compressed extension - e.g., my_image.BRIK - * The full BRIK filename - e.g., my_image.BRIK.gz - - Parameters - ---------- - filespec : str - Filename that might be for this image file type. - - Returns - ------- - file_map : dict - dict with keys ``image`` and ``header`` where values are fileholder - objects for the respective BRIK and HEAD files - - Raises - ------ - ImageFileError - If `filespec` is not recognizable as being a filename for this - image type. - """ - file_map = super(AFNIImage, klass).filespec_to_file_map(filespec) - # check for AFNI-specific BRIK/HEAD compression idiosyncrasies - for key, fholder in file_map.items(): - fname = fholder.filename - if key == 'header' and not os.path.exists(fname): - for ext in klass._compressed_suffixes: - fname = fname[:-len(ext)] if fname.endswith(ext) else fname - elif key == 'image' and not os.path.exists(fname): - for ext in klass._compressed_suffixes: - if os.path.exists(fname + ext): - fname += ext - break - file_map[key].filename = fname - return file_map - - load = from_filename - - -load = AFNIImage.load diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py deleted file mode 100644 index c54c07f966..0000000000 --- a/nibabel/cmdline/dicomfs.py +++ /dev/null @@ -1,241 +0,0 @@ -#!python -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. 
-# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# Copyright (C) 2011 Christian Haselgrove - -import sys -import os -import stat -import errno -import time -import locale -import logging - - -class dummy_fuse(object): - """Dummy fuse "module" so that nose does not blow during doctests""" - Fuse = object - - -try: - import fuse - uid = os.getuid() - gid = os.getgid() -except ImportError: - fuse = dummy_fuse - -import nibabel as nib -import nibabel.dft as dft - -from optparse import OptionParser, Option - -encoding = locale.getdefaultlocale()[1] - -fuse.fuse_python_api = (0, 2) - -logger = logging.getLogger('nibabel.dft') - - -class FileHandle: - - def __init__(self, fno): - self.fno = fno - self.keep_cache = False - self.direct_io = False - return - - def __str__(self): - return 'FileHandle(%d)' % self.fno - - -class DICOMFS(fuse.Fuse): - - def __init__(self, *args, **kwargs): - if fuse is dummy_fuse: - raise RuntimeError( - "fuse module is not available, install it to use DICOMFS") - self.followlinks = kwargs.pop('followlinks', False) - self.dicom_path = kwargs.pop('dicom_path', None) - fuse.Fuse.__init__(self, *args, **kwargs) - self.fhs = {} - return - - def get_paths(self): - paths = {} - for study in dft.get_studies(self.dicom_path, self.followlinks): - pd = paths.setdefault(study.patient_name_or_uid(), {}) - patient_info = 'patient information\n' - patient_info = 'name: %s\n' % study.patient_name - patient_info += 'ID: %s\n' % study.patient_id - patient_info += 'birth date: %s\n' % study.patient_birth_date - patient_info += 'sex: %s\n' % study.patient_sex - pd['INFO'] = patient_info.encode('ascii', 'replace') - study_datetime = '%s_%s' % (study.date, study.time) - study_info = 'study info\n' - study_info += 'UID: %s\n' % study.uid - study_info += 'date: %s\n' % study.date - study_info += 'time: %s\n' % study.time - study_info += 'comments: %s\n' % study.comments - d = {'INFO': study_info.encode('ascii', 'replace')} - for series in study.series: - series_info = 'series info\n' - series_info += 'UID: %s\n' % series.uid - series_info += 'number: %s\n' % series.number - series_info += 'description: %s\n' % series.description - series_info += 'rows: %d\n' % series.rows - series_info += 'columns: %d\n' % series.columns - series_info += 'bits allocated: %d\n' % series.bits_allocated - series_info += 'bits stored: %d\n' % series.bits_stored - series_info += 'storage instances: %d\n' % len(series.storage_instances) - d[series.number] = {'INFO': series_info.encode('ascii', 'replace'), - '%s.nii' % series.number: (series.nifti_size, series.as_nifti), - '%s.png' % series.number: (series.png_size, series.as_png)} - pd[study_datetime] = d - return paths - - def match_path(self, path): - wd = self.get_paths() - if path == '/': - logger.debug('return root') - return wd - for part in path.lstrip('/').split('/'): - logger.debug("path:%s part:%s" % (path, part)) - if part not in wd: - return None - wd = wd[part] - logger.debug('return') - return wd - - def readdir(self, path, fh): - logger.info('readdir %s' % (path,)) - matched_path = self.match_path(path) - if matched_path is None: - return -errno.ENOENT - logger.debug('matched %s' % (matched_path,)) - fnames = [k.encode('ascii', 'replace') for k in matched_path.keys()] - fnames.append('.') - fnames.append('..') - return [fuse.Direntry(f) for f in fnames] - - def getattr(self, path): - logger.debug('getattr %s' % path) - matched_path = self.match_path(path) - logger.debug('matched: %s' % (matched_path,)) - now = 
time.time() - st = fuse.Stat() - if isinstance(matched_path, dict): - st.st_mode = stat.S_IFDIR | 0o755 - st.st_ctime = now - st.st_mtime = now - st.st_atime = now - st.st_uid = uid - st.st_gid = gid - st.st_nlink = len(matched_path) - return st - if isinstance(matched_path, str): - st.st_mode = stat.S_IFREG | 0o644 - st.st_ctime = now - st.st_mtime = now - st.st_atime = now - st.st_uid = uid - st.st_gid = gid - st.st_size = len(matched_path) - st.st_nlink = 1 - return st - if isinstance(matched_path, tuple): - st.st_mode = stat.S_IFREG | 0o644 - st.st_ctime = now - st.st_mtime = now - st.st_atime = now - st.st_uid = uid - st.st_gid = gid - st.st_size = matched_path[0]() - st.st_nlink = 1 - return st - return -errno.ENOENT - - def open(self, path, flags): - logger.debug('open %s' % (path,)) - matched_path = self.match_path(path) - if matched_path is None: - return -errno.ENOENT - for i in range(1, 10): - if i not in self.fhs: - if isinstance(matched_path, str): - self.fhs[i] = matched_path - elif isinstance(matched_path, tuple): - self.fhs[i] = matched_path[1]() - else: - raise -errno.EFTYPE - return FileHandle(i) - raise -errno.ENFILE - - # not done - def read(self, path, size, offset, fh): - logger.debug('read') - logger.debug(path) - logger.debug(size) - logger.debug(offset) - logger.debug(fh) - return self.fhs[fh.fno][offset:offset + size] - - def release(self, path, flags, fh): - logger.debug('release') - logger.debug(path) - logger.debug(fh) - del self.fhs[fh.fno] - return - - -def get_opt_parser(): - # use module docstring for help output - p = OptionParser( - usage="%s [OPTIONS] " - % os.path.basename(sys.argv[0]), - version="%prog " + nib.__version__) - - p.add_options([ - Option("-v", "--verbose", action="count", - dest="verbose", default=0, - help="make noise. 
Could be specified multiple times"), - ]) - - p.add_options([ - Option("-L", "--follow-links", action="store_true", - dest="followlinks", default=False, - help="Follow symbolic links in DICOM directory"), - ]) - return p - - -def main(args=None): - parser = get_opt_parser() - (opts, files) = parser.parse_args(args=args) - - if opts.verbose: - logger.addHandler(logging.StreamHandler(sys.stdout)) - logger.setLevel(opts.verbose > 1 and logging.DEBUG or logging.INFO) - - if len(files) != 2: - sys.stderr.write("Please provide two arguments:\n%s\n" % parser.usage) - sys.exit(1) - - fs = DICOMFS( - dash_s_do='setsingle', - followlinks=opts.followlinks, - dicom_path=files[0].decode(encoding) - ) - fs.parse(['-f', '-s', files[1]]) - try: - fs.main() - except fuse.FuseError: - # fuse prints the error message - sys.exit(1) - - sys.exit(0) diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index f919700247..98f75e21dc 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -21,7 +21,8 @@ import nibabel.cmdline.utils from nibabel.cmdline.utils import _err, verbose, table2string, ap, safe_get -__copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko ' \ +__author__ = 'Yaroslav Halchenko' +__copyright__ = 'Copyright (c) 2011-2016 Yaroslav Halchenko ' \ 'and NiBabel contributors' __license__ = 'MIT' @@ -152,11 +153,11 @@ def proc_file(f, opts): return row -def main(args=None): +def main(): """Show must go on""" parser = get_opt_parser() - (opts, files) = parser.parse_args(args=args) + (opts, files) = parser.parse_args() nibabel.cmdline.utils.verbose_level = opts.verbose diff --git a/nibabel/cmdline/nifti_dx.py b/nibabel/cmdline/nifti_dx.py deleted file mode 100644 index e478b5a5c2..0000000000 --- a/nibabel/cmdline/nifti_dx.py +++ /dev/null @@ -1,38 +0,0 @@ -#!python -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. -# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Print nifti diagnostics for header files ''' - -import sys -from optparse import OptionParser - -import nibabel as nib - -__author__ = 'Matthew Brett' -__copyright__ = 'Copyright (c) 2011-18 Matthew Brett ' \ - 'and NiBabel contributors' -__license__ = 'MIT' - - -def main(args=None): - """ Go go team """ - parser = OptionParser( - usage="%s [FILE ...]\n\n" % sys.argv[0] + __doc__, - version="%prog " + nib.__version__) - (opts, files) = parser.parse_args(args=args) - - for fname in files: - with nib.openers.ImageOpener(fname) as fobj: - hdr = fobj.read(nib.nifti1.header_dtype.itemsize) - result = nib.Nifti1Header.diagnose_binaryblock(hdr) - if len(result): - print('Picky header check output for "%s"\n' % fname) - print(result + '\n') - else: - print('Header for "%s" is clean' % fname) diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py deleted file mode 100644 index deb3adcd5f..0000000000 --- a/nibabel/cmdline/tck2trk.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Convert tractograms (TCK -> TRK). -""" -import os -import argparse - -import nibabel as nib - -from nibabel.streamlines import Field -from nibabel.orientations import aff2axcodes - - -def parse_args(): - DESCRIPTION = "Convert tractograms (TCK -> TRK)." 
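-    # Illustrative invocation of this converter (hypothetical file names):
-    #
-    #     nib-tck2trk anat.nii.gz bundle.tck
-    #
-    # The reference anatomy is required because the TRK header records an
-    # affine, voxel sizes, dimensions and voxel order, none of which a TCK
-    # file carries; main() below fills them in from the loaded image.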
- parser = argparse.ArgumentParser(description=DESCRIPTION) - parser.add_argument("anatomy", - help="reference anatomical image (.nii|.nii.gz.") - parser.add_argument("tractograms", metavar="tractogram", nargs="+", - help="list of tractograms (.tck).") - parser.add_argument("-f", "--force", action="store_true", - help="overwrite existing output files.") - - args = parser.parse_args() - return args, parser - - -def main(): - args, parser = parse_args() - - try: - nii = nib.load(args.anatomy) - except Exception: - parser.error("Expecting anatomical image as first agument.") - - for tractogram in args.tractograms: - tractogram_format = nib.streamlines.detect_format(tractogram) - if tractogram_format is not nib.streamlines.TckFile: - print("Skipping non TCK file: '{}'".format(tractogram)) - continue - - filename, _ = os.path.splitext(tractogram) - output_filename = filename + '.trk' - if os.path.isfile(output_filename) and not args.force: - msg = "Skipping existing file: '{}'. Use -f to overwrite." - print(msg.format(output_filename)) - continue - - # Build header using infos from the anatomical image. - header = {} - header[Field.VOXEL_TO_RASMM] = nii.affine.copy() - header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3] - header[Field.DIMENSIONS] = nii.shape[:3] - header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine)) - - tck = nib.streamlines.load(tractogram) - nib.streamlines.save(tck.tractogram, output_filename, header=header) diff --git a/nibabel/cmdline/tests/__init__.py b/nibabel/cmdline/tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/nibabel/cmdline/trk2tck.py b/nibabel/cmdline/trk2tck.py deleted file mode 100644 index a55f7e95af..0000000000 --- a/nibabel/cmdline/trk2tck.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Convert tractograms (TRK -> TCK). -""" - -import os -import argparse - -import nibabel as nib - - -def parse_args(): - DESCRIPTION = "Convert tractograms (TRK -> TCK)." - parser = argparse.ArgumentParser(description=DESCRIPTION) - parser.add_argument("tractograms", metavar="tractogram", nargs="+", - help="list of tractograms (.trk).") - parser.add_argument("-f", "--force", action="store_true", - help="overwrite existing output files.") - - args = parser.parse_args() - return args, parser - - -def main(): - args, parser = parse_args() - for tractogram in args.tractograms: - tractogram_format = nib.streamlines.detect_format(tractogram) - if tractogram_format is not nib.streamlines.TrkFile: - print("Skipping non TRK file: '{}'".format(tractogram)) - continue - - filename, _ = os.path.splitext(tractogram) - output_filename = filename + '.tck' - if os.path.isfile(output_filename) and not args.force: - msg = "Skipping existing file: '{}'. Use -f to overwrite." - print(msg.format(output_filename)) - continue - - trk = nib.streamlines.load(tractogram) - nib.streamlines.save(trk.tractogram, output_filename) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 927d6126c0..b3dd1f9bc5 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -255,7 +255,7 @@ def get_zooms(self): .. 
_mghformat: https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat#line-82 ''' # Do not return time zoom (TR) if 3D image - tzoom = (self['tr'],) if self._ndims() > 3 else () + tzoom = (self['tr'],)[:self._ndims() > 3] return tuple(self._structarr['delta']) + tzoom def set_zooms(self, zooms): diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index c1a0b7133a..f136a070be 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -9,7 +9,6 @@ ''' Define supported image classes and names ''' from .analyze import AnalyzeImage -from .brikhead import AFNIImage from .cifti2 import Cifti2Image from .freesurfer import MGHImage from .gifti import GiftiImage @@ -32,7 +31,7 @@ Cifti2Image, Nifti2Image, # Cifti2 before Nifti2 Spm2AnalyzeImage, Spm99AnalyzeImage, AnalyzeImage, Minc1Image, Minc2Image, MGHImage, - PARRECImage, GiftiImage, AFNIImage] + PARRECImage, GiftiImage] # DEPRECATED: mapping of names to classes and class functionality @@ -89,12 +88,7 @@ def __getitem__(self, *args, **kwargs): 'ext': '.par', 'has_affine': True, 'makeable': False, - 'rw': False}, - afni={'class': AFNIImage, - 'ext': '.brik', - 'has_affine': True, - 'makeable': False, - 'rw': False}) + 'rw': False}) class ExtMapRecoder(Recoder): @@ -113,7 +107,6 @@ def __getitem__(self, *args, **kwargs): ('mgh', '.mgh'), ('mgz', '.mgz'), ('par', '.par'), - ('brik', '.brik') )) # Image classes known to require spatial axes to be first in index ordering. @@ -121,7 +114,7 @@ def __getitem__(self, *args, **kwargs): # here. KNOWN_SPATIAL_FIRST = (Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, AnalyzeImage, - MGHImage, PARRECImage, AFNIImage) + MGHImage, PARRECImage) def spatial_axes_first(img): diff --git a/nibabel/openers.py b/nibabel/openers.py index f64ab23b37..0f57fa406a 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -18,22 +18,16 @@ # is indexed_gzip present and modern? try: - import indexed_gzip as igzip - version = igzip.__version__ + from indexed_gzip import SafeIndexedGzipFile, __version__ as version HAVE_INDEXED_GZIP = True - # < 0.7 - no good - if StrictVersion(version) < StrictVersion('0.7.0'): + if StrictVersion(version) < StrictVersion('0.6.0'): warnings.warn('indexed_gzip is present, but too old ' - '(>= 0.7.0 required): {})'.format(version)) + '(>= 0.6.0 required): {})'.format(version)) HAVE_INDEXED_GZIP = False - # >= 0.8 SafeIndexedGzipFile renamed to IndexedGzipFile - elif StrictVersion(version) < StrictVersion('0.8.0'): - IndexedGzipFile = igzip.SafeIndexedGzipFile - else: - IndexedGzipFile = igzip.IndexedGzipFile - del igzip, version + + del version except ImportError: HAVE_INDEXED_GZIP = False @@ -86,11 +80,9 @@ def readinto(self, buf): def _gzip_open(filename, mode='rb', compresslevel=9, keep_open=False): - # use indexed_gzip if possible for faster read access. If keep_open == - # True, we tell IndexedGzipFile to keep the file handle open. Otherwise - # the IndexedGzipFile will close/open the file on each read. 
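# Sketch of how the two strategies behave for a caller such as
# ``ArrayProxy._get_fileobj`` (file name is hypothetical):
#
#     ImageOpener('vol.nii.gz', keep_open=True)   # may hold one handle open
#     ImageOpener('vol.nii.gz', keep_open=False)  # reopen on each access
#
# In the version removed here, indexed_gzip serves every 'rb' open and
# ``drop_handles`` controls reopening; in the restored version below it
# is used only when ``keep_open`` is True.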
- if HAVE_INDEXED_GZIP and mode == 'rb': - gzip_file = IndexedGzipFile(filename, drop_handles=not keep_open) + # use indexed_gzip if possible for faster read access + if keep_open and mode == 'rb' and HAVE_INDEXED_GZIP: + gzip_file = SafeIndexedGzipFile(filename) # Fall-back to built-in GzipFile (wrapped with the BufferedGzipFile class # defined above) diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/parrec2nii_cmd.py similarity index 100% rename from nibabel/cmdline/parrec2nii.py rename to nibabel/parrec2nii_cmd.py diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 9c3fef4fbb..b88b3e8538 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -140,7 +140,6 @@ from .filebasedimages import ImageFileError # flake8: noqa; for back-compat from .viewers import OrthoSlicer3D from .volumeutils import shape_zoom_affine -from .fileslice import canonical_slicers from .deprecated import deprecate_with_version from .orientations import apply_orientation, inv_ornt_aff @@ -322,103 +321,9 @@ class ImageDataError(Exception): pass -class SpatialFirstSlicer(object): - ''' Slicing interface that returns a new image with an updated affine - - Checks that an image's first three axes are spatial - ''' - def __init__(self, img): - # Local import to avoid circular import on module load - from .imageclasses import spatial_axes_first - if not spatial_axes_first(img): - raise ValueError("Cannot predict position of spatial axes for " - "Image type " + img.__class__.__name__) - self.img = img - - def __getitem__(self, slicer): - try: - slicer = self.check_slicing(slicer) - except ValueError as err: - raise IndexError(*err.args) - - dataobj = self.img.dataobj[slicer] - if any(dim == 0 for dim in dataobj.shape): - raise IndexError("Empty slice requested") - - affine = self.slice_affine(slicer) - return self.img.__class__(dataobj.copy(), affine, self.img.header) - - def check_slicing(self, slicer, return_spatial=False): - ''' Canonicalize slicers and check for scalar indices in spatial dims - - Parameters - ---------- - slicer : object - something that can be used to slice an array as in - ``arr[sliceobj]`` - return_spatial : bool - return only slices along spatial dimensions (x, y, z) - - Returns - ------- - slicer : object - Validated slicer object that will slice image's `dataobj` - without collapsing spatial dimensions - ''' - slicer = canonical_slicers(slicer, self.img.shape) - # We can get away with this because we've checked the image's - # first three axes are spatial. - # More general slicers will need to be smarter, here. - spatial_slices = slicer[:3] - for subslicer in spatial_slices: - if subslicer is None: - raise IndexError("New axis not permitted in spatial dimensions") - elif isinstance(subslicer, int): - raise IndexError("Scalar indices disallowed in spatial dimensions; " - "Use `[x]` or `x:x+1`.") - return spatial_slices if return_spatial else slicer - - def slice_affine(self, slicer): - """ Retrieve affine for current image, if sliced by a given index - - Applies scaling if down-sampling is applied, and adjusts the intercept - to account for any cropping. 
- - Parameters - ---------- - slicer : object - something that can be used to slice an array as in - ``arr[sliceobj]`` - - Returns - ------- - affine : (4,4) ndarray - Affine with updated scale and intercept - """ - slicer = self.check_slicing(slicer, return_spatial=True) - - # Transform: - # sx 0 0 tx - # 0 sy 0 ty - # 0 0 sz tz - # 0 0 0 1 - transform = np.eye(4, dtype=int) - - for i, subslicer in enumerate(slicer): - if isinstance(subslicer, slice): - if subslicer.step == 0: - raise ValueError("slice step cannot be 0") - transform[i, i] = subslicer.step if subslicer.step is not None else 1 - transform[i, 3] = subslicer.start or 0 - # If slicer is None, nothing to do - - return self.img.affine.dot(transform) - - class SpatialImage(DataobjImage): ''' Template class for volumetric (3D/4D) images ''' header_class = SpatialHeader - ImageSlicer = SpatialFirstSlicer def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): @@ -556,38 +461,12 @@ def from_image(klass, img): klass.header_class.from_header(img.header), extra=img.extra.copy()) - @property - def slicer(self): - """ Slicer object that returns cropped and subsampled images - - The image is resliced in the current orientation; no rotation or - resampling is performed, and no attempt is made to filter the image - to avoid `aliasing`_. - - The affine matrix is updated with the new intercept (and scales, if - down-sampling is used), so that all values are found at the same RAS - locations. - - Slicing may include non-spatial dimensions. - However, this method does not currently adjust the repetition time in - the image header. - - .. _aliasing: https://en.wikipedia.org/wiki/Aliasing - """ - return self.ImageSlicer(self) - - def __getitem__(self, idx): ''' No slicing or dictionary interface for images - - Use the slicer attribute to perform cropping and subsampling at your - own risk. 
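A worked numeric example of the transform that ``slice_affine`` builds
above (a pure-numpy sketch; the slicer ``img.slicer[1::2]`` on an identity
affine is assumed)::

    import numpy as np

    affine = np.eye(4)        # assume a 1 mm isotropic image at the origin
    # slice(1, None, 2) on the i axis: start == 1, step == 2
    transform = np.eye(4, dtype=int)
    transform[0, 0] = 2       # scale column i by the slice step
    transform[0, 3] = 1       # shift the intercept by the slice start
    new_affine = affine.dot(transform)
    # Voxel (0, 0, 0) of the cropped image maps to world x == 1, and one
    # step along i now spans two voxels of the original image.
    assert np.allclose(new_affine[:3, 3], [1, 0, 0])
    assert new_affine[0, 0] == 2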
''' - raise TypeError( - "Cannot slice image objects; consider using `img.slicer[slice]` " - "to generate a sliced image (see documentation for caveats) or " - "slicing image array data with `img.dataobj[slice]` or " - "`img.get_data()[slice]`") + raise TypeError("Cannot slice image objects; consider slicing image " + "array data with `img.dataobj[slice]` or " + "`img.get_data()[slice]`") def orthoview(self): """Plot the image using OrthoSlicer3D diff --git a/nibabel/tests/data/bad_attribute+orig.HEAD b/nibabel/tests/data/bad_attribute+orig.HEAD deleted file mode 100644 index 95fbdeb309..0000000000 --- a/nibabel/tests/data/bad_attribute+orig.HEAD +++ /dev/null @@ -1,133 +0,0 @@ - -type = string-attribute -name = DATASET_NAME -count = 5 -'none~ - -type = string-attribute -name = TYPESTRING -count = 15 -'3DIM_HEAD_ANAT~ - -type = string-attribute -name = IDCODE_STRING -count = 27 -'AFN_-zxZ0OyZs8eEtm9syGBNdA~ - -type = string-attribute -name = IDCODE_DATE -count = 25 -'Sun Oct 1 21:13:09 2017~ - -type = integer-attribute -name = SCENE_DATA -count = 8 - 0 2 0 -999 -999 - -999 -999 -999 - -type = string-attribute -name = LABEL_1 -count = 5 -'none~ - -type = string-attribute -name = LABEL_2 -count = 5 -'none~ - -type = integer-attribute -name = ORIENT_SPECIFIC -count = 3 - 0 3 4 - -type = float-attribute -name = ORIGIN -count = 3 - -49.5 -82.312 -52.3511 - -type = float-attribute -name = DELTA -count = 3 - 3 3 3 - -type = float-attribute -name = IJK_TO_DICOM -count = 12 - 3 0 0 -49.5 0 - 3 0 -82.312 0 0 - 3 -52.3511 - -type = float-attribute -name = IJK_TO_DICOM_REAL -count = 12 - 3 0 0 -49.5 0 - 3 0 -82.312 0 0 - 3 -52.3511 - -type = float-attribute -name = BRICK_STATS -count = 6 - 0 13722 0 10051 0 - 9968 - -type = integer-attribute -name = TAXIS_NUMS -count = 8 - 3 25 77002 -999 -999 - -999 -999 -999 - -type = float-attribute -name = TAXIS_FLOATS -count = 8 - 0 3 0 -52.3511 3 - -999999 -999999 -999999 - -type = float-attribute -name = TAXIS_OFFSETS -count = 25 - 0.3260869 1.826087 0.3913043 1.891304 0.4565217 - 1.956521 0.5217391 2.021739 0.5869564 2.086956 - 0.6521738 2.152174 0.7173912 2.217391 0.7826086 - 2.282609 0.8478259 2.347826 0.9130433 2.413044 - 0.9782607 2.478261 1.043478 2.543479 1.108696 - -type = integer-attribute -name = DATASET_RANK -count = 8 - 3 3 0 0 0 - 0 0 0 - -type = integer-attribute -name = DATASET_DIMENSIONS -count = 5 - 33 41 25 0 0 - -type = integer-attribute -name = BRICK_TYPES -count = 3 - 1 1 1 - -type = float-attribute -name = BRICK_FLOAT_FACS -count = 3 - 0 0 0 - -type = string-attribute -name = TEMPLATE_SPACE -count = 5 -'ORIG~ - -type = integer-attribute -name = INT_CMAP -count = 1 - 0 - -type = integer-attribute -name = BYTEORDER_STRING -count = 10 -'LSB_FIRST~ - -type = string-attribute -name = BRICK_LABS -count = 9 -'#0~#1~#2~ diff --git a/nibabel/tests/data/bad_datatype+orig.HEAD b/nibabel/tests/data/bad_datatype+orig.HEAD deleted file mode 100644 index 27b3a56abb..0000000000 --- a/nibabel/tests/data/bad_datatype+orig.HEAD +++ /dev/null @@ -1,133 +0,0 @@ - -type = string-attribute -name = DATASET_NAME -count = 5 -'none~ - -type = string-attribute -name = TYPESTRING -count = 15 -'3DIM_HEAD_ANAT~ - -type = string-attribute -name = IDCODE_STRING -count = 27 -'AFN_-zxZ0OyZs8eEtm9syGBNdA~ - -type = string-attribute -name = IDCODE_DATE -count = 25 -'Sun Oct 1 21:13:09 2017~ - -type = integer-attribute -name = SCENE_DATA -count = 8 - 0 2 0 -999 -999 - -999 -999 -999 - -type = string-attribute -name = LABEL_1 -count = 5 -'none~ - -type = string-attribute -name 
= LABEL_2 -count = 5 -'none~ - -type = integer-attribute -name = ORIENT_SPECIFIC -count = 3 - 0 3 4 - -type = float-attribute -name = ORIGIN -count = 3 - -49.5 -82.312 -52.3511 - -type = float-attribute -name = DELTA -count = 3 - 3 3 3 - -type = float-attribute -name = IJK_TO_DICOM -count = 12 - 3 0 0 -49.5 0 - 3 0 -82.312 0 0 - 3 -52.3511 - -type = float-attribute -name = IJK_TO_DICOM_REAL -count = 12 - 3 0 0 -49.5 0 - 3 0 -82.312 0 0 - 3 -52.3511 - -type = float-attribute -name = BRICK_STATS -count = 6 - 0 13722 0 10051 0 - 9968 - -type = integer-attribute -name = TAXIS_NUMS -count = 8 - 3 25 77002 -999 -999 - -999 -999 -999 - -type = float-attribute -name = TAXIS_FLOATS -count = 8 - 0 3 0 -52.3511 3 - -999999 -999999 -999999 - -type = float-attribute -name = TAXIS_OFFSETS -count = 25 - 0.3260869 1.826087 0.3913043 1.891304 0.4565217 - 1.956521 0.5217391 2.021739 0.5869564 2.086956 - 0.6521738 2.152174 0.7173912 2.217391 0.7826086 - 2.282609 0.8478259 2.347826 0.9130433 2.413044 - 0.9782607 2.478261 1.043478 2.543479 1.108696 - -type = integer-attribute -name = DATASET_RANK -count = 8 - 3 3 0 0 0 - 0 0 0 - -type = integer-attribute -name = DATASET_DIMENSIONS -count = 5 - 33 41 25 0 0 - -type = integer-attribute -name = BRICK_TYPES -count = 3 - 1 3 5 - -type = float-attribute -name = BRICK_FLOAT_FACS -count = 3 - 0 0 0 - -type = string-attribute -name = TEMPLATE_SPACE -count = 5 -'ORIG~ - -type = integer-attribute -name = INT_CMAP -count = 1 - 0 - -type = string-attribute -name = BYTEORDER_STRING -count = 10 -'LSB_FIRST~ - -type = string-attribute -name = BRICK_LABS -count = 9 -'#0~#1~#2~ diff --git a/nibabel/tests/data/example4d+orig.HEAD b/nibabel/tests/data/example4d+orig.HEAD deleted file mode 100644 index a43b839d0a..0000000000 --- a/nibabel/tests/data/example4d+orig.HEAD +++ /dev/null @@ -1,133 +0,0 @@ - -type = string-attribute -name = DATASET_NAME -count = 5 -'none~ - -type = string-attribute -name = TYPESTRING -count = 15 -'3DIM_HEAD_ANAT~ - -type = string-attribute -name = IDCODE_STRING -count = 27 -'AFN_-zxZ0OyZs8eEtm9syGBNdA~ - -type = string-attribute -name = IDCODE_DATE -count = 25 -'Sun Oct 1 21:13:09 2017~ - -type = integer-attribute -name = SCENE_DATA -count = 8 - 0 2 0 -999 -999 - -999 -999 -999 - -type = string-attribute -name = LABEL_1 -count = 5 -'none~ - -type = string-attribute -name = LABEL_2 -count = 5 -'none~ - -type = integer-attribute -name = ORIENT_SPECIFIC -count = 3 - 0 3 4 - -type = float-attribute -name = ORIGIN -count = 3 - -49.5 -82.312 -52.3511 - -type = float-attribute -name = DELTA -count = 3 - 3 3 3 - -type = float-attribute -name = IJK_TO_DICOM -count = 12 - 3 0 0 -49.5 0 - 3 0 -82.312 0 0 - 3 -52.3511 - -type = float-attribute -name = IJK_TO_DICOM_REAL -count = 12 - 3 0 0 -49.5 0 - 3 0 -82.312 0 0 - 3 -52.3511 - -type = float-attribute -name = BRICK_STATS -count = 6 - 0 13722 0 10051 0 - 9968 - -type = integer-attribute -name = TAXIS_NUMS -count = 8 - 3 25 77002 -999 -999 - -999 -999 -999 - -type = float-attribute -name = TAXIS_FLOATS -count = 8 - 0 3 0 -52.3511 3 - -999999 -999999 -999999 - -type = float-attribute -name = TAXIS_OFFSETS -count = 25 - 0.3260869 1.826087 0.3913043 1.891304 0.4565217 - 1.956521 0.5217391 2.021739 0.5869564 2.086956 - 0.6521738 2.152174 0.7173912 2.217391 0.7826086 - 2.282609 0.8478259 2.347826 0.9130433 2.413044 - 0.9782607 2.478261 1.043478 2.543479 1.108696 - -type = integer-attribute -name = DATASET_RANK -count = 8 - 3 3 0 0 0 - 0 0 0 - -type = integer-attribute -name = DATASET_DIMENSIONS -count = 5 - 33 41 25 0 0 
- -type = integer-attribute -name = BRICK_TYPES -count = 3 - 1 1 1 - -type = float-attribute -name = BRICK_FLOAT_FACS -count = 3 - 0 0 0 - -type = string-attribute -name = TEMPLATE_SPACE -count = 5 -'ORIG~ - -type = integer-attribute -name = INT_CMAP -count = 1 - 0 - -type = string-attribute -name = BYTEORDER_STRING -count = 10 -'LSB_FIRST~ - -type = string-attribute -name = BRICK_LABS -count = 9 -'#0~#1~#2~ diff --git a/nibabel/tests/data/scaled+tlrc.HEAD b/nibabel/tests/data/scaled+tlrc.HEAD deleted file mode 100644 index a13b054e2d..0000000000 --- a/nibabel/tests/data/scaled+tlrc.HEAD +++ /dev/null @@ -1,116 +0,0 @@ - -type = string-attribute -name = TYPESTRING -count = 15 -'3DIM_HEAD_ANAT~ - -type = string-attribute -name = IDCODE_STRING -count = 27 -'AFN_vLKn9e5VumKelWXNeq4SWA~ - -type = string-attribute -name = IDCODE_DATE -count = 25 -'Tue Jan 23 20:05:10 2018~ - -type = integer-attribute -name = SCENE_DATA -count = 8 - 2 2 0 -999 -999 - -999 -999 -999 - -type = string-attribute -name = LABEL_1 -count = 5 -'zyxt~ - -type = string-attribute -name = LABEL_2 -count = 5 -'zyxt~ - -type = string-attribute -name = DATASET_NAME -count = 5 -'zyxt~ - -type = integer-attribute -name = ORIENT_SPECIFIC -count = 3 - 1 2 4 - -type = float-attribute -name = ORIGIN -count = 3 - 66 87 -54 - -type = float-attribute -name = DELTA -count = 3 - -3 -3 3 - -type = float-attribute -name = IJK_TO_DICOM -count = 12 - -3 0 0 66 0 - -3 0 87 0 0 - 3 -54 - -type = float-attribute -name = IJK_TO_DICOM_REAL -count = 12 - -3 0 0 66 0 - -3 0 87 0 0 - 3 -54 - -type = float-attribute -name = BRICK_STATS -count = 2 - 1.941682e-07 0.001272461 - -type = integer-attribute -name = DATASET_RANK -count = 8 - 3 1 0 0 0 - 0 0 0 - -type = integer-attribute -name = DATASET_DIMENSIONS -count = 5 - 47 54 43 0 0 - -type = integer-attribute -name = BRICK_TYPES -count = 1 - 1 - -type = float-attribute -name = BRICK_FLOAT_FACS -count = 1 - 3.883363e-08 - -type = string-attribute -name = BRICK_LABS -count = 3 -'#0~ - -type = string-attribute -name = BRICK_KEYWORDS -count = 1 -'~ - -type = string-attribute -name = TEMPLATE_SPACE -count = 5 -'TLRC~ - -type = integer-attribute -name = INT_CMAP -count = 1 - 0 - -type = string-attribute -name = BYTEORDER_STRING -count = 10 -'LSB_FIRST~ diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index fd6cfd5a44..537ed8f87d 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -20,7 +20,8 @@ import numpy as np -from ..arrayproxy import (ArrayProxy, is_proxy, reshape_dataobj) +from ..arrayproxy import (ArrayProxy, KEEP_FILE_OPEN_DEFAULT, is_proxy, + reshape_dataobj) from ..openers import ImageOpener from ..nifti1 import Nifti1Header @@ -341,20 +342,15 @@ def check_mmap(hdr, offset, proxy_class, # An image opener class which counts how many instances of itself have been # created class CountingImageOpener(ImageOpener): + num_openers = 0 + def __init__(self, *args, **kwargs): + super(CountingImageOpener, self).__init__(*args, **kwargs) CountingImageOpener.num_openers += 1 -def _count_ImageOpeners(proxy, data, voxels): - CountingImageOpener.num_openers = 0 - for i in range(voxels.shape[0]): - x, y, z = [int(c) for c in voxels[i, :]] - assert proxy[x, y, z] == x * 100 + y * 10 + z - return CountingImageOpener.num_openers - - def test_keep_file_open_true_false_invalid(): # Test the behaviour of the keep_file_open __init__ flag, when it is set to # True or False. 
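A minimal usage sketch of the flag exercised by this test, mirroring its
setup (``testdata.gz`` is created here purely for illustration)::

    import gzip
    import numpy as np
    from nibabel.arrayproxy import ArrayProxy

    shape, dtype = (10, 10, 10), np.float32
    data = np.arange(1000, dtype=dtype).reshape(shape)
    with gzip.open('testdata.gz', 'wb') as fobj:
        fobj.write(data.tostring(order='F'))

    # keep_file_open=True: one handle is opened and reused for all reads
    proxy_kfp = ArrayProxy('testdata.gz', (shape, dtype),
                           keep_file_open=True)
    # keep_file_open=False: each indexing operation re-opens the file
    proxy_no_kfp = ArrayProxy('testdata.gz', (shape, dtype),
                              keep_file_open=False)
    assert proxy_kfp[2, 3, 4] == proxy_no_kfp[2, 3, 4]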
@@ -369,15 +365,22 @@ def test_keep_file_open_true_false_invalid(): # Test that ArrayProxy(keep_file_open=True) only creates one file # handle, and that ArrayProxy(keep_file_open=False) creates a file # handle on every data access. - with mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): + with mock.patch('nibabel.arrayproxy.ImageOpener', CountingImageOpener): proxy_no_kfp = ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=False) assert not proxy_no_kfp._keep_file_open - assert _count_ImageOpeners(proxy_no_kfp, data, voxels) == 10 + for i in range(voxels.shape[0]): + x , y, z = [int(c) for c in voxels[i, :]] + assert proxy_no_kfp[x, y, z] == x * 100 + y * 10 + z + assert CountingImageOpener.num_openers == i + 1 + CountingImageOpener.num_openers = 0 proxy_kfp = ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=True) assert proxy_kfp._keep_file_open - assert _count_ImageOpeners(proxy_kfp, data, voxels) == 1 + for i in range(voxels.shape[0]): + x , y, z = [int(c) for c in voxels[i, :]] + assert proxy_kfp[x, y, z] == x * 100 + y * 10 + z + assert CountingImageOpener.num_openers == 1 del proxy_kfp del proxy_no_kfp # Test that the keep_file_open flag has no effect if an open file @@ -386,15 +389,20 @@ def test_keep_file_open_true_false_invalid(): for kfo in (True, False, 'auto'): proxy = ArrayProxy(fobj, ((10, 10, 10), dtype), keep_file_open=kfo) - assert proxy._keep_file_open is False + if kfo == 'auto': + kfo = False + assert proxy._keep_file_open is kfo for i in range(voxels.shape[0]): - x, y, z = [int(c) for c in voxels[i, :]] assert proxy[x, y, z] == x * 100 + y * 10 + z assert not fobj.closed del proxy assert not fobj.closed assert fobj.closed # Test invalid values of keep_file_open + with assert_raises(ValueError): + ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=0) + with assert_raises(ValueError): + ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=1) with assert_raises(ValueError): ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=55) with assert_raises(ValueError): @@ -403,99 +411,69 @@ def test_keep_file_open_true_false_invalid(): ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open='cauto') +@contextlib.contextmanager +def patch_keep_file_open_default(value): + # Patch arrayproxy.KEEP_FILE_OPEN_DEFAULT with the given value + with mock.patch('nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT', value): + yield + + def test_keep_file_open_auto(): # Test the behaviour of the keep_file_open __init__ flag, when it is set to - # 'auto'. - # if indexed_gzip is present, the ArrayProxy should persist its ImageOpener. - # Otherwise the ArrayProxy should drop openers. 
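Note that the two sides of this hunk give ``'auto'`` different meanings: the
removed lines expect the proxy to keep the literal ``'auto'`` value and
resolve it per access, while the restored lines expect it to collapse to
``True`` or ``False`` at construction time, depending on whether
indexed_gzip is importable. A sketch of the restored behaviour (reusing the
``testdata.gz`` file from the test setup)::

    import numpy as np
    from nibabel.arrayproxy import ArrayProxy

    proxy = ArrayProxy('testdata.gz', ((10, 10, 10), np.float32),
                       keep_file_open='auto')
    # Restored behaviour: 'auto' has already been resolved to a bool here
    assert proxy._keep_file_open in (True, False)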
+ # 'auto' dtype = np.float32 - data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) - voxels = np.random.randint(0, 10, (10, 3)) + data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) with InTemporaryDirectory(): fname = 'testdata.gz' with gzip.open(fname, 'wb') as fobj: fobj.write(data.tostring(order='F')) - # If have_indexed_gzip, then the arrayproxy should create one - # ImageOpener - with patch_indexed_gzip(True), \ - mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): - CountingImageOpener.num_openers = 0 + # If have_indexed_gzip, then keep_file_open should be True + with patch_indexed_gzip(True): proxy = ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open='auto') - assert proxy._keep_file_open == 'auto' - assert _count_ImageOpeners(proxy, data, voxels) == 1 + assert proxy._keep_file_open # If no have_indexed_gzip, then keep_file_open should be False - with patch_indexed_gzip(False), \ - mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): - CountingImageOpener.num_openers = 0 - proxy = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open='auto') - assert proxy._keep_file_open is False - assert _count_ImageOpeners(proxy, data, voxels) == 10 - # If not a gzip file, keep_file_open should be False - fname = 'testdata' - with open(fname, 'wb') as fobj: - fobj.write(data.tostring(order='F')) - # regardless of whether indexed_gzip is present or not - with patch_indexed_gzip(True), \ - mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): - CountingImageOpener.num_openers = 0 - proxy = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open='auto') - assert proxy._keep_file_open is False - assert _count_ImageOpeners(proxy, data, voxels) == 10 - with patch_indexed_gzip(False), \ - mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): - CountingImageOpener.num_openers = 0 + with patch_indexed_gzip(False): proxy = ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open='auto') - assert proxy._keep_file_open is False - assert _count_ImageOpeners(proxy, data, voxels) == 10 - - -@contextlib.contextmanager -def patch_keep_file_open_default(value): - # Patch arrayproxy.KEEP_FILE_OPEN_DEFAULT with the given value - with mock.patch('nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT', value): - yield + assert not proxy._keep_file_open def test_keep_file_open_default(): # Test the behaviour of the keep_file_open __init__ flag, when the # arrayproxy.KEEP_FILE_OPEN_DEFAULT value is changed dtype = np.float32 - data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) + data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) with InTemporaryDirectory(): fname = 'testdata.gz' with gzip.open(fname, 'wb') as fobj: fobj.write(data.tostring(order='F')) - # If KEEP_FILE_OPEN_DEFAULT is False, ArrayProxy instances should - # interpret keep_file_open as False - with patch_keep_file_open_default(False): - with patch_indexed_gzip(False): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open is False - with patch_indexed_gzip(True): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open is False - # If KEEP_FILE_OPEN_DEFAULT is True, ArrayProxy instances should - # interpret keep_file_open as True - with patch_keep_file_open_default(True): - with patch_indexed_gzip(False): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open is True - with patch_indexed_gzip(True): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open is True - # If 
KEEP_FILE_OPEN_DEFAULT is auto, ArrayProxy instances should - # interpret it as auto if indexed_gzip is present, False otherwise. - with patch_keep_file_open_default('auto'): - with patch_indexed_gzip(False): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open is False - with patch_indexed_gzip(True): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open == 'auto' + # The default value of KEEP_FILE_OPEN_DEFAULT should cause + # keep_file_open to be False, regardless of whether or not indexed_gzip + # is present + assert KEEP_FILE_OPEN_DEFAULT is False + with patch_indexed_gzip(False): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert not proxy._keep_file_open + with patch_indexed_gzip(True): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert not proxy._keep_file_open + # KEEP_FILE_OPEN_DEFAULT=True should cause keep_file_open to be True, + # regardless of whether or not indexed_gzip is present + with patch_keep_file_open_default(True), patch_indexed_gzip(True): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert proxy._keep_file_open + with patch_keep_file_open_default(True), patch_indexed_gzip(False): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert proxy._keep_file_open + # KEEP_FILE_OPEN_DEFAULT=auto should cause keep_file_open to be True + # or False, depending on whether indeed_gzip is present, + with patch_keep_file_open_default('auto'), patch_indexed_gzip(True): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert proxy._keep_file_open + with patch_keep_file_open_default('auto'), patch_indexed_gzip(False): + proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) + assert not proxy._keep_file_open # KEEP_FILE_OPEN_DEFAULT=any other value should cuse an error to be # raised with patch_keep_file_open_default('badvalue'): diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py deleted file mode 100644 index c1632c06c2..0000000000 --- a/nibabel/tests/test_brikhead.py +++ /dev/null @@ -1,150 +0,0 @@ -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. -# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from __future__ import division, print_function, absolute_import - -from os.path import join as pjoin - -import numpy as np - -from .. import load, Nifti1Image -from .. 
import brikhead - -from nose.tools import (assert_true, assert_equal, assert_raises) -from numpy.testing import assert_array_equal -from ..testing import data_path - -from .test_fileslice import slicer_samples -from .test_helpers import assert_data_similar - -EXAMPLE_IMAGES = [ - dict( - head=pjoin(data_path, 'example4d+orig.HEAD'), - fname=pjoin(data_path, 'example4d+orig.BRIK.gz'), - shape=(33, 41, 25, 3), - dtype=np.int16, - affine=np.array([[-3.0,0,0,49.5], - [0,-3.0,0,82.312], - [0,0,3.0,-52.3511], - [0,0,0,1.0]]), - zooms=(3., 3., 3., 3.), - data_summary=dict( - min=0, - max=13722, - mean=4266.76024636), - is_proxy=True, - space='ORIG', - labels=['#0', '#1', '#2'], - scaling=None), - dict( - head=pjoin(data_path, 'scaled+tlrc.HEAD'), - fname=pjoin(data_path, 'scaled+tlrc.BRIK'), - shape=(47, 54, 43, 1.), - dtype=np.int16, - affine=np.array([[3.0,0,0,-66.], - [0,3.0,0,-87.], - [0,0,3.0,-54.], - [0,0,0,1.0]]), - zooms=(3., 3., 3., 0.), - data_summary=dict( - min=1.9416814999999998e-07, - max=0.0012724615542099998, - mean=0.00023919645351876782), - is_proxy=True, - space='TLRC', - labels=['#0'], - scaling=np.array([ 3.88336300e-08]), - ) -] - -EXAMPLE_BAD_IMAGES = [ - dict( - head=pjoin(data_path, 'bad_datatype+orig.HEAD'), - err=brikhead.AFNIImageError - ), - dict( - head=pjoin(data_path, 'bad_attribute+orig.HEAD'), - err=brikhead.AFNIHeaderError - ) -] - -class TestAFNIHeader(object): - module = brikhead - test_files = EXAMPLE_IMAGES - - def test_makehead(self): - for tp in self.test_files: - head1 = self.module.AFNIHeader.from_fileobj(tp['head']) - head2 = self.module.AFNIHeader.from_header(head1) - assert_equal(head1, head2) - with assert_raises(self.module.AFNIHeaderError): - self.module.AFNIHeader.from_header(header=None) - with assert_raises(self.module.AFNIHeaderError): - self.module.AFNIHeader.from_header(tp['fname']) - - -class TestAFNIImage(object): - module = brikhead - test_files = EXAMPLE_IMAGES - - def test_brikheadfile(self): - for tp in self.test_files: - brik = self.module.load(tp['fname']) - assert_equal(brik.get_data_dtype().type, tp['dtype']) - assert_equal(brik.shape, tp['shape']) - assert_equal(brik.header.get_zooms(), tp['zooms']) - assert_array_equal(brik.affine, tp['affine']) - assert_equal(brik.header.get_space(), tp['space']) - data = brik.get_data() - assert_equal(data.shape, tp['shape']) - assert_array_equal(brik.dataobj.scaling, tp['scaling']) - assert_equal(brik.header.get_volume_labels(), tp['labels']) - - def test_load(self): - # Check highest level load of brikhead works - for tp in self.test_files: - img = self.module.load(tp['head']) - data = img.get_data() - assert_equal(data.shape, tp['shape']) - # min, max, mean values - assert_data_similar(data, tp) - # check if file can be converted to nifti - ni_img = Nifti1Image.from_image(img) - assert_array_equal(ni_img.affine, tp['affine']) - assert_array_equal(ni_img.get_data(), data) - - def test_array_proxy_slicing(self): - # Test slicing of array proxy - for tp in self.test_files: - img = self.module.load(tp['fname']) - arr = img.get_data() - prox = img.dataobj - assert_true(prox.is_proxy) - for sliceobj in slicer_samples(img.shape): - assert_array_equal(arr[sliceobj], prox[sliceobj]) - - -class TestBadFiles(object): - module = brikhead - test_files = EXAMPLE_BAD_IMAGES - - def test_brikheadfile(self): - for tp in self.test_files: - with assert_raises(tp['err']): - self.module.load(tp['head']) - - -class TestBadVars(object): - module = brikhead - vars = ['type = badtype-attribute\nname = 
BRICK_TYPES\ncount = 1\n1\n', - 'type = integer-attribute\ncount = 1\n1\n'] - - def test_unpack_var(self): - for var in self.vars: - with assert_raises(self.module.AFNIHeaderError): - self.module._unpack_var(var) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 01b9ff4fdb..8ee7c22cc7 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -39,7 +39,7 @@ Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, MGHImage, Minc1Image, Minc2Image, is_proxy) from ..spatialimages import SpatialImage -from .. import minc1, minc2, parrec, brikhead +from .. import minc1, minc2, parrec from nose import SkipTest from nose.tools import (assert_true, assert_false, assert_raises, assert_equal) @@ -54,7 +54,7 @@ from .test_minc1 import EXAMPLE_IMAGES as MINC1_EXAMPLE_IMAGES from .test_minc2 import EXAMPLE_IMAGES as MINC2_EXAMPLE_IMAGES from .test_parrec import EXAMPLE_IMAGES as PARREC_EXAMPLE_IMAGES -from .test_brikhead import EXAMPLE_IMAGES as AFNI_EXAMPLE_IMAGES + class GenericImageAPI(ValidateAPI): """ General image validation API """ @@ -596,9 +596,3 @@ class TestMGHAPI(ImageHeaderAPI): has_scaling = True can_save = True standard_extension = '.mgh' - - -class TestAFNIAPI(LoadImageAPI): - loader = brikhead.load - klass = image_maker = brikhead.AFNIImage - example_images = AFNI_EXAMPLE_IMAGES diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index ca1654bf9a..ebde721732 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -12,10 +12,9 @@ from gzip import GzipFile from bz2 import BZ2File from io import BytesIO, UnsupportedOperation -from distutils.version import StrictVersion from ..py3k import asstr, asbytes -from ..openers import Opener, ImageOpener, HAVE_INDEXED_GZIP +from ..openers import Opener, ImageOpener from ..tmpdirs import InTemporaryDirectory from ..volumeutils import BinOpener @@ -68,8 +67,6 @@ def test_Opener_various(): # Check we can do all sorts of files here message = b"Oh what a giveaway" bz2_fileno = hasattr(BZ2File, 'fileno') - if HAVE_INDEXED_GZIP: - import indexed_gzip as igzip with InTemporaryDirectory(): sobj = BytesIO() for input in ('test.txt', @@ -89,11 +86,6 @@ def test_Opener_various(): assert_raises(UnsupportedOperation, fobj.fileno) elif input.endswith('.bz2') and not bz2_fileno: assert_raises(AttributeError, fobj.fileno) - # indexed gzip is used by default, and drops file - # handles by default, so we don't have a fileno. - elif input.endswith('gz') and HAVE_INDEXED_GZIP and \ - StrictVersion(igzip.__version__) >= StrictVersion('0.7.0'): - assert_raises(igzip.NoHandleError, fobj.fileno) else: # Just check there is a fileno assert_not_equal(fobj.fileno(), 0) @@ -105,10 +97,9 @@ def test_BinOpener(): BinOpener, 'test.txt', 'r') -class MockIndexedGzipFile(GzipFile): +class MockIndexedGzipFile(object): def __init__(self, *args, **kwargs): - kwargs.pop('drop_handles', False) - super(MockIndexedGzipFile, self).__init__(*args, **kwargs) + pass @contextlib.contextmanager @@ -116,11 +107,12 @@ def patch_indexed_gzip(state): # Make it look like we do (state==True) or do not (state==False) have # the indexed gzip module. 
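The patching technique used by ``patch_indexed_gzip`` below is generic:
``mock.patch`` temporarily rebinds a module-level name so a test can
simulate an optional dependency being present or absent, then restores the
original on exit. A minimal sketch::

    import mock  # on Python 3: from unittest import mock

    import nibabel.openers

    with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', False):
        # code consulting the flag inside this block sees the fake value
        assert nibabel.openers.HAVE_INDEXED_GZIP is False
    # the original attribute is restored once the block exits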
if state: - values = (True, MockIndexedGzipFile) + values = (True, True, MockIndexedGzipFile) else: - values = (False, GzipFile) + values = (False, False, GzipFile) with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', values[0]), \ - mock.patch('nibabel.openers.IndexedGzipFile', values[1], + mock.patch('nibabel.arrayproxy.HAVE_INDEXED_GZIP', values[1]), \ + mock.patch('nibabel.openers.SafeIndexedGzipFile', values[2], create=True): yield @@ -140,18 +132,14 @@ def test_Opener_gzip_type(): # Each test is specified by a tuple containing: # (indexed_gzip present, Opener kwargs, expected file type) tests = [ - (False, {'mode' : 'rb', 'keep_open' : True}, GzipFile), - (False, {'mode' : 'rb', 'keep_open' : False}, GzipFile), - (False, {'mode' : 'rb', 'keep_open' : 'auto'}, GzipFile), - (False, {'mode' : 'wb', 'keep_open' : True}, GzipFile), - (False, {'mode' : 'wb', 'keep_open' : False}, GzipFile), - (False, {'mode' : 'wb', 'keep_open' : 'auto'}, GzipFile), - (True, {'mode' : 'rb', 'keep_open' : True}, MockIndexedGzipFile), - (True, {'mode' : 'rb', 'keep_open' : False}, MockIndexedGzipFile), - (True, {'mode' : 'rb', 'keep_open' : 'auto'}, MockIndexedGzipFile), - (True, {'mode' : 'wb', 'keep_open' : True}, GzipFile), - (True, {'mode' : 'wb', 'keep_open' : False}, GzipFile), - (True, {'mode' : 'wb', 'keep_open' : 'auto'}, GzipFile), + (False, {'mode' : 'rb', 'keep_open' : True}, GzipFile), + (False, {'mode' : 'rb', 'keep_open' : False}, GzipFile), + (False, {'mode' : 'wb', 'keep_open' : True}, GzipFile), + (False, {'mode' : 'wb', 'keep_open' : False}, GzipFile), + (True, {'mode' : 'rb', 'keep_open' : True}, MockIndexedGzipFile), + (True, {'mode' : 'rb', 'keep_open' : False}, GzipFile), + (True, {'mode' : 'wb', 'keep_open' : True}, GzipFile), + (True, {'mode' : 'wb', 'keep_open' : False}, GzipFile), ] for test in tests: @@ -268,10 +256,11 @@ class StrictOpener(Opener): assert_true(isinstance(fobj.fobj, file_class)) elif lext == 'gz': try: - from ..openers import IndexedGzipFile + from indexed_gzip import SafeIndexedGzipFile except ImportError: - IndexedGzipFile = GzipFile - assert_true(isinstance(fobj.fobj, (GzipFile, IndexedGzipFile))) + SafeIndexedGzipFile = GzipFile + assert_true(isinstance(fobj.fobj, (GzipFile, + SafeIndexedGzipFile))) else: assert_true(isinstance(fobj.fobj, BZ2File)) diff --git a/nibabel/cmdline/tests/test_parrec2nii.py b/nibabel/tests/test_parrec2nii.py similarity index 91% rename from nibabel/cmdline/tests/test_parrec2nii.py rename to nibabel/tests/test_parrec2nii.py index c5b5831270..aa018b24d0 100644 --- a/nibabel/cmdline/tests/test_parrec2nii.py +++ b/nibabel/tests/test_parrec2nii.py @@ -6,7 +6,7 @@ from numpy import array as npa import nibabel -from nibabel.cmdline import parrec2nii +from nibabel import parrec2nii_cmd as parrec2nii from mock import Mock, MagicMock, patch from nose.tools import assert_true @@ -29,10 +29,10 @@ [ 0. , 0. , 0. , 1. ]]) -@patch('nibabel.cmdline.parrec2nii.verbose') -@patch('nibabel.cmdline.parrec2nii.io_orientation') -@patch('nibabel.cmdline.parrec2nii.nifti1') -@patch('nibabel.cmdline.parrec2nii.pr') +@patch('nibabel.parrec2nii_cmd.verbose') +@patch('nibabel.parrec2nii_cmd.io_orientation') +@patch('nibabel.parrec2nii_cmd.nifti1') +@patch('nibabel.parrec2nii_cmd.pr') def test_parrec2nii_sets_qform_sform_code1(*args): # Check that set_sform(), set_qform() are called on the new header. 
parrec2nii.verbose.switch = False @@ -67,7 +67,7 @@ def test_parrec2nii_sets_qform_sform_code1(*args): nhdr.set_sform.assert_called_with(AN_OLD_AFFINE, code=1) -@patch('nibabel.cmdline.parrec2nii.verbose') +@patch('nibabel.parrec2nii_cmd.verbose') def test_parrec2nii_save_load_qform_code(*args): # Tests that after parrec2nii saves file, it has the sform and qform 'code' # set to '1', which means 'scanner', so that other software, e.g. FSL picks diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 9756a16747..941e2271b0 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -8,7 +8,6 @@ import sys import os -import shutil from os.path import (dirname, join as pjoin, abspath, splitext, basename, exists) import csv @@ -16,7 +15,6 @@ import numpy as np -import nibabel as nib from ..tmpdirs import InTemporaryDirectory from ..loadsave import load from ..orientations import flip_axis, aff2axcodes, inv_ornt_aff @@ -24,7 +22,7 @@ from nose.tools import assert_true, assert_false, assert_equal from nose import SkipTest -from numpy.testing import assert_almost_equal, assert_array_equal +from numpy.testing import assert_almost_equal from .scriptrunner import ScriptRunner from .nibabel_data import needs_nibabel_data @@ -130,25 +128,6 @@ def test_nib_ls_multiple(): ) -@script_test -def test_help(): - for cmd in ['parrec2nii', 'nib-dicomfs', 'nib-ls', 'nib-nifti-dx']: - if cmd == 'nib-dicomfs': - # needs special treatment since depends on fuse module which - # might not be available. - try: - import fuse - except Exception: - continue # do not test this one - code, stdout, stderr = run_command([cmd, '--help']) - assert_equal(code, 0) - assert_re_in(".*%s" % cmd, stdout) - assert_re_in(".*Usage", stdout) - # Some third party modules might like to announce some Deprecation - # etc warnings, see e.g. https://travis-ci.org/nipy/nibabel/jobs/370353602 - if 'warning' not in stderr.lower(): - assert_equal(stderr, '') - @script_test def test_nib_nifti_dx(): @@ -378,92 +357,3 @@ def test_parrec2nii_with_data(): assert_equal(sorted(csv_keys), ['diffusion b value number', 'gradient orientation number']) assert_equal(nlines, 8) # 8 volumes present in DTI.PAR - - -@script_test -def test_nib_trk2tck(): - simple_trk = pjoin(DATA_PATH, "simple.trk") - standard_trk = pjoin(DATA_PATH, "standard.trk") - - with InTemporaryDirectory() as tmpdir: - # Copy input files to convert. - shutil.copy(simple_trk, tmpdir) - shutil.copy(standard_trk, tmpdir) - simple_trk = pjoin(tmpdir, "simple.trk") - standard_trk = pjoin(tmpdir, "standard.trk") - simple_tck = pjoin(tmpdir, "simple.tck") - standard_tck = pjoin(tmpdir, "standard.tck") - - # Convert one file. - cmd = ["nib-trk2tck", simple_trk] - code, stdout, stderr = run_command(cmd) - assert_equal(len(stdout), 0) - assert_true(os.path.isfile(simple_tck)) - trk = nib.streamlines.load(simple_trk) - tck = nib.streamlines.load(simple_tck) - assert_array_equal(tck.streamlines.data, trk.streamlines.data) - assert_true(isinstance(tck, nib.streamlines.TckFile)) - - # Skip non TRK files. - cmd = ["nib-trk2tck", simple_tck] - code, stdout, stderr = run_command(cmd) - assert_true("Skipping non TRK file" in stdout) - - # By default, refuse to overwrite existing output files. - cmd = ["nib-trk2tck", simple_trk] - code, stdout, stderr = run_command(cmd) - assert_true("Skipping existing file" in stdout) - - # Convert multiple files and with --force. 
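The ``nib-trk2tck`` script exercised in this test is a thin wrapper over
``nibabel.streamlines``; the same conversion can be done directly. A
sketch, assuming ``standard.trk`` exists on disk::

    import nibabel as nib

    trk = nib.streamlines.load('standard.trk')
    # TRK headers already carry the voxel-to-RASmm mapping, so no
    # reference anatomy is needed in this direction (unlike TCK -> TRK,
    # where the header must be rebuilt from an anatomical image)
    nib.streamlines.save(trk.tractogram, 'standard.tck')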
- cmd = ["nib-trk2tck", "--force", simple_trk, standard_trk] - code, stdout, stderr = run_command(cmd) - assert_equal(len(stdout), 0) - trk = nib.streamlines.load(standard_trk) - tck = nib.streamlines.load(standard_tck) - assert_array_equal(tck.streamlines.data, trk.streamlines.data) - - -@script_test -def test_nib_tck2trk(): - anat = pjoin(DATA_PATH, "standard.nii.gz") - standard_tck = pjoin(DATA_PATH, "standard.tck") - - with InTemporaryDirectory() as tmpdir: - # Copy input file to convert. - shutil.copy(standard_tck, tmpdir) - standard_trk = pjoin(tmpdir, "standard.trk") - standard_tck = pjoin(tmpdir, "standard.tck") - - # Anatomical image not found as first argument. - cmd = ["nib-tck2trk", standard_tck, anat] - code, stdout, stderr = run_command(cmd, check_code=False) - assert_equal(code, 2) # Parser error. - assert_true("Expecting anatomical image as first agument" in stderr) - - # Convert one file. - cmd = ["nib-tck2trk", anat, standard_tck] - code, stdout, stderr = run_command(cmd) - assert_equal(len(stdout), 0) - assert_true(os.path.isfile(standard_trk)) - tck = nib.streamlines.load(standard_tck) - trk = nib.streamlines.load(standard_trk) - assert_array_equal(trk.streamlines.data, tck.streamlines.data) - assert_true(isinstance(trk, nib.streamlines.TrkFile)) - - # Skip non TCK files. - cmd = ["nib-tck2trk", anat, standard_trk] - code, stdout, stderr = run_command(cmd) - assert_true("Skipping non TCK file" in stdout) - - # By default, refuse to overwrite existing output files. - cmd = ["nib-tck2trk", anat, standard_tck] - code, stdout, stderr = run_command(cmd) - assert_true("Skipping existing file" in stdout) - - # Convert multiple files and with --force. - cmd = ["nib-tck2trk", "--force", anat, standard_tck, standard_tck] - code, stdout, stderr = run_command(cmd) - assert_equal(len(stdout), 0) - tck = nib.streamlines.load(standard_tck) - trk = nib.streamlines.load(standard_trk) - assert_array_equal(tck.streamlines.data, trk.streamlines.data) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index b0f571023d..bd8b834b84 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -17,7 +17,6 @@ from io import BytesIO from ..spatialimages import (SpatialHeader, SpatialImage, HeaderDataError, Header, ImageDataError) -from ..imageclasses import spatial_axes_first from unittest import TestCase from nose.tools import (assert_true, assert_false, assert_equal, @@ -386,10 +385,9 @@ def test_get_data(self): img[0, 0, 0] # Make sure the right message gets raised: assert_equal(str(exception_manager.exception), - "Cannot slice image objects; consider using " - "`img.slicer[slice]` to generate a sliced image (see " - "documentation for caveats) or slicing image array data " - "with `img.dataobj[slice]` or `img.get_data()[slice]`") + ("Cannot slice image objects; consider slicing image " + "array data with `img.dataobj[slice]` or " + "`img.get_data()[slice]`")) assert_true(in_data is img.dataobj) out_data = img.get_data() assert_true(in_data is out_data) @@ -413,132 +411,6 @@ def test_get_data(self): assert_false(rt_img.get_data() is out_data) assert_array_equal(rt_img.get_data(), in_data) - def test_slicer(self): - img_klass = self.image_class - in_data_template = np.arange(240, dtype=np.int16) - base_affine = np.eye(4) - t_axis = None - for dshape in ((4, 5, 6, 2), # Time series - (8, 5, 6)): # Volume - in_data = in_data_template.copy().reshape(dshape) - img = img_klass(in_data, base_affine.copy()) - - if not 
spatial_axes_first(img): - with assert_raises(ValueError): - img.slicer - continue - - assert_true(hasattr(img.slicer, '__getitem__')) - - # Note spatial zooms are always first 3, even when - spatial_zooms = img.header.get_zooms()[:3] - - # Down-sample with [::2, ::2, ::2] along spatial dimensions - sliceobj = [slice(None, None, 2)] * 3 + \ - [slice(None)] * (len(dshape) - 3) - downsampled_img = img.slicer[tuple(sliceobj)] - assert_array_equal(downsampled_img.header.get_zooms()[:3], - np.array(spatial_zooms) * 2) - - max4d = (hasattr(img.header, '_structarr') and - 'dims' in img.header._structarr.dtype.fields and - img.header._structarr['dims'].shape == (4,)) - # Check newaxis and single-slice errors - with assert_raises(IndexError): - img.slicer[None] - with assert_raises(IndexError): - img.slicer[0] - # Axes 1 and 2 are always spatial - with assert_raises(IndexError): - img.slicer[:, None] - with assert_raises(IndexError): - img.slicer[:, 0] - with assert_raises(IndexError): - img.slicer[:, :, None] - with assert_raises(IndexError): - img.slicer[:, :, 0] - if len(img.shape) == 4: - if max4d: - with assert_raises(ValueError): - img.slicer[:, :, :, None] - else: - # Reorder non-spatial axes - assert_equal(img.slicer[:, :, :, None].shape, - img.shape[:3] + (1,) + img.shape[3:]) - # 4D to 3D using ellipsis or slices - assert_equal(img.slicer[..., 0].shape, img.shape[:-1]) - assert_equal(img.slicer[:, :, :, 0].shape, img.shape[:-1]) - else: - # 3D Analyze/NIfTI/MGH to 4D - assert_equal(img.slicer[:, :, :, None].shape, img.shape + (1,)) - if len(img.shape) == 3: - # Slices exceed dimensions - with assert_raises(IndexError): - img.slicer[:, :, :, :, None] - elif max4d: - with assert_raises(ValueError): - img.slicer[:, :, :, :, None] - else: - assert_equal(img.slicer[:, :, :, :, None].shape, - img.shape + (1,)) - - # Crop by one voxel in each dimension - sliced_i = img.slicer[1:] - sliced_j = img.slicer[:, 1:] - sliced_k = img.slicer[:, :, 1:] - sliced_ijk = img.slicer[1:, 1:, 1:] - - # No scaling change - assert_array_equal(sliced_i.affine[:3, :3], img.affine[:3, :3]) - assert_array_equal(sliced_j.affine[:3, :3], img.affine[:3, :3]) - assert_array_equal(sliced_k.affine[:3, :3], img.affine[:3, :3]) - assert_array_equal(sliced_ijk.affine[:3, :3], img.affine[:3, :3]) - # Translation - assert_array_equal(sliced_i.affine[:, 3], [1, 0, 0, 1]) - assert_array_equal(sliced_j.affine[:, 3], [0, 1, 0, 1]) - assert_array_equal(sliced_k.affine[:, 3], [0, 0, 1, 1]) - assert_array_equal(sliced_ijk.affine[:, 3], [1, 1, 1, 1]) - - # No change to affines with upper-bound slices - assert_array_equal(img.slicer[:1, :1, :1].affine, img.affine) - - # Yell about step = 0 - with assert_raises(ValueError): - img.slicer[:, ::0] - with assert_raises(ValueError): - img.slicer.slice_affine((slice(None), slice(None, None, 0))) - - # Don't permit zero-length slices - with assert_raises(IndexError): - img.slicer[:0] - - # No fancy indexing - with assert_raises(IndexError): - img.slicer[[0]] - with assert_raises(IndexError): - img.slicer[[-1]] - with assert_raises(IndexError): - img.slicer[[0], [-1]] - - # Check data is consistent with slicing numpy arrays - slice_elems = (None, Ellipsis, 0, 1, -1, [0], [1], [-1], - slice(None), slice(1), slice(-1), slice(1, -1)) - for n_elems in range(6): - for _ in range(1 if n_elems == 0 else 10): - sliceobj = tuple( - np.random.choice(slice_elems, n_elems).tolist()) - try: - sliced_img = img.slicer[sliceobj] - except (IndexError, ValueError): - # Only checking valid slices - pass - else: - 
sliced_data = in_data[sliceobj] - assert_array_equal(sliced_data, sliced_img.get_data()) - assert_array_equal(sliced_data, sliced_img.dataobj) - assert_array_equal(sliced_data, img.dataobj[sliceobj]) - assert_array_equal(sliced_data, img.get_data()[sliceobj]) - def test_api_deprecations(self): class FakeImage(self.image_class): diff --git a/setup.py b/setup.py index b0f5bc093c..009969a3c5 100755 --- a/setup.py +++ b/setup.py @@ -117,8 +117,6 @@ def main(**extra_args): pjoin('bin', 'nib-ls'), pjoin('bin', 'nib-dicomfs'), pjoin('bin', 'nib-nifti-dx'), - pjoin('bin', 'nib-tck2trk'), - pjoin('bin', 'nib-trk2tck'), ], cmdclass = cmdclass, **extra_args From 9a0b521a5c16d02a0ca7971cfdaecaa07631963e Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 06:53:53 -0700 Subject: [PATCH 07/26] MNT: Empty commit to trigger AppVeyor From 4952599ef595a8c27c3d823d68a03ef5da15211a Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 06:54:18 -0700 Subject: [PATCH 08/26] MNT: Empty commit to trigger AppVeyor From 6234346b8f380385f3a62a4f8e2c4936196c4069 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 06:54:27 -0700 Subject: [PATCH 09/26] MNT: Empty commit to trigger AppVeyor From fc5d448a2d67f5b24dd3c12780cea8773a31bc1c Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 12:35:18 -0700 Subject: [PATCH 10/26] MNT: Empty commit to trigger AppVeyor From 718da1550aa7071475ab605ea4db5b663da852b9 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 12:35:25 -0700 Subject: [PATCH 11/26] MNT: Empty commit to trigger AppVeyor From 0a429a2ef083b0859879e392f3f3866515251de0 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 12:35:32 -0700 Subject: [PATCH 12/26] MNT: Empty commit to trigger AppVeyor From 32a827ad0007b0e1294988e86a378b1f047d4113 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 12:35:38 -0700 Subject: [PATCH 13/26] MNT: Empty commit to trigger AppVeyor From c0e479a5460809fee81187f466604e7315574557 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 12:43:33 -0700 Subject: [PATCH 14/26] MNT: Empty commit to trigger AppVeyor From 7b49c04912a6407ad4023bc15f8a887c6c0fd5ab Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 14:01:13 -0700 Subject: [PATCH 15/26] MNT: Empty commit to trigger AppVeyor From 17e4c1636f403712cc27bb17607dbfb158c75cbb Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 15 Oct 2018 14:01:33 -0700 Subject: [PATCH 16/26] MNT: Empty commit to trigger AppVeyor From f93de2a4ea3a90ddd7b7782d7086c93ad073ca59 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 16 Oct 2018 15:18:07 -0700 Subject: [PATCH 17/26] MNT: Empty commit to trigger AppVeyor From b380db0b3a16819c3d067b7e6c28b94847fae761 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 16 Oct 2018 15:18:15 -0700 Subject: [PATCH 18/26] MNT: Empty commit to trigger AppVeyor From 549352c06fcbd1f7f01f5a7576bd0edadd5f8dab Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 16 Oct 2018 15:18:21 -0700 Subject: [PATCH 19/26] MNT: Empty commit to trigger AppVeyor From bf5377ac6372db50f10438ebade2f1ff5f2e8229 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 16 Oct 2018 15:18:27 -0700 Subject: [PATCH 20/26] MNT: Empty commit to trigger AppVeyor From 5bee815406d1e385377e63f21a2aeb42244876c6 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 16 Oct 2018 15:18:33 -0700 Subject: [PATCH 21/26] MNT: Empty commit to trigger AppVeyor From 3f8170a1d94b17d8feee48b641726ab366f523f2 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 16 Oct 2018 15:18:40 -0700 Subject: [PATCH 22/26] MNT: Empty commit to trigger AppVeyor From 82c9257b3bccc54e95cd846b29b9315db7577ab7 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 17 Oct 2018 07:39:24 -0700 Subject: [PATCH 23/26] UPDATE: Restore 1e9c4d4edb16f76a0a12b37993f1e179250fc2aa for testing --- .gitignore | 1 + .mailmap | 1 + .travis.yml | 10 +- COPYING | 52 +- Changelog | 99 ++- appveyor.yml | 5 +- bin/nib-dicomfs | 210 +----- bin/nib-diff | 17 + bin/nib-nifti-dx | 26 +- bin/nib-tck2trk | 18 + bin/nib-trk2tck | 18 + bin/parrec2nii | 2 +- doc/source/conf.py | 2 +- doc/source/coordinate_systems.rst | 6 +- doc/source/devel/modified_images.rst | 6 +- doc/source/gettingstarted.rst | 2 +- doc/source/images_and_memory.rst | 62 +- doc/source/index.rst | 7 + doc/source/neuro_radio_conventions.rst | 2 +- doc/source/nibabel_images.rst | 40 +- doc/source/old/examples.txt | 14 +- doc/source/old/orientation.txt | 2 +- doc/source/scripts/make_coord_examples.py | 6 +- nibabel/arrayproxy.py | 126 +++- nibabel/benchmarks/bench_array_to_file.py | 1 + .../benchmarks/bench_arrayproxy_slicing.py | 18 +- nibabel/brikhead.py | 627 ++++++++++++++++++ nibabel/casting.py | 3 + nibabel/cifti2/parse_cifti2.py | 8 +- nibabel/cmdline/dicomfs.py | 241 +++++++ nibabel/cmdline/diff.py | 365 ++++++++++ nibabel/cmdline/ls.py | 7 +- nibabel/cmdline/nifti_dx.py | 38 ++ .../parrec2nii.py} | 0 nibabel/cmdline/tck2trk.py | 56 ++ nibabel/cmdline/tests/__init__.py | 0 .../{ => cmdline}/tests/test_parrec2nii.py | 12 +- nibabel/cmdline/tests/test_utils.py | 170 ++++- nibabel/cmdline/trk2tck.py | 39 ++ nibabel/dataobj_images.py | 10 +- nibabel/deprecated.py | 4 +- nibabel/ecat.py | 4 + nibabel/externals/oset.py | 85 +++ nibabel/freesurfer/io.py | 44 +- nibabel/freesurfer/mghformat.py | 11 +- nibabel/freesurfer/tests/test_io.py | 10 +- nibabel/freesurfer/tests/test_mghformat.py | 2 + nibabel/imageclasses.py | 13 +- nibabel/info.py | 6 +- nibabel/minc1.py | 4 + nibabel/nifti1.py | 13 +- nibabel/openers.py | 24 +- nibabel/parrec.py | 4 + nibabel/spatialimages.py | 127 +++- nibabel/tests/data/bad_attribute+orig.HEAD | 133 ++++ nibabel/tests/data/bad_datatype+orig.HEAD | 133 ++++ nibabel/tests/data/example4d+orig.HEAD | 133 ++++ nibabel/tests/data/scaled+tlrc.HEAD | 116 ++++ nibabel/tests/test_arrayproxy.py | 244 +++---- nibabel/tests/test_brikhead.py | 150 +++++ nibabel/tests/test_diff.py | 74 +++ nibabel/tests/test_floating.py | 51 +- nibabel/tests/test_image_api.py | 388 ++++++----- nibabel/tests/test_nifti1.py | 23 +- nibabel/tests/test_openers.py | 49 +- nibabel/tests/test_proxy_api.py | 8 + nibabel/tests/test_scripts.py | 151 ++++- nibabel/tests/test_spatialimages.py | 134 +++- nibabel/tests/test_testing.py | 7 +- nibabel/volumeutils.py | 14 +- setup.py | 3 + 71 files changed, 3702 insertions(+), 789 deletions(-) create mode 100755 bin/nib-diff create mode 100644 bin/nib-tck2trk create mode 100644 bin/nib-trk2tck create mode 100644 nibabel/brikhead.py create mode 100644 nibabel/cmdline/dicomfs.py create mode 100755 nibabel/cmdline/diff.py create mode 100644 
nibabel/cmdline/nifti_dx.py rename nibabel/{parrec2nii_cmd.py => cmdline/parrec2nii.py} (100%) create mode 100644 nibabel/cmdline/tck2trk.py create mode 100644 nibabel/cmdline/tests/__init__.py rename nibabel/{ => cmdline}/tests/test_parrec2nii.py (91%) create mode 100644 nibabel/cmdline/trk2tck.py create mode 100644 nibabel/externals/oset.py create mode 100644 nibabel/tests/data/bad_attribute+orig.HEAD create mode 100644 nibabel/tests/data/bad_datatype+orig.HEAD create mode 100644 nibabel/tests/data/example4d+orig.HEAD create mode 100644 nibabel/tests/data/scaled+tlrc.HEAD create mode 100644 nibabel/tests/test_brikhead.py create mode 100644 nibabel/tests/test_diff.py diff --git a/.gitignore b/.gitignore index d6996550dc..df018f0ead 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,7 @@ .project .pydevproject *.py.orig +.DS_Store # Not sure what the next one is for *.kpf diff --git a/.mailmap b/.mailmap index 79ce4939e6..4205e5e22e 100644 --- a/.mailmap +++ b/.mailmap @@ -29,6 +29,7 @@ Basile Pinsard bpinsard Nguyen, Ly lxn2 Ben Cipollini Ben Cipollini Chris Markiewicz Christopher J. Markiewicz +Chris Markiewicz Chris Markiewicz Chris Markiewicz Christopher J. Markiewicz Chris Markiewicz Christopher J. Markiewicz Chris Markiewicz Chris Johnson diff --git a/.travis.yml b/.travis.yml index 28ac4fa5f4..b6e69d09ba 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,11 +4,10 @@ # for it to be on multiple physical lines, so long as you remember: - There # can't be any leading "-"s - All newlines will be removed, so use ";"s +dist: xenial +sudo: true language: python -# Run jobs on container-based infrastructure, can be overridden per job -sudo: false - cache: directories: - $HOME/.cache/pip @@ -22,11 +21,14 @@ env: - EXTRA_PIP_FLAGS="--find-links=$EXTRA_WHEELS" - PRE_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS" python: - - 3.4 - 3.5 - 3.6 + - 3.7 matrix: include: + - python: 3.4 + dist: trusty + sudo: false - python: 2.7 env: - COVERAGE=1 diff --git a/COPYING b/COPYING index 5827950a17..6f03ba5ccd 100644 --- a/COPYING +++ b/COPYING @@ -121,36 +121,40 @@ Sphinx 0.6 doesn't work properly. OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Ordereddict +OrderedSet ----------- -In ``nibabel/externals/ordereddict.py`` +In ``nibabel/externals/oset.py`` -Copied from: https://pypi.python.org/packages/source/o/ordereddict/ordereddict-1.1.tar.gz#md5=a0ed854ee442051b249bfad0f638bbec +Copied from: https://files.pythonhosted.org/packages/d6/b1/a49498c699a3fda5d635cc1fa222ffc686ea3b5d04b84a3166c4cab0c57b/oset-0.1.3.tar.gz :: - Copyright (c) 2009 Raymond Hettinger - - Permission is hereby granted, free of charge, to any person - obtaining a copy of this software and associated documentation files - (the "Software"), to deal in the Software without restriction, - including without limitation the rights to use, copy, modify, merge, - publish, distribute, sublicense, and/or sell copies of the Software, - and to permit persons to whom the Software is furnished to do so, - subject to the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - OTHER DEALINGS IN THE SOFTWARE. + Copyright (c) 2009, Raymond Hettinger, and others All rights reserved. + + Package structured based on the one developed to odict Copyright (c) 2010, BlueDynamics Alliance, Austria + + - Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + + - Neither the name of the BlueDynamics Alliance nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY BlueDynamics Alliance AS IS AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + SHALL BlueDynamics Alliance BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + OF SUCH DAMAGE. mni_icbm152_t1_tal_nlin_asym_09a -------------------------------- diff --git a/Changelog b/Changelog index 4c65cf0bfe..d3913a7718 100644 --- a/Changelog +++ b/Changelog @@ -20,10 +20,107 @@ Nibabel releases Most work on NiBabel so far has been by Matthew Brett (MB), Michael Hanke (MH) Ben Cipollini (BC), Marc-Alexandre Côté (MC), Chris Markiewicz (CM), Stephan -Gerhard (SG) and Eric Larson (EL). +Gerhard (SG), Eric Larson (EL), Yaroslav Halchenko (YOH) and Chris Cheng (CC). References like "pr/298" refer to github pull request numbers. +2.3.1 (Tuesday 16 October 2018) +=============================== + +New features +------------ +* ``nib-diff`` command line tool for comparing image files (pr/617, pr/672, + pr/678) (CC, reviewed by YOH, Pradeep Raamana and CM) + +Enhancements +------------ +* Speed up reading of numeric arrays in CIFTI2 (pr/655) (Michiel Cottaar, + reviewed by CM) +* Add ``ndim`` property to ``ArrayProxy`` and ``DataobjImage`` (pr/674) (CM, + reviewed by MB) + +Bug fixes +--------- +* Deterministic deduction of slice ordering in degenerate cases (pr/647) + (YOH, reviewed by CM) +* Allow 0ms TR in MGH files (pr/653) (EL, reviewed by CM) +* Allow for PPC64 little-endian long doubles (pr/658) (MB, reviewed by CM) +* Correct construction of FreeSurfer annotation labels (pr/666) (CM, reviewed + by EL, Paul D. McCarthy) +* Fix logic for persisting filehandles with indexed-gzip (pr/679) (Paul D. + McCarthy, reviewed by CM) + +Maintenance +----------- +* Fix semantic error in coordinate systems documentation (pr/646) (Ariel + Rokem, reviewed by CM, MB) +* Test on Python 3.7, minor associated fixes (pr/651) (CM, reviewed by Gregory + R. 
Lee, MB)
+
+2.3 (Tuesday 12 June 2018)
+==========================
+
+New features
+------------
+* TRK <=> TCK streamlines conversion CLI tools (pr/606) (MC, reviewed by CM)
+* Image slicing for SpatialImages (pr/550) (CM)
+
+Enhancements
+------------
+* Simplify MGHImage and add footer fields (pr/569) (CM, reviewed by MB)
+* Force sform/qform codes to be ints, rather than numpy types (pr/575) (Paul
+  McCarthy, reviewed by MB, CM)
+* Auto-fill color table in FreeSurfer annotation file (pr/592) (Paul McCarthy,
+  reviewed by CM, MB)
+* Set default intent code for CIFTI2 images (pr/604) (Mathias Goncalves,
+  reviewed by CM, Satra Ghosh, MB, Tim Coalson)
+* Raise informative error on empty files (pr/611) (Pradeep Raamana, reviewed
+  by CM, MB)
+* Accept degenerate filenames such as ``.nii`` (pr/621) (Dimitri
+  Papadopoulos-Orfanos, reviewed by Yaroslav Halchenko)
+* Take advantage of ``IndexedGzipFile`` ``drop_handles`` flag to release
+  filehandles by default (pr/614) (Paul McCarthy, reviewed by CM, MB)
+
+Bug fixes
+---------
+* Preserve first point of `LazyTractogram` (pr/588) (MC, reviewed by Nil
+  Goyette, CM, MB)
+* Stop adding extraneous metadata padding (pr/593) (Jon Stutters, reviewed by
+  CM, MB)
+* Accept lower-case orientation codes in TRK files (pr/600) (Kesshi Jordan,
+  MB, reviewed by MB, MC, CM)
+* Annotation file reading (pr/592) (Paul McCarthy, reviewed by CM, MB)
+* Fix buffer size calculation in ArraySequence (pr/597) (Serge Koudoro,
+  reviewed by MC, MB, Eleftherios Garyfallidis, CM)
+* Resolve ``UnboundLocalError`` in Python 3 (pr/607) (Jakub Kaczmarzyk,
+  reviewed by MB, CM)
+* Do not crash on non-``ImportError`` failures in optional imports (pr/618)
+  (Yaroslav Halchenko, reviewed by CM)
+* Return original array from ``get_fdata`` for array image, if no cast
+  required (pr/638, MB, reviewed by CM)
+
+Maintenance
+-----------
+* Use SSH address to use key-based auth (pr/587) (CM, reviewed by MB)
+* Fix doctests for numpy 1.14 array printing (pr/591) (MB, reviewed by CM)
+* Refactor for pydicom 1.0 API changes (pr/599) (MB, reviewed by CM)
+* Increase test coverage, remove unreachable code (pr/602) (CM, reviewed by
+  Yaroslav Halchenko, MB)
+* Move ``nib-ls`` and other programs to a new cmdline module (pr/601, pr/615)
+  (Chris Cheng, reviewed by MB, Yaroslav Halchenko)
+* Remove deprecated numpy indexing (EL, reviewed by CM)
+* Update documentation to encourage ``get_fdata`` over ``get_data`` (pr/637,
+  MB, reviewed by CM)
+
+API changes and deprecations
+----------------------------
+* Support for ``keep_file_open = 'auto'`` as a parameter to ``Opener()`` will
+  be deprecated in 2.4, for removal in 3.0. Accordingly, support for
+  ``openers.KEEP_FILE_OPEN_DEFAULT = 'auto'`` will be dropped on the same
+  schedule.
+* Drop-in support for ``indexed_gzip < 0.7`` has been removed.
+
+
 2.2.1 (Wednesday 22 November 2017)
 ==================================
diff --git a/appveyor.yml b/appveyor.yml
index e41aee90c8..772bfa142d 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -12,6 +12,8 @@ environment:
     - PYTHON: C:\Python35-x64
     - PYTHON: C:\Python36
     - PYTHON: C:\Python36-x64
+    - PYTHON: C:\Python37
+    - PYTHON: C:\Python37-x64
 
 install:
   # Prepend newly installed Python to the PATH of this build (this cannot be
@@ -20,8 +22,7 @@ install:
   - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH%
 
   # Install the dependencies of the project.
-  - pip install numpy scipy matplotlib nose h5py mock
-  - pip install pydicom
+  - pip install numpy scipy matplotlib nose h5py mock pydicom
  - pip install .
- SET NIBABEL_DATA_DIR=%CD%\nibabel-data diff --git a/bin/nib-dicomfs b/bin/nib-dicomfs index 115fd4e486..05b6a50afc 100755 --- a/bin/nib-dicomfs +++ b/bin/nib-dicomfs @@ -9,213 +9,7 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # Copyright (C) 2011 Christian Haselgrove -import sys -import os -import stat -import errno -import time -import locale -import logging -import fuse -import nibabel as nib -import nibabel.dft as dft - -from optparse import OptionParser, Option - -uid = os.getuid() -gid = os.getgid() -encoding = locale.getdefaultlocale()[1] - -fuse.fuse_python_api = (0, 2) - -logger = logging.getLogger('nibabel.dft') - -class FileHandle: - - def __init__(self, fno): - self.fno = fno - self.keep_cache = False - self.direct_io = False - return - - def __str__(self): - return 'FileHandle(%d)' % self.fno - -class DICOMFS(fuse.Fuse): - - def __init__(self, *args, **kwargs): - self.followlinks = kwargs.pop('followlinks', False) - fuse.Fuse.__init__(self, *args, **kwargs) - self.fhs = {} - return - - def get_paths(self): - paths = {} - for study in dft.get_studies(self.dicom_path, self.followlinks): - pd = paths.setdefault(study.patient_name_or_uid(), {}) - patient_info = 'patient information\n' - patient_info = 'name: %s\n' % study.patient_name - patient_info += 'ID: %s\n' % study.patient_id - patient_info += 'birth date: %s\n' % study.patient_birth_date - patient_info += 'sex: %s\n' % study.patient_sex - pd['INFO'] = patient_info.encode('ascii', 'replace') - study_datetime = '%s_%s' % (study.date, study.time) - study_info = 'study info\n' - study_info += 'UID: %s\n' % study.uid - study_info += 'date: %s\n' % study.date - study_info += 'time: %s\n' % study.time - study_info += 'comments: %s\n' % study.comments - d = {'INFO': study_info.encode('ascii', 'replace')} - for series in study.series: - series_info = 'series info\n' - series_info += 'UID: %s\n' % series.uid - series_info += 'number: %s\n' % series.number - series_info += 'description: %s\n' % series.description - series_info += 'rows: %d\n' % series.rows - series_info += 'columns: %d\n' % series.columns - series_info += 'bits allocated: %d\n' % series.bits_allocated - series_info += 'bits stored: %d\n' % series.bits_stored - series_info += 'storage instances: %d\n' % len(series.storage_instances) - d[series.number] = {'INFO': series_info.encode('ascii', 'replace'), - '%s.nii' % series.number: (series.nifti_size, series.as_nifti), - '%s.png' % series.number: (series.png_size, series.as_png)} - pd[study_datetime] = d - return paths - - def match_path(self, path): - wd = self.get_paths() - if path == '/': - logger.debug('return root') - return wd - for part in path.lstrip('/').split('/'): - logger.debug("path:%s part:%s" % (path, part)) - if part not in wd: - return None - wd = wd[part] - logger.debug('return') - return wd - - def readdir(self, path, fh): - logger.info('readdir %s' % (path,)) - matched_path = self.match_path(path) - if matched_path is None: - return -errno.ENOENT - logger.debug('matched %s' % (matched_path,)) - fnames = [ k.encode('ascii', 'replace') for k in matched_path.keys() ] - fnames.append('.') - fnames.append('..') - return [ fuse.Direntry(f) for f in fnames ] - - def getattr(self, path): - logger.debug('getattr %s' % path) - matched_path = self.match_path(path) - logger.debug('matched: %s' % (matched_path,)) - now = time.time() - st = fuse.Stat() - if isinstance(matched_path, dict): - st.st_mode = stat.S_IFDIR | 0755 - st.st_ctime = now - st.st_mtime = now - st.st_atime = 
now - st.st_uid = uid - st.st_gid = gid - st.st_nlink = len(matched_path) - return st - if isinstance(matched_path, str): - st.st_mode = stat.S_IFREG | 0644 - st.st_ctime = now - st.st_mtime = now - st.st_atime = now - st.st_uid = uid - st.st_gid = gid - st.st_size = len(matched_path) - st.st_nlink = 1 - return st - if isinstance(matched_path, tuple): - st.st_mode = stat.S_IFREG | 0644 - st.st_ctime = now - st.st_mtime = now - st.st_atime = now - st.st_uid = uid - st.st_gid = gid - st.st_size = matched_path[0]() - st.st_nlink = 1 - return st - return -errno.ENOENT - - def open(self, path, flags): - logger.debug('open %s' % (path,)) - matched_path = self.match_path(path) - if matched_path is None: - return -errno.ENOENT - for i in range(1, 10): - if i not in self.fhs: - if isinstance(matched_path, str): - self.fhs[i] = matched_path - elif isinstance(matched_path, tuple): - self.fhs[i] = matched_path[1]() - else: - raise -errno.EFTYPE - return FileHandle(i) - raise -errno.ENFILE - - # not done - def read(self, path, size, offset, fh): - logger.debug('read') - logger.debug(path) - logger.debug(size) - logger.debug(offset) - logger.debug(fh) - return self.fhs[fh.fno][offset:offset+size] - - def release(self, path, flags, fh): - logger.debug('release') - logger.debug(path) - logger.debug(fh) - del self.fhs[fh.fno] - return - -def get_opt_parser(): - # use module docstring for help output - p = OptionParser( - usage="%s [OPTIONS] " - % os.path.basename(sys.argv[0]), - version="%prog " + nib.__version__) - - p.add_options([ - Option("-v", "--verbose", action="count", - dest="verbose", default=0, - help="make noise. Could be specified multiple times"), - ]) - - p.add_options([ - Option("-L", "--follow-links", action="store_true", - dest="followlinks", default=False, - help="Follow symbolic links in DICOM directory"), - ]) - return p +from nibabel.cmdline.dicomfs import main if __name__ == '__main__': - parser = get_opt_parser() - (opts, files) = parser.parse_args() - - if opts.verbose: - logger.addHandler(logging.StreamHandler(sys.stdout)) - logger.setLevel(opts.verbose > 1 and logging.DEBUG or logging.INFO) - - if len(files) != 2: - sys.stderr.write("Please provide two arguments:\n%s\n" % parser.usage) - sys.exit(1) - - fs = DICOMFS(dash_s_do='setsingle', followlinks=opts.followlinks) - fs.parse(['-f', '-s', files[1]]) - fs.dicom_path = files[0].decode(encoding) - try: - fs.main() - except fuse.FuseError: - # fuse prints the error message - sys.exit(1) - - sys.exit(0) - -# eof + main() \ No newline at end of file diff --git a/bin/nib-diff b/bin/nib-diff new file mode 100755 index 0000000000..2ae66dda9d --- /dev/null +++ b/bin/nib-diff @@ -0,0 +1,17 @@ +#!python +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. 
+# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +""" +Quick diff summary for a set of neuroimaging files +""" + +from nibabel.cmdline.diff import main + +if __name__ == '__main__': + main() diff --git a/bin/nib-nifti-dx b/bin/nib-nifti-dx index 40122acd16..d317585286 100755 --- a/bin/nib-nifti-dx +++ b/bin/nib-nifti-dx @@ -8,32 +8,8 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Print nifti diagnostics for header files ''' -from __future__ import division, print_function, absolute_import - -import sys - -from optparse import OptionParser - -import nibabel as nib - - -def main(): - """ Go go team """ - parser = OptionParser( - usage="%s [FILE ...]\n\n" % sys.argv[0] + __doc__, - version="%prog " + nib.__version__) - (opts, files) = parser.parse_args() - - for fname in files: - with nib.openers.ImageOpener(fname) as fobj: - hdr = fobj.read(nib.nifti1.header_dtype.itemsize) - result = nib.Nifti1Header.diagnose_binaryblock(hdr) - if len(result): - print('Picky header check output for "%s"\n' % fname) - print(result + '\n') - else: - print('Header for "%s" is clean' % fname) +from nibabel.cmdline.nifti_dx import main if __name__ == '__main__': main() diff --git a/bin/nib-tck2trk b/bin/nib-tck2trk new file mode 100644 index 0000000000..896e67a5d1 --- /dev/null +++ b/bin/nib-tck2trk @@ -0,0 +1,18 @@ +#!python +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +""" +Convert tractograms (TCK -> TRK). +""" + +from nibabel.cmdline.tck2trk import main + + +if __name__ == '__main__': + main() diff --git a/bin/nib-trk2tck b/bin/nib-trk2tck new file mode 100644 index 0000000000..85509e7447 --- /dev/null +++ b/bin/nib-trk2tck @@ -0,0 +1,18 @@ +#!python +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +""" +Convert tractograms (TRK -> TCK). +""" + +from nibabel.cmdline.trk2tck import main + + +if __name__ == '__main__': + main() diff --git a/bin/parrec2nii b/bin/parrec2nii index 4856af9986..27a1abca05 100755 --- a/bin/parrec2nii +++ b/bin/parrec2nii @@ -2,7 +2,7 @@ """PAR/REC to NIfTI converter """ -from nibabel.parrec2nii_cmd import main +from nibabel.cmdline.parrec2nii import main if __name__ == '__main__': diff --git a/doc/source/conf.py b/doc/source/conf.py index cb4bb8cb49..ac95cc1dd9 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -87,7 +87,7 @@ # General information about the project. 
project = u'NiBabel' -copyright = u'2006-2017, %(MAINTAINER)s <%(AUTHOR_EMAIL)s>' % rel +copyright = u'2006-2018, %(MAINTAINER)s <%(AUTHOR_EMAIL)s>' % rel # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/doc/source/coordinate_systems.rst b/doc/source/coordinate_systems.rst index a45488012d..ffb24a2e78 100644 --- a/doc/source/coordinate_systems.rst +++ b/doc/source/coordinate_systems.rst @@ -34,7 +34,7 @@ We can load up the EPI image to get the image data array: >>> import nibabel as nib >>> epi_img = nib.load('downloads/someones_epi.nii.gz') - >>> epi_img_data = epi_img.get_data() + >>> epi_img_data = epi_img.get_fdata() >>> epi_img_data.shape (53, 61, 33) @@ -64,7 +64,7 @@ and look at slices in the three axes: :context: >>> anat_img = nib.load('downloads/someones_anatomy.nii.gz') - >>> anat_img_data = anat_img.get_data() + >>> anat_img_data = anat_img.get_fdata() >>> anat_img_data.shape (57, 67, 56) >>> show_slices([anat_img_data[28, :, :], @@ -255,7 +255,7 @@ axes *starts* on the right, anterior, superior of the subject, rather than *ending* on the right, anterior, superior. In other words, they would use "RAS" to refer to a coordinate system we would call "LPI". To be safe, we'll call our interpretation of the RAS convention "RAS+", meaning that Right, -Anterior, Posterior are all positive values on these axes. +Anterior, Superior are all positive values on these axes. Some people also use "right" to mean the right hand side when an observer looks at the front of the scanner, from the foot the scanner bed. diff --git a/doc/source/devel/modified_images.rst b/doc/source/devel/modified_images.rst index 6230f5bb11..5b6e203a42 100644 --- a/doc/source/devel/modified_images.rst +++ b/doc/source/devel/modified_images.rst @@ -77,10 +77,10 @@ flag when anyone asks for the data, on the basis that the user may then do something to the data and you can't know if they have:: img = nibabel.load('some_image.nii') - data = img.get_data() + data = img.get_fdata() data[:] = 0 img2 = nibabel.load('some_image.nii') - assert not np.all(img2.get_data() == img.get_data()) + assert not np.all(img2.get_fdata() == img.get_fdata()) The image consists of the data, the affine and a header. In order to keep track of the header and affine, we could cache them when loading @@ -96,7 +96,7 @@ When we need to know whether the image object and image file correspond, we could check the current header and current affine (the header may be separate from the affine for an SPM Analyze image) against their cached copies, if they are the same and the 'dirty' flag has not been set by a previous call to -``get_data()``, we know that the image file does correspond to the image +``get_fdata()``, we know that the image file does correspond to the image object. This may be OK for small bits of memory like the affine and the header, diff --git a/doc/source/gettingstarted.rst b/doc/source/gettingstarted.rst index 9502c09d7c..3e328a5209 100644 --- a/doc/source/gettingstarted.rst +++ b/doc/source/gettingstarted.rst @@ -66,7 +66,7 @@ This information is available without the need to load anything of the main image data into the memory. 
Of course there is also access to the image data as a NumPy_ array ->>> data = img.get_data() +>>> data = img.get_fdata() >>> data.shape (128, 96, 24, 2) >>> type(data) diff --git a/doc/source/images_and_memory.rst b/doc/source/images_and_memory.rst index 02688156e0..2ff0de14c5 100644 --- a/doc/source/images_and_memory.rst +++ b/doc/source/images_and_memory.rst @@ -19,17 +19,17 @@ disk. Nibabel does not load the image array from the proxy when you ``load`` the image. It waits until you ask for the array data. The standard way to ask -for the array data is to call the ``get_data()`` method: +for the array data is to call the ``get_fdata()`` method: ->>> data = img.get_data() +>>> data = img.get_fdata() >>> data.shape (128, 96, 24, 2) -We also saw in :ref:`proxies-caching` that this call to ``get_data()`` will +We also saw in :ref:`proxies-caching` that this call to ``get_fdata()`` will (by default) load the array data into an internal image cache. The image -returns the cached copy on the next call to ``get_data()``: +returns the cached copy on the next call to ``get_fdata()``: ->>> data_again = img.get_data() +>>> data_again = img.get_fdata() >>> data is data_again True @@ -64,7 +64,7 @@ in cache, and True when it is in cache: >>> img = nib.load(example_file) >>> img.in_memory False ->>> data = img.get_data() +>>> data = img.get_fdata() >>> img.in_memory True @@ -73,10 +73,10 @@ True Using ``uncache`` ***************** -As y'all know, the proxy image has the array in cache, ``get_data()`` returns +As y'all know, the proxy image has the array in cache, ``get_fdata()`` returns the cached array: ->>> data_again = img.get_data() +>>> data_again = img.get_fdata() >>> data_again is data # same array returned from cache True @@ -85,34 +85,34 @@ You can uncache a proxy image with the ``uncache()`` method: >>> img.uncache() >>> img.in_memory False ->>> data_once_more = img.get_data() +>>> data_once_more = img.get_fdata() >>> data_once_more is data # a new copy read from disk False ``uncache()`` has no effect if the image is an array image, or if the cache is already empty. -You need to be careful when you modify arrays returned by ``get_data()`` on +You need to be careful when you modify arrays returned by ``get_fdata()`` on proxy images, because ``uncache`` will then change the result you get back -from ``get_data()``: +from ``get_fdata()``: >>> proxy_img = nib.load(example_file) ->>> data = proxy_img.get_data() # array cached and returned +>>> data = proxy_img.get_fdata() # array cached and returned >>> data[0, 0, 0, 0] -0 +0.0 >>> data[0, 0, 0, 0] = 99 # modify returned array ->>> data_again = proxy_img.get_data() # return cached array +>>> data_again = proxy_img.get_fdata() # return cached array >>> data_again[0, 0, 0, 0] # cached array modified -99 +99.0 So far the proxy image behaves the same as an array image. 
``uncache()`` has no effect on an array image, but it does have an effect on the returned array of a proxy image: >>> proxy_img.uncache() # cached array discarded from proxy image ->>> data_once_more = proxy_img.get_data() # new copy of array loaded +>>> data_once_more = proxy_img.get_fdata() # new copy of array loaded >>> data_once_more[0, 0, 0, 0] # array modifications discarded -0 +0.0 ************* Saving memory @@ -126,8 +126,8 @@ use the ``uncache()`` method: >>> img.uncache() -Use the array proxy instead of ``get_data()`` -============================================= +Use the array proxy instead of ``get_fdata()`` +============================================== The ``dataobj`` property of a proxy image is an array proxy. We can ask the proxy to return the array directly by passing ``dataobj`` to the numpy @@ -145,25 +145,25 @@ This also works for array images, because ``np.asarray`` returns the array: >>> type(data_array) <... 'numpy.ndarray'> -If you want to avoid caching you can avoid ``get_data()`` and always use +If you want to avoid caching you can avoid ``get_fdata()`` and always use ``np.asarray(img.dataobj)``. -Use the ``caching`` keyword to ``get_data()`` -============================================= +Use the ``caching`` keyword to ``get_fdata()`` +============================================== -The default behavior of the ``get_data()`` function is to always fill the +The default behavior of the ``get_fdata()`` function is to always fill the cache, if it is empty. This corresponds to the default ``'fill'`` value to the ``caching`` keyword. So, this: >>> proxy_img = nib.load(example_file) ->>> data = proxy_img.get_data() # default caching='fill' +>>> data = proxy_img.get_fdata() # default caching='fill' >>> proxy_img.in_memory True is the same as this: >>> proxy_img = nib.load(example_file) ->>> data = proxy_img.get_data(caching='fill') +>>> data = proxy_img.get_fdata(caching='fill') >>> proxy_img.in_memory True @@ -171,21 +171,21 @@ Sometimes you may want to avoid filling the cache, if it is empty. In this case, you can use ``caching='unchanged'``: >>> proxy_img = nib.load(example_file) ->>> data = proxy_img.get_data(caching='unchanged') +>>> data = proxy_img.get_fdata(caching='unchanged') >>> proxy_img.in_memory False ``caching='unchanged'`` will leave the cache full if it is already full. ->>> data = proxy_img.get_data(caching='fill') +>>> data = proxy_img.get_fdata(caching='fill') >>> proxy_img.in_memory True ->>> data = proxy_img.get_data(caching='unchanged') +>>> data = proxy_img.get_fdata(caching='unchanged') >>> proxy_img.in_memory True -See the :meth:`get_data() docstring -` for more detail. +See the :meth:`get_fdata() docstring +` for more detail. ********************** Saving time and memory @@ -202,7 +202,7 @@ For example, let us say you only wanted the second volume from the example dataset. You could do this: >>> proxy_img = nib.load(example_file) ->>> data = proxy_img.get_data() +>>> data = proxy_img.get_fdata() >>> data.shape (128, 96, 24, 2) >>> vol1 = data[..., 1] diff --git a/doc/source/index.rst b/doc/source/index.rst index f623c931d2..b4f2ebd596 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -80,6 +80,13 @@ contributed code and discussion (in rough order of appearance): * Mark Hymers * Jasper J.F. van den Bosch * Bennet Fauber +* Kesshi Jordan +* Jon Stutters +* Serge Koudoro +* Christopher P. 
Cheng +* Mathias Goncalves +* Jakub Kaczmarzyk +* Dimitri Papadopoulos Orfanos License reprise =============== diff --git a/doc/source/neuro_radio_conventions.rst b/doc/source/neuro_radio_conventions.rst index f88c31ddf8..a9a51ab2c2 100644 --- a/doc/source/neuro_radio_conventions.rst +++ b/doc/source/neuro_radio_conventions.rst @@ -101,7 +101,7 @@ showing the middle slice of :download:`an image [ 0. , 2.75, 0. , -91. ], [ 0. , 0. , 2.75, -91. ], [ 0. , 0. , 0. , 1. ]]) - >>> img_data = img.get_data() + >>> img_data = img.get_fdata() >>> a_slice = img_data[:, :, 28] >>> # Need transpose to put first axis left-right, second bottom-top >>> plt.imshow(a_slice.T, cmap="gray", origin="lower") # doctest: +SKIP diff --git a/doc/source/nibabel_images.rst b/doc/source/nibabel_images.rst index ffdef7fbdd..f14debcc93 100644 --- a/doc/source/nibabel_images.rst +++ b/doc/source/nibabel_images.rst @@ -220,21 +220,39 @@ False Getting the image data the easy way =================================== -For either type of image (array or proxy) you can always get the data with -the :meth:`get_data() ` method. +For either type of image (array or proxy) you can always get the data with the +:meth:`get_fdata() ` method. -For the array image, ``get_data()`` just returns the data array: +For the array image, ``get_fdata()`` just returns the data array, if it's already the required floating point type (default 64-bit float). If it isn't that type, ``get_fdata()`` casts it to one: ->>> image_data = array_img.get_data() +>>> image_data = array_img.get_fdata() >>> image_data.shape (2, 3, 4) ->>> image_data is array_data +>>> image_data.dtype == np.dtype(np.float64) True -For the proxy image, the ``get_data()`` method fetches the array data from +The cast to floating point means the array is not the one attached to the image: + +>>> image_data is array_img.dataobj +False + +Here's an image backed by a floating point array: + +>>> farray_img = nib.Nifti1Image(image_data.astype(np.float64), affine) +>>> farray_data = farray_img.get_fdata() +>>> farray_data.dtype == np.dtype(np.float64) +True + +There was no cast, so the array returned is exactly the array attached to the +image: + +>>> farray_data is farray_img.dataobj +True + +For the proxy image, the ``get_fdata()`` method fetches the array data from disk using the proxy, and returns the array. ->>> image_data = img.get_data() +>>> image_data = img.get_fdata() >>> image_data.shape (128, 96, 24, 2) @@ -249,12 +267,12 @@ Proxies and caching =================== You may not want to keep loading the image data off disk every time -you call ``get_data()`` on a proxy image. By default, when you call -``get_data()`` the first time on a proxy image, the image object keeps a -cached copy of the loaded array. The next time you call ``img.get_data()``, +you call ``get_fdata()`` on a proxy image. By default, when you call +``get_fdata()`` the first time on a proxy image, the image object keeps a +cached copy of the loaded array. The next time you call ``img.get_fdata()``, the image returns the array from cache rather than loading it from disk again. ->>> data_again = img.get_data() +>>> data_again = img.get_fdata() The returned data is the same (cached) copy we returned before: diff --git a/doc/source/old/examples.txt b/doc/source/old/examples.txt index b84f5441bf..19a44d9cb0 100644 --- a/doc/source/old/examples.txt +++ b/doc/source/old/examples.txt @@ -108,7 +108,7 @@ previously created in a separate file. 
First, we open the file: Now we select the first ten volumes and store them to another file, while preserving as much header information as possible - >>> nim2 = nib.Nifti1Image(nim.get_data()[..., :10], + >>> nim2 = nib.Nifti1Image(nim.get_fdata()[..., :10], ... nim.get_affine(), ... nim.header) >>> print nim2.header['dim'] @@ -127,7 +127,7 @@ Linear detrending of timeseries (SciPy module is required for this example) =========================================================================== Let's load another 4d NIfTI file and perform a linear detrending, by fitting -a straight line to the timeseries of each voxel and substract that fit from +a straight line to the timeseries of each voxel and subtract that fit from the data. Although this might sound complicated at first, thanks to the excellent SciPy module it is just a few lines of code. For this example we will first create a NIfTI image with just a single voxel and 50 timepoints @@ -139,15 +139,11 @@ will first create a NIfTI image with just a single voxel and 50 timepoints >>> print nim.header['dim'] [ 4 1 1 1 50 1 1 1] -Depending on the datatype of the input image the detrending process might -change the datatype from integer to float. As operations that change the -(binary) size of the NIfTI image are not supported, we need to make a copy -of the data and later create a new NIfTI image. Remember that the array has the -time axis as its first dimension (in contrast to the NIfTI file where it is -the 4th). +Remember that the array has the time axis as its first dimension (in contrast +to the NIfTI file where it is the 4th). >>> from scipy import signal - >>> data_detrended = signal.detrend(nim.get_data(), axis=0) + >>> data_detrended = signal.detrend(nim.get_fdata(), axis=0) Finally, create a new NIfTI image using header information from the original source image. 
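The updated detrending example boils down to a few lines. As a quick sketch
of the pattern the revised docs recommend - assuming only the public
``nibabel`` and ``scipy`` APIs shown above, with a synthetic single-voxel
image standing in for real data::

    import numpy as np
    import nibabel as nib
    from scipy import signal

    # One voxel with 50 timepoints; in the nibabel array the time axis is
    # the last axis
    data = np.random.normal(size=(1, 1, 1, 50))
    img = nib.Nifti1Image(data, np.eye(4))

    # get_fdata() always returns a floating point array, so detrending
    # cannot silently change the datatype out from under us
    detrended = signal.detrend(img.get_fdata(), axis=-1)

    # build the new image, reusing the affine and header of the source image
    img2 = nib.Nifti1Image(detrended, img.affine, img.header)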
diff --git a/doc/source/old/orientation.txt b/doc/source/old/orientation.txt index 4efbe73db1..ef231f7e95 100644 --- a/doc/source/old/orientation.txt +++ b/doc/source/old/orientation.txt @@ -85,7 +85,7 @@ the affine after loading, as in:: img = nibabel.load('some_image.img') aff = img.get_affine() x_flipper = np.diag([-1,1,1,1]) - lr_img = nibabel.Nifti1Image(img.get_data, np.dot(x_flipper, aff), img.header) + lr_img = nibabel.Nifti1Image(img.get_fdata(), np.dot(x_flipper, aff), img.header) Affines for Analyze, SPM analyze, and NIFTI ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/scripts/make_coord_examples.py b/doc/source/scripts/make_coord_examples.py index 790c8b7671..9079cea141 100644 --- a/doc/source/scripts/make_coord_examples.py +++ b/doc/source/scripts/make_coord_examples.py @@ -50,7 +50,7 @@ t1_img, t2_img = imgs # Make fake localizer -data = t1_img.get_data() +data = t1_img.get_fdata() n_x, n_y, n_z = img.shape mid_x = round(n_x / 2) @@ -171,7 +171,7 @@ def vx2mm(pts): # resample, preserving affine epi_cmap = nca.vox2mni(epi_vox2mm) epi = rsm.resample(t2_img, epi_cmap, np.eye(4), epi_vox_shape) -epi_data = epi.get_data() +epi_data = epi.get_fdata() # Do the same kind of thing for the anatomical scan anat_vox_sizes = [2.75, 2.75, 2.75] anat_scale = npl.inv(np.diag(anat_vox_sizes + [1])) @@ -183,7 +183,7 @@ def vx2mm(pts): [data.shape[0], anat_x_len, anat_y_len], anat_vox_sizes)) anat_cmap = nca.vox2mni(anat_vox2mm) anat = rsm.resample(t1_img, anat_cmap, np.eye(4), anat_vox_shape) -anat_data = anat.get_data() +anat_data = anat.get_fdata() save_plot() nipy.save_image(epi, 'someones_epi.nii.gz', dtype_from='uint8') diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index e95f519e02..c74386b0ac 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -34,7 +34,7 @@ from .volumeutils import array_from_file, apply_read_scaling from .fileslice import fileslice from .keywordonly import kw_only_meth -from .openers import ImageOpener, HAVE_INDEXED_GZIP +from . import openers """This flag controls whether a new file handle is created every time an image @@ -42,15 +42,22 @@ used for the lifetime of the ``ArrayProxy``. It should be set to one of ``True``, ``False``, or ``'auto'``. -If ``True``, a single file handle is created and used. If ``False``, a new -file handle is created every time the image is accessed. If ``'auto'``, and -the optional ``indexed_gzip`` dependency is present, a single file handle is -created and persisted. If ``indexed_gzip`` is not available, behaviour is the -same as if ``keep_file_open is False``. +Management of file handles will be performed either by ``ArrayProxy`` objects, +or by the ``indexed_gzip`` package if it is used. + +If this flag is set to ``True``, a single file handle is created and used. If +``False``, a new file handle is created every time the image is accessed. For +gzip files, if ``'auto'``, and the optional ``indexed_gzip`` dependency is +present, a single file handle is created and persisted. If ``indexed_gzip`` is +not available, behaviour is the same as if ``keep_file_open is False``. If this is set to any other value, attempts to create an ``ArrayProxy`` without specifying the ``keep_file_open`` flag will result in a ``ValueError`` being raised. + +.. warning:: Setting this flag to a value of ``'auto'`` will become deprecated + behaviour in version 2.4.0. Support for ``'auto'`` will be removed + in version 3.0.0. 
""" KEEP_FILE_OPEN_DEFAULT = False @@ -156,8 +163,10 @@ def __init__(self, file_like, spec, mmap=True, keep_file_open=None): # Permit any specifier that can be interpreted as a numpy dtype self._dtype = np.dtype(self._dtype) self._mmap = mmap - self._keep_file_open = self._should_keep_file_open(file_like, - keep_file_open) + # Flags to keep track of whether a single ImageOpener is created, and + # whether a single underlying file handle is created. + self._keep_file_open, self._persist_opener = \ + self._should_keep_file_open(file_like, keep_file_open) self._lock = RLock() def __del__(self): @@ -180,16 +189,64 @@ def __setstate__(self, state): self._lock = RLock() def _should_keep_file_open(self, file_like, keep_file_open): - """Called by ``__init__``, and used to determine the final value of - ``keep_file_open``. + """Called by ``__init__``. + + This method determines how to manage ``ImageOpener`` instances, + and the underlying file handles - the behaviour depends on: + + - whether ``file_like`` is an an open file handle, or a path to a + ``'.gz'`` file, or a path to a non-gzip file. + - whether ``indexed_gzip`` is present (see + :attr:`.openers.HAVE_INDEXED_GZIP`). + + An ``ArrayProxy`` object uses two internal flags to manage + ``ImageOpener`` instances and underlying file handles. + + - The ``_persist_opener`` flag controls whether a single + ``ImageOpener`` should be created and used for the lifetime of + this ``ArrayProxy``, or whether separate ``ImageOpener`` instances + should be created on each file access. + + - The ``_keep_file_open`` flag controls qwhether the underlying file + handle should be kept open for the lifetime of this + ``ArrayProxy``, or whether the file handle should be (re-)opened + and closed on each file access. + + The internal ``_keep_file_open`` flag is only relevant if + ``file_like`` is a ``'.gz'`` file, and the ``indexed_gzip`` library is + present. + + This method returns the values to be used for the internal + ``_persist_opener`` and ``_keep_file_open`` flags; these values are + derived according to the following rules: - The return value is derived from these rules: + 1. If ``file_like`` is a file(-like) object, both flags are set to + ``False``. - - If ``file_like`` is a file(-like) object, ``False`` is returned. - Otherwise, ``file_like`` is assumed to be a file name. - - if ``file_like`` ends with ``'gz'``, and the ``indexed_gzip`` - library is available, ``True`` is returned. - - Otherwise, ``False`` is returned. + 2. If ``keep_file_open`` (as passed to :meth:``__init__``) is + ``True``, both internal flags are set to ``True``. + + 3. If ``keep_file_open`` is ``False``, but ``file_like`` is not a path + to a ``.gz`` file or ``indexed_gzip`` is not present, both flags + are set to ``False``. + + 4. If ``keep_file_open`` is ``False``, ``file_like`` is a path to a + ``.gz`` file, and ``indexed_gzip`` is present, ``_persist_opener`` + is set to ``True``, and ``_keep_file_open`` is set to ``False``. + In this case, file handle management is delegated to the + ``indexed_gzip`` library. + + 5. If ``keep_file_open`` is ``'auto'``, ``file_like`` is a path to a + ``.gz`` file, and ``indexed_gzip`` is present, both internal flags + are set to ``True``. + + 6. If ``keep_file_open`` is ``'auto'``, and ``file_like`` is not a + path to a ``.gz`` file, or ``indexed_gzip`` is not present, both + internal flags are set to ``False``. 
+ + Note that a value of ``'auto'`` for ``keep_file_open`` will become + deprecated behaviour in version 2.4.0, and support for ``'auto'`` will + be removed in version 3.0.0. Parameters ---------- @@ -202,24 +259,27 @@ def _should_keep_file_open(self, file_like, keep_file_open): Returns ------- - The value of ``keep_file_open`` that will be used by this - ``ArrayProxy``. + A tuple containing: + - ``keep_file_open`` flag to control persistence of file handles + - ``persist_opener`` flag to control persistence of ``ImageOpener`` + objects. """ if keep_file_open is None: keep_file_open = KEEP_FILE_OPEN_DEFAULT - # if keep_file_open is True/False, we do what the user wants us to do - if isinstance(keep_file_open, bool): - return keep_file_open - if keep_file_open != 'auto': + if keep_file_open not in ('auto', True, False): raise ValueError('keep_file_open should be one of {None, ' '\'auto\', True, False}') - # file_like is a handle - keep_file_open is irrelevant if hasattr(file_like, 'read') and hasattr(file_like, 'seek'): - return False - # Otherwise, if file_like is gzipped, and we have_indexed_gzip, we set - # keep_file_open to True, else we set it to False - return HAVE_INDEXED_GZIP and file_like.endswith('gz') + return False, False + # if the file is a gzip file, and we have_indexed_gzip, + have_igzip = openers.HAVE_INDEXED_GZIP and file_like.endswith('.gz') + if keep_file_open == 'auto': + return have_igzip, have_igzip + elif keep_file_open: + return True, True + else: + return False, have_igzip @property @deprecate_with_version('ArrayProxy.header deprecated', '2.2', '3.0') @@ -230,6 +290,10 @@ def header(self): def shape(self): return self._shape + @property + def ndim(self): + return len(self.shape) + @property def dtype(self): return self._dtype @@ -263,12 +327,14 @@ def _get_fileobj(self): A newly created ``ImageOpener`` instance, or an existing one, which provides access to the file. 
""" - if self._keep_file_open: + if self._persist_opener: if not hasattr(self, '_opener'): - self._opener = ImageOpener(self.file_like, keep_open=True) + self._opener = openers.ImageOpener( + self.file_like, keep_open=self._keep_file_open) yield self._opener else: - with ImageOpener(self.file_like, keep_open=False) as opener: + with openers.ImageOpener( + self.file_like, keep_open=False) as opener: yield opener def get_unscaled(self): diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py index f55b8a2583..36921a106a 100644 --- a/nibabel/benchmarks/bench_array_to_file.py +++ b/nibabel/benchmarks/bench_array_to_file.py @@ -16,6 +16,7 @@ from __future__ import division, print_function import sys +from io import BytesIO # NOQA import numpy as np diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index 321a0779d5..c880aa0700 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -15,7 +15,6 @@ """ from timeit import timeit -import contextlib import gc import itertools as it import numpy as np @@ -51,7 +50,7 @@ ('?', '?', '?', ':'), ] -KEEP_OPENS = [False, True] +KEEP_OPENS = [False, True, 'auto'] if HAVE_INDEXED_GZIP: HAVE_IGZIP = [False, True] @@ -59,16 +58,6 @@ HAVE_IGZIP = [False] -@contextlib.contextmanager -def patch_indexed_gzip(have_igzip): - - atts = ['nibabel.openers.HAVE_INDEXED_GZIP', - 'nibabel.arrayproxy.HAVE_INDEXED_GZIP'] - - with mock.patch(atts[0], have_igzip), mock.patch(atts[1], have_igzip): - yield - - def bench_arrayproxy_slicing(): print_git_title('\nArrayProxy gzip slicing') @@ -154,14 +143,15 @@ def fmt_sliceobj(sliceobj): # load uncompressed and compressed versions of the image img = nib.load(testfile, keep_file_open=keep_open) - with patch_indexed_gzip(have_igzip): + with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', have_igzip): imggz = nib.load(testfilegz, keep_file_open=keep_open) def basefunc(): img.dataobj[fix_sliceobj(sliceobj)] def testfunc(): - with patch_indexed_gzip(have_igzip): + with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', + have_igzip): imggz.dataobj[fix_sliceobj(sliceobj)] # make sure nothing is floating around from the previous test diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py new file mode 100644 index 0000000000..9e521e61b6 --- /dev/null +++ b/nibabel/brikhead.py @@ -0,0 +1,627 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +""" +Class for reading AFNI BRIK/HEAD datasets + +See https://afni.nimh.nih.gov/pub/dist/doc/program_help/README.attributes.html +for information on what is required to have a valid BRIK/HEAD dataset. + +Unless otherwise noted, descriptions AFNI attributes in the code refer to this +document. + +Notes +----- + +In the AFNI HEAD file, the first two values of the attribute DATASET_RANK +determine the shape of the data array stored in the corresponding BRIK file. +The first value, DATASET_RANK[0], must be set to 3 denoting a 3D image. The +second value, DATASET_RANK[1], determines how many "sub-bricks" (in AFNI +parlance) / volumes there are along the fourth (traditionally, but not +exclusively) time axis. 
Thus, DATASET_RANK[1] will (at least as far as I (RM) +am aware) always be >= 1. This permits sub-brick indexing common in AFNI +programs (e.g., example4d+orig'[0]'). +""" +from __future__ import print_function, division + +from copy import deepcopy +import os +import re + +import numpy as np +from six import string_types + +from .arrayproxy import ArrayProxy +from .fileslice import strided_scalar +from .keywordonly import kw_only_meth +from .spatialimages import ( + SpatialImage, + SpatialHeader, + HeaderDataError, + ImageDataError +) +from .volumeutils import Recoder + +# used for doc-tests +filepath = os.path.dirname(os.path.realpath(__file__)) +datadir = os.path.realpath(os.path.join(filepath, 'tests/data')) + +_attr_dic = { + 'string': str, + 'integer': int, + 'float': float +} + +_endian_dict = { + 'LSB_FIRST': '<', + 'MSB_FIRST': '>', +} + +_dtype_dict = { + 0: 'B', + 1: 'h', + 3: 'f', + 5: 'D', +} + +space_codes = Recoder(( + (0, 'unknown', ''), + (1, 'scanner', 'ORIG'), + (3, 'talairach', 'TLRC'), + (4, 'mni', 'MNI')), fields=('code', 'label', 'space')) + + +class AFNIImageError(ImageDataError): + """Error when reading AFNI BRIK files""" + + +class AFNIHeaderError(HeaderDataError): + """Error when reading AFNI HEAD file""" + + +DATA_OFFSET = 0 +TYPE_RE = re.compile('type\s*=\s*(string|integer|float)-attribute\s*\n') +NAME_RE = re.compile('name\s*=\s*(\w+)\s*\n') + + +def _unpack_var(var): + """ + Parses key : value pair from `var` + + Parameters + ---------- + var : str + Entry from HEAD file + + Returns + ------- + name : str + Name of attribute + value : object + Value of attribute + + Examples + -------- + >>> var = "type = integer-attribute\\nname = BRICK_TYPES\\ncount = 1\\n1\\n" + >>> name, attr = _unpack_var(var) + >>> print(name, attr) + BRICK_TYPES 1 + >>> var = "type = string-attribute\\nname = TEMPLATE_SPACE\\ncount = 5\\n'ORIG~" + >>> name, attr = _unpack_var(var) + >>> print(name, attr) + TEMPLATE_SPACE ORIG + """ + + err_msg = ('Please check HEAD file to ensure it is AFNI compliant. ' + 'Offending attribute:\n%s' % var) + atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var) + if len(atype) != 1: + raise AFNIHeaderError('Invalid attribute type entry in HEAD file. ' + '%s' % err_msg) + if len(aname) != 1: + raise AFNIHeaderError('Invalid attribute name entry in HEAD file. ' + '%s' % err_msg) + atype = _attr_dic.get(atype[0], str) + attr = ' '.join(var.strip().splitlines()[3:]) + if atype is not str: + try: + attr = [atype(f) for f in attr.split()] + except ValueError: + raise AFNIHeaderError('Failed to read variable from HEAD file due ' + 'to improper type casting. %s' % err_msg) + else: + # AFNI string attributes will always start with open single quote and + # end with a tilde (NUL). These attributes CANNOT contain tildes (so + # stripping is safe), but can contain single quotes (so we replace) + attr = attr.replace('\'', '', 1).rstrip('~') + + return aname[0], attr[0] if len(attr) == 1 else attr + + +def _get_datatype(info): + """ + Gets datatype of BRIK file associated with HEAD file yielding `info` + + Parameters + ---------- + info : dict + As obtained by :func:`parse_AFNI_header` + + Returns + ------- + dt : np.dtype + Datatype of BRIK file associated with HEAD + + Notes + ----- + ``BYTEORDER_STRING`` may be absent, signifying platform native byte order, + or contain one of "LSB_FIRST" or "MSB_FIRST". + + ``BRICK_TYPES`` gives the storage data type for each sub-brick, with + 0=uint, 1=int16, 3=float32, 5=complex64 (see ``_dtype_dict``). 
This should + generally be the same value for each sub-brick in the dataset. + """ + bo = info['BYTEORDER_STRING'] + bt = info['BRICK_TYPES'] + if isinstance(bt, list): + if np.unique(bt).size > 1: + raise AFNIImageError('Can\'t load file with multiple data types.') + bt = bt[0] + bo = _endian_dict.get(bo, '=') + bt = _dtype_dict.get(bt, None) + if bt is None: + raise AFNIImageError('Can\'t deduce image data type.') + return np.dtype(bo + bt) + + +def parse_AFNI_header(fobj): + """ + Parses `fobj` to extract information from HEAD file + + Parameters + ---------- + fobj : file-like object + AFNI HEAD file object or filename. If file object, should + implement at least ``read`` + + Returns + ------- + info : dict + Dictionary containing AFNI-style key:value pairs from HEAD file + + Examples + -------- + >>> fname = os.path.join(datadir, 'example4d+orig.HEAD') + >>> info = parse_AFNI_header(fname) + >>> print(info['BYTEORDER_STRING']) + LSB_FIRST + >>> print(info['BRICK_TYPES']) + [1, 1, 1] + """ + # edge case for being fed a filename instead of a file object + if isinstance(fobj, string_types): + with open(fobj, 'rt') as src: + return parse_AFNI_header(src) + # unpack variables in HEAD file + head = fobj.read().split('\n\n') + return {key: value for key, value in map(_unpack_var, head)} + + +class AFNIArrayProxy(ArrayProxy): + """ Proxy object for AFNI image array. + + Attributes + ---------- + scaling : np.ndarray + Scaling factor (one factor per volume/sub-brick) for data. Default is + None + """ + + @kw_only_meth(2) + def __init__(self, file_like, header, mmap=True, keep_file_open=None): + """ + Initialize AFNI array proxy + + Parameters + ---------- + file_like : file-like object + File-like object or filename. If file-like object, should implement + at least ``read`` and ``seek``. + header : ``AFNIHeader`` object + mmap : {True, False, 'c', 'r'}, optional, keyword only + `mmap` controls the use of numpy memory mapping for reading data. + If False, do not try numpy ``memmap`` for data array. If one of + {'c', 'r'}, try numpy memmap with ``mode=mmap``. A `mmap` value of + True gives the same behavior as ``mmap='c'``. If `file_like` + cannot be memory-mapped, ignore `mmap` value and read array from + file. + keep_file_open : { None, 'auto', True, False }, optional, keyword only + `keep_file_open` controls whether a new file handle is created + every time the image is accessed, or a single file handle is + created and used for the lifetime of this ``ArrayProxy``. If + ``True``, a single file handle is created and used. If ``False``, + a new file handle is created every time the image is accessed. If + ``'auto'``, and the optional ``indexed_gzip`` dependency is + present, a single file handle is created and persisted. If + ``indexed_gzip`` is not available, behavior is the same as if + ``keep_file_open is False``. If ``file_like`` refers to an open + file handle, this setting has no effect. The default value + (``None``) will result in the value of + ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT` being used. 
+ """ + super(AFNIArrayProxy, self).__init__(file_like, + header, + mmap=mmap, + keep_file_open=keep_file_open) + self._scaling = header.get_data_scaling() + + @property + def scaling(self): + return self._scaling + + def __array__(self): + raw_data = self.get_unscaled() + # datatype may change if applying self._scaling + return raw_data if self.scaling is None else raw_data * self.scaling + + def __getitem__(self, slicer): + raw_data = super(AFNIArrayProxy, self).__getitem__(slicer) + # apply volume specific scaling (may change datatype!) + if self.scaling is not None: + fake_data = strided_scalar(self._shape) + _, scaling = np.broadcast_arrays(fake_data, self.scaling) + raw_data = raw_data * scaling[slicer] + return raw_data + + +class AFNIHeader(SpatialHeader): + """Class for AFNI header""" + + def __init__(self, info): + """ + Initialize AFNI header object + + Parameters + ---------- + info : dict + Information from HEAD file as obtained by :func:`parse_AFNI_header` + + Examples + -------- + >>> fname = os.path.join(datadir, 'example4d+orig.HEAD') + >>> header = AFNIHeader(parse_AFNI_header(fname)) + >>> header.get_data_dtype() + dtype('int16') + >>> header.get_zooms() + (3.0, 3.0, 3.0, 3.0) + >>> header.get_data_shape() + (33, 41, 25, 3) + """ + self.info = info + dt = _get_datatype(self.info) + super(AFNIHeader, self).__init__(data_dtype=dt, + shape=self._calc_data_shape(), + zooms=self._calc_zooms()) + + @classmethod + def from_header(klass, header=None): + if header is None: + raise AFNIHeaderError('Cannot create AFNIHeader from nothing.') + if type(header) == klass: + return header.copy() + raise AFNIHeaderError('Cannot create AFNIHeader from non-AFNIHeader.') + + @classmethod + def from_fileobj(klass, fileobj): + info = parse_AFNI_header(fileobj) + return klass(info) + + def copy(self): + return AFNIHeader(deepcopy(self.info)) + + def _calc_data_shape(self): + """ + Calculate the output shape of the image data + + Returns length 3 tuple for 3D image, length 4 tuple for 4D. + + Returns + ------- + (x, y, z, t) : tuple of int + + Notes + ----- + ``DATASET_RANK[0]`` gives number of spatial dimensions (and apparently + must be 3). ``DATASET_RANK[1]`` gives the number of sub-bricks. + ``DATASET_DIMENSIONS`` is length 3, giving the number of voxels in i, + j, k. + """ + dset_rank = self.info['DATASET_RANK'] + shape = tuple(self.info['DATASET_DIMENSIONS'][:dset_rank[0]]) + n_vols = dset_rank[1] + return shape + (n_vols,) + + def _calc_zooms(self): + """ + Get image zooms from header data + + Spatial axes are first three indices, time axis is last index. If + dataset is not a time series the last value will be zero. + + Returns + ------- + zooms : tuple + + Notes + ----- + Gets zooms from attributes ``DELTA`` and ``TAXIS_FLOATS``. + + ``DELTA`` gives (x,y,z) voxel sizes. + + ``TAXIS_FLOATS`` should be length 5, with first entry giving "Time + origin", and second giving "Time step (TR)". + """ + xyz_step = tuple(np.abs(self.info['DELTA'])) + t_step = self.info.get('TAXIS_FLOATS', (0, 0,)) + if len(t_step) > 0: + t_step = (t_step[1],) + return xyz_step + t_step + + def get_space(self): + """ + Return label for anatomical space to which this dataset is aligned. 
+ + Returns + ------- + space : str + AFNI "space" designation; one of [ORIG, ANAT, TLRC, MNI] + + Notes + ----- + There appears to be documentation for these spaces at + https://afni.nimh.nih.gov/pub/dist/atlases/elsedemo/AFNI_atlas_spaces.niml + """ + listed_space = self.info.get('TEMPLATE_SPACE', 0) + space = space_codes.space[listed_space] + return space + + def get_affine(self): + """ + Returns affine of dataset + + Examples + -------- + >>> fname = os.path.join(datadir, 'example4d+orig.HEAD') + >>> header = AFNIHeader(parse_AFNI_header(fname)) + >>> header.get_affine() + array([[ -3. , -0. , -0. , 49.5 ], + [ -0. , -3. , -0. , 82.312 ], + [ 0. , 0. , 3. , -52.3511], + [ 0. , 0. , 0. , 1. ]]) + """ + # AFNI default is RAI- == LPS+ == DICOM order. We need to flip RA sign + # to align with nibabel RAS+ system + affine = np.asarray(self.info['IJK_TO_DICOM_REAL']).reshape(3, 4) + affine = np.row_stack((affine * [[-1], [-1], [1]], + [0, 0, 0, 1])) + return affine + + def get_data_scaling(self): + """ + AFNI applies volume-specific data scaling + + Examples + -------- + >>> fname = os.path.join(datadir, 'scaled+tlrc.HEAD') + >>> header = AFNIHeader(parse_AFNI_header(fname)) + >>> header.get_data_scaling() + array([ 3.88336300e-08]) + """ + # BRICK_FLOAT_FACS has one value per sub-brick, such that the scaled + # values for sub-brick array [n] are the values read from disk * + # BRICK_FLOAT_FACS[n] + floatfacs = self.info.get('BRICK_FLOAT_FACS', None) + if floatfacs is None or not np.any(floatfacs): + return None + scale = np.ones(self.info['DATASET_RANK'][1]) + floatfacs = np.atleast_1d(floatfacs) + scale[floatfacs.nonzero()] = floatfacs[floatfacs.nonzero()] + return scale + + def get_slope_inter(self): + """ + Use `self.get_data_scaling()` instead + + Holdover because ``AFNIArrayProxy`` (inheriting from ``ArrayProxy``) + requires this functionality so as to not error. + """ + return None, None + + def get_data_offset(self): + """Data offset in BRIK file + + Offset is always 0. + """ + return DATA_OFFSET + + def get_volume_labels(self): + """ + Returns volume labels + + Returns + ------- + labels : list of str + Labels for volumes along fourth dimension + + Examples + -------- + >>> header = AFNIHeader(parse_AFNI_header(os.path.join(datadir, 'example4d+orig.HEAD'))) + >>> header.get_volume_labels() + ['#0', '#1', '#2'] + """ + labels = self.info.get('BRICK_LABS', None) + if labels is not None: + labels = labels.split('~') + return labels + + +class AFNIImage(SpatialImage): + """ + AFNI Image file + + Can be loaded from either the BRIK or HEAD file (but MUST specify one!) + + Examples + -------- + >>> import nibabel as nib + >>> brik = nib.load(os.path.join(datadir, 'example4d+orig.BRIK.gz')) + >>> brik.shape + (33, 41, 25, 3) + >>> brik.affine + array([[ -3. , -0. , -0. , 49.5 ], + [ -0. , -3. , -0. , 82.312 ], + [ 0. , 0. , 3. , -52.3511], + [ 0. , 0. , 0. , 1. 
]]) + >>> head = load(os.path.join(datadir, 'example4d+orig.HEAD')) + >>> np.array_equal(head.get_data(), brik.get_data()) + True + """ + + header_class = AFNIHeader + valid_exts = ('.brik', '.head') + files_types = (('image', '.brik'), ('header', '.head')) + _compressed_suffixes = ('.gz', '.bz2', '.Z') + makeable = False + rw = False + ImageArrayProxy = AFNIArrayProxy + + @classmethod + @kw_only_meth(1) + def from_file_map(klass, file_map, mmap=True, keep_file_open=None): + """ + Creates an AFNIImage instance from `file_map` + + Parameters + ---------- + file_map : dict + dict with keys ``image, header`` and values being fileholder + objects for the respective BRIK and HEAD files + mmap : {True, False, 'c', 'r'}, optional, keyword only + `mmap` controls the use of numpy memory mapping for reading image + array data. If False, do not try numpy ``memmap`` for data array. + If one of {'c', 'r'}, try numpy memmap with ``mode=mmap``. A + `mmap` value of True gives the same behavior as ``mmap='c'``. If + image data file cannot be memory-mapped, ignore `mmap` value and + read array from file. + keep_file_open : {None, 'auto', True, False}, optional, keyword only + `keep_file_open` controls whether a new file handle is created + every time the image is accessed, or a single file handle is + created and used for the lifetime of this ``ArrayProxy``. If + ``True``, a single file handle is created and used. If ``False``, + a new file handle is created every time the image is accessed. If + ``'auto'``, and the optional ``indexed_gzip`` dependency is + present, a single file handle is created and persisted. If + ``indexed_gzip`` is not available, behavior is the same as if + ``keep_file_open is False``. If ``file_like`` refers to an open + file handle, this setting has no effect. The default value + (``None``) will result in the value of + ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT` being used. + """ + with file_map['header'].get_prepare_fileobj('rt') as hdr_fobj: + hdr = klass.header_class.from_fileobj(hdr_fobj) + imgf = file_map['image'].fileobj + imgf = file_map['image'].filename if imgf is None else imgf + data = klass.ImageArrayProxy(imgf, hdr.copy(), mmap=mmap, + keep_file_open=keep_file_open) + return klass(data, hdr.get_affine(), header=hdr, extra=None, + file_map=file_map) + + @classmethod + @kw_only_meth(1) + def from_filename(klass, filename, mmap=True, keep_file_open=None): + """ + Creates an AFNIImage instance from `filename` + + Parameters + ---------- + filename : str + Path to BRIK or HEAD file to be loaded + mmap : {True, False, 'c', 'r'}, optional, keyword only + `mmap` controls the use of numpy memory mapping for reading image + array data. If False, do not try numpy ``memmap`` for data array. + If one of {'c', 'r'}, try numpy memmap with ``mode=mmap``. A + `mmap` value of True gives the same behavior as ``mmap='c'``. If + image data file cannot be memory-mapped, ignore `mmap` value and + read array from file. + keep_file_open : {None, 'auto', True, False}, optional, keyword only + `keep_file_open` controls whether a new file handle is created + every time the image is accessed, or a single file handle is + created and used for the lifetime of this ``ArrayProxy``. If + ``True``, a single file handle is created and used. If ``False``, + a new file handle is created every time the image is accessed. If + ``'auto'``, and the optional ``indexed_gzip`` dependency is + present, a single file handle is created and persisted. 
If + ``indexed_gzip`` is not available, behavior is the same as if + ``keep_file_open is False``. If ``file_like`` refers to an open + file handle, this setting has no effect. The default value + (``None``) will result in the value of + ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT` being used. + """ + file_map = klass.filespec_to_file_map(filename) + return klass.from_file_map(file_map, mmap=mmap, + keep_file_open=keep_file_open) + + @classmethod + def filespec_to_file_map(klass, filespec): + """ + Make `file_map` from filename `filespec` + + AFNI BRIK files can be compressed, but HEAD files cannot - see + afni.nimh.nih.gov/pub/dist/doc/program_help/README.compression.html. + Thus, if you have AFNI files my_image.HEAD and my_image.BRIK.gz and you + want to load the AFNI BRIK / HEAD pair, you can specify: + * The HEAD filename - e.g., my_image.HEAD + * The BRIK filename w/o compressed extension - e.g., my_image.BRIK + * The full BRIK filename - e.g., my_image.BRIK.gz + + Parameters + ---------- + filespec : str + Filename that might be for this image file type. + + Returns + ------- + file_map : dict + dict with keys ``image`` and ``header`` where values are fileholder + objects for the respective BRIK and HEAD files + + Raises + ------ + ImageFileError + If `filespec` is not recognizable as being a filename for this + image type. + """ + file_map = super(AFNIImage, klass).filespec_to_file_map(filespec) + # check for AFNI-specific BRIK/HEAD compression idiosyncrasies + for key, fholder in file_map.items(): + fname = fholder.filename + if key == 'header' and not os.path.exists(fname): + for ext in klass._compressed_suffixes: + fname = fname[:-len(ext)] if fname.endswith(ext) else fname + elif key == 'image' and not os.path.exists(fname): + for ext in klass._compressed_suffixes: + if os.path.exists(fname + ext): + fname += ext + break + file_map[key].filename = fname + return file_map + + load = from_filename + + +load = AFNIImage.load diff --git a/nibabel/casting.py b/nibabel/casting.py index 0ad0d5a5ca..ebdd96d550 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -268,6 +268,9 @@ def type_info(np_type): # 80) but in calculations nexp in fact appears to be 11 as for float64 ret.update(dict(width=width)) return ret + if vals == (105, 11, 16): # correctly detected double double + ret.update(dict(nmant=nmant, nexp=nexp, width=width)) + return ret # Oh dear, we don't recognize the type information. Try some known types # and then give up. At this stage we're expecting exotic longdouble or # their complex equivalent. 
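The ``casting.py`` hunk above teaches ``type_info`` to recognize the IBM
double-double format used for ``long double`` on little-endian PPC64,
identified by the triple (nmant=105, nexp=11, width=16). A minimal sketch of
how this surfaces to callers - the printed values are platform-dependent, and
only the public ``nibabel.casting.type_info`` helper touched by the hunk is
assumed::

    import numpy as np
    from nibabel.casting import type_info

    # On ppc64le, np.longdouble is an IBM double-double; with the branch
    # added above, type_info() reports nmant=105, nexp=11, width=16 rather
    # than falling through to the error path.  On x86 the usual 80-bit
    # extended type gives nmant=63, nexp=15 instead.
    info = type_info(np.longdouble)
    print(info['nmant'], info['nexp'], info['width'])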
diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index 4b3d5fa267..b9919eb2e1 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -517,28 +517,28 @@ def flush_chardata(self): # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) vertices = self.struct_state[-1] - vertices.extend(np.genfromtxt(c, dtype=np.int)) + vertices.extend(np.loadtxt(c, dtype=np.int)) c.close() elif self.write_to == 'VoxelIndices': # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) parent = self.struct_state[-1] - parent.voxel_indices_ijk.extend(np.genfromtxt(c, dtype=np.int).reshape(-1, 3)) + parent.voxel_indices_ijk.extend(np.loadtxt(c, dtype=np.int).reshape(-1, 3)) c.close() elif self.write_to == 'VertexIndices': # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) index = self.struct_state[-1] - index.extend(np.genfromtxt(c, dtype=np.int)) + index.extend(np.loadtxt(c, dtype=np.int)) c.close() elif self.write_to == 'TransformMatrix': # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) transform = self.struct_state[-1] - transform.matrix = np.genfromtxt(c, dtype=np.float) + transform.matrix = np.loadtxt(c, dtype=np.float) c.close() elif self.write_to == 'Label': diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py new file mode 100644 index 0000000000..c54c07f966 --- /dev/null +++ b/nibabel/cmdline/dicomfs.py @@ -0,0 +1,241 @@ +#!python +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. 
+# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# Copyright (C) 2011 Christian Haselgrove + +import sys +import os +import stat +import errno +import time +import locale +import logging + + +class dummy_fuse(object): + """Dummy fuse "module" so that nose does not blow during doctests""" + Fuse = object + + +try: + import fuse + uid = os.getuid() + gid = os.getgid() +except ImportError: + fuse = dummy_fuse + +import nibabel as nib +import nibabel.dft as dft + +from optparse import OptionParser, Option + +encoding = locale.getdefaultlocale()[1] + +fuse.fuse_python_api = (0, 2) + +logger = logging.getLogger('nibabel.dft') + + +class FileHandle: + + def __init__(self, fno): + self.fno = fno + self.keep_cache = False + self.direct_io = False + return + + def __str__(self): + return 'FileHandle(%d)' % self.fno + + +class DICOMFS(fuse.Fuse): + + def __init__(self, *args, **kwargs): + if fuse is dummy_fuse: + raise RuntimeError( + "fuse module is not available, install it to use DICOMFS") + self.followlinks = kwargs.pop('followlinks', False) + self.dicom_path = kwargs.pop('dicom_path', None) + fuse.Fuse.__init__(self, *args, **kwargs) + self.fhs = {} + return + + def get_paths(self): + paths = {} + for study in dft.get_studies(self.dicom_path, self.followlinks): + pd = paths.setdefault(study.patient_name_or_uid(), {}) + patient_info = 'patient information\n' + patient_info = 'name: %s\n' % study.patient_name + patient_info += 'ID: %s\n' % study.patient_id + patient_info += 'birth date: %s\n' % study.patient_birth_date + patient_info += 'sex: %s\n' % study.patient_sex + pd['INFO'] = patient_info.encode('ascii', 'replace') + study_datetime = '%s_%s' % (study.date, study.time) + study_info = 'study info\n' + study_info += 'UID: %s\n' % study.uid + study_info += 'date: %s\n' % study.date + study_info += 'time: %s\n' % study.time + study_info += 'comments: %s\n' % study.comments + d = {'INFO': study_info.encode('ascii', 'replace')} + for series in study.series: + series_info = 'series info\n' + series_info += 'UID: %s\n' % series.uid + series_info += 'number: %s\n' % series.number + series_info += 'description: %s\n' % series.description + series_info += 'rows: %d\n' % series.rows + series_info += 'columns: %d\n' % series.columns + series_info += 'bits allocated: %d\n' % series.bits_allocated + series_info += 'bits stored: %d\n' % series.bits_stored + series_info += 'storage instances: %d\n' % len(series.storage_instances) + d[series.number] = {'INFO': series_info.encode('ascii', 'replace'), + '%s.nii' % series.number: (series.nifti_size, series.as_nifti), + '%s.png' % series.number: (series.png_size, series.as_png)} + pd[study_datetime] = d + return paths + + def match_path(self, path): + wd = self.get_paths() + if path == '/': + logger.debug('return root') + return wd + for part in path.lstrip('/').split('/'): + logger.debug("path:%s part:%s" % (path, part)) + if part not in wd: + return None + wd = wd[part] + logger.debug('return') + return wd + + def readdir(self, path, fh): + logger.info('readdir %s' % (path,)) + matched_path = self.match_path(path) + if matched_path is None: + return -errno.ENOENT + logger.debug('matched %s' % (matched_path,)) + fnames = [k.encode('ascii', 'replace') for k in matched_path.keys()] + fnames.append('.') + fnames.append('..') + return [fuse.Direntry(f) for f in fnames] + + def getattr(self, path): + logger.debug('getattr %s' % path) + matched_path = self.match_path(path) + logger.debug('matched: %s' % (matched_path,)) + now = 
time.time() + st = fuse.Stat() + if isinstance(matched_path, dict): + st.st_mode = stat.S_IFDIR | 0o755 + st.st_ctime = now + st.st_mtime = now + st.st_atime = now + st.st_uid = uid + st.st_gid = gid + st.st_nlink = len(matched_path) + return st + if isinstance(matched_path, str): + st.st_mode = stat.S_IFREG | 0o644 + st.st_ctime = now + st.st_mtime = now + st.st_atime = now + st.st_uid = uid + st.st_gid = gid + st.st_size = len(matched_path) + st.st_nlink = 1 + return st + if isinstance(matched_path, tuple): + st.st_mode = stat.S_IFREG | 0o644 + st.st_ctime = now + st.st_mtime = now + st.st_atime = now + st.st_uid = uid + st.st_gid = gid + st.st_size = matched_path[0]() + st.st_nlink = 1 + return st + return -errno.ENOENT + + def open(self, path, flags): + logger.debug('open %s' % (path,)) + matched_path = self.match_path(path) + if matched_path is None: + return -errno.ENOENT + for i in range(1, 10): + if i not in self.fhs: + if isinstance(matched_path, str): + self.fhs[i] = matched_path + elif isinstance(matched_path, tuple): + self.fhs[i] = matched_path[1]() + else: + raise -errno.EFTYPE + return FileHandle(i) + raise -errno.ENFILE + + # not done + def read(self, path, size, offset, fh): + logger.debug('read') + logger.debug(path) + logger.debug(size) + logger.debug(offset) + logger.debug(fh) + return self.fhs[fh.fno][offset:offset + size] + + def release(self, path, flags, fh): + logger.debug('release') + logger.debug(path) + logger.debug(fh) + del self.fhs[fh.fno] + return + + +def get_opt_parser(): + # use module docstring for help output + p = OptionParser( + usage="%s [OPTIONS] " + % os.path.basename(sys.argv[0]), + version="%prog " + nib.__version__) + + p.add_options([ + Option("-v", "--verbose", action="count", + dest="verbose", default=0, + help="make noise. Could be specified multiple times"), + ]) + + p.add_options([ + Option("-L", "--follow-links", action="store_true", + dest="followlinks", default=False, + help="Follow symbolic links in DICOM directory"), + ]) + return p + + +def main(args=None): + parser = get_opt_parser() + (opts, files) = parser.parse_args(args=args) + + if opts.verbose: + logger.addHandler(logging.StreamHandler(sys.stdout)) + logger.setLevel(opts.verbose > 1 and logging.DEBUG or logging.INFO) + + if len(files) != 2: + sys.stderr.write("Please provide two arguments:\n%s\n" % parser.usage) + sys.exit(1) + + fs = DICOMFS( + dash_s_do='setsingle', + followlinks=opts.followlinks, + dicom_path=files[0].decode(encoding) + ) + fs.parse(['-f', '-s', files[1]]) + try: + fs.main() + except fuse.FuseError: + # fuse prints the error message + sys.exit(1) + + sys.exit(0) diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py new file mode 100755 index 0000000000..4b8b69381c --- /dev/null +++ b/nibabel/cmdline/diff.py @@ -0,0 +1,365 @@ +#!python +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. 
+#
+### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+"""
+Quick summary of the differences among a set of neuroimaging files
+"""
+from __future__ import division, print_function, absolute_import
+
+import re
+import sys
+from collections import OrderedDict
+from optparse import OptionParser, Option
+
+import numpy as np
+
+import nibabel as nib
+import nibabel.cmdline.utils
+import hashlib
+import os
+
+
+def get_opt_parser():
+    # use module docstring for help output
+    p = OptionParser(
+        usage="%s [OPTIONS] [FILE ...]\n\n" % sys.argv[0] + __doc__,
+        version="%prog " + nib.__version__)
+
+    p.add_options([
+        Option("-v", "--verbose", action="count",
+               dest="verbose", default=0,
+               help="Make more noise. Could be specified multiple times"),
+
+        Option("-H", "--header-fields",
+               dest="header_fields", default='all',
+               help="Header fields (comma separated) to be printed as well"
+                    " (if present)"),
+
+        Option("--ma", "--data-max-abs-diff",
+               dest="data_max_abs_diff",
+               type=float,
+               default=0.0,
+               help="Maximal absolute difference in data between files"
+                    " to tolerate."),
+
+        Option("--mr", "--data-max-rel-diff",
+               dest="data_max_rel_diff",
+               type=float,
+               default=0.0,
+               help="Maximal relative difference in data between files to"
+                    " tolerate. If --data-max-abs-diff is also specified,"
+                    " only the data points with absolute difference greater"
+                    " than that value would be considered for relative"
+                    " difference check."),
+        Option("--dt", "--datatype",
+               dest="dtype",
+               default=np.float64,
+               help="Enter a numpy datatype such as 'float32'.")
+    ])
+
+    return p
+
+
+def are_values_different(*values):
+    """Generically compare values, return True if different
+
+    Note that the comparison is targeted at reporting differences between
+    headers, so it has the following specifics:
+    - even a difference in data types is considered a difference,
+      i.e. 1 != 1.0
+    - nans are considered to be the "same", although generally nan != nan
+    """
+    value0 = values[0]
+
+    # to not recompute over again
+    if isinstance(value0, np.ndarray):
+        try:
+            # np.asanyarray for elderly numpys, e.g. 1.7.1, where for
+            # degenerate arrays (shape ()) it would return a pure scalar
+            value0_nans = np.asanyarray(np.isnan(value0))
+            value0_nonnans = np.asanyarray(np.logical_not(value0_nans))
+            if not np.any(value0_nans):
+                value0_nans = None
+        except TypeError as exc:
+            str_exc = str(exc)
+            # Not implemented in numpy 1.7.1
+            if "not supported" in str_exc or "ot implemented" in str_exc:
+                value0_nans = None
+            else:
+                raise
+
+    for value in values[1:]:
+        # if types are different, then we consider the values different
+        if type(value0) != type(value):
+            return True
+        elif isinstance(value0, np.ndarray):
+            if value0.dtype != value.dtype or \
+                    value0.shape != value.shape:
+                return True
+            # there might be nans and they need special treatment
+            if value0_nans is not None:
+                value_nans = np.isnan(value)
+                if np.any(value0_nans != value_nans):
+                    return True
+                if np.any(value0[value0_nonnans] != value[value0_nonnans]):
+                    return True
+            elif np.any(value0 != value):
+                return True
+        elif value0 is np.nan:
+            if value is not np.nan:
+                return True
+        elif value0 != value:
+            return True
+
+    return False
+
+
+def get_headers_diff(file_headers, names=None):
+    """Get difference between headers
+
+    Parameters
+    ----------
+    file_headers: list of actual headers (dicts) from files
+    names: list of header fields to test
+
+    Returns
+    -------
+    dict
+      for each header field that differs, a list of the values, one per
+      file
+    """
+    difference = OrderedDict()
+    fields = names
+
+    if names is None:
+        fields = file_headers[0].keys()
+
+    # for each header field
+    for field in fields:
+        values = [header.get(field) for header in file_headers]  # get corresponding value
+
+        # if these values are different, store them in a dictionary
+        if are_values_different(*values):
+            difference[field] = values
+
+    return difference
+
+
+def get_data_hash_diff(files, dtype=np.float64):
+    """Get difference between md5 values of data
+
+    Parameters
+    ----------
+    files: list of actual files
+    dtype: np.dtype, optional
+      Datatype used when extracting the data
+
+    Returns
+    -------
+    list
+      md5 hex digests of the respective files' data, or an empty list if
+      they all match
+    """
+
+    md5sums = [
+        hashlib.md5(np.ascontiguousarray(nib.load(f).get_fdata(dtype=dtype))).hexdigest()
+        for f in files
+    ]
+
+    if len(set(md5sums)) == 1:
+        return []
+
+    return md5sums
+
+
+def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64):
+    """Get difference between data
+
+    Parameters
+    ----------
+    files: list of (str or ndarray)
+      If a list of strings is provided, they must be existing file names
+    max_abs: float, optional
+      Maximal absolute difference to tolerate.
+    max_rel: float, optional
+      Maximal relative difference (``abs(diff)`` over the mean of the
+      compared absolute values) to tolerate.  If `max_abs` is specified,
+      data points whose absolute difference does not exceed that value are
+      not considered for relative difference testing
+    dtype: np, optional
+      Datatype to be used when extracting data from files
+
+    Returns
+    -------
+    diffs: OrderedDict
+      An ordered dict with a record for each file that differs from any of
+      the files compared after it.  Each record is a list of difference
+      records, one per file pair.  Each difference record is an OrderedDict
+      with possible keys 'abs' or 'rel' showing the maximal absolute or
+      relative differences in the file, or the record ('CMP': 'incompat')
+      if the file shapes are incompatible.
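+
+    Examples
+    --------
+    A minimal sketch with in-memory arrays, under the default thresholds
+    (doctest skipped because exact scalar reprs vary across numpy
+    versions):
+
+    >>> import numpy as np
+    >>> a = np.arange(4, dtype=np.float64).reshape(2, 2)
+    >>> get_data_diff([a, a + 1])  # doctest: +SKIP
+    OrderedDict([('DATA(diff 1:)',
+                  [None, OrderedDict([('abs', 1.0), ('rel', 2.0)])])])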
+ """ + + # we are doomed to keep them in RAM now + data = [f if isinstance(f, np.ndarray) else nib.load(f).get_fdata(dtype=dtype) + for f in files] + diffs = OrderedDict() + for i, d1 in enumerate(data[:-1]): + # populate empty entries for non-compared + diffs1 = [None] * (i + 1) + + for j, d2 in enumerate(data[i + 1:], i + 1): + + if d1.shape == d2.shape: + abs_diff = np.abs(d1 - d2) + mean_abs = (np.abs(d1) + np.abs(d2)) * 0.5 + candidates = np.logical_or(mean_abs != 0, abs_diff != 0) + + if max_abs: + candidates[abs_diff <= max_abs] = False + + max_abs_diff = np.max(abs_diff) + if np.any(candidates): + rel_diff = abs_diff[candidates] / mean_abs[candidates] + if max_rel: + sub_thr = rel_diff <= max_rel + # Since we operated on sub-selected values already, we need + # to plug them back in + candidates[ + tuple((indexes[sub_thr] for indexes in np.where(candidates))) + ] = False + max_rel_diff = np.max(rel_diff) + else: + max_rel_diff = 0 + + if np.any(candidates): + + diff_rec = OrderedDict() # so that abs goes before relative + + diff_rec['abs'] = max_abs_diff.astype(dtype) + diff_rec['rel'] = max_rel_diff.astype(dtype) + diffs1.append(diff_rec) + else: + diffs1.append(None) + + else: + diffs1.append({'CMP': "incompat"}) + + if any(diffs1): + + diffs['DATA(diff %d:)' % (i + 1)] = diffs1 + + return diffs + + +def display_diff(files, diff): + """Format header differences into a nice string + + Parameters + ---------- + files: list of files that were compared so we can print their names + diff: dict of different valued header fields + + Returns + ------- + str + string-formatted table of differences + """ + output = "" + field_width = "{:<15}" + filename_width = "{:<53}" + value_width = "{:<55}" + + output += "These files are different.\n" + output += field_width.format('Field/File') + + for i, f in enumerate(files, 1): + output += "%d:%s" % (i, filename_width.format(os.path.basename(f))) + + output += "\n" + + for key, value in diff.items(): + output += field_width.format(key) + + for item in value: + if isinstance(item, dict): + item_str = ', '.join('%s: %s' % i for i in item.items()) + elif item is None: + item_str = '-' + else: + item_str = str(item) + # Value might start/end with some invisible spacing characters so we + # would "condition" it on both ends a bit + item_str = re.sub('^[ \t]+', '<', item_str) + item_str = re.sub('[ \t]+$', '>', item_str) + # and also replace some other invisible symbols with a question + # mark + item_str = re.sub('[\x00]', '?', item_str) + output += value_width.format(item_str) + + output += "\n" + + return output + + +def diff(files, header_fields='all', data_max_abs_diff=None, + data_max_rel_diff=None, dtype=np.float64): + assert len(files) >= 2, "Please enter at least two files" + + file_headers = [nib.load(f).header for f in files] + + # signals "all fields" + if header_fields == 'all': + # TODO: header fields might vary across file types, + # thus prior sensing would be needed + header_fields = file_headers[0].keys() + else: + header_fields = header_fields.split(',') + + diff = get_headers_diff(file_headers, header_fields) + + data_md5_diffs = get_data_hash_diff(files, dtype) + if data_md5_diffs: + # provide details, possibly triggering the ignore of the difference + # in data + data_diffs = get_data_diff(files, + max_abs=data_max_abs_diff, + max_rel=data_max_rel_diff, + dtype=dtype) + if data_diffs: + diff['DATA(md5)'] = data_md5_diffs + diff.update(data_diffs) + + return diff + + +def main(args=None, out=None): + """Getting the show on the 
road""" + + out = out or sys.stdout + parser = get_opt_parser() + (opts, files) = parser.parse_args(args) + + nibabel.cmdline.utils.verbose_level = opts.verbose + + if nibabel.cmdline.utils.verbose_level < 3: + # suppress nibabel format-compliance warnings + nib.imageglobals.logger.level = 50 + + files_diff = diff( + files, + header_fields=opts.header_fields, + data_max_abs_diff=opts.data_max_abs_diff, + data_max_rel_diff=opts.data_max_rel_diff, + dtype=opts.dtype + ) + + if files_diff: + out.write(display_diff(files, files_diff)) + raise SystemExit(1) + else: + out.write("These files are identical.\n") + raise SystemExit(0) diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index 98f75e21dc..f919700247 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -21,8 +21,7 @@ import nibabel.cmdline.utils from nibabel.cmdline.utils import _err, verbose, table2string, ap, safe_get -__author__ = 'Yaroslav Halchenko' -__copyright__ = 'Copyright (c) 2011-2016 Yaroslav Halchenko ' \ +__copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko ' \ 'and NiBabel contributors' __license__ = 'MIT' @@ -153,11 +152,11 @@ def proc_file(f, opts): return row -def main(): +def main(args=None): """Show must go on""" parser = get_opt_parser() - (opts, files) = parser.parse_args() + (opts, files) = parser.parse_args(args=args) nibabel.cmdline.utils.verbose_level = opts.verbose diff --git a/nibabel/cmdline/nifti_dx.py b/nibabel/cmdline/nifti_dx.py new file mode 100644 index 0000000000..e478b5a5c2 --- /dev/null +++ b/nibabel/cmdline/nifti_dx.py @@ -0,0 +1,38 @@ +#!python +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +''' Print nifti diagnostics for header files ''' + +import sys +from optparse import OptionParser + +import nibabel as nib + +__author__ = 'Matthew Brett' +__copyright__ = 'Copyright (c) 2011-18 Matthew Brett ' \ + 'and NiBabel contributors' +__license__ = 'MIT' + + +def main(args=None): + """ Go go team """ + parser = OptionParser( + usage="%s [FILE ...]\n\n" % sys.argv[0] + __doc__, + version="%prog " + nib.__version__) + (opts, files) = parser.parse_args(args=args) + + for fname in files: + with nib.openers.ImageOpener(fname) as fobj: + hdr = fobj.read(nib.nifti1.header_dtype.itemsize) + result = nib.Nifti1Header.diagnose_binaryblock(hdr) + if len(result): + print('Picky header check output for "%s"\n' % fname) + print(result + '\n') + else: + print('Header for "%s" is clean' % fname) diff --git a/nibabel/parrec2nii_cmd.py b/nibabel/cmdline/parrec2nii.py similarity index 100% rename from nibabel/parrec2nii_cmd.py rename to nibabel/cmdline/parrec2nii.py diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py new file mode 100644 index 0000000000..deb3adcd5f --- /dev/null +++ b/nibabel/cmdline/tck2trk.py @@ -0,0 +1,56 @@ +""" +Convert tractograms (TCK -> TRK). +""" +import os +import argparse + +import nibabel as nib + +from nibabel.streamlines import Field +from nibabel.orientations import aff2axcodes + + +def parse_args(): + DESCRIPTION = "Convert tractograms (TCK -> TRK)." 
+    parser = argparse.ArgumentParser(description=DESCRIPTION)
+    parser.add_argument("anatomy",
+                        help="reference anatomical image (.nii|.nii.gz).")
+    parser.add_argument("tractograms", metavar="tractogram", nargs="+",
+                        help="list of tractograms (.tck).")
+    parser.add_argument("-f", "--force", action="store_true",
+                        help="overwrite existing output files.")
+
+    args = parser.parse_args()
+    return args, parser
+
+
+def main():
+    args, parser = parse_args()
+
+    try:
+        nii = nib.load(args.anatomy)
+    except Exception:
+        parser.error("Expecting anatomical image as first argument.")
+
+    for tractogram in args.tractograms:
+        tractogram_format = nib.streamlines.detect_format(tractogram)
+        if tractogram_format is not nib.streamlines.TckFile:
+            print("Skipping non TCK file: '{}'".format(tractogram))
+            continue
+
+        filename, _ = os.path.splitext(tractogram)
+        output_filename = filename + '.trk'
+        if os.path.isfile(output_filename) and not args.force:
+            msg = "Skipping existing file: '{}'. Use -f to overwrite."
+            print(msg.format(output_filename))
+            continue
+
+        # Build header using info from the anatomical image.
+        header = {}
+        header[Field.VOXEL_TO_RASMM] = nii.affine.copy()
+        header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3]
+        header[Field.DIMENSIONS] = nii.shape[:3]
+        header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine))
+
+        tck = nib.streamlines.load(tractogram)
+        nib.streamlines.save(tck.tractogram, output_filename, header=header)
diff --git a/nibabel/cmdline/tests/__init__.py b/nibabel/cmdline/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nibabel/tests/test_parrec2nii.py b/nibabel/cmdline/tests/test_parrec2nii.py
similarity index 91%
rename from nibabel/tests/test_parrec2nii.py
rename to nibabel/cmdline/tests/test_parrec2nii.py
index aa018b24d0..c5b5831270 100644
--- a/nibabel/tests/test_parrec2nii.py
+++ b/nibabel/cmdline/tests/test_parrec2nii.py
@@ -6,7 +6,7 @@
 from numpy import array as npa
 
 import nibabel
-from nibabel import parrec2nii_cmd as parrec2nii
+from nibabel.cmdline import parrec2nii
 from mock import Mock, MagicMock, patch
 from nose.tools import assert_true
 
@@ -29,10 +29,10 @@
     [ 0. , 0. , 0. , 1. ]])
 
 
-@patch('nibabel.parrec2nii_cmd.verbose')
-@patch('nibabel.parrec2nii_cmd.io_orientation')
-@patch('nibabel.parrec2nii_cmd.nifti1')
-@patch('nibabel.parrec2nii_cmd.pr')
+@patch('nibabel.cmdline.parrec2nii.verbose')
+@patch('nibabel.cmdline.parrec2nii.io_orientation')
+@patch('nibabel.cmdline.parrec2nii.nifti1')
+@patch('nibabel.cmdline.parrec2nii.pr')
 def test_parrec2nii_sets_qform_sform_code1(*args):
     # Check that set_sform(), set_qform() are called on the new header.
     parrec2nii.verbose.switch = False
@@ -67,7 +67,7 @@ def test_parrec2nii_sets_qform_sform_code1(*args):
     nhdr.set_sform.assert_called_with(AN_OLD_AFFINE, code=1)
 
 
-@patch('nibabel.parrec2nii_cmd.verbose')
+@patch('nibabel.cmdline.parrec2nii.verbose')
 def test_parrec2nii_save_load_qform_code(*args):
     # Tests that after parrec2nii saves file, it has the sform and qform 'code'
     # set to '1', which means 'scanner', so that other software, e.g.
FSL picks diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index 8dcd09e261..e701925870 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -5,13 +5,17 @@ Test running scripts """ -from numpy.testing import (assert_almost_equal, - assert_array_equal) - -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal, assert_not_equal) +from nose.tools import assert_equal +from numpy.testing import assert_raises +import nibabel as nib +import numpy as np from nibabel.cmdline.utils import * +from nibabel.cmdline.diff import * +from os.path import (join as pjoin) +from nibabel.testing import data_path +from collections import OrderedDict +from six import StringIO def test_table2string(): @@ -42,3 +46,159 @@ def get_test(self): assert_equal(safe_get(test, "test"), 2) assert_equal(safe_get(test, "failtest"), "-") + + +def test_get_headers_diff(): + fnames = [pjoin(data_path, f) + for f in ('standard.nii.gz', 'example4d.nii.gz')] + actual_difference = get_headers_diff([nib.load(f).header for f in fnames]) + expected_difference = OrderedDict([ + ("regular", [np.asarray("".encode("utf-8")), np.asarray("r".encode("utf-8"))]), + ("dim_info", [np.asarray(0).astype(dtype="uint8"), np.asarray(57).astype(dtype="uint8")]), + ("dim", [np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype="int16"), + np.array([ 4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype="int16")]), + ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), + ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]), + ("pixdim", [np.array([ 1., 1., 3., 2., 1., 1., 1., 1.]).astype(dtype="float32"), np.array( + [ -1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.19999909e+00, 2.00000000e+03, 1.00000000e+00, + 1.00000000e+00, 1.00000000e+00]).astype(dtype="float32")]), + ("slice_end", [np.array(0).astype(dtype="uint8"), np.array(23).astype(dtype="uint8")]), + ("xyzt_units", [np.array(0).astype(dtype="uint8"), np.array(10).astype(dtype="uint8")]), + ("cal_max", [np.array(0.0).astype(dtype="float32"), np.asarray(1162.0).astype(dtype="float32")]), + ("descrip", [np.array("".encode("utf-8")).astype(dtype="S80"), + np.array("FSL3.3\x00 v2.25 NIfTI-1 Single file format".encode("utf-8")).astype(dtype="S80")]), + ("qform_code", [np.array(0).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), + ("sform_code", [np.array(2).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), + ("quatern_b", [np.array(0.0).astype(dtype="float32"), + np.array(-1.9451068140294884e-26).astype(dtype="float32")]), + ("quatern_c", [np.array(0.0).astype(dtype="float32"), np.array(-0.9967085123062134).astype(dtype="float32")]), + ("quatern_d", [np.array(0.0).astype(dtype="float32"), np.array(-0.0810687392950058).astype(dtype="float32")]), + ("qoffset_x", [np.array(0.0).astype(dtype="float32"), np.array(117.8551025390625).astype(dtype="float32")]), + ("qoffset_y", [np.array(0.0).astype(dtype="float32"), np.array(-35.72294235229492).astype(dtype="float32")]), + ("qoffset_z", [np.array(0.0).astype(dtype="float32"), np.array(-7.248798370361328).astype(dtype="float32")]), + ("srow_x", [np.array([ 1., 0., 0., 0.]).astype(dtype="float32"), + np.array([ -2.00000000e+00, 6.71471565e-19, 9.08102451e-18, + 1.17855103e+02]).astype(dtype="float32")]), + ("srow_y", [np.array([ 0., 3., 0., 0.]).astype(dtype="float32"), + np.array([ -6.71471565e-19, 1.97371149e+00, -3.55528235e-01, -3.57229424e+01]).astype(dtype="float32")]), + 
("srow_z", [np.array([ 0., 0., 2., 0.]).astype(dtype="float32"), + np.array([ 8.25548089e-18, 3.23207617e-01, 2.17108178e+00, + -7.24879837e+00]).astype(dtype="float32")])]) + + np.testing.assert_equal(actual_difference, expected_difference) + + +def test_display_diff(): + bogus_names = ["hellokitty.nii.gz", "privettovarish.nii.gz"] + + dict_values = OrderedDict([ + ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), + ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]) + ]) + + expected_output = "These files are different.\n" + "Field/File 1:hellokitty.nii.gz" \ + " " \ + "2:privettovarish.nii.gz \n" \ + "datatype " \ + "2 " \ + "4 \n" \ + "bitpix " \ + "8 16" \ + " " \ + "\n" + + assert_equal(display_diff(bogus_names, dict_values), expected_output) + + +def test_get_data_diff(): + # testing for identical files specifically as md5 may vary by computer + test_names = [pjoin(data_path, f) + for f in ('standard.nii.gz', 'standard.nii.gz')] + assert_equal(get_data_hash_diff(test_names), []) + + # testing the maximum relative and absolute differences' different use cases + test_array = np.arange(16).reshape(4, 4) + test_array_2 = np.arange(1, 17).reshape(4, 4) + test_array_3 = np.arange(2, 18).reshape(4, 4) + test_array_4 = np.arange(100).reshape(10, 10) + test_array_5 = np.arange(64).reshape(8, 8) + + # same shape, 2 files + assert_equal(get_data_diff([test_array, test_array_2]), + OrderedDict([('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)])])])) + + # same shape, 3 files + assert_equal(get_data_diff([test_array, test_array_2, test_array_3]), + OrderedDict([('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)]), + OrderedDict([('abs', 2), ('rel', 2.0)])]), + ('DATA(diff 2:)', [None, None, + OrderedDict([('abs', 1), ('rel', 0.66666666666666663)])])])) + + # same shape, 2 files, modified maximum abs/rel + assert_equal(get_data_diff([test_array, test_array_2], max_abs=2, max_rel=2), OrderedDict()) + + # different shape, 2 files + assert_equal(get_data_diff([test_array_2, test_array_4]), + OrderedDict([('DATA(diff 1:)', [None, {'CMP': 'incompat'}])])) + + # different shape, 3 files + assert_equal(get_data_diff([test_array_4, test_array_5, test_array_2]), + OrderedDict([('DATA(diff 1:)', [None, {'CMP': 'incompat'}, {'CMP': 'incompat'}]), + ('DATA(diff 2:)', [None, None, {'CMP': 'incompat'}])])) + + test_return = get_data_diff([test_array, test_array_2], dtype=np.float32) + assert_equal(type(test_return['DATA(diff 1:)'][1]['abs']), np.float32) + assert_equal(type(test_return['DATA(diff 1:)'][1]['rel']), np.float32) + + test_return_2 = get_data_diff([test_array, test_array_2, test_array_3]) + assert_equal(type(test_return_2['DATA(diff 1:)'][1]['abs']), np.float64) + assert_equal(type(test_return_2['DATA(diff 1:)'][1]['rel']), np.float64) + assert_equal(type(test_return_2['DATA(diff 2:)'][2]['abs']), np.float64) + assert_equal(type(test_return_2['DATA(diff 2:)'][2]['rel']), np.float64) + + +def test_main(): + test_names = [pjoin(data_path, f) + for f in ('standard.nii.gz', 'example4d.nii.gz')] + expected_difference = OrderedDict([ + ("regular", [np.asarray("".encode("utf-8")), np.asarray("r".encode("utf-8"))]), + ("dim_info", [np.asarray(0).astype(dtype="uint8"), np.asarray(57).astype(dtype="uint8")]), + ("dim", [np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype="int16"), + np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype="int16")]), + ("datatype", [np.array(2).astype(dtype="uint8"), 
np.array(4).astype(dtype="uint8")]), + ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]), + ("pixdim", [np.array([1., 1., 3., 2., 1., 1., 1., 1.]).astype(dtype="float32"), np.array( + [-1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.19999909e+00, 2.00000000e+03, 1.00000000e+00, + 1.00000000e+00, 1.00000000e+00]).astype(dtype="float32")]), + ("slice_end", [np.array(0).astype(dtype="uint8"), np.array(23).astype(dtype="uint8")]), + ("xyzt_units", [np.array(0).astype(dtype="uint8"), np.array(10).astype(dtype="uint8")]), + ("cal_max", [np.array(0.0).astype(dtype="float32"), np.asarray(1162.0).astype(dtype="float32")]), + ("descrip", [np.array("".encode("utf-8")).astype(dtype="S80"), + np.array("FSL3.3\x00 v2.25 NIfTI-1 Single file format".encode("utf-8")).astype(dtype="S80")]), + ("qform_code", [np.array(0).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), + ("sform_code", [np.array(2).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), + ("quatern_b", [np.array(0.0).astype(dtype="float32"), + np.array(-1.9451068140294884e-26).astype(dtype="float32")]), + ("quatern_c", [np.array(0.0).astype(dtype="float32"), np.array(-0.9967085123062134).astype(dtype="float32")]), + ("quatern_d", [np.array(0.0).astype(dtype="float32"), np.array(-0.0810687392950058).astype(dtype="float32")]), + ("qoffset_x", [np.array(0.0).astype(dtype="float32"), np.array(117.8551025390625).astype(dtype="float32")]), + ("qoffset_y", [np.array(0.0).astype(dtype="float32"), np.array(-35.72294235229492).astype(dtype="float32")]), + ("qoffset_z", [np.array(0.0).astype(dtype="float32"), np.array(-7.248798370361328).astype(dtype="float32")]), + ("srow_x", [np.array([1., 0., 0., 0.]).astype(dtype="float32"), + np.array([-2.00000000e+00, 6.71471565e-19, 9.08102451e-18, + 1.17855103e+02]).astype(dtype="float32")]), + ("srow_y", [np.array([0., 3., 0., 0.]).astype(dtype="float32"), + np.array([-6.71471565e-19, 1.97371149e+00, -3.55528235e-01, -3.57229424e+01]).astype( + dtype="float32")]), + ("srow_z", [np.array([0., 0., 2., 0.]).astype(dtype="float32"), + np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e+00, + -7.24879837e+00]).astype(dtype="float32")]), + ('DATA(md5)', ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'])]) + + with assert_raises(SystemExit): + np.testing.assert_equal(main(test_names, StringIO()), expected_difference) + + test_names_2 = [pjoin(data_path, f) for f in ('standard.nii.gz', 'standard.nii.gz')] + + with assert_raises(SystemExit): + assert_equal(main(test_names_2, StringIO()), "These files are identical.") diff --git a/nibabel/cmdline/trk2tck.py b/nibabel/cmdline/trk2tck.py new file mode 100644 index 0000000000..a55f7e95af --- /dev/null +++ b/nibabel/cmdline/trk2tck.py @@ -0,0 +1,39 @@ +""" +Convert tractograms (TRK -> TCK). +""" + +import os +import argparse + +import nibabel as nib + + +def parse_args(): + DESCRIPTION = "Convert tractograms (TRK -> TCK)." 
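+    # Unlike tck2trk above, no reference anatomy argument is needed here:
+    # TCK stores streamlines directly in RAS+ mm, so (as the code below
+    # suggests) the conversion is a straight re-serialization of the loaded
+    # tractogram.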
+ parser = argparse.ArgumentParser(description=DESCRIPTION) + parser.add_argument("tractograms", metavar="tractogram", nargs="+", + help="list of tractograms (.trk).") + parser.add_argument("-f", "--force", action="store_true", + help="overwrite existing output files.") + + args = parser.parse_args() + return args, parser + + +def main(): + args, parser = parse_args() + for tractogram in args.tractograms: + tractogram_format = nib.streamlines.detect_format(tractogram) + if tractogram_format is not nib.streamlines.TrkFile: + print("Skipping non TRK file: '{}'".format(tractogram)) + continue + + filename, _ = os.path.splitext(tractogram) + output_filename = filename + '.tck' + if os.path.isfile(output_filename) and not args.force: + msg = "Skipping existing file: '{}'. Use -f to overwrite." + print(msg.format(output_filename)) + continue + + trk = nib.streamlines.load(tractogram) + nib.streamlines.save(trk.tractogram, output_filename) diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index b88f02dd21..86185a7aef 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -28,8 +28,8 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): ---------- dataobj : object Object containg image data. It should be some object that retuns an - array from ``np.asanyarray``. It should have a ``shape`` attribute - or property + array from ``np.asanyarray``. It should have ``shape`` and ``ndim`` + attributes or properties header : None or mapping or header instance, optional metadata for this image format extra : None or mapping, optional @@ -344,7 +344,7 @@ def get_fdata(self, caching='fill', dtype=np.float64): if self._fdata_cache is not None: if self._fdata_cache.dtype.type == dtype.type: return self._fdata_cache - data = np.asanyarray(self._dataobj).astype(dtype) + data = np.asanyarray(self._dataobj).astype(dtype, copy=False) if caching == 'fill': self._fdata_cache = data return data @@ -392,6 +392,10 @@ def uncache(self): def shape(self): return self._dataobj.shape + @property + def ndim(self): + return self._dataobj.ndim + @deprecate_with_version('get_shape method is deprecated.\n' 'Please use the ``img.shape`` property ' 'instead.', diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index 814e7b85cd..c8abee91a0 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -52,8 +52,8 @@ class FutureWarningMixin(object): >>> with warnings.catch_warnings(record=True) as warns: ... d = D() - ... warns[0].message - FutureWarning("Please, don't use this class",) + ... warns[0].message.args[0] + "Please, don't use this class" """ warn_message = 'This class will be removed in future versions' diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 3c0957e11d..c2d343f739 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -680,6 +680,10 @@ def __init__(self, subheader): def shape(self): return self._shape + @property + def ndim(self): + return len(self.shape) + @property def is_proxy(self): return True diff --git a/nibabel/externals/oset.py b/nibabel/externals/oset.py new file mode 100644 index 0000000000..6bc6ed67a3 --- /dev/null +++ b/nibabel/externals/oset.py @@ -0,0 +1,85 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. 
+# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""OrderedSet implementation + +Borrowed from https://pypi.org/project/oset/ +Copyright (c) 2009, Raymond Hettinger, and others All rights reserved. +License: BSD-3 +""" + +from __future__ import absolute_import + +from collections import MutableSet + +KEY, PREV, NEXT = range(3) + + +class OrderedSet(MutableSet): + + def __init__(self, iterable=None): + self.end = end = [] + end += [None, end, end] # sentinel node for doubly linked list + self.map = {} # key --> [key, prev, next] + if iterable is not None: + self |= iterable + + def __len__(self): + return len(self.map) + + def __contains__(self, key): + return key in self.map + + def __getitem__(self, key): + return list(self)[key] + + def add(self, key): + if key not in self.map: + end = self.end + curr = end[PREV] + curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end] + + def discard(self, key): + if key in self.map: + key, prev, next = self.map.pop(key) + prev[NEXT] = next + next[PREV] = prev + + def __iter__(self): + end = self.end + curr = end[NEXT] + while curr is not end: + yield curr[KEY] + curr = curr[NEXT] + + def __reversed__(self): + end = self.end + curr = end[PREV] + while curr is not end: + yield curr[KEY] + curr = curr[PREV] + + def pop(self, last=True): + if not self: + raise KeyError('set is empty') + key = next(reversed(self)) if last else next(iter(self)) + self.discard(key) + return key + + def __repr__(self): + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self)) + + def __eq__(self, other): + if isinstance(other, OrderedSet): + return len(self) == len(other) and list(self) == list(other) + return set(self) == set(other) + + def __del__(self): + self.clear() # remove circular references \ No newline at end of file diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 4212574ef9..edce19c6cd 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -81,24 +81,24 @@ def _read_volume_info(fobj): return volume_info -def _pack_rgba(rgba): - """Pack an RGBA sequence into a single integer. +def _pack_rgb(rgb): + """Pack an RGB sequence into a single integer. Used by :func:`read_annot` and :func:`write_annot` to generate "annotation values" for a Freesurfer ``.annot`` file. Parameters ---------- - rgba : ndarray, shape (n, 4) - RGBA colors + rgb : ndarray, shape (n, 3) + RGB colors Returns ------- out : ndarray, shape (n, 1) Annotation values for each color. """ - bitshifts = 2 ** np.array([[0], [8], [16], [24]], dtype=rgba.dtype) - return rgba.dot(bitshifts) + bitshifts = 2 ** np.array([[0], [8], [16]], dtype=rgb.dtype) + return rgb.dot(bitshifts) def read_geometry(filepath, read_metadata=False, read_stamp=False): @@ -333,9 +333,13 @@ def read_annot(filepath, orig_ids=False): Annotation file format versions 1 and 2 are supported, corresponding to the "old-style" and "new-style" color table layout. + Note that the output color table ``ctab`` is in RGBT form, where T + (transparency) is 255 - alpha. + See: * https://surfer.nmr.mgh.harvard.edu/fswiki/LabelsClutsAnnotationFiles#Annotation * https://github.com/freesurfer/freesurfer/blob/dev/matlab/read_annotation.m + * https://github.com/freesurfer/freesurfer/blob/8b88b34/utils/colortab.c Parameters ---------- @@ -352,7 +356,7 @@ def read_annot(filepath, orig_ids=False): Annotation id at each vertex. If a vertex does not belong to any label and orig_ids=False, its id will be set to -1. 
ctab : ndarray, shape (n_labels, 5) - RGBA + label id colortable array. + RGBT + label id colortable array. names : list of str (python 2), list of bytes (python 3) The names of the labels. The length of the list is n_labels. """ @@ -384,7 +388,7 @@ def read_annot(filepath, orig_ids=False): ctab, names = _read_annot_ctab_new_format(fobj, -n_entries) # generate annotation values for each LUT entry - ctab[:, [4]] = _pack_rgba(ctab[:, :4]) + ctab[:, [4]] = _pack_rgb(ctab[:, :3]) if not orig_ids: ord = np.argsort(ctab[:, -1]) @@ -397,6 +401,9 @@ def read_annot(filepath, orig_ids=False): def _read_annot_ctab_old_format(fobj, n_entries): """Read in an old-style Freesurfer color table from `fobj`. + Note that the output color table ``ctab`` is in RGBT form, where T + (transparency) is 255 - alpha. + This function is used by :func:`read_annot`. Parameters @@ -412,7 +419,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): ------- ctab : ndarray, shape (n_entries, 5) - RGBA colortable array - the last column contains all zeros. + RGBT colortable array - the last column contains all zeros. names : list of str The names of the labels. The length of the list is n_entries. """ @@ -430,7 +437,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): name_length = np.fromfile(fobj, dt, 1)[0] name = np.fromfile(fobj, "|S%d" % name_length, 1)[0] names.append(name) - # read RGBA for this entry + # read RGBT for this entry ctab[i, :4] = np.fromfile(fobj, dt, 4) return ctab, names @@ -439,6 +446,9 @@ def _read_annot_ctab_old_format(fobj, n_entries): def _read_annot_ctab_new_format(fobj, ctab_version): """Read in a new-style Freesurfer color table from `fobj`. + Note that the output color table ``ctab`` is in RGBT form, where T + (transparency) is 255 - alpha. + This function is used by :func:`read_annot`. Parameters @@ -454,7 +464,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): ------- ctab : ndarray, shape (n_labels, 5) - RGBA colortable array - the last column contains all zeros. + RGBT colortable array - the last column contains all zeros. names : list of str The names of the labels. The length of the list is n_labels. """ @@ -480,7 +490,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): name_length = np.fromfile(fobj, dt, 1)[0] name = np.fromfile(fobj, "|S%d" % name_length, 1)[0] names.append(name) - # RGBA + # RGBT ctab[idx, :4] = np.fromfile(fobj, dt, 4) return ctab, names @@ -489,9 +499,13 @@ def _read_annot_ctab_new_format(fobj, ctab_version): def write_annot(filepath, labels, ctab, names, fill_ctab=True): """Write out a "new-style" Freesurfer annotation file. + Note that the color table ``ctab`` is in RGBT form, where T (transparency) + is 255 - alpha. + See: * https://surfer.nmr.mgh.harvard.edu/fswiki/LabelsClutsAnnotationFiles#Annotation * https://github.com/freesurfer/freesurfer/blob/dev/matlab/write_annotation.m + * https://github.com/freesurfer/freesurfer/blob/8b88b34/utils/colortab.c Parameters ---------- @@ -500,7 +514,7 @@ def write_annot(filepath, labels, ctab, names, fill_ctab=True): labels : ndarray, shape (n_vertices,) Annotation id at each vertex. ctab : ndarray, shape (n_labels, 5) - RGBA + label id colortable array. + RGBT + label id colortable array. names : list of str The names of the labels. The length of the list is n_labels. 
fill_ctab : {True, False} optional @@ -523,8 +537,8 @@ def write_string(s): # Generate annotation values for each ctab entry if fill_ctab: - ctab = np.hstack((ctab[:, :4], _pack_rgba(ctab[:, :4]))) - elif not np.array_equal(ctab[:, [4]], _pack_rgba(ctab[:, :4])): + ctab = np.hstack((ctab[:, :4], _pack_rgb(ctab[:, :3]))) + elif not np.array_equal(ctab[:, [4]], _pack_rgb(ctab[:, :3])): warnings.warn('Annotation values in {} will be incorrect'.format( filepath)) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index b3dd1f9bc5..bf92bd962c 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -255,7 +255,7 @@ def get_zooms(self): .. _mghformat: https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat#line-82 ''' # Do not return time zoom (TR) if 3D image - tzoom = (self['tr'],)[:self._ndims() > 3] + tzoom = (self['tr'],) if self._ndims() > 3 else () return tuple(self._structarr['delta']) + tzoom def set_zooms(self, zooms): @@ -276,10 +276,15 @@ def set_zooms(self, zooms): ndims = self._ndims() if len(zooms) > ndims: raise HeaderDataError('Expecting %d zoom values' % ndims) - if np.any(zooms <= 0): - raise HeaderDataError('zooms must be positive') + if np.any(zooms[:3] <= 0): + raise HeaderDataError('Spatial (first three) zooms must be ' + 'positive; got {!r}' + ''.format(tuple(zooms[:3]))) hdr['delta'] = zooms[:3] if len(zooms) == 4: + if zooms[3] < 0: + raise HeaderDataError('TR must be non-negative; got {!r}' + ''.format(zooms[3])) hdr['tr'] = zooms[3] def get_data_shape(self): diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 83da38ed20..1b6065f351 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -16,7 +16,7 @@ from .. 
import (read_geometry, read_morph_data, read_annot, read_label, write_geometry, write_morph_data, write_annot) -from ..io import _pack_rgba +from ..io import _pack_rgb from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data from ...fileslice import strided_scalar @@ -236,8 +236,7 @@ def test_read_write_annot(): # Generate the annotation values for each LUT entry rgbal[:, 4] = (rgbal[:, 0] + rgbal[:, 1] * (2 ** 8) + - rgbal[:, 2] * (2 ** 16) + - rgbal[:, 3] * (2 ** 24)) + rgbal[:, 2] * (2 ** 16)) annot_path = 'c.annot' with InTemporaryDirectory(): write_annot(annot_path, labels, rgbal, names, fill_ctab=False) @@ -287,8 +286,7 @@ def test_write_annot_fill_ctab(): rgbal = np.hstack((rgba, np.zeros((nlabels, 1), dtype=np.int32))) rgbal[:, 4] = (rgbal[:, 0] + rgbal[:, 1] * (2 ** 8) + - rgbal[:, 2] * (2 ** 16) + - rgbal[:, 3] * (2 ** 24)) + rgbal[:, 2] * (2 ** 16)) with clear_and_catch_warnings() as w: write_annot(annot_path, labels, rgbal, names, fill_ctab=False) assert_true( @@ -307,7 +305,7 @@ def gen_old_annot_file(fpath, nverts, labels, rgba, names): dt = '>i' vdata = np.zeros((nverts, 2), dtype=dt) vdata[:, 0] = np.arange(nverts) - vdata[:, [1]] = _pack_rgba(rgba[labels, :]) + vdata[:, [1]] = _pack_rgb(rgba[labels, :3]) fbytes = b'' # number of vertices fbytes += struct.pack(dt, nverts) diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 776c461e18..47e54080c3 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -159,6 +159,8 @@ def test_set_zooms(): (1, 1, 1, 1, 5)): with assert_raises(HeaderDataError): h.set_zooms(zooms) + # smoke test for tr=0 + h.set_zooms((1, 1, 1, 0)) def bad_dtype_mgh(): diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index f136a070be..c1a0b7133a 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -9,6 +9,7 @@ ''' Define supported image classes and names ''' from .analyze import AnalyzeImage +from .brikhead import AFNIImage from .cifti2 import Cifti2Image from .freesurfer import MGHImage from .gifti import GiftiImage @@ -31,7 +32,7 @@ Cifti2Image, Nifti2Image, # Cifti2 before Nifti2 Spm2AnalyzeImage, Spm99AnalyzeImage, AnalyzeImage, Minc1Image, Minc2Image, MGHImage, - PARRECImage, GiftiImage] + PARRECImage, GiftiImage, AFNIImage] # DEPRECATED: mapping of names to classes and class functionality @@ -88,7 +89,12 @@ def __getitem__(self, *args, **kwargs): 'ext': '.par', 'has_affine': True, 'makeable': False, - 'rw': False}) + 'rw': False}, + afni={'class': AFNIImage, + 'ext': '.brik', + 'has_affine': True, + 'makeable': False, + 'rw': False}) class ExtMapRecoder(Recoder): @@ -107,6 +113,7 @@ def __getitem__(self, *args, **kwargs): ('mgh', '.mgh'), ('mgz', '.mgz'), ('par', '.par'), + ('brik', '.brik') )) # Image classes known to require spatial axes to be first in index ordering. @@ -114,7 +121,7 @@ def __getitem__(self, *args, **kwargs): # here. KNOWN_SPATIAL_FIRST = (Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, AnalyzeImage, - MGHImage, PARRECImage) + MGHImage, PARRECImage, AFNIImage) def spatial_axes_first(img): diff --git a/nibabel/info.py b/nibabel/info.py index 83982dd8f2..204412c5d2 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -18,9 +18,9 @@ # (pre-release) version. 
_version_major = 2 _version_minor = 3 -_version_micro = 0 -_version_extra = 'dev' -# _version_extra = '' +_version_micro = 1 +# _version_extra = 'dev' +_version_extra = '' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" __version__ = "%s.%s.%s%s" % (_version_major, diff --git a/nibabel/minc1.py b/nibabel/minc1.py index 5eb077ada0..57042f32f0 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -252,6 +252,10 @@ def __init__(self, minc_file): def shape(self): return self._shape + @property + def ndim(self): + return len(self.shape) + @property def is_proxy(self): return True diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 24c1808df5..056d0dbee9 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -1573,14 +1573,23 @@ def set_slice_times(self, slice_times): so_recoder = self._field_recoders['slice_code'] labels = so_recoder.value_set('label') labels.remove('unknown') + + matching_labels = [] for label in labels: if np.all(st_order == self._slice_time_order( label, n_timed)): - break - else: + matching_labels.append(label) + + if not matching_labels: raise HeaderDataError('slice ordering of %s fits ' 'with no known scheme' % st_order) + if len(matching_labels) > 1: + warnings.warn( + 'Multiple slice orders satisfy: %s. Choosing the first one' + % ', '.join(matching_labels) + ) + label = matching_labels[0] # Set values into header hdr['slice_start'] = slice_start hdr['slice_end'] = slice_end diff --git a/nibabel/openers.py b/nibabel/openers.py index 0f57fa406a..f64ab23b37 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -18,16 +18,22 @@ # is indexed_gzip present and modern? try: - from indexed_gzip import SafeIndexedGzipFile, __version__ as version + import indexed_gzip as igzip + version = igzip.__version__ HAVE_INDEXED_GZIP = True - if StrictVersion(version) < StrictVersion('0.6.0'): + # < 0.7 - no good + if StrictVersion(version) < StrictVersion('0.7.0'): warnings.warn('indexed_gzip is present, but too old ' - '(>= 0.6.0 required): {})'.format(version)) + '(>= 0.7.0 required): {})'.format(version)) HAVE_INDEXED_GZIP = False - - del version + # >= 0.8 SafeIndexedGzipFile renamed to IndexedGzipFile + elif StrictVersion(version) < StrictVersion('0.8.0'): + IndexedGzipFile = igzip.SafeIndexedGzipFile + else: + IndexedGzipFile = igzip.IndexedGzipFile + del igzip, version except ImportError: HAVE_INDEXED_GZIP = False @@ -80,9 +86,11 @@ def readinto(self, buf): def _gzip_open(filename, mode='rb', compresslevel=9, keep_open=False): - # use indexed_gzip if possible for faster read access - if keep_open and mode == 'rb' and HAVE_INDEXED_GZIP: - gzip_file = SafeIndexedGzipFile(filename) + # use indexed_gzip if possible for faster read access. If keep_open == + # True, we tell IndexedGzipFile to keep the file handle open. Otherwise + # the IndexedGzipFile will close/open the file on each read. 
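+    # (The drop_handles keyword passed below is assumed to be available in
+    # all indexed_gzip versions admitted by the >= 0.7.0 check above.)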
+ if HAVE_INDEXED_GZIP and mode == 'rb': + gzip_file = IndexedGzipFile(filename, drop_handles=not keep_open) # Fall-back to built-in GzipFile (wrapped with the BufferedGzipFile class # defined above) diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 5fd460b4e1..87e1ac81e6 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -622,6 +622,10 @@ def __init__(self, file_like, header, mmap=True, scaling='dv'): def shape(self): return self._shape + @property + def ndim(self): + return len(self.shape) + @property def dtype(self): return self._dtype diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index b88b3e8538..9c3fef4fbb 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -140,6 +140,7 @@ from .filebasedimages import ImageFileError # flake8: noqa; for back-compat from .viewers import OrthoSlicer3D from .volumeutils import shape_zoom_affine +from .fileslice import canonical_slicers from .deprecated import deprecate_with_version from .orientations import apply_orientation, inv_ornt_aff @@ -321,9 +322,103 @@ class ImageDataError(Exception): pass +class SpatialFirstSlicer(object): + ''' Slicing interface that returns a new image with an updated affine + + Checks that an image's first three axes are spatial + ''' + def __init__(self, img): + # Local import to avoid circular import on module load + from .imageclasses import spatial_axes_first + if not spatial_axes_first(img): + raise ValueError("Cannot predict position of spatial axes for " + "Image type " + img.__class__.__name__) + self.img = img + + def __getitem__(self, slicer): + try: + slicer = self.check_slicing(slicer) + except ValueError as err: + raise IndexError(*err.args) + + dataobj = self.img.dataobj[slicer] + if any(dim == 0 for dim in dataobj.shape): + raise IndexError("Empty slice requested") + + affine = self.slice_affine(slicer) + return self.img.__class__(dataobj.copy(), affine, self.img.header) + + def check_slicing(self, slicer, return_spatial=False): + ''' Canonicalize slicers and check for scalar indices in spatial dims + + Parameters + ---------- + slicer : object + something that can be used to slice an array as in + ``arr[sliceobj]`` + return_spatial : bool + return only slices along spatial dimensions (x, y, z) + + Returns + ------- + slicer : object + Validated slicer object that will slice image's `dataobj` + without collapsing spatial dimensions + ''' + slicer = canonical_slicers(slicer, self.img.shape) + # We can get away with this because we've checked the image's + # first three axes are spatial. + # More general slicers will need to be smarter, here. + spatial_slices = slicer[:3] + for subslicer in spatial_slices: + if subslicer is None: + raise IndexError("New axis not permitted in spatial dimensions") + elif isinstance(subslicer, int): + raise IndexError("Scalar indices disallowed in spatial dimensions; " + "Use `[x]` or `x:x+1`.") + return spatial_slices if return_spatial else slicer + + def slice_affine(self, slicer): + """ Retrieve affine for current image, if sliced by a given index + + Applies scaling if down-sampling is applied, and adjusts the intercept + to account for any cropping. 
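+
+        For example (hypothetical slicer), slicing with ``[8:, ::2, :]``
+        builds the transform::
+
+            [[1, 0, 0, 8],
+             [0, 2, 0, 0],
+             [0, 0, 1, 0],
+             [0, 0, 0, 1]]
+
+        and the returned affine is the image affine right-multiplied by it,
+        doubling the voxel size along the second axis and shifting the
+        origin by 8 voxels along the first.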
+ + Parameters + ---------- + slicer : object + something that can be used to slice an array as in + ``arr[sliceobj]`` + + Returns + ------- + affine : (4,4) ndarray + Affine with updated scale and intercept + """ + slicer = self.check_slicing(slicer, return_spatial=True) + + # Transform: + # sx 0 0 tx + # 0 sy 0 ty + # 0 0 sz tz + # 0 0 0 1 + transform = np.eye(4, dtype=int) + + for i, subslicer in enumerate(slicer): + if isinstance(subslicer, slice): + if subslicer.step == 0: + raise ValueError("slice step cannot be 0") + transform[i, i] = subslicer.step if subslicer.step is not None else 1 + transform[i, 3] = subslicer.start or 0 + # If slicer is None, nothing to do + + return self.img.affine.dot(transform) + + class SpatialImage(DataobjImage): ''' Template class for volumetric (3D/4D) images ''' header_class = SpatialHeader + ImageSlicer = SpatialFirstSlicer def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): @@ -461,12 +556,38 @@ def from_image(klass, img): klass.header_class.from_header(img.header), extra=img.extra.copy()) + @property + def slicer(self): + """ Slicer object that returns cropped and subsampled images + + The image is resliced in the current orientation; no rotation or + resampling is performed, and no attempt is made to filter the image + to avoid `aliasing`_. + + The affine matrix is updated with the new intercept (and scales, if + down-sampling is used), so that all values are found at the same RAS + locations. + + Slicing may include non-spatial dimensions. + However, this method does not currently adjust the repetition time in + the image header. + + .. _aliasing: https://en.wikipedia.org/wiki/Aliasing + """ + return self.ImageSlicer(self) + + def __getitem__(self, idx): ''' No slicing or dictionary interface for images + + Use the slicer attribute to perform cropping and subsampling at your + own risk. 
''' - raise TypeError("Cannot slice image objects; consider slicing image " - "array data with `img.dataobj[slice]` or " - "`img.get_data()[slice]`") + raise TypeError( + "Cannot slice image objects; consider using `img.slicer[slice]` " + "to generate a sliced image (see documentation for caveats) or " + "slicing image array data with `img.dataobj[slice]` or " + "`img.get_data()[slice]`") def orthoview(self): """Plot the image using OrthoSlicer3D diff --git a/nibabel/tests/data/bad_attribute+orig.HEAD b/nibabel/tests/data/bad_attribute+orig.HEAD new file mode 100644 index 0000000000..95fbdeb309 --- /dev/null +++ b/nibabel/tests/data/bad_attribute+orig.HEAD @@ -0,0 +1,133 @@ + +type = string-attribute +name = DATASET_NAME +count = 5 +'none~ + +type = string-attribute +name = TYPESTRING +count = 15 +'3DIM_HEAD_ANAT~ + +type = string-attribute +name = IDCODE_STRING +count = 27 +'AFN_-zxZ0OyZs8eEtm9syGBNdA~ + +type = string-attribute +name = IDCODE_DATE +count = 25 +'Sun Oct 1 21:13:09 2017~ + +type = integer-attribute +name = SCENE_DATA +count = 8 + 0 2 0 -999 -999 + -999 -999 -999 + +type = string-attribute +name = LABEL_1 +count = 5 +'none~ + +type = string-attribute +name = LABEL_2 +count = 5 +'none~ + +type = integer-attribute +name = ORIENT_SPECIFIC +count = 3 + 0 3 4 + +type = float-attribute +name = ORIGIN +count = 3 + -49.5 -82.312 -52.3511 + +type = float-attribute +name = DELTA +count = 3 + 3 3 3 + +type = float-attribute +name = IJK_TO_DICOM +count = 12 + 3 0 0 -49.5 0 + 3 0 -82.312 0 0 + 3 -52.3511 + +type = float-attribute +name = IJK_TO_DICOM_REAL +count = 12 + 3 0 0 -49.5 0 + 3 0 -82.312 0 0 + 3 -52.3511 + +type = float-attribute +name = BRICK_STATS +count = 6 + 0 13722 0 10051 0 + 9968 + +type = integer-attribute +name = TAXIS_NUMS +count = 8 + 3 25 77002 -999 -999 + -999 -999 -999 + +type = float-attribute +name = TAXIS_FLOATS +count = 8 + 0 3 0 -52.3511 3 + -999999 -999999 -999999 + +type = float-attribute +name = TAXIS_OFFSETS +count = 25 + 0.3260869 1.826087 0.3913043 1.891304 0.4565217 + 1.956521 0.5217391 2.021739 0.5869564 2.086956 + 0.6521738 2.152174 0.7173912 2.217391 0.7826086 + 2.282609 0.8478259 2.347826 0.9130433 2.413044 + 0.9782607 2.478261 1.043478 2.543479 1.108696 + +type = integer-attribute +name = DATASET_RANK +count = 8 + 3 3 0 0 0 + 0 0 0 + +type = integer-attribute +name = DATASET_DIMENSIONS +count = 5 + 33 41 25 0 0 + +type = integer-attribute +name = BRICK_TYPES +count = 3 + 1 1 1 + +type = float-attribute +name = BRICK_FLOAT_FACS +count = 3 + 0 0 0 + +type = string-attribute +name = TEMPLATE_SPACE +count = 5 +'ORIG~ + +type = integer-attribute +name = INT_CMAP +count = 1 + 0 + +type = integer-attribute +name = BYTEORDER_STRING +count = 10 +'LSB_FIRST~ + +type = string-attribute +name = BRICK_LABS +count = 9 +'#0~#1~#2~ diff --git a/nibabel/tests/data/bad_datatype+orig.HEAD b/nibabel/tests/data/bad_datatype+orig.HEAD new file mode 100644 index 0000000000..27b3a56abb --- /dev/null +++ b/nibabel/tests/data/bad_datatype+orig.HEAD @@ -0,0 +1,133 @@ + +type = string-attribute +name = DATASET_NAME +count = 5 +'none~ + +type = string-attribute +name = TYPESTRING +count = 15 +'3DIM_HEAD_ANAT~ + +type = string-attribute +name = IDCODE_STRING +count = 27 +'AFN_-zxZ0OyZs8eEtm9syGBNdA~ + +type = string-attribute +name = IDCODE_DATE +count = 25 +'Sun Oct 1 21:13:09 2017~ + +type = integer-attribute +name = SCENE_DATA +count = 8 + 0 2 0 -999 -999 + -999 -999 -999 + +type = string-attribute +name = LABEL_1 +count = 5 +'none~ + +type = string-attribute +name = 
LABEL_2 +count = 5 +'none~ + +type = integer-attribute +name = ORIENT_SPECIFIC +count = 3 + 0 3 4 + +type = float-attribute +name = ORIGIN +count = 3 + -49.5 -82.312 -52.3511 + +type = float-attribute +name = DELTA +count = 3 + 3 3 3 + +type = float-attribute +name = IJK_TO_DICOM +count = 12 + 3 0 0 -49.5 0 + 3 0 -82.312 0 0 + 3 -52.3511 + +type = float-attribute +name = IJK_TO_DICOM_REAL +count = 12 + 3 0 0 -49.5 0 + 3 0 -82.312 0 0 + 3 -52.3511 + +type = float-attribute +name = BRICK_STATS +count = 6 + 0 13722 0 10051 0 + 9968 + +type = integer-attribute +name = TAXIS_NUMS +count = 8 + 3 25 77002 -999 -999 + -999 -999 -999 + +type = float-attribute +name = TAXIS_FLOATS +count = 8 + 0 3 0 -52.3511 3 + -999999 -999999 -999999 + +type = float-attribute +name = TAXIS_OFFSETS +count = 25 + 0.3260869 1.826087 0.3913043 1.891304 0.4565217 + 1.956521 0.5217391 2.021739 0.5869564 2.086956 + 0.6521738 2.152174 0.7173912 2.217391 0.7826086 + 2.282609 0.8478259 2.347826 0.9130433 2.413044 + 0.9782607 2.478261 1.043478 2.543479 1.108696 + +type = integer-attribute +name = DATASET_RANK +count = 8 + 3 3 0 0 0 + 0 0 0 + +type = integer-attribute +name = DATASET_DIMENSIONS +count = 5 + 33 41 25 0 0 + +type = integer-attribute +name = BRICK_TYPES +count = 3 + 1 3 5 + +type = float-attribute +name = BRICK_FLOAT_FACS +count = 3 + 0 0 0 + +type = string-attribute +name = TEMPLATE_SPACE +count = 5 +'ORIG~ + +type = integer-attribute +name = INT_CMAP +count = 1 + 0 + +type = string-attribute +name = BYTEORDER_STRING +count = 10 +'LSB_FIRST~ + +type = string-attribute +name = BRICK_LABS +count = 9 +'#0~#1~#2~ diff --git a/nibabel/tests/data/example4d+orig.HEAD b/nibabel/tests/data/example4d+orig.HEAD new file mode 100644 index 0000000000..a43b839d0a --- /dev/null +++ b/nibabel/tests/data/example4d+orig.HEAD @@ -0,0 +1,133 @@ + +type = string-attribute +name = DATASET_NAME +count = 5 +'none~ + +type = string-attribute +name = TYPESTRING +count = 15 +'3DIM_HEAD_ANAT~ + +type = string-attribute +name = IDCODE_STRING +count = 27 +'AFN_-zxZ0OyZs8eEtm9syGBNdA~ + +type = string-attribute +name = IDCODE_DATE +count = 25 +'Sun Oct 1 21:13:09 2017~ + +type = integer-attribute +name = SCENE_DATA +count = 8 + 0 2 0 -999 -999 + -999 -999 -999 + +type = string-attribute +name = LABEL_1 +count = 5 +'none~ + +type = string-attribute +name = LABEL_2 +count = 5 +'none~ + +type = integer-attribute +name = ORIENT_SPECIFIC +count = 3 + 0 3 4 + +type = float-attribute +name = ORIGIN +count = 3 + -49.5 -82.312 -52.3511 + +type = float-attribute +name = DELTA +count = 3 + 3 3 3 + +type = float-attribute +name = IJK_TO_DICOM +count = 12 + 3 0 0 -49.5 0 + 3 0 -82.312 0 0 + 3 -52.3511 + +type = float-attribute +name = IJK_TO_DICOM_REAL +count = 12 + 3 0 0 -49.5 0 + 3 0 -82.312 0 0 + 3 -52.3511 + +type = float-attribute +name = BRICK_STATS +count = 6 + 0 13722 0 10051 0 + 9968 + +type = integer-attribute +name = TAXIS_NUMS +count = 8 + 3 25 77002 -999 -999 + -999 -999 -999 + +type = float-attribute +name = TAXIS_FLOATS +count = 8 + 0 3 0 -52.3511 3 + -999999 -999999 -999999 + +type = float-attribute +name = TAXIS_OFFSETS +count = 25 + 0.3260869 1.826087 0.3913043 1.891304 0.4565217 + 1.956521 0.5217391 2.021739 0.5869564 2.086956 + 0.6521738 2.152174 0.7173912 2.217391 0.7826086 + 2.282609 0.8478259 2.347826 0.9130433 2.413044 + 0.9782607 2.478261 1.043478 2.543479 1.108696 + +type = integer-attribute +name = DATASET_RANK +count = 8 + 3 3 0 0 0 + 0 0 0 + +type = integer-attribute +name = DATASET_DIMENSIONS +count = 5 + 33 41 25 0 0 + 
+type = integer-attribute +name = BRICK_TYPES +count = 3 + 1 1 1 + +type = float-attribute +name = BRICK_FLOAT_FACS +count = 3 + 0 0 0 + +type = string-attribute +name = TEMPLATE_SPACE +count = 5 +'ORIG~ + +type = integer-attribute +name = INT_CMAP +count = 1 + 0 + +type = string-attribute +name = BYTEORDER_STRING +count = 10 +'LSB_FIRST~ + +type = string-attribute +name = BRICK_LABS +count = 9 +'#0~#1~#2~ diff --git a/nibabel/tests/data/scaled+tlrc.HEAD b/nibabel/tests/data/scaled+tlrc.HEAD new file mode 100644 index 0000000000..a13b054e2d --- /dev/null +++ b/nibabel/tests/data/scaled+tlrc.HEAD @@ -0,0 +1,116 @@ + +type = string-attribute +name = TYPESTRING +count = 15 +'3DIM_HEAD_ANAT~ + +type = string-attribute +name = IDCODE_STRING +count = 27 +'AFN_vLKn9e5VumKelWXNeq4SWA~ + +type = string-attribute +name = IDCODE_DATE +count = 25 +'Tue Jan 23 20:05:10 2018~ + +type = integer-attribute +name = SCENE_DATA +count = 8 + 2 2 0 -999 -999 + -999 -999 -999 + +type = string-attribute +name = LABEL_1 +count = 5 +'zyxt~ + +type = string-attribute +name = LABEL_2 +count = 5 +'zyxt~ + +type = string-attribute +name = DATASET_NAME +count = 5 +'zyxt~ + +type = integer-attribute +name = ORIENT_SPECIFIC +count = 3 + 1 2 4 + +type = float-attribute +name = ORIGIN +count = 3 + 66 87 -54 + +type = float-attribute +name = DELTA +count = 3 + -3 -3 3 + +type = float-attribute +name = IJK_TO_DICOM +count = 12 + -3 0 0 66 0 + -3 0 87 0 0 + 3 -54 + +type = float-attribute +name = IJK_TO_DICOM_REAL +count = 12 + -3 0 0 66 0 + -3 0 87 0 0 + 3 -54 + +type = float-attribute +name = BRICK_STATS +count = 2 + 1.941682e-07 0.001272461 + +type = integer-attribute +name = DATASET_RANK +count = 8 + 3 1 0 0 0 + 0 0 0 + +type = integer-attribute +name = DATASET_DIMENSIONS +count = 5 + 47 54 43 0 0 + +type = integer-attribute +name = BRICK_TYPES +count = 1 + 1 + +type = float-attribute +name = BRICK_FLOAT_FACS +count = 1 + 3.883363e-08 + +type = string-attribute +name = BRICK_LABS +count = 3 +'#0~ + +type = string-attribute +name = BRICK_KEYWORDS +count = 1 +'~ + +type = string-attribute +name = TEMPLATE_SPACE +count = 5 +'TLRC~ + +type = integer-attribute +name = INT_CMAP +count = 1 + 0 + +type = string-attribute +name = BYTEORDER_STRING +count = 10 +'LSB_FIRST~ diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index 537ed8f87d..187d5940df 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -20,8 +20,7 @@ import numpy as np -from ..arrayproxy import (ArrayProxy, KEEP_FILE_OPEN_DEFAULT, is_proxy, - reshape_dataobj) +from ..arrayproxy import (ArrayProxy, is_proxy, reshape_dataobj) from ..openers import ImageOpener from ..nifti1 import Nifti1Header @@ -342,73 +341,19 @@ def check_mmap(hdr, offset, proxy_class, # An image opener class which counts how many instances of itself have been # created class CountingImageOpener(ImageOpener): - num_openers = 0 - def __init__(self, *args, **kwargs): - super(CountingImageOpener, self).__init__(*args, **kwargs) CountingImageOpener.num_openers += 1 -def test_keep_file_open_true_false_invalid(): - # Test the behaviour of the keep_file_open __init__ flag, when it is set to - # True or False. 
+def _count_ImageOpeners(proxy, data, voxels): CountingImageOpener.num_openers = 0 - fname = 'testdata' - dtype = np.float32 - data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) - voxels = np.random.randint(0, 10, (10, 3)) - with InTemporaryDirectory(): - with open(fname, 'wb') as fobj: - fobj.write(data.tostring(order='F')) - # Test that ArrayProxy(keep_file_open=True) only creates one file - # handle, and that ArrayProxy(keep_file_open=False) creates a file - # handle on every data access. - with mock.patch('nibabel.arrayproxy.ImageOpener', CountingImageOpener): - proxy_no_kfp = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open=False) - assert not proxy_no_kfp._keep_file_open - for i in range(voxels.shape[0]): - x , y, z = [int(c) for c in voxels[i, :]] - assert proxy_no_kfp[x, y, z] == x * 100 + y * 10 + z - assert CountingImageOpener.num_openers == i + 1 - CountingImageOpener.num_openers = 0 - proxy_kfp = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open=True) - assert proxy_kfp._keep_file_open - for i in range(voxels.shape[0]): - x , y, z = [int(c) for c in voxels[i, :]] - assert proxy_kfp[x, y, z] == x * 100 + y * 10 + z - assert CountingImageOpener.num_openers == 1 - del proxy_kfp - del proxy_no_kfp - # Test that the keep_file_open flag has no effect if an open file - # handle is passed in - with open(fname, 'rb') as fobj: - for kfo in (True, False, 'auto'): - proxy = ArrayProxy(fobj, ((10, 10, 10), dtype), - keep_file_open=kfo) - if kfo == 'auto': - kfo = False - assert proxy._keep_file_open is kfo - for i in range(voxels.shape[0]): - assert proxy[x, y, z] == x * 100 + y * 10 + z - assert not fobj.closed - del proxy - assert not fobj.closed - assert fobj.closed - # Test invalid values of keep_file_open - with assert_raises(ValueError): - ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=0) - with assert_raises(ValueError): - ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=1) - with assert_raises(ValueError): - ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=55) - with assert_raises(ValueError): - ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open='autob') - with assert_raises(ValueError): - ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open='cauto') + # expected data is defined in the test_keep_file_open_* tests + for i in range(voxels.shape[0]): + x, y, z = [int(c) for c in voxels[i, :]] + assert proxy[x, y, z] == x * 100 + y * 10 + z + return CountingImageOpener.num_openers @contextlib.contextmanager @@ -418,70 +363,127 @@ def patch_keep_file_open_default(value): yield -def test_keep_file_open_auto(): +def test_keep_file_open_true_false_invalid(): # Test the behaviour of the keep_file_open __init__ flag, when it is set to - # 'auto' - dtype = np.float32 - data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) - with InTemporaryDirectory(): - fname = 'testdata.gz' - with gzip.open(fname, 'wb') as fobj: - fobj.write(data.tostring(order='F')) - # If have_indexed_gzip, then keep_file_open should be True - with patch_indexed_gzip(True): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open='auto') - assert proxy._keep_file_open - # If no have_indexed_gzip, then keep_file_open should be False - with patch_indexed_gzip(False): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open='auto') - assert not proxy._keep_file_open - - -def test_keep_file_open_default(): - # Test the behaviour of the keep_file_open __init__ flag, when the - # arrayproxy.KEEP_FILE_OPEN_DEFAULT value is changed + # True or False. 
Expected behaviour is as follows: + # keep_open | igzip present | persist ImageOpener | igzip.drop_handles + # | and is gzip file | | + # ----------|------------------|---------------------|------------------- + # False | False | False | n/a + # False | True | True | True + # True | False | True | n/a + # True | True | True | False + # 'auto' | False | False | n/a + # 'auto' | True | True | False + # + # Each test tuple contains: + # - file type - gzipped ('gz') or not ('bin'), or an open file handle + # ('open') + # - keep_file_open value passed to ArrayProxy + # - whether or not indexed_gzip is present + # - expected value for internal ArrayProxy._persist_opener flag + # - expected value for internal ArrayProxy._keep_file_open flag + tests = [ + # open file handle - kfo and have_igzip are both irrelevant + ('open', False, False, False, False), + ('open', False, True, False, False), + ('open', True, False, False, False), + ('open', True, True, False, False), + ('open', 'auto', False, False, False), + ('open', 'auto', True, False, False), + # non-gzip file - have_igzip is irrelevant, decision should be made + # solely from kfo flag + ('bin', False, False, False, False), + ('bin', False, True, False, False), + ('bin', True, False, True, True), + ('bin', True, True, True, True), + ('bin', 'auto', False, False, False), + ('bin', 'auto', True, False, False), + # gzip file. If igzip is present, we persist the ImageOpener. If kfo + # is 'auto': + # - if igzip is present, kfo -> True + # - otherwise, kfo -> False + ('gz', False, False, False, False), + ('gz', False, True, True, False), + ('gz', True, False, True, True), + ('gz', True, True, True, True), + ('gz', 'auto', False, False, False), + ('gz', 'auto', True, True, True)] + dtype = np.float32 data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) + voxels = np.random.randint(0, 10, (10, 3)) + + for test in tests: + filetype, kfo, have_igzip, exp_persist, exp_kfo = test + with InTemporaryDirectory(), \ + mock.patch('nibabel.openers.ImageOpener', CountingImageOpener), \ + patch_indexed_gzip(have_igzip): + fname = 'testdata.{}'.format(filetype) + # create the test data file + if filetype == 'gz': + with gzip.open(fname, 'wb') as fobj: + fobj.write(data.tostring(order='F')) + else: + with open(fname, 'wb') as fobj: + fobj.write(data.tostring(order='F')) + # pass in a file name or open file handle. If the latter, we open + # two file handles, because we're going to create two proxies + # below. 
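            # Editor's aside (not part of the patch): the truth table above
            # can be read as a tiny pure function. This is a hedged
            # paraphrase of the expected mapping for file-name inputs, not
            # the library's implementation; open file handles always map to
            # (False, False).
            #
            #     def expected_flags(keep_file_open, have_igzip, is_gzip):
            #         """(persist_opener, keep_file_open) per the table."""
            #         use_igzip = have_igzip and is_gzip
            #         if keep_file_open == 'auto':
            #             keep_file_open = use_igzip
            #         return (keep_file_open or use_igzip, keep_file_open)
            #
            #     assert expected_flags(False, True, True) == (True, False)
            #     assert expected_flags('auto', True, True) == (True, True)
            #     assert expected_flags('auto', False, True) == (False, False)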
+ if filetype == 'open': + fobj1 = open(fname, 'rb') + fobj2 = open(fname, 'rb') + else: + fobj1 = fname + fobj2 = fname + try: + proxy = ArrayProxy(fobj1, ((10, 10, 10), dtype), + keep_file_open=kfo) + # We also test that we get the same behaviour when the + # KEEP_FILE_OPEN_DEFAULT flag is changed + with patch_keep_file_open_default(kfo): + proxy_def = ArrayProxy(fobj2, ((10, 10, 10), dtype)) + # check internal flags + assert proxy._persist_opener == exp_persist + assert proxy._keep_file_open == exp_kfo + assert proxy_def._persist_opener == exp_persist + assert proxy_def._keep_file_open == exp_kfo + # check persist_opener behaviour - whether one imageopener is + # created for the lifetime of the ArrayProxy, or one is + # created on each access + if exp_persist: + assert _count_ImageOpeners(proxy, data, voxels) == 1 + assert _count_ImageOpeners(proxy_def, data, voxels) == 1 + else: + assert _count_ImageOpeners(proxy, data, voxels) == 10 + assert _count_ImageOpeners(proxy_def, data, voxels) == 10 + # if indexed_gzip is active, check that the file object was + # created correctly - the _opener.fobj will be a + # MockIndexedGzipFile, defined in test_openers.py + if filetype == 'gz' and have_igzip: + assert proxy._opener.fobj._drop_handles == (not exp_kfo) + # if we were using an open file handle, check that the proxy + # didn't close it + if filetype == 'open': + assert not fobj1.closed + assert not fobj2.closed + finally: + del proxy + del proxy_def + if filetype == 'open': + fobj1.close() + fobj2.close() + # Test invalid values of keep_file_open with InTemporaryDirectory(): - fname = 'testdata.gz' - with gzip.open(fname, 'wb') as fobj: + fname = 'testdata' + with open(fname, 'wb') as fobj: fobj.write(data.tostring(order='F')) - # The default value of KEEP_FILE_OPEN_DEFAULT should cause - # keep_file_open to be False, regardless of whether or not indexed_gzip - # is present - assert KEEP_FILE_OPEN_DEFAULT is False - with patch_indexed_gzip(False): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert not proxy._keep_file_open - with patch_indexed_gzip(True): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert not proxy._keep_file_open - # KEEP_FILE_OPEN_DEFAULT=True should cause keep_file_open to be True, - # regardless of whether or not indexed_gzip is present - with patch_keep_file_open_default(True), patch_indexed_gzip(True): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open - with patch_keep_file_open_default(True), patch_indexed_gzip(False): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open - # KEEP_FILE_OPEN_DEFAULT=auto should cause keep_file_open to be True - # or False, depending on whether indeed_gzip is present, - with patch_keep_file_open_default('auto'), patch_indexed_gzip(True): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open - with patch_keep_file_open_default('auto'), patch_indexed_gzip(False): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert not proxy._keep_file_open - # KEEP_FILE_OPEN_DEFAULT=any other value should cuse an error to be - # raised - with patch_keep_file_open_default('badvalue'): - assert_raises(ValueError, ArrayProxy, fname, ((10, 10, 10), - dtype)) - with patch_keep_file_open_default(None): - assert_raises(ValueError, ArrayProxy, fname, ((10, 10, 10), - dtype)) + with assert_raises(ValueError): + ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=55) + with assert_raises(ValueError): + ArrayProxy(fname, ((10, 10, 10), dtype), 
keep_file_open='autob') + with assert_raises(ValueError): + ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open='cauto') def test_pickle_lock(): diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py new file mode 100644 index 0000000000..c1632c06c2 --- /dev/null +++ b/nibabel/tests/test_brikhead.py @@ -0,0 +1,150 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +from __future__ import division, print_function, absolute_import + +from os.path import join as pjoin + +import numpy as np + +from .. import load, Nifti1Image +from .. import brikhead + +from nose.tools import (assert_true, assert_equal, assert_raises) +from numpy.testing import assert_array_equal +from ..testing import data_path + +from .test_fileslice import slicer_samples +from .test_helpers import assert_data_similar + +EXAMPLE_IMAGES = [ + dict( + head=pjoin(data_path, 'example4d+orig.HEAD'), + fname=pjoin(data_path, 'example4d+orig.BRIK.gz'), + shape=(33, 41, 25, 3), + dtype=np.int16, + affine=np.array([[-3.0,0,0,49.5], + [0,-3.0,0,82.312], + [0,0,3.0,-52.3511], + [0,0,0,1.0]]), + zooms=(3., 3., 3., 3.), + data_summary=dict( + min=0, + max=13722, + mean=4266.76024636), + is_proxy=True, + space='ORIG', + labels=['#0', '#1', '#2'], + scaling=None), + dict( + head=pjoin(data_path, 'scaled+tlrc.HEAD'), + fname=pjoin(data_path, 'scaled+tlrc.BRIK'), + shape=(47, 54, 43, 1.), + dtype=np.int16, + affine=np.array([[3.0,0,0,-66.], + [0,3.0,0,-87.], + [0,0,3.0,-54.], + [0,0,0,1.0]]), + zooms=(3., 3., 3., 0.), + data_summary=dict( + min=1.9416814999999998e-07, + max=0.0012724615542099998, + mean=0.00023919645351876782), + is_proxy=True, + space='TLRC', + labels=['#0'], + scaling=np.array([ 3.88336300e-08]), + ) +] + +EXAMPLE_BAD_IMAGES = [ + dict( + head=pjoin(data_path, 'bad_datatype+orig.HEAD'), + err=brikhead.AFNIImageError + ), + dict( + head=pjoin(data_path, 'bad_attribute+orig.HEAD'), + err=brikhead.AFNIHeaderError + ) +] + +class TestAFNIHeader(object): + module = brikhead + test_files = EXAMPLE_IMAGES + + def test_makehead(self): + for tp in self.test_files: + head1 = self.module.AFNIHeader.from_fileobj(tp['head']) + head2 = self.module.AFNIHeader.from_header(head1) + assert_equal(head1, head2) + with assert_raises(self.module.AFNIHeaderError): + self.module.AFNIHeader.from_header(header=None) + with assert_raises(self.module.AFNIHeaderError): + self.module.AFNIHeader.from_header(tp['fname']) + + +class TestAFNIImage(object): + module = brikhead + test_files = EXAMPLE_IMAGES + + def test_brikheadfile(self): + for tp in self.test_files: + brik = self.module.load(tp['fname']) + assert_equal(brik.get_data_dtype().type, tp['dtype']) + assert_equal(brik.shape, tp['shape']) + assert_equal(brik.header.get_zooms(), tp['zooms']) + assert_array_equal(brik.affine, tp['affine']) + assert_equal(brik.header.get_space(), tp['space']) + data = brik.get_data() + assert_equal(data.shape, tp['shape']) + assert_array_equal(brik.dataobj.scaling, tp['scaling']) + assert_equal(brik.header.get_volume_labels(), tp['labels']) + + def test_load(self): + # Check highest level load of brikhead works + for tp in self.test_files: + img = self.module.load(tp['head']) + data = img.get_data() + 
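            # Editor's aside (not part of the patch, hedged): the affines in
            # EXAMPLE_IMAGES above follow from the .HEAD attributes. AFNI
            # stores ORIGIN and DELTA in DICOM-style RAI world coordinates,
            # so converting to nibabel's RAS+ convention flips the sign of
            # the first two axes:
            #
            #     import numpy as np
            #     delta = np.array([3., 3., 3.])                 # DELTA
            #     origin = np.array([-49.5, -82.312, -52.3511])  # ORIGIN
            #     aff = np.eye(4)
            #     aff[:3, :3] = np.diag(delta)
            #     aff[:3, 3] = origin
            #     ras = np.diag([-1., -1., 1., 1.]).dot(aff)
            #     # ras == [[-3, 0, 0, 49.5], [0, -3, 0, 82.312],
            #     #         [0, 0, 3, -52.3511], [0, 0, 0, 1]]
            #
            # which matches the example4d+orig affine asserted above.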
assert_equal(data.shape, tp['shape']) + # min, max, mean values + assert_data_similar(data, tp) + # check if file can be converted to nifti + ni_img = Nifti1Image.from_image(img) + assert_array_equal(ni_img.affine, tp['affine']) + assert_array_equal(ni_img.get_data(), data) + + def test_array_proxy_slicing(self): + # Test slicing of array proxy + for tp in self.test_files: + img = self.module.load(tp['fname']) + arr = img.get_data() + prox = img.dataobj + assert_true(prox.is_proxy) + for sliceobj in slicer_samples(img.shape): + assert_array_equal(arr[sliceobj], prox[sliceobj]) + + +class TestBadFiles(object): + module = brikhead + test_files = EXAMPLE_BAD_IMAGES + + def test_brikheadfile(self): + for tp in self.test_files: + with assert_raises(tp['err']): + self.module.load(tp['head']) + + +class TestBadVars(object): + module = brikhead + vars = ['type = badtype-attribute\nname = BRICK_TYPES\ncount = 1\n1\n', + 'type = integer-attribute\ncount = 1\n1\n'] + + def test_unpack_var(self): + for var in self.vars: + with assert_raises(self.module.AFNIHeaderError): + self.module._unpack_var(var) diff --git a/nibabel/tests/test_diff.py b/nibabel/tests/test_diff.py new file mode 100644 index 0000000000..4f99ca145f --- /dev/null +++ b/nibabel/tests/test_diff.py @@ -0,0 +1,74 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" Test diff +""" +from __future__ import division, print_function, absolute_import + +from os.path import (dirname, join as pjoin, abspath) +import numpy as np + + +DATA_PATH = abspath(pjoin(dirname(__file__), 'data')) + +from nibabel.cmdline.diff import are_values_different + + +def test_diff_values_int(): + large = 10**30 + assert not are_values_different(0, 0) + assert not are_values_different(1, 1) + assert not are_values_different(large, large) + assert are_values_different(0, 1) + assert are_values_different(1, 2) + assert are_values_different(1, large) + + +def test_diff_values_float(): + assert not are_values_different(0., 0.) + assert not are_values_different(0., 0., 0.) # can take more + assert not are_values_different(1.1, 1.1) + assert are_values_different(0., 1.1) + assert are_values_different(0., 0, 1.1) + assert are_values_different(1., 2.) 
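Editor's aside (not part of the patch): ``are_values_different`` takes a
reference value plus any number of candidates and reports whether any
candidate differs, treating NaNs as equal to each other. A hedged,
scalars-only sketch of that contract (the real implementation in
``nibabel.cmdline.diff`` also handles arrays and dtypes)::

    import math

    def scalars_different(value0, *values):
        for value in values:
            if type(value0) != type(value):
                return True   # 1, 1.0 and "1" all count as different
            both_nan = (isinstance(value0, float)
                        and math.isnan(value0) and math.isnan(value))
            if not (both_nan or value == value0):
                return True
        return False

    assert not scalars_different(0., 0., 0.)
    assert scalars_different(0., 0., 1.1)
    assert not scalars_different(float('nan'), float('nan'))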
+ + +def test_diff_values_mixed(): + assert are_values_different(1.0, 1) + assert are_values_different(1.0, "1") + assert are_values_different(1, "1") + assert are_values_different(1, None) + assert are_values_different(np.ndarray([0]), 'hey') + assert not are_values_different(None, None) + + +def test_diff_values_array(): + from numpy import nan, array, inf + a_int = array([1, 2]) + a_float = a_int.astype(float) + + assert are_values_different(a_int, a_float) + assert are_values_different(a_int, a_int, a_float) + assert are_values_different(np.arange(3), np.arange(1, 4)) + assert are_values_different(np.arange(3), np.arange(4)) + assert are_values_different(np.arange(4), np.arange(4).reshape((2, 2))) + # no broadcasting should kick in - shape difference + assert are_values_different(array([1]), array([1, 1])) + assert not are_values_different(a_int, a_int) + assert not are_values_different(a_float, a_float) + + # nans - we consider them "the same" for the purpose of these comparisons + assert not are_values_different(nan, nan) + assert not are_values_different(nan, nan, nan) + assert are_values_different(nan, nan, 1) + assert are_values_different(1, nan, nan) + assert not are_values_different(array([nan, nan]), array([nan, nan])) + assert not are_values_different(array([nan, nan]), array([nan, nan]), array([nan, nan])) + assert not are_values_different(array([nan, 1]), array([nan, 1])) + assert are_values_different(array([nan, nan]), array([nan, 1])) + assert are_values_different(array([0, nan]), array([nan, 0])) + assert are_values_different(array([1, 2, 3, nan]), array([nan, 3, 5, 4])) + assert are_values_different(nan, 1.0) + assert are_values_different(array([1, 2, 3, nan]), array([3, 4, 5, nan])) + # and some inf should not be a problem + assert not are_values_different(array([0, inf]), array([0, inf])) + assert are_values_different(array([0, inf]), array([inf, 0])) diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 3022265df4..96376270b1 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -30,6 +30,16 @@ LD_INFO = type_info(np.longdouble) +def dtt2dict(dtt): + """ Create info dictionary from numpy type + """ + info = np.finfo(dtt) + return dict(min=info.min, max=info.max, + nexp=info.nexp, nmant=info.nmant, + minexp=info.minexp, maxexp=info.maxexp, + width=np.dtype(dtt).itemsize) + + def test_type_info(): # Test routine to get min, max, nmant, nexp for dtt in np.sctypes['int'] + np.sctypes['uint']: @@ -42,42 +52,35 @@ def test_type_info(): assert_equal(infod['min'].dtype.type, dtt) assert_equal(infod['max'].dtype.type, dtt) for dtt in IEEE_floats + [np.complex64, np.complex64]: - info = np.finfo(dtt) infod = type_info(dtt) - assert_equal(dict(min=info.min, max=info.max, - nexp=info.nexp, nmant=info.nmant, - minexp=info.minexp, maxexp=info.maxexp, - width=np.dtype(dtt).itemsize), - infod) + assert_equal(dtt2dict(dtt), infod) assert_equal(infod['min'].dtype.type, dtt) assert_equal(infod['max'].dtype.type, dtt) # What is longdouble? 
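Editor's aside (not part of the patch): the (nmant, nexp, itemsize)
triple checked below is enough to fingerprint the common longdouble
layouts. A hedged sketch::

    import numpy as np

    LD_LAYOUTS = {
        (52, 11, 8): 'longdouble is plain float64',
        (63, 15, 12): 'Intel 80-bit extended, 12-byte storage',
        (63, 15, 16): 'Intel 80-bit extended, 16-byte storage',
        (112, 15, 16): 'true IEEE binary128',
        (106, 11, 16): 'IBM double-double (PPC head/tail)',
    }

    info = np.finfo(np.longdouble)
    key = (info.nmant, info.nexp, np.dtype(np.longdouble).itemsize)
    print(LD_LAYOUTS.get(key, 'unrecognized layout %s' % (key,)))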
- info = np.finfo(np.longdouble) - dbl_info = np.finfo(np.float64) + ld_dict = dtt2dict(np.longdouble) + dbl_dict = dtt2dict(np.float64) infod = type_info(np.longdouble) - width = np.dtype(np.longdouble).itemsize - vals = (info.nmant, info.nexp, width) + vals = tuple(ld_dict[k] for k in ('nmant', 'nexp', 'width')) # Information for PPC head / tail doubles from: # https://developer.apple.com/library/mac/#documentation/Darwin/Reference/Manpages/man3/float.3.html if vals in ((52, 11, 8), # longdouble is same as double (63, 15, 12), (63, 15, 16), # intel 80 bit (112, 15, 16), # real float128 (106, 11, 16)): # PPC head, tail doubles, expected values - assert_equal(dict(min=info.min, max=info.max, - minexp=info.minexp, maxexp=info.maxexp, - nexp=info.nexp, nmant=info.nmant, width=width), - infod) - elif vals == (1, 1, 16): # bust info for PPC head / tail longdoubles - assert_equal(dict(min=dbl_info.min, max=dbl_info.max, - minexp=-1022, maxexp=1024, - nexp=11, nmant=106, width=16), - infod) + pass + elif vals == (105, 11, 16): # bust info for PPC head / tail longdoubles + # min and max broken, copy from infod + ld_dict.update({k: infod[k] for k in ('min', 'max')}) + elif vals == (1, 1, 16): # another bust info for PPC head / tail longdoubles + ld_dict = dbl_dict.copy() + ld_dict.update(dict(nmant=106, width=16)) elif vals == (52, 15, 12): - exp_res = type_info(np.float64) - exp_res['width'] = width - assert_equal(exp_res, infod) + width = ld_dict['width'] + ld_dict = dbl_dict.copy() + ld_dict['width'] = width else: - raise ValueError("Unexpected float type to test") + raise ValueError("Unexpected float type {} to test".format(np.longdouble)) + assert_equal(ld_dict, infod) def test_nmant(): @@ -103,7 +106,7 @@ def test_check_nmant_nexp(): # Check against type_info for t in ok_floats(): ti = type_info(t) - if ti['nmant'] != 106: # This check does not work for PPC double pair + if ti['nmant'] not in (105, 106): # This check does not work for PPC double pair assert_true(_check_nmant(t, ti['nmant'])) # Test fails for longdouble after blacklisting of OSX powl as of numpy # 1.12 - see https://github.com/numpy/numpy/issues/8307 diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 8ee7c22cc7..ba51878715 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -27,6 +27,7 @@ import warnings from functools import partial +from itertools import product from six import string_types import numpy as np @@ -39,7 +40,7 @@ Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, MGHImage, Minc1Image, Minc2Image, is_proxy) from ..spatialimages import SpatialImage -from .. import minc1, minc2, parrec +from .. import minc1, minc2, parrec, brikhead from nose import SkipTest from nose.tools import (assert_true, assert_false, assert_raises, assert_equal) @@ -54,7 +55,7 @@ from .test_minc1 import EXAMPLE_IMAGES as MINC1_EXAMPLE_IMAGES from .test_minc2 import EXAMPLE_IMAGES as MINC2_EXAMPLE_IMAGES from .test_parrec import EXAMPLE_IMAGES as PARREC_EXAMPLE_IMAGES - +from .test_brikhead import EXAMPLE_IMAGES as AFNI_EXAMPLE_IMAGES class GenericImageAPI(ValidateAPI): """ General image validation API """ @@ -195,156 +196,23 @@ class DataInterfaceMixin(GetSetDtypeMixin): Use this mixin if your image has a ``dataobj`` property that contains an array or an array-like thing. 
""" + meth_names = ('get_fdata', 'get_data') + def validate_data_interface(self, imaker, params): # Check get data returns array, and caches img = imaker() assert_equal(img.shape, img.dataobj.shape) + assert_equal(img.ndim, len(img.shape)) assert_data_similar(img.dataobj, params) - meth_names = ('get_fdata', 'get_data') - for meth_name in meth_names: + for meth_name in self.meth_names: if params['is_proxy']: - # Parameters assert this is an array proxy - img = imaker() - # Does is_proxy agree? - assert_true(is_proxy(img.dataobj)) - # Confirm it is not a numpy array - assert_false(isinstance(img.dataobj, np.ndarray)) - # Confirm it can be converted to a numpy array with asarray - proxy_data = np.asarray(img.dataobj) - proxy_copy = proxy_data.copy() - # Not yet cached, proxy image: in_memory is False - assert_false(img.in_memory) - # Load with caching='unchanged' - method = getattr(img, meth_name) - data = method(caching='unchanged') - # Still not cached - assert_false(img.in_memory) - # Default load, does caching - data = method() - # Data now cached. in_memory is True if either of the get_data - # or get_fdata caches are not-None - assert_true(img.in_memory) - # We previously got proxy_data from disk, but data, which we - # have just fetched, is a fresh copy. - assert_false(proxy_data is data) - # asarray on dataobj, applied above, returns same numerical - # values. This might not be true get_fdata operating on huge - # integers, but lets assume that's not true here. - assert_array_equal(proxy_data, data) - # Now caching='unchanged' does nothing, returns cached version - data_again = method(caching='unchanged') - assert_true(data is data_again) - # caching='fill' does nothing because the cache is already full - data_yet_again = method(caching='fill') - assert_true(data is data_yet_again) - # changing array data does not change proxy data, or reloaded - # data - data[:] = 42 - assert_array_equal(proxy_data, proxy_copy) - assert_array_equal(np.asarray(img.dataobj), proxy_copy) - # It does change the result of get_data - assert_array_equal(method(), 42) - # until we uncache - img.uncache() - # Which unsets in_memory - assert_false(img.in_memory) - assert_array_equal(method(), proxy_copy) - # Check caching='fill' does cache data - img = imaker() - method = getattr(img, meth_name) - assert_false(img.in_memory) - data = method(caching='fill') - assert_true(img.in_memory) - data_again = method() - assert_true(data is data_again) - # Check the interaction of caching with get_data, get_fdata. - # Caching for `get_data` should have no effect on caching for - # get_fdata, and vice versa. - # Modify the cached data - data[:] = 43 - # Load using the other data fetch method - other_name = set(meth_names).difference({meth_name}).pop() - other_method = getattr(img, other_name) - other_data = other_method() - # We get the original data, not the modified cache - assert_array_equal(proxy_data, other_data) - assert_false(np.all(data == other_data)) - # We can modify the other cache, without affecting the first - other_data[:] = 44 - assert_array_equal(other_method(), 44) - assert_false(np.all(method() == other_method())) - # Check that caching refreshes for new floating point type. 
- if meth_name == 'get_fdata': - img.uncache() - fdata = img.get_fdata() - assert_equal(fdata.dtype, np.float64) - fdata[:] = 42 - fdata_back = img.get_fdata() - assert_array_equal(fdata_back, 42) - assert_equal(fdata_back.dtype, np.float64) - # New data dtype, no caching, doesn't use or alter cache - fdata_new_dt = img.get_fdata(caching='unchanged', dtype='f4') - # We get back the original read, not the modified cache - assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) - assert_equal(fdata_new_dt.dtype, np.float32) - # The original cache stays in place, for default float64 - assert_array_equal(img.get_fdata(), 42) - # And for not-default float32, because we haven't cached - fdata_new_dt[:] = 43 - fdata_new_dt = img.get_fdata(caching='unchanged', dtype='f4') - assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) - # Until we reset with caching='fill', at which point we - # drop the original float64 cache, and have a float32 cache - fdata_new_dt = img.get_fdata(caching='fill', dtype='f4') - assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) - # We're using the cache, for dtype='f4' reads - fdata_new_dt[:] = 43 - assert_array_equal(img.get_fdata(dtype='f4'), 43) - # We've lost the cache for float64 reads (no longer 42) - assert_array_equal(img.get_fdata(), proxy_data) - else: # not proxy - for caching in (None, 'fill', 'unchanged'): - img = imaker() - method = getattr(img, meth_name) - get_data_func = (method if caching is None else - partial(method, caching=caching)) - assert_true(isinstance(img.dataobj, np.ndarray)) - assert_true(img.in_memory) - data = get_data_func() - # Returned data same object as underlying dataobj if using - # old ``get_data`` method, or using newer ``get_fdata`` - # method, where original array was float64. - dataobj_is_data = (img.dataobj.dtype == np.float64 - or method == img.get_data) - # Set something to the output array. - data[:] = 42 - get_result_changed = np.all(get_data_func() == 42) - assert_equal(get_result_changed, - dataobj_is_data or caching != 'unchanged') - if dataobj_is_data: - assert_true(data is img.dataobj) - # Changing array data changes - # data - assert_array_equal(np.asarray(img.dataobj), 42) - # Uncache has no effect - img.uncache() - assert_array_equal(get_data_func(), 42) - else: - assert_false(data is img.dataobj) - assert_false(np.all(np.asarray(img.dataobj) == 42)) - # Uncache does have an effect - img.uncache() - assert_false(np.all(get_data_func() == 42)) - # in_memory is always true for array images, regardless of - # cache state. 
- img.uncache() - assert_true(img.in_memory) - # Values to get_(f)data caching parameter must be 'fill' or - # 'unchanged' - assert_raises(ValueError, img.get_data, caching='something') - assert_raises(ValueError, img.get_fdata, caching='something') + self._check_proxy_interface(imaker, meth_name) + else: # Array image + self._check_array_interface(imaker, meth_name) # Data shape is same as image shape - assert_equal(img.shape, method().shape) + assert_equal(img.shape, getattr(img, meth_name)().shape) + # Data ndim is same as image ndim + assert_equal(img.ndim, getattr(img, meth_name)().ndim) # Values to get_data caching parameter must be 'fill' or # 'unchanged' assert_raises(ValueError, img.get_data, caching='something') @@ -354,6 +222,159 @@ def validate_data_interface(self, imaker, params): # So is in_memory assert_raises(AttributeError, setattr, img, 'in_memory', False) + def _check_proxy_interface(self, imaker, meth_name): + # Parameters assert this is an array proxy + img = imaker() + # Does is_proxy agree? + assert_true(is_proxy(img.dataobj)) + # Confirm it is not a numpy array + assert_false(isinstance(img.dataobj, np.ndarray)) + # Confirm it can be converted to a numpy array with asarray + proxy_data = np.asarray(img.dataobj) + proxy_copy = proxy_data.copy() + # Not yet cached, proxy image: in_memory is False + assert_false(img.in_memory) + # Load with caching='unchanged' + method = getattr(img, meth_name) + data = method(caching='unchanged') + # Still not cached + assert_false(img.in_memory) + # Default load, does caching + data = method() + # Data now cached. in_memory is True if either of the get_data + # or get_fdata caches are not-None + assert_true(img.in_memory) + # We previously got proxy_data from disk, but data, which we + # have just fetched, is a fresh copy. + assert_false(proxy_data is data) + # asarray on dataobj, applied above, returns same numerical + # values. This might not be true get_fdata operating on huge + # integers, but lets assume that's not true here. + assert_array_equal(proxy_data, data) + # Now caching='unchanged' does nothing, returns cached version + data_again = method(caching='unchanged') + assert_true(data is data_again) + # caching='fill' does nothing because the cache is already full + data_yet_again = method(caching='fill') + assert_true(data is data_yet_again) + # changing array data does not change proxy data, or reloaded + # data + data[:] = 42 + assert_array_equal(proxy_data, proxy_copy) + assert_array_equal(np.asarray(img.dataobj), proxy_copy) + # It does change the result of get_data + assert_array_equal(method(), 42) + # until we uncache + img.uncache() + # Which unsets in_memory + assert_false(img.in_memory) + assert_array_equal(method(), proxy_copy) + # Check caching='fill' does cache data + img = imaker() + method = getattr(img, meth_name) + assert_false(img.in_memory) + data = method(caching='fill') + assert_true(img.in_memory) + data_again = method() + assert_true(data is data_again) + # Check the interaction of caching with get_data, get_fdata. + # Caching for `get_data` should have no effect on caching for + # get_fdata, and vice versa. 
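+        # Editor's aside (not part of the patch, hedged): the caching
+        # contract exercised by this mixin, in miniature - for any proxy
+        # image ``img``:
+        #
+        #     img.in_memory                           # False, no cache yet
+        #     a = img.get_fdata(caching='unchanged')  # read without caching
+        #     img.in_memory                           # still False
+        #     b = img.get_fdata()                     # default read caches
+        #     assert img.get_fdata() is b             # later reads hit cache
+        #     img.uncache()                           # drop the cache
+        #     img.in_memory                           # False again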
+ # Modify the cached data + data[:] = 43 + # Load using the other data fetch method + other_name = set(self.meth_names).difference({meth_name}).pop() + other_method = getattr(img, other_name) + other_data = other_method() + # We get the original data, not the modified cache + assert_array_equal(proxy_data, other_data) + assert_false(np.all(data == other_data)) + # We can modify the other cache, without affecting the first + other_data[:] = 44 + assert_array_equal(other_method(), 44) + assert_false(np.all(method() == other_method())) + if meth_name != 'get_fdata': + return + # Check that caching refreshes for new floating point type. + img.uncache() + fdata = img.get_fdata() + assert_equal(fdata.dtype, np.float64) + fdata[:] = 42 + fdata_back = img.get_fdata() + assert_array_equal(fdata_back, 42) + assert_equal(fdata_back.dtype, np.float64) + # New data dtype, no caching, doesn't use or alter cache + fdata_new_dt = img.get_fdata(caching='unchanged', dtype='f4') + # We get back the original read, not the modified cache + assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) + assert_equal(fdata_new_dt.dtype, np.float32) + # The original cache stays in place, for default float64 + assert_array_equal(img.get_fdata(), 42) + # And for not-default float32, because we haven't cached + fdata_new_dt[:] = 43 + fdata_new_dt = img.get_fdata(caching='unchanged', dtype='f4') + assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) + # Until we reset with caching='fill', at which point we + # drop the original float64 cache, and have a float32 cache + fdata_new_dt = img.get_fdata(caching='fill', dtype='f4') + assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) + # We're using the cache, for dtype='f4' reads + fdata_new_dt[:] = 43 + assert_array_equal(img.get_fdata(dtype='f4'), 43) + # We've lost the cache for float64 reads (no longer 42) + assert_array_equal(img.get_fdata(), proxy_data) + + def _check_array_interface(self, imaker, meth_name): + for caching in (None, 'fill', 'unchanged'): + self._check_array_caching(imaker, meth_name, caching) + + def _check_array_caching(self, imaker, meth_name, caching): + img = imaker() + method = getattr(img, meth_name) + get_data_func = (method if caching is None else + partial(method, caching=caching)) + assert_true(isinstance(img.dataobj, np.ndarray)) + assert_true(img.in_memory) + data = get_data_func() + # Returned data same object as underlying dataobj if using + # old ``get_data`` method, or using newer ``get_fdata`` + # method, where original array was float64. + arr_dtype = img.dataobj.dtype + dataobj_is_data = arr_dtype == np.float64 or method == img.get_data + # Set something to the output array. + data[:] = 42 + get_result_changed = np.all(get_data_func() == 42) + assert_equal(get_result_changed, + dataobj_is_data or caching != 'unchanged') + if dataobj_is_data: + assert_true(data is img.dataobj) + # Changing array data changes + # data + assert_array_equal(np.asarray(img.dataobj), 42) + # Uncache has no effect + img.uncache() + assert_array_equal(get_data_func(), 42) + else: + assert_false(data is img.dataobj) + assert_false(np.all(np.asarray(img.dataobj) == 42)) + # Uncache does have an effect + img.uncache() + assert_false(np.all(get_data_func() == 42)) + # in_memory is always true for array images, regardless of + # cache state. + img.uncache() + assert_true(img.in_memory) + if meth_name != 'get_fdata': + return + # Return original array from get_fdata only if the input array is the + # requested dtype. 
+ float_types = np.sctypes['float'] + if arr_dtype not in float_types: + return + for float_type in float_types: + data = get_data_func(dtype=float_type) + assert_equal(data is img.dataobj, arr_dtype == float_type) + def validate_data_deprecated(self, imaker, params): # Check _data property still exists, but raises warning img = imaker() @@ -376,6 +397,17 @@ def validate_shape(self, imaker, params): # Read only assert_raises(AttributeError, setattr, img, 'shape', np.eye(4)) + def validate_ndim(self, imaker, params): + # Validate shape + img = imaker() + # Same as expected ndim + assert_equal(img.ndim, len(params['shape'])) + # Same as array ndim if passed + if 'data' in params: + assert_equal(img.ndim, params['data'].ndim) + # Read only + assert_raises(AttributeError, setattr, img, 'ndim', 5) + def validate_shape_deprecated(self, imaker, params): # Check deprecated get_shape API img = imaker() @@ -385,7 +417,6 @@ def validate_shape_deprecated(self, imaker, params): assert_equal(len(w), 1) - class HeaderShapeMixin(object): """ Tests that header shape can be set and got @@ -469,40 +500,49 @@ class MakeImageAPI(LoadImageAPI): header_maker = None # Example shapes for created images example_shapes = ((2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)) + # Supported dtypes for storing to disk + storable_dtypes = (np.uint8, np.int16, np.float32) def obj_params(self): # Return any obj_params from superclass for func, params in super(MakeImageAPI, self).obj_params(): yield func, params - # Create a new images + # Create new images aff = np.diag([1, 2, 3, 1]) def make_imaker(arr, aff, header=None): return lambda: self.image_maker(arr, aff, header) - for shape in self.example_shapes: - for dtype in (np.uint8, np.int16, np.float32): - arr = np.arange(np.prod(shape), dtype=np.float32).reshape(shape) - hdr = self.header_maker() - hdr.set_data_dtype(dtype) - func = make_imaker(arr.copy(), aff, hdr) - params = dict( - dtype=dtype, - affine=aff, - data=arr, - shape=shape, - is_proxy=False) - yield func, params - if not self.can_save: - return - # Add a proxy image - # We assume that loading from a fileobj creates a proxy image - params['is_proxy'] = True - def prox_imaker(): - img = self.image_maker(arr, aff, hdr) - rt_img = bytesio_round_trip(img) - return self.image_maker(rt_img.dataobj, aff, rt_img.header) - yield prox_imaker, params + def make_prox_imaker(arr, aff, hdr): + + def prox_imaker(): + img = self.image_maker(arr, aff, hdr) + rt_img = bytesio_round_trip(img) + return self.image_maker(rt_img.dataobj, aff, rt_img.header) + + return prox_imaker + + for shape, stored_dtype in product(self.example_shapes, + self.storable_dtypes): + # To make sure we do not trigger scaling, always use the + # stored_dtype for the input array. + arr = np.arange(np.prod(shape), dtype=stored_dtype).reshape(shape) + hdr = self.header_maker() + hdr.set_data_dtype(stored_dtype) + func = make_imaker(arr.copy(), aff, hdr) + params = dict( + dtype=stored_dtype, + affine=aff, + data=arr, + shape=shape, + is_proxy=False) + yield make_imaker(arr.copy(), aff, hdr), params + if not self.can_save: + continue + # Create proxy images from these array images, by storing via BytesIO. + # We assume that loading from a fileobj creates a proxy image. 
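+            # Editor's aside (not part of the patch, hedged):
+            # ``bytesio_round_trip`` serializes the image into an in-memory
+            # BytesIO file map and reloads it, so the reloaded image's
+            # ``dataobj`` is a file-backed proxy rather than the original
+            # ndarray - which is why ``is_proxy`` flips to True for these
+            # parameters.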
+ params['is_proxy'] = True + yield make_prox_imaker(arr.copy(), aff, hdr), params class ImageHeaderAPI(MakeImageAPI): @@ -520,6 +560,8 @@ class TestAnalyzeAPI(ImageHeaderAPI): has_scaling = False can_save = True standard_extension = '.img' + # Supported dtypes for storing to disk + storable_dtypes = (np.uint8, np.int16, np.int32, np.float32, np.float64) class TestSpatialImageAPI(TestAnalyzeAPI): @@ -596,3 +638,9 @@ class TestMGHAPI(ImageHeaderAPI): has_scaling = True can_save = True standard_extension = '.mgh' + + +class TestAFNIAPI(LoadImageAPI): + loader = brikhead.load + klass = image_maker = brikhead.AFNIImage + example_images = AFNI_EXAMPLE_IMAGES diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 1c6fb989b5..78f876ec7d 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -38,7 +38,12 @@ from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) -from ..testing import data_path, suppress_warnings, runif_extra_has +from ..testing import ( + clear_and_catch_warnings, + data_path, + runif_extra_has, + suppress_warnings, +) from . import test_analyze as tana from . import test_spm99analyze as tspm @@ -558,6 +563,22 @@ def test_slice_times(self): assert_equal(hdr['slice_end'], 5) assert_array_almost_equal(hdr['slice_duration'], 0.1) + # Ambiguous case + hdr2 = self.header_class() + hdr2.set_dim_info(slice=2) + hdr2.set_slice_duration(0.1) + hdr2.set_data_shape((1, 1, 2)) + with clear_and_catch_warnings() as w: + warnings.simplefilter("always") + hdr2.set_slice_times([0.1, 0]) + assert len(w) == 1 + # but always must be choosing sequential one first + assert_equal(hdr2.get_value_label('slice_code'), 'sequential decreasing') + # and the other direction + hdr2.set_slice_times([0, 0.1]) + assert_equal(hdr2.get_value_label('slice_code'), 'sequential increasing') + + def test_intents(self): ehdr = self.header_class() ehdr.set_intent('t test', (10,), name='some score') diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index ebde721732..6b5f231fc3 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -12,9 +12,10 @@ from gzip import GzipFile from bz2 import BZ2File from io import BytesIO, UnsupportedOperation +from distutils.version import StrictVersion from ..py3k import asstr, asbytes -from ..openers import Opener, ImageOpener +from ..openers import Opener, ImageOpener, HAVE_INDEXED_GZIP from ..tmpdirs import InTemporaryDirectory from ..volumeutils import BinOpener @@ -67,6 +68,8 @@ def test_Opener_various(): # Check we can do all sorts of files here message = b"Oh what a giveaway" bz2_fileno = hasattr(BZ2File, 'fileno') + if HAVE_INDEXED_GZIP: + import indexed_gzip as igzip with InTemporaryDirectory(): sobj = BytesIO() for input in ('test.txt', @@ -86,6 +89,11 @@ def test_Opener_various(): assert_raises(UnsupportedOperation, fobj.fileno) elif input.endswith('.bz2') and not bz2_fileno: assert_raises(AttributeError, fobj.fileno) + # indexed gzip is used by default, and drops file + # handles by default, so we don't have a fileno. 
+ elif input.endswith('gz') and HAVE_INDEXED_GZIP and \ + StrictVersion(igzip.__version__) >= StrictVersion('0.7.0'): + assert_raises(igzip.NoHandleError, fobj.fileno) else: # Just check there is a fileno assert_not_equal(fobj.fileno(), 0) @@ -97,9 +105,10 @@ def test_BinOpener(): BinOpener, 'test.txt', 'r') -class MockIndexedGzipFile(object): +class MockIndexedGzipFile(GzipFile): def __init__(self, *args, **kwargs): - pass + self._drop_handles = kwargs.pop('drop_handles', False) + super(MockIndexedGzipFile, self).__init__(*args, **kwargs) @contextlib.contextmanager @@ -107,12 +116,11 @@ def patch_indexed_gzip(state): # Make it look like we do (state==True) or do not (state==False) have # the indexed gzip module. if state: - values = (True, True, MockIndexedGzipFile) + values = (True, MockIndexedGzipFile) else: - values = (False, False, GzipFile) + values = (False, GzipFile) with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', values[0]), \ - mock.patch('nibabel.arrayproxy.HAVE_INDEXED_GZIP', values[1]), \ - mock.patch('nibabel.openers.SafeIndexedGzipFile', values[2], + mock.patch('nibabel.openers.IndexedGzipFile', values[1], create=True): yield @@ -132,14 +140,18 @@ def test_Opener_gzip_type(): # Each test is specified by a tuple containing: # (indexed_gzip present, Opener kwargs, expected file type) tests = [ - (False, {'mode' : 'rb', 'keep_open' : True}, GzipFile), - (False, {'mode' : 'rb', 'keep_open' : False}, GzipFile), - (False, {'mode' : 'wb', 'keep_open' : True}, GzipFile), - (False, {'mode' : 'wb', 'keep_open' : False}, GzipFile), - (True, {'mode' : 'rb', 'keep_open' : True}, MockIndexedGzipFile), - (True, {'mode' : 'rb', 'keep_open' : False}, GzipFile), - (True, {'mode' : 'wb', 'keep_open' : True}, GzipFile), - (True, {'mode' : 'wb', 'keep_open' : False}, GzipFile), + (False, {'mode' : 'rb', 'keep_open' : True}, GzipFile), + (False, {'mode' : 'rb', 'keep_open' : False}, GzipFile), + (False, {'mode' : 'rb', 'keep_open' : 'auto'}, GzipFile), + (False, {'mode' : 'wb', 'keep_open' : True}, GzipFile), + (False, {'mode' : 'wb', 'keep_open' : False}, GzipFile), + (False, {'mode' : 'wb', 'keep_open' : 'auto'}, GzipFile), + (True, {'mode' : 'rb', 'keep_open' : True}, MockIndexedGzipFile), + (True, {'mode' : 'rb', 'keep_open' : False}, MockIndexedGzipFile), + (True, {'mode' : 'rb', 'keep_open' : 'auto'}, MockIndexedGzipFile), + (True, {'mode' : 'wb', 'keep_open' : True}, GzipFile), + (True, {'mode' : 'wb', 'keep_open' : False}, GzipFile), + (True, {'mode' : 'wb', 'keep_open' : 'auto'}, GzipFile), ] for test in tests: @@ -256,11 +268,10 @@ class StrictOpener(Opener): assert_true(isinstance(fobj.fobj, file_class)) elif lext == 'gz': try: - from indexed_gzip import SafeIndexedGzipFile + from ..openers import IndexedGzipFile except ImportError: - SafeIndexedGzipFile = GzipFile - assert_true(isinstance(fobj.fobj, (GzipFile, - SafeIndexedGzipFile))) + IndexedGzipFile = GzipFile + assert_true(isinstance(fobj.fobj, (GzipFile, IndexedGzipFile))) else: assert_true(isinstance(fobj.fobj, BZ2File)) diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index 285674083b..7280c5552d 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -108,6 +108,14 @@ def validate_shape(self, pmaker, params): # Read only assert_raises(AttributeError, setattr, prox, 'shape', params['shape']) + def validate_ndim(self, pmaker, params): + # Check shape + prox, fio, hdr = pmaker() + assert_equal(prox.ndim, len(params['shape'])) + # Read only + 
assert_raises(AttributeError, setattr, prox, + 'ndim', len(params['shape'])) + def validate_is_proxy(self, pmaker, params): # Check shape prox, fio, hdr = pmaker() diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 941e2271b0..2c17c33fd1 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -8,6 +8,7 @@ import sys import os +import shutil from os.path import (dirname, join as pjoin, abspath, splitext, basename, exists) import csv @@ -15,6 +16,7 @@ import numpy as np +import nibabel as nib from ..tmpdirs import InTemporaryDirectory from ..loadsave import load from ..orientations import flip_axis, aff2axcodes, inv_ornt_aff @@ -22,7 +24,7 @@ from nose.tools import assert_true, assert_false, assert_equal from nose import SkipTest -from numpy.testing import assert_almost_equal +from numpy.testing import assert_almost_equal, assert_array_equal from .scriptrunner import ScriptRunner from .nibabel_data import needs_nibabel_data @@ -65,6 +67,40 @@ def check_nib_ls_example4d(opts=[], hdrs_str="", other_str=""): assert_equal(fname, stdout[:len(fname)]) assert_re_in(expected_re, stdout[len(fname):]) + +def check_nib_diff_examples(): + fnames = [pjoin(DATA_PATH, f) + for f in ('standard.nii.gz', 'example4d.nii.gz')] + code, stdout, stderr = run_command(['nib-diff'] + fnames, check_code=False) + checked_fields = ["Field/File", "regular", "dim_info", "dim", "datatype", "bitpix", "pixdim", "slice_end", + "xyzt_units", "cal_max", "descrip", "qform_code", "sform_code", "quatern_b", + "quatern_c", "quatern_d", "qoffset_x", "qoffset_y", "qoffset_z", "srow_x", + "srow_y", "srow_z", "DATA(md5)", "DATA(diff 1:)"] + for item in checked_fields: + assert_true(item in stdout) + + fnames2 = [pjoin(DATA_PATH, f) + for f in ('example4d.nii.gz', 'example4d.nii.gz')] + code, stdout, stderr = run_command(['nib-diff'] + fnames2, check_code=False) + assert_equal(stdout, "These files are identical.") + + fnames3 = [pjoin(DATA_PATH, f) + for f in ('standard.nii.gz', 'example4d.nii.gz', 'example_nifti2.nii.gz')] + code, stdout, stderr = run_command(['nib-diff'] + fnames3, check_code=False) + for item in checked_fields: + assert_true(item in stdout) + + fnames4 = [pjoin(DATA_PATH, f) + for f in ('standard.nii.gz', 'standard.nii.gz', 'standard.nii.gz')] + code, stdout, stderr = run_command(['nib-diff'] + fnames4, check_code=False) + assert_equal(stdout, "These files are identical.") + + code, stdout, stderr = run_command(['nib-diff', '--dt', 'float64'] + fnames, check_code=False) + for item in checked_fields: + assert_true(item in stdout) + + + @script_test def test_nib_ls(): yield check_nib_ls_example4d @@ -128,6 +164,30 @@ def test_nib_ls_multiple(): ) +@script_test +def test_help(): + for cmd in ['parrec2nii', 'nib-dicomfs', 'nib-ls', 'nib-nifti-dx']: + if cmd == 'nib-dicomfs': + # needs special treatment since depends on fuse module which + # might not be available. + try: + import fuse + except Exception: + continue # do not test this one + code, stdout, stderr = run_command([cmd, '--help']) + assert_equal(code, 0) + assert_re_in(".*%s" % cmd, stdout) + assert_re_in(".*Usage", stdout) + # Some third party modules might like to announce some Deprecation + # etc warnings, see e.g. 
https://travis-ci.org/nipy/nibabel/jobs/370353602 + if 'warning' not in stderr.lower(): + assert_equal(stderr, '') + + +@script_test +def test_nib_diff(): + yield check_nib_diff_examples + @script_test def test_nib_nifti_dx(): @@ -357,3 +417,92 @@ def test_parrec2nii_with_data(): assert_equal(sorted(csv_keys), ['diffusion b value number', 'gradient orientation number']) assert_equal(nlines, 8) # 8 volumes present in DTI.PAR + + +@script_test +def test_nib_trk2tck(): + simple_trk = pjoin(DATA_PATH, "simple.trk") + standard_trk = pjoin(DATA_PATH, "standard.trk") + + with InTemporaryDirectory() as tmpdir: + # Copy input files to convert. + shutil.copy(simple_trk, tmpdir) + shutil.copy(standard_trk, tmpdir) + simple_trk = pjoin(tmpdir, "simple.trk") + standard_trk = pjoin(tmpdir, "standard.trk") + simple_tck = pjoin(tmpdir, "simple.tck") + standard_tck = pjoin(tmpdir, "standard.tck") + + # Convert one file. + cmd = ["nib-trk2tck", simple_trk] + code, stdout, stderr = run_command(cmd) + assert_equal(len(stdout), 0) + assert_true(os.path.isfile(simple_tck)) + trk = nib.streamlines.load(simple_trk) + tck = nib.streamlines.load(simple_tck) + assert_array_equal(tck.streamlines.data, trk.streamlines.data) + assert_true(isinstance(tck, nib.streamlines.TckFile)) + + # Skip non TRK files. + cmd = ["nib-trk2tck", simple_tck] + code, stdout, stderr = run_command(cmd) + assert_true("Skipping non TRK file" in stdout) + + # By default, refuse to overwrite existing output files. + cmd = ["nib-trk2tck", simple_trk] + code, stdout, stderr = run_command(cmd) + assert_true("Skipping existing file" in stdout) + + # Convert multiple files and with --force. + cmd = ["nib-trk2tck", "--force", simple_trk, standard_trk] + code, stdout, stderr = run_command(cmd) + assert_equal(len(stdout), 0) + trk = nib.streamlines.load(standard_trk) + tck = nib.streamlines.load(standard_tck) + assert_array_equal(tck.streamlines.data, trk.streamlines.data) + + +@script_test +def test_nib_tck2trk(): + anat = pjoin(DATA_PATH, "standard.nii.gz") + standard_tck = pjoin(DATA_PATH, "standard.tck") + + with InTemporaryDirectory() as tmpdir: + # Copy input file to convert. + shutil.copy(standard_tck, tmpdir) + standard_trk = pjoin(tmpdir, "standard.trk") + standard_tck = pjoin(tmpdir, "standard.tck") + + # Anatomical image not found as first argument. + cmd = ["nib-tck2trk", standard_tck, anat] + code, stdout, stderr = run_command(cmd, check_code=False) + assert_equal(code, 2) # Parser error. + assert_true("Expecting anatomical image as first agument" in stderr) + + # Convert one file. + cmd = ["nib-tck2trk", anat, standard_tck] + code, stdout, stderr = run_command(cmd) + assert_equal(len(stdout), 0) + assert_true(os.path.isfile(standard_trk)) + tck = nib.streamlines.load(standard_tck) + trk = nib.streamlines.load(standard_trk) + assert_array_equal(trk.streamlines.data, tck.streamlines.data) + assert_true(isinstance(trk, nib.streamlines.TrkFile)) + + # Skip non TCK files. + cmd = ["nib-tck2trk", anat, standard_trk] + code, stdout, stderr = run_command(cmd) + assert_true("Skipping non TCK file" in stdout) + + # By default, refuse to overwrite existing output files. + cmd = ["nib-tck2trk", anat, standard_tck] + code, stdout, stderr = run_command(cmd) + assert_true("Skipping existing file" in stdout) + + # Convert multiple files and with --force. 
+ cmd = ["nib-tck2trk", "--force", anat, standard_tck, standard_tck] + code, stdout, stderr = run_command(cmd) + assert_equal(len(stdout), 0) + tck = nib.streamlines.load(standard_tck) + trk = nib.streamlines.load(standard_trk) + assert_array_equal(tck.streamlines.data, trk.streamlines.data) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index bd8b834b84..b0f571023d 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -17,6 +17,7 @@ from io import BytesIO from ..spatialimages import (SpatialHeader, SpatialImage, HeaderDataError, Header, ImageDataError) +from ..imageclasses import spatial_axes_first from unittest import TestCase from nose.tools import (assert_true, assert_false, assert_equal, @@ -385,9 +386,10 @@ def test_get_data(self): img[0, 0, 0] # Make sure the right message gets raised: assert_equal(str(exception_manager.exception), - ("Cannot slice image objects; consider slicing image " - "array data with `img.dataobj[slice]` or " - "`img.get_data()[slice]`")) + "Cannot slice image objects; consider using " + "`img.slicer[slice]` to generate a sliced image (see " + "documentation for caveats) or slicing image array data " + "with `img.dataobj[slice]` or `img.get_data()[slice]`") assert_true(in_data is img.dataobj) out_data = img.get_data() assert_true(in_data is out_data) @@ -411,6 +413,132 @@ def test_get_data(self): assert_false(rt_img.get_data() is out_data) assert_array_equal(rt_img.get_data(), in_data) + def test_slicer(self): + img_klass = self.image_class + in_data_template = np.arange(240, dtype=np.int16) + base_affine = np.eye(4) + t_axis = None + for dshape in ((4, 5, 6, 2), # Time series + (8, 5, 6)): # Volume + in_data = in_data_template.copy().reshape(dshape) + img = img_klass(in_data, base_affine.copy()) + + if not spatial_axes_first(img): + with assert_raises(ValueError): + img.slicer + continue + + assert_true(hasattr(img.slicer, '__getitem__')) + + # Note spatial zooms are always first 3, even when + spatial_zooms = img.header.get_zooms()[:3] + + # Down-sample with [::2, ::2, ::2] along spatial dimensions + sliceobj = [slice(None, None, 2)] * 3 + \ + [slice(None)] * (len(dshape) - 3) + downsampled_img = img.slicer[tuple(sliceobj)] + assert_array_equal(downsampled_img.header.get_zooms()[:3], + np.array(spatial_zooms) * 2) + + max4d = (hasattr(img.header, '_structarr') and + 'dims' in img.header._structarr.dtype.fields and + img.header._structarr['dims'].shape == (4,)) + # Check newaxis and single-slice errors + with assert_raises(IndexError): + img.slicer[None] + with assert_raises(IndexError): + img.slicer[0] + # Axes 1 and 2 are always spatial + with assert_raises(IndexError): + img.slicer[:, None] + with assert_raises(IndexError): + img.slicer[:, 0] + with assert_raises(IndexError): + img.slicer[:, :, None] + with assert_raises(IndexError): + img.slicer[:, :, 0] + if len(img.shape) == 4: + if max4d: + with assert_raises(ValueError): + img.slicer[:, :, :, None] + else: + # Reorder non-spatial axes + assert_equal(img.slicer[:, :, :, None].shape, + img.shape[:3] + (1,) + img.shape[3:]) + # 4D to 3D using ellipsis or slices + assert_equal(img.slicer[..., 0].shape, img.shape[:-1]) + assert_equal(img.slicer[:, :, :, 0].shape, img.shape[:-1]) + else: + # 3D Analyze/NIfTI/MGH to 4D + assert_equal(img.slicer[:, :, :, None].shape, img.shape + (1,)) + if len(img.shape) == 3: + # Slices exceed dimensions + with assert_raises(IndexError): + img.slicer[:, :, :, :, None] + elif max4d: + with 
assert_raises(ValueError): + img.slicer[:, :, :, :, None] + else: + assert_equal(img.slicer[:, :, :, :, None].shape, + img.shape + (1,)) + + # Crop by one voxel in each dimension + sliced_i = img.slicer[1:] + sliced_j = img.slicer[:, 1:] + sliced_k = img.slicer[:, :, 1:] + sliced_ijk = img.slicer[1:, 1:, 1:] + + # No scaling change + assert_array_equal(sliced_i.affine[:3, :3], img.affine[:3, :3]) + assert_array_equal(sliced_j.affine[:3, :3], img.affine[:3, :3]) + assert_array_equal(sliced_k.affine[:3, :3], img.affine[:3, :3]) + assert_array_equal(sliced_ijk.affine[:3, :3], img.affine[:3, :3]) + # Translation + assert_array_equal(sliced_i.affine[:, 3], [1, 0, 0, 1]) + assert_array_equal(sliced_j.affine[:, 3], [0, 1, 0, 1]) + assert_array_equal(sliced_k.affine[:, 3], [0, 0, 1, 1]) + assert_array_equal(sliced_ijk.affine[:, 3], [1, 1, 1, 1]) + + # No change to affines with upper-bound slices + assert_array_equal(img.slicer[:1, :1, :1].affine, img.affine) + + # Yell about step = 0 + with assert_raises(ValueError): + img.slicer[:, ::0] + with assert_raises(ValueError): + img.slicer.slice_affine((slice(None), slice(None, None, 0))) + + # Don't permit zero-length slices + with assert_raises(IndexError): + img.slicer[:0] + + # No fancy indexing + with assert_raises(IndexError): + img.slicer[[0]] + with assert_raises(IndexError): + img.slicer[[-1]] + with assert_raises(IndexError): + img.slicer[[0], [-1]] + + # Check data is consistent with slicing numpy arrays + slice_elems = (None, Ellipsis, 0, 1, -1, [0], [1], [-1], + slice(None), slice(1), slice(-1), slice(1, -1)) + for n_elems in range(6): + for _ in range(1 if n_elems == 0 else 10): + sliceobj = tuple( + np.random.choice(slice_elems, n_elems).tolist()) + try: + sliced_img = img.slicer[sliceobj] + except (IndexError, ValueError): + # Only checking valid slices + pass + else: + sliced_data = in_data[sliceobj] + assert_array_equal(sliced_data, sliced_img.get_data()) + assert_array_equal(sliced_data, sliced_img.dataobj) + assert_array_equal(sliced_data, img.dataobj[sliceobj]) + assert_array_equal(sliced_data, img.get_data()[sliceobj]) + def test_api_deprecations(self): class FakeImage(self.image_class): diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index f528555d05..40d5ebc41e 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -64,7 +64,7 @@ def test_assert_allclose_safely(): def assert_warn_len_equal(mod, n_in_context): mod_warns = mod.__warningregistry__ - # Python 3.4 appears to clear any pre-existing warnings of the same type, + # Python 3 appears to clear any pre-existing warnings of the same type, # when raising warnings inside a catch_warnings block. So, there is a # warning generated by the tests within the context manager, but no # previous warnings. 
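
For reference, the `img.slicer` attribute exercised by `test_slicer` above is the user-facing API that the rewritten `SpatialImage` error message now points to. A minimal sketch of the behaviour the assertions encode (assuming a nibabel build that includes the `slicer` attribute added in this series; the expected values mirror the test's assertions):

    import numpy as np
    import nibabel as nib

    # Small 3D volume with an identity affine, matching the test fixture.
    data = np.arange(240, dtype=np.int16).reshape((8, 5, 6))
    img = nib.Nifti1Image(data, np.eye(4))

    # Down-sampling the spatial axes by 2 halves the shape and doubles
    # the spatial zooms recorded in the header.
    down = img.slicer[::2, ::2, ::2]
    print(down.shape)                   # (4, 3, 3)
    print(down.header.get_zooms()[:3])  # (2.0, 2.0, 2.0)

    # Cropping one voxel from each axis leaves the scaling block of the
    # affine untouched but shifts the translation by one voxel.
    cropped = img.slicer[1:, 1:, 1:]
    print(cropped.affine[:3, 3])        # [1. 1. 1.]

As in the tests, newaxis insertion and integer indexing on the three spatial axes raise `IndexError`, so a sliced image always remains a valid spatial image.
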
@@ -84,18 +84,15 @@ def test_clear_and_catch_warnings(): assert_equal(my_mod.__warningregistry__, {}) # Without specified modules, don't clear warnings during context with clear_and_catch_warnings(): - warnings.simplefilter('ignore') warnings.warn('Some warning') assert_warn_len_equal(my_mod, 1) # Confirm that specifying module keeps old warning, does not add new with clear_and_catch_warnings(modules=[my_mod]): - warnings.simplefilter('ignore') warnings.warn('Another warning') assert_warn_len_equal(my_mod, 1) # Another warning, no module spec does add to warnings dict, except on - # Python 3.4 (see comments in `assert_warn_len_equal`) + # Python 3 (see comments in `assert_warn_len_equal`) with clear_and_catch_warnings(): - warnings.simplefilter('ignore') warnings.warn('Another warning') assert_warn_len_equal(my_mod, 2) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index e442b508d8..b7a510e337 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -13,6 +13,7 @@ import warnings import gzip import bz2 +from collections import OrderedDict from os.path import exists, splitext from operator import mul from functools import reduce @@ -22,6 +23,7 @@ from .casting import (shared_range, type_info, OK_FLOATS) from .openers import Opener from .deprecated import deprecate_with_version +from .externals.oset import OrderedSet sys_is_le = sys.byteorder == 'little' native_code = sys_is_le and '<' or '>' @@ -78,7 +80,7 @@ class Recoder(object): 2 ''' - def __init__(self, codes, fields=('code',), map_maker=dict): + def __init__(self, codes, fields=('code',), map_maker=OrderedDict): ''' Create recoder object ``codes`` give a sequence of code, alias sequences @@ -97,7 +99,7 @@ def __init__(self, codes, fields=('code',), map_maker=dict): Parameters ---------- - codes : seqence of sequences + codes : sequence of sequences Each sequence defines values (codes) that are equivalent fields : {('code',) string sequence}, optional names by which elements in sequences can be accessed @@ -133,13 +135,15 @@ def add_codes(self, code_syn_seqs): Examples -------- - >>> code_syn_seqs = ((1, 'one'), (2, 'two')) + >>> code_syn_seqs = ((2, 'two'), (1, 'one')) >>> rc = Recoder(code_syn_seqs) >>> rc.value_set() == set((1,2)) True >>> rc.add_codes(((3, 'three'), (1, 'first'))) >>> rc.value_set() == set((1,2,3)) True + >>> print(rc.value_set()) # set is actually ordered + OrderedSet([2, 1, 3]) ''' for code_syns in code_syn_seqs: # Add all the aliases @@ -186,7 +190,7 @@ def keys(self): return self.field1.keys() def value_set(self, name=None): - ''' Return set of possible returned values for column + ''' Return OrderedSet of possible returned values for column By default, the column is the first column. @@ -212,7 +216,7 @@ def value_set(self, name=None): d = self.field1 else: d = self.__dict__[name] - return set(d.values()) + return OrderedSet(d.values()) # Endian code aliases diff --git a/setup.py b/setup.py index 009969a3c5..27f85d3e99 100755 --- a/setup.py +++ b/setup.py @@ -117,6 +117,9 @@ def main(**extra_args): pjoin('bin', 'nib-ls'), pjoin('bin', 'nib-dicomfs'), pjoin('bin', 'nib-nifti-dx'), + pjoin('bin', 'nib-tck2trk'), + pjoin('bin', 'nib-trk2tck'), + pjoin('bin', 'nib-diff'), ], cmdclass = cmdclass, **extra_args From ead0ab214a0921b5286de9e79cd3d7c4294ab14f Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 17 Oct 2018 07:48:41 -0700 Subject: [PATCH 24/26] CI: Test Numpy 1.14.2, 1.14.4 (suspected cause), 1.15.2 --- appveyor.yml | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index 6b098b3cb3..b6f6b0822e 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -5,9 +5,29 @@ environment: appveyor_build_worker_cloud: gce matrix: - PYTHON: C:\Python34 + NUMPY: 1.14.2 - PYTHON: C:\Python34-x64 + NUMPY: 1.14.2 - PYTHON: C:\Python35 + NUMPY: 1.14.2 - PYTHON: C:\Python35-x64 + NUMPY: 1.14.2 + - PYTHON: C:\Python34 + NUMPY: 1.14.4 + - PYTHON: C:\Python34-x64 + NUMPY: 1.14.4 + - PYTHON: C:\Python35 + NUMPY: 1.14.4 + - PYTHON: C:\Python35-x64 + NUMPY: 1.14.4 + - PYTHON: C:\Python34 + NUMPY: 1.15.2 + - PYTHON: C:\Python34-x64 + NUMPY: 1.15.2 + - PYTHON: C:\Python35 + NUMPY: 1.15.2 + - PYTHON: C:\Python35-x64 + NUMPY: 1.15.2 install: # Prepend newly installed Python to the PATH of this build (this cannot be @@ -16,7 +36,7 @@ install: - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH% # Install the dependencies of the project. - - pip install numpy scipy matplotlib nose h5py mock pydicom + - pip install numpy==%NUMPY% scipy matplotlib nose h5py mock pydicom - pip install . - SET NIBABEL_DATA_DIR=%CD%\nibabel-data From ea344cbaef59a33359e844f60f33eb94af67ed7a Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 17 Oct 2018 07:48:55 -0700 Subject: [PATCH 25/26] MNT: Empty commit to trigger AppVeyor From 66b04912573e131a9b204ab1be2455a457e3ead6 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 17 Oct 2018 09:47:39 -0700 Subject: [PATCH 26/26] CI: Test on numpy 1.{12,13,14}.0 --- appveyor.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index b6f6b0822e..697e5ea8dd 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -5,29 +5,29 @@ environment: appveyor_build_worker_cloud: gce matrix: - PYTHON: C:\Python34 - NUMPY: 1.14.2 + NUMPY: 1.14.0 - PYTHON: C:\Python34-x64 - NUMPY: 1.14.2 + NUMPY: 1.14.0 - PYTHON: C:\Python35 - NUMPY: 1.14.2 + NUMPY: 1.14.0 - PYTHON: C:\Python35-x64 - NUMPY: 1.14.2 + NUMPY: 1.14.0 - PYTHON: C:\Python34 - NUMPY: 1.14.4 + NUMPY: 1.13.0 - PYTHON: C:\Python34-x64 - NUMPY: 1.14.4 + NUMPY: 1.13.0 - PYTHON: C:\Python35 - NUMPY: 1.14.4 + NUMPY: 1.13.0 - PYTHON: C:\Python35-x64 - NUMPY: 1.14.4 + NUMPY: 1.13.0 - PYTHON: C:\Python34 - NUMPY: 1.15.2 + NUMPY: 1.12.0 - PYTHON: C:\Python34-x64 - NUMPY: 1.15.2 + NUMPY: 1.12.0 - PYTHON: C:\Python35 - NUMPY: 1.15.2 + NUMPY: 1.12.0 - PYTHON: C:\Python35-x64 - NUMPY: 1.15.2 + NUMPY: 1.12.0 install: # Prepend newly installed Python to the PATH of this build (this cannot be