diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000000..0ff888cf54 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,63 @@ +version: 2 + +jobs: + build: + docker: + - image: circleci/python:3.6 + environment: + DISTRIB: "conda" + PYTHON_VERSION: "3.6" + NUMPY_VERSION: "*" + SCIPY_VERSION: "*" + SCIKIT_LEARN_VERSION: "*" + MATPLOTLIB_VERSION: "*" + + steps: + - checkout + # Get rid of existing virtualenvs on circle ci as they conflict with conda. + # Trick found here: + # https://discuss.circleci.com/t/disable-autodetection-of-project-or-application-of-python-venv/235/10 + - run: cd && rm -rf ~/.pyenv && rm -rf ~/virtualenvs + # We need to remove conflicting texlive packages. + - run: sudo -E apt-get -yq remove texlive-binaries --purge + # Installing required packages for `make -C doc check command` to work. + - run: sudo -E apt-get -yq update + - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra + - restore_cache: + key: v1-packages+datasets-{{ .Branch }} + - run: wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh + - run: chmod +x ~/miniconda.sh && ~/miniconda.sh -b + - run: + name: Setup conda path in env variables + command: | + echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV + - run: + name: Create conda env + command: | + conda create -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ + flake8 lxml nose cython mkl sphinx coverage pillow pandas -yq + conda install -n testenv nibabel nose-timer -c conda-forge -yq + - run: + name: Running CircleCI test (make html) + command: | + source activate testenv + pip install -e . + set -o pipefail && cd doc && make html-strict 2>&1 | tee ~/log.txt + no_output_timeout: 5h + - save_cache: + key: v1-packages+datasets-{{ .Branch }} + paths: + - $HOME/nilearn_data + - $HOME/miniconda3 + + - store_artifacts: + path: doc/_build/html + - store_artifacts: + path: coverage + - store_artifacts: + path: $HOME/log.txt + destination: log.txt + + + + diff --git a/.gitignore b/.gitignore index e8abe21658..4beefc864a 100644 --- a/.gitignore +++ b/.gitignore @@ -31,3 +31,7 @@ tags *.nt.bz2 *.tar.gz *.tgz + +.idea/ + +doc/themes/nilearn/static/jquery.js \ No newline at end of file diff --git a/.mailmap b/.mailmap index fd860f4f0d..e0b2d2bbb3 100644 --- a/.mailmap +++ b/.mailmap @@ -1,39 +1,43 @@ -Alexandre Abraham -Alexandre Abraham -Alexandre Gramfort +Aina Frau Pascual +Alexandre Abadie +Alexandre Abraham +Alexandre Gramfort Alexandre Savio +Arthur Mensch Ben Cipollini Bertrand Thirion -Chris Filo Gorgolewski +Chris Filo Gorgolewski Danilo Bzdok +Demian Wassermann +Dimitri Papadopoulos Orfanos Elvis Dohmatob Fabian Pedregosa -Fabian Pedregosa -Fabian Pedregosa -Gael Varoquaux -GaelVaroquaux Gael Varoquaux -Jan Margeta +Jan Margeta Jaques Grobler Jason Gors +Jona Sassenhagen Jean Kossaifi -Jean Kossaifi +Jean Remi King +Jeff Chiang +Julia Huntenburg +J Necus +Kamalakar Daddy Konstantin Shmelkov Loïc Estève +Martin Perez-Guevara Matthias Ekman +Mehdi Rahim Mehdi Rahim -Mehdi Rahim Michael Eickenberg +Michael Hanke Michael Waskom -Philippe Gervais +Moritz Boos +Moritz Boos +Óscar Nájera +Philippe Gervais Ronald Phlypo -Salma Bougacha +Salma Bougacha Vincent Michel Virgile Fritsch -Yannick Schwartz -schwarty Yannick Schwartz -Óscar Nájera -Kamalakar Daddy -Fabian Pedregosa -Fabian Pedregosa diff --git a/.travis.yml b/.travis.yml index 2057fe42d2..4d36ea38c7 100644 --- 
a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,6 @@ +sudo: required +dist: xenial + language: python virtualenv: @@ -6,54 +9,47 @@ virtualenv: env: global: - TEST_RUN_FOLDER="/tmp" # folder where the tests are run from - matrix: - # Ubuntu 14.04 versions - - DISTRIB="conda" PYTHON_VERSION="2.7" - NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.13.3" - SCIKIT_LEARN_VERSION="0.14.1" MATPLOTLIB_VERSION="1.3.1" - # Ubuntu 14.04 versions without matplotlib - - DISTRIB="conda" PYTHON_VERSION="2.7" - NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.13.3" - SCIKIT_LEARN_VERSION="0.14.1" - - DISTRIB="neurodebian" PYTHON_VERSION="2.7" - # Trying to get as close to the minimum required versions while - # still having the package version available through conda - - DISTRIB="conda" PYTHON_VERSION="2.6" - NUMPY_VERSION="1.6.2" SCIPY_VERSION="0.11.0" - SCIKIT_LEARN_VERSION="0.13" MATPLOTLIB_VERSION="1.1.1" - NIBABEL_VERSION="1.1.0" + +matrix: + # Do not wait for the allowed_failures entry to finish before + # setting the status + fast_finish: true + allow_failures: + # allow_failures seems to be keyed on the python version + - python: 2.7 + include: + # Oldest supported versions (with neurodebian) + - env: DISTRIB="conda" PYTHON_VERSION="2.7" + NUMPY_VERSION="1.11.2" SCIPY_VERSION="0.17" + SCIKIT_LEARN_VERSION="0.18" MATPLOTLIB_VERSION="1.5.1" + PANDAS_VERSION="0.18.0" NIBABEL_VERSION="2.0.2" COVERAGE="true" + # Oldest supported versions without matplotlib + - env: DISTRIB="conda" PYTHON_VERSION="2.7" + NUMPY_VERSION="1.11.2" SCIPY_VERSION="0.17" + SCIKIT_LEARN_VERSION="0.18" + # Fake Ubuntu Xenial (Travis doesn't support Xenial yet) + - env: DISTRIB="conda" PYTHON_VERSION="2.7" + NUMPY_VERSION="1.13" SCIPY_VERSION="0.19" + SCIKIT_LEARN_VERSION="0.18.1" + NIBABEL_VERSION="2.0.2" # Python 3.4 with intermediary versions - - DISTRIB="conda" PYTHON_VERSION="3.4" - NUMPY_VERSION="1.8" SCIPY_VERSION="0.14" - SCIKIT_LEARN_VERSION="0.15" MATPLOTLIB_VERSION="1.4" + - env: DISTRIB="conda" PYTHON_VERSION="3.4" + NUMPY_VERSION="1.11.2" SCIPY_VERSION="0.17" + SCIKIT_LEARN_VERSION="0.18" MATPLOTLIB_VERSION="1.5.1" # Most recent versions - - DISTRIB="conda" PYTHON_VERSION="3.5" - NUMPY_VERSION="*" SCIPY_VERSION="*" - SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true" - -install: - - source continuous_integration/install.sh - -before_script: - - make clean - -script: - - python continuous_integration/show-python-packages-versions.py - # Copy setup.cfg to TEST_RUN_FOLDER where we are going to run the tests from - # Mainly for nose config settings - - cp setup.cfg "$TEST_RUN_FOLDER" - # We want to back out of the current working directory to make - # sure we are using nilearn installed in site-packages rather - # than the one from the current working directory - # Parentheses (run in a subshell) are used to leave - # the current directory unchanged - - (cd "$TEST_RUN_FOLDER" && make -f $OLDPWD/Makefile test-code) - - test "$MATPLOTLIB_VERSION" == "" || make test-doc - -after_success: - # Ignore coveralls failures as the coveralls server is not very reliable - # but we don't want travis to report a failure in the github UI just - # because the coverage report failed to be published. 
-    # coveralls need to be run from the git checkout
-    # so we need to copy the coverage results from TEST_RUN_FOLDER
-    - if [[ "$COVERAGE" == "true" ]]; then cp "$TEST_RUN_FOLDER/.coverage" .; coveralls || echo "failed"; fi
+    - env: DISTRIB="conda" PYTHON_VERSION="3.5"
+           NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*"
+           SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true"
+           LXML_VERSION="*"
+    # FLAKE8 linting on diff wrt common ancestor with upstream/master
+    # Note: the python value is only there to trigger allow_failures
+    - python: 2.7
+      env: DISTRIB="conda" PYTHON_VERSION="2.7" FLAKE8_VERSION="*" SKIP_TESTS="true"
+
+install: source continuous_integration/install.sh
+
+before_script: make clean
+
+script: source continuous_integration/test_script.sh
+
+after_success: source continuous_integration/after_success.sh
diff --git a/AUTHORS.rst b/AUTHORS.rst
index 9575538012..c67751492c 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -16,13 +16,37 @@ particular:
 * `Gael Varoquaux `_
 * Philippe Gervais
 * Michael Eickenberg
-* `Chris Filo Gorgolewski `_
 * Danilo Bzdok
 * Loïc Estève
+* Kamalakar Reddy Daddy
+* Elvis Dohmatob
+* Alexandre Abadie
+* Andres Hoyos Idrobo
+* Salma Bougacha
+* Mehdi Rahim
+* Sylvain Lanuzel
+* `Kshitij Chawla `_
+
+Many others also contributed outside of Parietal, notably:
+
+* `Chris Filo Gorgolewski `_
 * `Ben Cipollini `_
+* Julia Huntenburg
+* Martin Perez-Guevara
 
 Thanks to M. Hanke and Y. Halchenko for data and packaging.
 
+Funding
+........
+
+Alexandre Abraham, Gael Varoquaux, Kamalakar Reddy Daddy, Loïc Estève,
+Mehdi Rahim, Philippe Gervais were paid by the `NiConnect
+`_
+project, funded by the French `Investissement d'Avenir
+`_.
+
+NiLearn is also supported by `DigiCosme `_ |digicosme logo|
+
 .. _citing:
 
 Citing nilearn
@@ -49,3 +73,7 @@ guarantee the future of the toolkit, if you use it, please cite it.
 
 See the scikit-learn documentation on `how to cite
 `_.
+
+.. |digicosme logo| image:: logos/digi-saclay-logo-small.png
+   :height: 25
+   :alt: DigiCosme Logo
\ No newline at end of file
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000000..5d6c4220a6
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,49 @@
+.. _contributing:
+
+============
+Contributing
+============
+
+This project is a community effort, and everyone is welcome to
+contribute.
+
+The project is hosted on https://github.com/nilearn/nilearn
+
+The best way to contribute and to help the project is to start working on known
+issues.
+See `Easy issues `_ to get
+started.
+
+Submitting a bug report
+=======================
+
+In case you experience issues using this package, do not hesitate to submit a
+ticket to the
+`Bug Tracker `_. You are
+also welcome to post feature requests or pull requests.
+
+.. _git_repo:
+
+Retrieving the latest code
+==========================
+
+We use `Git `_ for version control and
+`GitHub `_ for hosting our main repository. If you are
+new to GitHub and don't know how to work with it, please first
+have a look at `this `_ to get the basics.
+
+
+You can check out the latest sources with the command::
+
+    git clone git://github.com/nilearn/nilearn.git
+
+or if you have write privileges::
+
+    git clone git@github.com:nilearn/nilearn.git
+
+Coding guidelines
+=================
+
+Nilearn follows the coding conventions used by scikit-learn. `Please read them
+`_
+before you start implementing your changes.
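As a companion to the coding guidelines, this patch also adds a diff-restricted flake8 check to CI (see continuous_integration/flake8_diff.sh further down). For illustration only — a minimal sketch, not part of the patch — the same check can be reproduced from Python, assuming git and flake8 are installed and a remote named ``upstream`` points at nilearn/nilearn::

    # Lint only the lines changed relative to the merge-base with
    # upstream/master, mirroring
    # `git diff --unified=0 $COMMIT | flake8 --diff --show-source`
    # from continuous_integration/flake8_diff.sh.
    import subprocess

    # Common ancestor between the current branch and upstream/master.
    base = subprocess.check_output(
        ["git", "merge-base", "HEAD", "upstream/master"]).decode().strip()

    # Diff with zero context lines, so unchanged code cannot fail the check.
    diff = subprocess.check_output(["git", "diff", "--unified=0", base])

    # `flake8 --diff` reads a diff on stdin and reports only touched lines.
    result = subprocess.run(["flake8", "--diff", "--show-source"], input=diff)
    print("flake8 exit status:", result.returncode)
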
diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000..d6af1ad4eb --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,3 @@ +include AUTHORS.rst +include LICENSE +include README.rst diff --git a/Makefile b/Makefile index 97cebc5699..15d6f8d4dd 100644 --- a/Makefile +++ b/Makefile @@ -13,6 +13,7 @@ all: clean test doc-noplot clean-pyc: find . -name "*.pyc" | xargs rm -f + find . -name "__pycache__" | xargs rm -rf clean-so: find . -name "*.so" | xargs rm -f @@ -66,5 +67,3 @@ doc: pdf: make -C doc pdf -install: - cd doc && make install diff --git a/README.rst b/README.rst index 952a089154..7885ed878b 100644 --- a/README.rst +++ b/README.rst @@ -8,8 +8,8 @@ :target: https://ci.appveyor.com/project/nilearn-ci/nilearn :alt: AppVeyor Build Status -.. image:: https://coveralls.io/repos/nilearn/nilearn/badge.svg?branch=master - :target: https://coveralls.io/r/nilearn/nilearn +.. image:: https://codecov.io/gh/nilearn/nilearn/branch/master/graph/badge.svg + :target: https://codecov.io/gh/nilearn/nilearn nilearn ======= @@ -25,7 +25,7 @@ This work is made available by a community of people, amongst which the INRIA Parietal Project Team and the scikit-learn folks, in particular P. Gervais, A. Abraham, V. Michel, A. Gramfort, G. Varoquaux, F. Pedregosa, B. Thirion, M. Eickenberg, C. F. Gorgolewski, -D. Bzdok, L. Estève and B. Cipollini. +D. Bzdok, L. Esteve and B. Cipollini. Important links =============== @@ -38,15 +38,15 @@ Dependencies The required dependencies to use the software are: -* Python >= 2.6, +* Python >= 2.7, * setuptools -* Numpy >= 1.6.1 -* SciPy >= 0.9 -* Scikit-learn >= 0.13 (Some examples require 0.14 to run) -* Nibabel >= 1.1.0 +* Numpy >= 1.11 +* SciPy >= 0.17 +* Scikit-learn >= 0.18 +* Nibabel >= 2.0.2 If you are using nilearn plotting functionalities or running the -examples, matplotlib >= 1.1.1 is required. +examples, matplotlib >= 1.5.1 is required. If you want to run the tests, you need nose >= 1.2.1 and coverage >= 3.6. @@ -66,16 +66,5 @@ http://nilearn.github.io/introduction.html#installation. Development =========== -Code ----- - -GIT -~~~ - -You can check the latest sources with the command:: - - git clone git://github.com/nilearn/nilearn - -or if you have write privileges:: - - git clone git@github.com:nilearn/nilearn +Detailed instructions on how to contribute are available at +http://nilearn.github.io/contributing.html diff --git a/appveyor.yml b/appveyor.yml index ae181aff5f..c62ba4e777 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -9,8 +9,8 @@ environment: PYTHON_VERSION: "2.7.x" PYTHON_ARCH: "64" - - PYTHON: "C:\\Miniconda3-x64" - PYTHON_VERSION: "3.4.x" + - PYTHON: "C:\\Miniconda35-x64" + PYTHON_VERSION: "3.5.x" PYTHON_ARCH: "64" install: @@ -24,6 +24,10 @@ install: - "python -c \"import struct; print(struct.calcsize('P') * 8)\"" # Installed prebuilt dependencies from conda + # a temporary work around with failures related to matplotlib 2.1.0 + # See similar fix which made for travis and circleci + # https://github.com/nilearn/nilearn/pull/1525 + # Should be removed after a new matplotlib release 2.1.1 - "conda install pip numpy scipy scikit-learn nose wheel matplotlib -y -q" # Install other nilearn dependencies diff --git a/circle.yml b/circle.yml deleted file mode 100644 index 2426370514..0000000000 --- a/circle.yml +++ /dev/null @@ -1,36 +0,0 @@ -dependencies: - cache_directories: - - "~/nilearn_data" - - pre: - # We need to remove conflicting texlive packages. 
- - sudo -E apt-get -yq remove texlive-binaries --purge - # Installing required packages for `make -C doc check command` to work. - - sudo -E apt-get -yq update - - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra - - override: - - pip install --upgrade pip - # Installing sphinx 1.2.3 to work-around autosummary issues in 1.3 - # They should be fixed in sphinx 1.4 - - pip install sphinx==1.2.3 matplotlib coverage Pillow - - pip install scipy - - pip install scikit-learn - - pip install nose-timer - - pip install -e . - # we need to do this here so the datasets will be cached - # pipefail is necessary to propagate exit codes - - set -o pipefail && cd doc && make html-strict 2>&1 | tee ~/log.txt - -test: - override: - - make clean test test-coverage - # workaround - make html returns 0 even if examples fail to build - # (see https://github.com/sphinx-gallery/sphinx-gallery/issues/45) - - cat ~/log.txt && if grep -q "Traceback (most recent call last):" ~/log.txt; then false; else true; fi - -general: - artifacts: - - "doc/_build/html" - - "coverage" - - "~/log.txt" diff --git a/continuous_integration/after_success.sh b/continuous_integration/after_success.sh new file mode 100755 index 0000000000..a7475f623e --- /dev/null +++ b/continuous_integration/after_success.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +set -e + +# Ignore codecov failures because we don't want travis to report a failure +# in the github UI just because the coverage report failed to be published. +# codecov needs to be run from the git checkout +# so we need to copy the coverage results from TEST_RUN_FOLDER +if [[ "$SKIP_TESTS" != "true" && "$COVERAGE" == "true" ]]; then + cp "$TEST_RUN_FOLDER/.coverage" . + codecov || echo "Codecov upload failed" +fi diff --git a/continuous_integration/flake8_diff.sh b/continuous_integration/flake8_diff.sh new file mode 100755 index 0000000000..b91e3bd13d --- /dev/null +++ b/continuous_integration/flake8_diff.sh @@ -0,0 +1,72 @@ +#!/bin/bash + +set -e + +PROJECT=nilearn/nilearn +PROJECT_URL=https://github.com/$PROJECT.git + +echo "Remotes:" +git remote --verbose + +# Find the remote with the project name (upstream in most cases) +REMOTE=$(git remote -v | grep $PROJECT | cut -f1 | head -1 || echo '') + +# Add a temporary remote if needed. For example this is necessary when +# Travis is configured to run in a fork. In this case 'origin' is the +# fork and not the reference repo we want to diff against. +if [[ -z "$REMOTE" ]]; then + TMP_REMOTE=tmp_reference_upstream + REMOTE=$TMP_REMOTE + git remote add $REMOTE $PROJECT_URL +fi + +if [[ "$TRAVIS" == "true" ]]; then + if [[ "$TRAVIS_PULL_REQUEST" == "false" ]] + then + # Travis does the git clone with a limited depth (50 at the time of + # writing). This may not be enough to find the common ancestor with + # $REMOTE/master so we unshallow the git checkout + git fetch --unshallow || echo "Unshallowing the git checkout failed" + else + # We want to fetch the code as it is in the PR branch and not + # the result of the merge into master. This way line numbers + # reported by Travis will match with the local code. 
+ BRANCH_NAME=travis_pr_$TRAVIS_PULL_REQUEST + git fetch $REMOTE pull/$TRAVIS_PULL_REQUEST/head:$BRANCH_NAME + git checkout $BRANCH_NAME + fi +fi + + +echo -e '\nLast 2 commits:' +echo '--------------------------------------------------------------------------------' +git log -2 --pretty=short + +git fetch $REMOTE master +REMOTE_MASTER_REF="$REMOTE/master" + +# Find common ancestor between HEAD and remotes/$REMOTE/master +COMMIT=$(git merge-base @ $REMOTE_MASTER_REF) || \ + echo "No common ancestor found for $(git show @ -q) and $(git show $REMOTE_MASTER_REF -q)" + +if [[ -n "$TMP_REMOTE" ]]; then + git remote remove $TMP_REMOTE +fi + +if [ -z "$COMMIT" ]; then + exit 1 +fi + +echo -e "\nCommon ancestor between HEAD and $REMOTE_MASTER_REF is:" +echo '--------------------------------------------------------------------------------' +git show --no-patch $COMMIT + +echo -e '\nRunning flake8 on the diff in the range'\ + "$(git rev-parse --short $COMMIT)..$(git rev-parse --short @)" \ + "($(git rev-list $COMMIT.. | wc -l) commit(s)):" +echo '--------------------------------------------------------------------------------' + +# Conservative approach: diff without context so that code that was +# not changed does not create failures +git diff --unified=0 $COMMIT | flake8 --diff --show-source +echo -e "No problem detected by flake8\n" diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh index da8f427a56..512cbdf2f3 100755 --- a/continuous_integration/install.sh +++ b/continuous_integration/install.sh @@ -27,7 +27,7 @@ create_new_venv() { print_conda_requirements() { # Echo a conda requirement string for example - # "pip nose python='.7.3 scikit-learn=*". It has a hardcoded + # "pip nose python='2.7.3 scikit-learn=*". It has a hardcoded # list of possible packages to install and looks at _VERSION # environment variables to know whether to install a given package and # if yes which version to install. For example: @@ -35,7 +35,8 @@ print_conda_requirements() { # - for scikit-learn, SCIKIT_LEARN_VERSION is used TO_INSTALL_ALWAYS="pip nose" REQUIREMENTS="$TO_INSTALL_ALWAYS" - TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn" + TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn pandas \ +flake8 lxml" for PACKAGE in $TO_INSTALL_MAYBE; do # Capitalize package name and add _VERSION PACKAGE_VERSION_VARNAME="${PACKAGE^^}_VERSION" @@ -52,29 +53,35 @@ print_conda_requirements() { } create_new_conda_env() { - # Deactivate the travis-provided virtual environment and setup a - # conda-based environment instead - deactivate + # Skip Travis related code on circle ci. 
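+    # (`deactivate` is defined only inside the Travis-provided virtualenv;
+    # CircleCI provides no such virtualenv, so the call below must be guarded.)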
+ if [ -z $CIRCLECI ]; then + # Deactivate the travis-provided virtual environment and setup a + # conda-based environment instead + deactivate + fi # Use the miniconda installer for faster download / install of conda # itself - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh \ - -O miniconda.sh - chmod +x miniconda.sh && ./miniconda.sh -b - export PATH=/home/travis/miniconda2/bin:$PATH - conda update --yes conda + wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ + -O ~/miniconda.sh + chmod +x ~/miniconda.sh && ~/miniconda.sh -b + export PATH=$HOME/miniconda3/bin:$PATH + echo $PATH + conda update --quiet --yes conda # Configure the conda environment and put it in the path using the # provided versions REQUIREMENTS=$(print_conda_requirements) echo "conda requirements string: $REQUIREMENTS" - conda create -n testenv --yes $REQUIREMENTS + conda create -n testenv --quiet --yes $REQUIREMENTS source activate testenv if [[ "$INSTALL_MKL" == "true" ]]; then # Make sure that MKL is used - conda install --yes mkl - else + conda install --quiet --yes mkl + elif [[ -z $CIRCLECI ]]; then + # Travis doesn't use MKL but circle ci does for speeding up examples + # generation in the html documentation. # Make sure that MKL is not used conda remove --yes --features mkl || echo "MKL not installed" fi @@ -98,12 +105,18 @@ elif [[ "$DISTRIB" == "conda" ]]; then fi else - echo "Unrecognized distribution ($DISTRIB); cannot setup travis environment." + echo "Unrecognized distribution ($DISTRIB); cannot setup CI environment." exit 1 fi +pip install psutil memory_profiler + if [[ "$COVERAGE" == "true" ]]; then - pip install coverage coveralls + pip install codecov fi -python setup.py install +# numpy not installed when skipping the tests so we do not want to run +# setup.py install +if [[ "$SKIP_TESTS" != "true" ]]; then + python setup.py install +fi diff --git a/continuous_integration/test_script.sh b/continuous_integration/test_script.sh new file mode 100755 index 0000000000..1dfa2578d1 --- /dev/null +++ b/continuous_integration/test_script.sh @@ -0,0 +1,21 @@ +#!/bin/sh + +set -e + +if [[ -n "$FLAKE8_VERSION" ]]; then + source continuous_integration/flake8_diff.sh +fi + +if [[ "$SKIP_TESTS" != "true" ]]; then + python continuous_integration/show-python-packages-versions.py + # Copy setup.cfg to TEST_RUN_FOLDER where we are going to run the tests from + # Mainly for nose config settings + cp setup.cfg "$TEST_RUN_FOLDER" + # We want to back out of the current working directory to make + # sure we are using nilearn installed in site-packages rather + # than the one from the current working directory + # Parentheses (run in a subshell) are used to leave + # the current directory unchanged + (cd "$TEST_RUN_FOLDER" && make -f $OLDPWD/Makefile test-code) + test "$MATPLOTLIB_VERSION" == "" || make test-doc +fi diff --git a/doc/Makefile b/doc/Makefile index 1d8127fef6..f5e4288c71 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -38,31 +38,41 @@ clean: -rm -rf generated/* -rm -rf modules/generated/* +sym_links: + # Make sym-links to share the cache across various example + # directories + -cd ../examples/ && mkdir -p nilearn_cache + -cd ../examples/01_plotting/ && ln -sf ../nilearn_cache + -cd ../examples/02_decoding/ && ln -sf ../nilearn_cache + -cd ../examples/03_connectivity/ && ln -sf ../nilearn_cache + -cd ../examples/04_manipulating_images/ && ln -sf ../nilearn_cache + -cd ../examples/05_advanced/ && ln -sf ../nilearn_cache + force_html: force html force: find . 
-name \*.rst -exec touch {} \; -html: +html: sym_links # These two lines make the build a bit more lengthy, and the # the embedding of images more robust rm -rf $(BUILDDIR)/html/_images $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - touch $(BUILDDIR)/html .nojekyll + touch $(BUILDDIR)/html/.nojekyll @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." -html-strict: +html-strict: sym_links # Build html documentation using a strict mode: Warnings are # considered as errors. make check - touch $(BUILDDIR)/html .nojekyll + touch $(BUILDDIR)/html/.nojekyll @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." html-noplot: $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - touch $(BUILDDIR)/html .nojekyll + touch $(BUILDDIR)/html/.nojekyll @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." @@ -161,5 +171,6 @@ install: cp -r html/* nilearn.github.io && \ cd nilearn.github.io && \ git add * && \ + git add .nojekyll && \ git commit -a -m 'Make install' && \ git push diff --git a/doc/authors.rst b/doc/authors.rst new file mode 100644 index 0000000000..e122f914a8 --- /dev/null +++ b/doc/authors.rst @@ -0,0 +1 @@ +.. include:: ../AUTHORS.rst diff --git a/doc/building_blocks/index.rst b/doc/building_blocks/index.rst index d939521119..73aea4b47d 100644 --- a/doc/building_blocks/index.rst +++ b/doc/building_blocks/index.rst @@ -15,4 +15,5 @@ terms of data processing. .. toctree:: manual_pipeline.rst + neurovault.rst diff --git a/doc/building_blocks/manual_pipeline.rst b/doc/building_blocks/manual_pipeline.rst index 2b3da743e7..610fba1785 100644 --- a/doc/building_blocks/manual_pipeline.rst +++ b/doc/building_blocks/manual_pipeline.rst @@ -40,13 +40,13 @@ example, we can download the data from the `dataset.func` contains filenames referring to dataset files on the disk:: >>> list(sorted(dataset.keys())) # doctest: +SKIP - ['anat', 'description', 'func', 'mask_face', 'mask_face_little', 'mask_house', 'mask_house_little', 'mask_vt', 'session_target'] + ['anat', 'description', 'func', 'mask', 'mask_face', 'mask_face_little', 'mask_house', 'mask_house_little', 'mask_vt', 'session_target'] >>> dataset.func # doctest: +ELLIPSIS +SKIP - ['.../haxby2001/subj1/bold.nii.gz'] + ['.../haxby2001/subj2/bold.nii.gz'] Access supplementary information on the dataset: - >>> print haxby_dataset['description'] # doctest: +SKIP + >>> print(haxby_dataset['description']) # doctest: +SKIP The complete list of the data-downloading functions can be found in the :ref:`reference documentation for the datasets `. @@ -60,19 +60,24 @@ presenting different category of pictures to the subject (face, cat, ...) and the goal of this experiment is to predict which category is presented to the subjects from the brain activation. -These conditions are presented as string into a CSV file. The numpy function -`recfromcsv` is very useful to load this kind of data. +These conditions are presented as string into a CSV file. The `pandas +`__ function +`read_csv` is very useful to load this kind of data. -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # Load the behavioral labels - :end-before: # Keep only data corresponding to faces or cats +.. literalinclude:: ../../examples/plot_decoding_tutorial.py + :start-after: # We use pandas to load them in an array. + :end-before: ########################################################################### + +.. 
seealso:: + * `pandas `_ is a very useful Python + library to load CSV files and process their data -For example, we will now remove the *rest* condition from our dataset. +For example, we will now consider only the conditions *cat* and *face* from our dataset. This can be done as follows: -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # Keep only data corresponding to faces or cats +.. literalinclude:: ../../examples/plot_decoding_tutorial.py + :start-after: # mask of the samples belonging to the condition. :end-before: ########################################################################### @@ -116,8 +121,8 @@ We use masking to convert 4D data (i.e. 3D volume over time) into 2D data Applying a mask ................ -.. figure:: ../auto_examples/images/sphx_glr_plot_haxby_simple_002.png - :target: ../auto_examples/plot_haxby_simple.html +.. figure:: ../auto_examples/images/sphx_glr_plot_decoding_tutorial_002.png + :target: ../auto_examples/plot_decoding_tutorial.html :align: right :scale: 30% @@ -130,8 +135,8 @@ The :class:`NiftiMasker` can be seen as a *tube* that transforms data from 4D images to 2D arrays, but first it needs to 'fit' this data in order to learn simple parameters from it, such as its shape: -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # Prepare the data: apply the mask +.. literalinclude:: ../../examples/plot_decoding_tutorial.py + :start-after: # Now we use the NiftiMasker. :end-before: ########################################################################### @@ -158,9 +163,9 @@ scikit-learn, using its `fit`, `predict` or `transform` methods. Here, we use scikit-learn Support Vector Classification to learn how to predict the category of picture seen by the subject: -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # The decoding - :end-before: ########################################################################### +.. literalinclude:: ../../examples/plot_decoding_tutorial.py + :start-after: # We first fit it on the data + :end-before: # Let's measure the error rate: We will not detail it here since there is a very good documentation about it in the @@ -176,8 +181,8 @@ masked but also the results of an algorithm), the masker is clever and can take data of dimension 1D (resp. 2D) to convert it back to 3D (resp. 4D). -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # Retrieve the discriminating weights and save them +.. literalinclude:: ../../examples/plot_decoding_tutorial.py + :start-after: # For this, we can call inverse_transform on the NiftiMasker: :end-before: ########################################################################### Here we want to see the discriminating weights of some voxels. @@ -189,11 +194,11 @@ Again the visualization code is simple. We can use an fMRI slice as a background and plot the weights. Brighter points have a higher discriminating weight. -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # Visualize the discriminating weights over the mean EPI +.. literalinclude:: ../../examples/plot_decoding_tutorial.py + :start-after: # We can plot the weights, using the subject's anatomical as a background :end-before: ########################################################################### -.. figure:: ../auto_examples/images/sphx_glr_plot_haxby_simple_001.png - :target: ../auto_examples/plot_haxby_simple.html +.. 
figure:: ../auto_examples/images/sphx_glr_plot_decoding_tutorial_002.png + :target: ../auto_examples/plot_decoding_tutorial.html :align: center :scale: 50% diff --git a/doc/building_blocks/neurovault.rst b/doc/building_blocks/neurovault.rst new file mode 100644 index 0000000000..719b89430b --- /dev/null +++ b/doc/building_blocks/neurovault.rst @@ -0,0 +1,220 @@ +.. _neurovault: + +=========================================================== +Downloading statistical maps from the Neurovault repository +=========================================================== + +Neurovault is a public repository of unthresholded statistical maps, +parcellations, and atlases of the human brain. You can read about it +and browse the images it contains at http://www.neurovault.org. You +can download maps from Neurovault with Nilearn. + +Neurovault was introduced in [1]_. + +Neurovault contains collections of images. We can get information +about each collection - such as who uploaded it, a link to a paper, a +description - and about each image - the modality, number of subjects, +some tags, and more. The nilearn downloaders will fetch this metadata +and the images themselves. + +Nilearn provides two functions to download statistical maps from +Neurovault. + +Specific images or collections +------------------------------ + +In the simplest case, you already know the "id" of the collections or +images you want. Maybe you liked a paper and went to +http://www.neurovault.org looking for the data. Once on the relevant +collection's webpage, you can click 'Details' to see its id +(and more). You can then download it using +:func:`nilearn.datasets.fetch_neurovault_ids` : + + >>> from nilearn.datasets import fetch_neurovault_ids + >>> brainpedia = fetch_neurovault_ids(collection_ids=[1952]) # doctest: +SKIP + +Or if you want some images in particular, rather than whole +collections : + + >>> brainpedia_subset = fetch_neurovault_ids(image_ids=[32015, 32016]) # doctest: +SKIP + +Selection filters +----------------- + +You may not know which collections or images you want. For example, +you may be conducting a meta-analysis and want to grab all the images +that are related to "language". Using +:func:`nilearn.datasets.fetch_neurovault`, you can fetch all the images and +collections that match your criteria - you don't need to know their +ids. + +The filters are applied to images' and collections' metadata. + +You can describe filters with dictionaries. Each collection's +metadata is compared to the parameter ``collection_terms``. Collections +for which ``collection_metadata['key'] == value`` is not ``True`` for +every key, value pair in ``collection_terms`` will be discarded. We use +``image_terms`` in the same way to filter images. + +For example, many images on Neurovault have a "modality" field in their +metadata. BOLD images should have it set to "fMRI-BOLD". We can ask for BOLD +images only : + + >>> bold = fetch_neurovault(image_terms={'modality': 'fMRI-BOLD'}, # doctest: +SKIP + ... max_images=7) # doctest: +SKIP + +Here we set the max_images parameter to 7, so that you can try this snippet +without waiting for a long time. To get all the images which match your +filters, you should set max_images to ``None``, which means "get as many +images as possible". The default for max_images is 100. + +The default values for the ``collection_terms`` and ``image_terms`` parameters +filter out empty collections, and exclude an image if one of the following is +true: + + - it is not in MNI space. 
+ - its metadata field "is_valid" is cleared. + - it is thresholded. + - its map type is one of "ROI/mask", "anatomical", or "parcellation". + - its image type is "atlas" + +Extra keyword arguments are treated as additional image filters, so if we want +to keep the default filters, and add the requirement that the modality should +be "fMRI-BOLD", we can write: + + >>> bold = fetch_neurovault(modality='fMRI-BOLD', max_images=7) # doctest: +SKIP + + +Sometimes the selection criteria are more complex than a simple +comparison to a single value. For example, we may also be interested +in CBF and CBV images. In ``nilearn``, the ``dataset.neurovault`` module +provides ``IsIn`` which makes this easy : + + >>> from nilearn.datasets import neurovault + >>> fmri = fetch_neurovault( # doctest: +SKIP + ... modality=neurovault.IsIn('fMRI-BOLD', 'fMRI-CBF', 'fMRI-CBV'), # doctest: +SKIP + ... max_images=100) # doctest: +SKIP + +We could also have used ``Contains`` : + + >>> fmri = fetch_neurovault( # doctest: +SKIP + ... modality=neurovault.Contains('fMRI'), # doctest: +SKIP + ... max_images=7) # doctest: +SKIP + +If we need regular expressions, we can also use ``Pattern`` : + + >>> fmri = fetch_neurovault( # doctest: +SKIP + ... modality=neurovault.Pattern('fmri(-.*)?', neurovault.re.IGNORECASE), # doctest: +SKIP + ... max_images=7) # doctest: +SKIP + +The complete list of such special values available in +``nilearn.datasets.neurovault`` is: +``IsNull``, ``NotNull``, ``NotEqual``, ``GreaterOrEqual``, +``GreaterThan``, ``LessOrEqual``, ``LessThan``, ``IsIn``, ``NotIn``, +``Contains``, ``NotContains``, ``Pattern``. + +You can also use ``ResultFilter`` to easily express boolean logic +(AND, OR, XOR, NOT). + + +**If you need more complex filters**, and using dictionaries as shown above is +not convenient, you can express filters as functions. The parameter +``collection_filter`` should be a callable, which will be called once for each +collection. The sole argument will be a dictionary containing the metadata for +the collection. The filter should return ``True`` if the collection is to be +kept, and ``False`` if it is to be discarded. ``image_filter`` does the same +job for images. The default values for these parameters don't filter out +anything. +Using a filter rather than a dictionary, the first example becomes: + + >>> bold = fetch_neurovault( # doctest: +SKIP + ... image_filter=lambda meta: meta.get('modality') == 'fMRI-BOLD', # doctest: +SKIP + ... image_terms={}, max_images=7) # doctest: +SKIP + +.. note:: + + Even if you specify a filter as a function, the default filters for + ``image_terms`` and ``collection_terms`` still apply; pass an empty + dictionary if you want to disable them. Without ``image_terms={}`` in the + call above, parcellations, images not in MNI space, etc. would be still be + filtered out. + + +The example above can be rewritten using dictionaries, but in some cases you +will need to use ``image_filter`` or ``collection_filter``. For example, +suppose that for some weird reason you only want images that don't have too +many metadata fields - say, an image should only be kept if its metadata has +less than 50 fields. This cannot be done by simply comparing each key in a +metadata dictionary to a required value, so we need to write our own filter: + + >>> small_meta_images = fetch_neurovault(image_filter=lambda meta: len(meta) < 50, # doctest: +SKIP + ... 
max_images=7) # doctest: +SKIP + + +Output +------ + +Both functions return a dict-like object which exposes its items as +attributes. + +It contains: + + - ``images``, the paths to downloaded files. + - ``images_meta``, the metadata for the images in a list of + dictionaries. + - ``collections_meta``, the metadata for the collections. + - ``description``, a short description of the Neurovault dataset. + +Note to ``pandas`` users: passing ``images_meta`` or ``collections_meta`` +to the ``DataFrame`` constructor yields the expected result, with +images (or collections) as rows and metadata fields as columns. + +Neurosynth annotations +---------------------- + +It is also possible to ask Neurosynth to annotate the maps found on +Neurovault. Neurosynth is a platform for large-scale, automated +synthesis of fMRI data. It can be used to perform decoding. You can +learn more about Neurosynth at http://www.neurosynth.org. + +Neurosynth was introduced in [2]_. + +If you set the parameter ``fetch_neurosynth_words`` when calling +``fetch_neurovault`` or ``fetch_neurovault_ids``, we will also +download the annotations for the resulting images. They will be stored +as json files on your disk. The result will also contain (unless you +clear the ``vectorize_words`` parameter to save computation time): + + - ``vocabulary``, a list of words + - ``word_frequencies``, the weight of the words returned by + neurosynth.org for each image, such that the weight of word + ``vocabulary[j]`` for the image found in ``images[i]`` is + ``word_frequencies[i, j]`` + +Examples using Neurovault +------------------------- + + - :ref:`sphx_glr_auto_examples_05_advanced_plot_ica_neurovault.py` + Download images from Neurovault and extract some networks + using ICA. + + - :ref:`sphx_glr_auto_examples_05_advanced_plot_neurovault_meta_analysis.py` + Meta-analysis of "Stop minus go" studies available on + Neurovault. + +References +---------- + +.. [1] Gorgolewski KJ, Varoquaux G, Rivera G, Schwartz Y, Ghosh SS, + Maumet C, Sochat VV, Nichols TE, Poldrack RA, Poline J-B, + Yarkoni T and Margulies DS (2015) NeuroVault.org: a web-based + repository for collecting and sharing unthresholded + statistical maps of the human brain. Front. Neuroinform. 9:8. + doi: 10.3389/fninf.2015.00008 + +.. [2] Yarkoni, Tal, Russell A. Poldrack, Thomas E. Nichols, David + C. Van Essen, and Tor D. Wager. "Large-scale automated synthesis + of human functional neuroimaging data." Nature methods 8, no. 8 + (2011): 665-670. + + diff --git a/doc/conf.py b/doc/conf.py index 87278cc30b..f95d6f2abf 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -15,6 +15,18 @@ import sys import os import shutil +import sphinx +from distutils.version import LooseVersion + +# jquery is included in plotting package data because it is needed for +# interactive plots. It is also needed by the documentation, so we copy +# it to the themes/nilearn/static folder. +shutil.copy( + os.path.join(os.path.dirname(os.path.dirname(__file__)), + 'nilearn', 'plotting', 'data', 'js', 'jquery.min.js'), + os.path.join(os.path.dirname(__file__), 'themes', 'nilearn', 'static', + 'jquery.js')) + # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. 
If the directory @@ -26,20 +38,17 @@ # We also add the directory just above to enable local imports of nilearn sys.path.insert(0, os.path.abspath('..')) -try: - shutil.copy('../AUTHORS.rst', '.') -except IOError: - # When nose scans this file, it is not in the right working - # directory, and thus the line above fails - pass - # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', - 'sphinx.ext.pngmath', 'sphinx.ext.intersphinx', - 'numpy_ext.numpydoc', +extensions = ['sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + ('sphinx.ext.imgmath' # only available for sphinx >= 1.4 + if sphinx.version_info[:2] >= (1, 4) + else 'sphinx.ext.pngmath'), + 'sphinx.ext.intersphinx', + 'numpydoc.numpydoc', 'sphinx_gallery.gen_gallery', ] @@ -91,7 +100,10 @@ # List of documents that shouldn't be included in the build. #unused_docs = [] -exclude_patterns = ['tune_toc.rst', ] +exclude_patterns = ['tune_toc.rst', + 'includes/big_toc_css.rst', + 'includes/bigger_toc_css.rst', + ] # List of directories, relative to source directory, that shouldn't be # searched for source files. @@ -228,11 +240,6 @@ #latex_use_parts = False # Additional stuff for the LaTeX preamble. -latex_preamble = r""" -\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats} -\let\oldfootnote\footnote -\def\footnote#1{\oldfootnote{\small #1}} -""" # Documents to append as an appendix to all manuals. #latex_appendices = [] @@ -243,8 +250,24 @@ 'printindex': '', } +if LooseVersion(sphinx.__version__) < LooseVersion('1.5'): + latex_preamble = r""" + \usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats} + \let\oldfootnote\footnote + \def\footnote#1{\oldfootnote{\small #1}} + """ +else: + latex_elements['preamble'] = r""" + \usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats} + \let\oldfootnote\footnote + \def\footnote#1{\oldfootnote{\small #1}} + """ + + # If false, no module index is generated. -latex_use_modindex = False +if LooseVersion(sphinx.__version__) < LooseVersion('1.5'): + latex_use_modindex = False + latex_domain_indices = False # Show the page numbers in the references @@ -255,7 +278,7 @@ trim_doctests_flags = True -_python_doc_base = 'http://docs.python.org/2.7' +_python_doc_base = 'http://docs.python.org/3.6' # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { @@ -263,12 +286,12 @@ 'http://docs.scipy.org/doc/numpy': None, 'http://docs.scipy.org/doc/scipy/reference': None, 'http://matplotlib.org/': None, - 'http://scikit-learn.org/stable': None, + 'http://scikit-learn.org/0.18': None, 'http://nipy.org/nibabel': None, + 'http://pandas.pydata.org': None, #'http://scikit-image.org/docs/0.8.0/': None, #'http://docs.enthought.com/mayavi/mayavi/': None, #'http://statsmodels.sourceforge.net/': None, - #'http://pandas.pydata.org': None, } extlinks = { @@ -278,15 +301,23 @@ sphinx_gallery_conf = { 'doc_module' : 'nilearn', + 'backreferences_dir': os.path.join('modules', 'generated'), 'reference_url' : { 'nilearn': None, 'matplotlib': 'http://matplotlib.org', - 'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0', - 'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference', + 'numpy': 'http://docs.scipy.org/doc/numpy-1.11.0', + 'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference', 'nibabel': 'http://nipy.org/nibabel', - 'sklearn': 'http://scikit-learn.org/stable'} + 'sklearn': 'http://scikit-learn.org/0.18/', + 'pandas': 'http://pandas.pydata.org'} } +# Get rid of spurious warnings due to some interaction between +# autosummary and numpydoc. See +# https://github.com/phn/pytpm/issues/3#issuecomment-12133978 for more +# details +numpydoc_show_class_members = False + def touch_example_backreferences(app, what, name, obj, options, lines): # generate empty examples files, so that we don't get @@ -299,6 +330,8 @@ def touch_example_backreferences(app, what, name, obj, options, lines): # Add the 'copybutton' javascript, to hide/show the prompt in code # examples + + def setup(app): app.add_javascript('copybutton.js') app.connect('autodoc-process-docstring', touch_example_backreferences) diff --git a/doc/connectivity/connectome_extraction.rst b/doc/connectivity/connectome_extraction.rst index 1c79848eee..1c523ab4b2 100644 --- a/doc/connectivity/connectome_extraction.rst +++ b/doc/connectivity/connectome_extraction.rst @@ -33,7 +33,7 @@ covariance (or correlation) matrix for signals from different brain regions. The same information can be represented as a weighted graph, vertices being brain regions, weights on edges being covariances (gaussian graphical model). However, coefficients in a covariance matrix -reflects direct as well as indirect connections. Covariance matrices form +reflect direct as well as indirect connections. Covariance matrices form very dense brain connectomes, and it is rather difficult to extract from them only the direct connections between two regions. @@ -68,19 +68,19 @@ of the estimator:: >>> estimator.precision_ # doctest: +SKIP -.. |covariance| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_001.png - :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html +.. |covariance| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_001.png + :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html :scale: 40 -.. |precision| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_003.png - :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html +.. |precision| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_003.png + :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html :scale: 40 -.. 
|covariance_graph| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png - :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html +.. |covariance_graph| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png + :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html :scale: 55 -.. |precision_graph| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png - :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html +.. |precision_graph| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png + :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html :scale: 55 .. centered:: |covariance| |precision| @@ -99,7 +99,7 @@ of the estimator:: .. topic:: **Full example** See the following example for a full file running the analysis: - :ref:`sphx_glr_auto_examples_connectivity_plot_inverse_covariance_connectome.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_inverse_covariance_connectome.py` .. topic:: **Exercise: computing sparse inverse covariance** :class: green @@ -144,7 +144,7 @@ One specific case where this may be interesting is for group analysis across multiple subjects. Indeed, one challenge when doing statistics on the coefficients of a connectivity matrix is that the number of coefficients to compare grows quickly with the number of regions, and as -a result correcting for multiple comparisions takes a heavy toll on +a result correcting for multiple comparisons takes a heavy toll on statistical power. In such a situation, you can use the :class:`GroupSparseCovariance` and @@ -157,7 +157,7 @@ group analysis only on the non zero coefficients. .. topic:: **Full example** See the following example for a full file running the analysis: - :ref:`sphx_glr_auto_examples_connectivity_plot_multi_subject_connectome.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_multi_subject_connectome.py` .. topic:: **Exercise: computing the correlation matrix of rest fmri** @@ -196,8 +196,8 @@ Finally, we use the The results are the following: -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_simulated_connectome_001.png - :target: ../auto_examples/connectivity/plot_simulated_connectome.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_simulated_connectome_001.png + :target: ../auto_examples/03_connectivity/plot_simulated_connectome.html :scale: 60 The group-sparse estimation outputs matrices with @@ -211,7 +211,7 @@ information. .. topic:: **Full Example** The complete source code for this example can be found here: - :ref:`sphx_glr_auto_examples_connectivity_plot_simulated_connectome.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_simulated_connectome.py` .. [#] A lot of technical details on the algorithm used for group-sparse @@ -250,7 +250,7 @@ Deviations from this mean in the tangent space are provided in the connectivitie .. topic:: **Full example** See the following example for a full file running the analysis: - :ref:`sphx_glr_auto_examples_connectivity_plot_connectivity_measures.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_group_level_connectivity.py` .. 
topic:: **Exercise: computing connectivity in tangent space** :class: green diff --git a/doc/connectivity/functional_connectomes.rst b/doc/connectivity/functional_connectomes.rst index d31714f8ae..e816d1a081 100644 --- a/doc/connectivity/functional_connectomes.rst +++ b/doc/connectivity/functional_connectomes.rst @@ -1,8 +1,8 @@ .. _functional_connectomes: -=============================================================== +======================================================== Extracting times series to build a functional connectome -=============================================================== +======================================================== .. topic:: **Page summary** @@ -17,55 +17,56 @@ Extracting times series to build a functional connectome .. topic:: **References** - * `Varoquaux and Craddock, Learning and comparing functional - connectomes across subjects, NeuroImage 2013 - `_ + * `Varoquaux and Craddock, "Learning and comparing functional + connectomes across subjects", NeuroImage 2013 + `_. .. _parcellation_time_series: Time-series from a brain parcellation or "MaxProb" atlas -=========================================================== +======================================================== Brain parcellations --------------------- +------------------- .. currentmodule:: nilearn.datasets Regions used to extract the signal can be defined by a "hard" parcellation. For instance, the :mod:`nilearn.datasets` has functions to -download atlases forming reference parcellation, eg +download atlases forming reference parcellation, e.g., :func:`fetch_atlas_craddock_2012`, :func:`fetch_atlas_harvard_oxford`, :func:`fetch_atlas_yeo_2011`. -For instance to retrieve the Harvard-Oxford cortical parcelation, sampled -at 2mm, and with a threshold of a probability of .25:: +For instance to retrieve the Harvard-Oxford cortical parcellation, sampled +at 2mm, and with a threshold of a probability of 0.25:: from nilearn import datasets dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm') - atlas_filename, labels = dataset.maps, dataset.labels + atlas_filename = dataset.maps + labels = dataset.labels Plotting can then be done as:: from nilearn import plotting plotting.plot_roi(atlas_filename) -.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_atlas_001.png - :target: ../auto_examples/manipulating_visualizing/plot_atlas.html +.. image:: ../auto_examples/01_plotting/images/sphx_glr_plot_atlas_001.png + :target: ../auto_examples/01_plotting/plot_atlas.html :scale: 60 .. seealso:: - * The :ref:`plotting documentation ` + * The :ref:`plotting documentation `; - * The :ref:`dataset downloaders ` + * The :ref:`dataset downloaders `. Extracting signals on a parcellation ----------------------------------------- +------------------------------------ .. currentmodule:: nilearn.input_data To extract signal on the parcellation, the easiest option is to use the -:class:`nilearn.input_data.NiftiLabelsMasker`. As any ''maskers'' in +:class:`nilearn.input_data.NiftiLabelsMasker`. As any "maskers" in nilearn, it is a processing object that is created by specifying all the important parameters, but not the data:: @@ -86,17 +87,17 @@ obtain time series that capture well the functional interactions between regions, regressing out noise sources is indeed very important `[Varoquaux & Craddock 2013] `_. -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_signal_extraction_001.png - :target: ../auto_examples/connectivity/plot_signal_extraction.html +.. 
image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_signal_extraction_001.png + :target: ../auto_examples/03_connectivity/plot_signal_extraction.html :scale: 40 -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_signal_extraction_002.png - :target: ../auto_examples/connectivity/plot_signal_extraction.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_signal_extraction_002.png + :target: ../auto_examples/03_connectivity/plot_signal_extraction.html :scale: 40 .. topic:: **Full example** See the following example for a full file running the analysis: - :ref:`sphx_glr_auto_examples_connectivity_plot_signal_extraction.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_signal_extraction.py`. .. topic:: **Exercise: computing the correlation matrix of rest fmri** @@ -104,46 +105,48 @@ regions, regressing out noise sources is indeed very important Try using the information above to compute the correlation matrix of the first subject of the ADHD dataset downloaded with - :func:`nilearn.datasets.fetch_adhd` + :func:`nilearn.datasets.fetch_adhd`. **Hints:** * Inspect the '.keys()' of the object returned by - :func:`nilearn.datasets.fetch_adhd` + :func:`nilearn.datasets.fetch_adhd`. - * :func:`numpy.corrcoef` can be used to compute a correlation matrix - (check the shape of your matrices) + * :class:`nilearn.connectome.ConnectivityMeasure` can be used to compute + a correlation matrix (check the shape of your matrices). - * :func:`matplotlib.pyplot.imshow` can show a correlation matrix + * :func:`matplotlib.pyplot.imshow` can show a correlation matrix. - * The example above has the solution + * The example above has the solution. | Time-series from a probabilistic atlas -======================================== +====================================== Probabilistic atlases ----------------------- +--------------------- The definition of regions as by a continuous probability map captures better our imperfect knowledge of boundaries in brain images (notably because of inter-subject registration errors). One example of such an atlas well suited to resting-state data analysis is the `MSDL atlas -`_ (:func:`nilearn.datasets.fetch_atlas_msdl`). +`_ +(:func:`nilearn.datasets.fetch_atlas_msdl`). Probabilistic atlases are represented as a set of continuous maps, in a 4D nifti image. Visualization the atlas thus requires to visualize each of these maps, which requires accessing them with -:func:`nilearn.image.index_img` (see the :ref:`corresponding example `). +:func:`nilearn.image.index_img` (see the :ref:`corresponding example +`). -.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_overlay_001.png - :target: ../auto_examples/manipulating_visualizing/plot_overlay.html +.. image:: ../auto_examples/01_plotting/images/sphx_glr_plot_overlay_001.png + :target: ../auto_examples/01_plotting/plot_overlay.html :scale: 60 Extracting signals from a probabilistic atlas ----------------------------------------------- +--------------------------------------------- .. currentmodule:: nilearn.input_data @@ -164,33 +167,33 @@ The procedure is the same as with `brain parcellations `_ but using the :class:`NiftiMapsMasker`, and the same considerations on using confounds regressors apply. -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_001.png - :target: ../auto_examples/connectivity/plot_probabilistic_atlas_extraction.html +.. 
image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_001.png + :target: ../auto_examples/03_connectivity/plot_probabilistic_atlas_extraction.html :scale: 30 .. topic:: **Full example** A full example of extracting signals on a probabilistic: - :ref:`sphx_glr_auto_examples_connectivity_plot_probabilistic_atlas_extraction.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_probabilistic_atlas_extraction.py`. -.. topic:: **Exercise: correlation matrix of rest fmri on probabilistic atlas** +.. topic:: **Exercise: correlation matrix of rest fMRI on probabilistic atlas** :class: green Try to compute the correlation matrix of the first subject of the ADHD dataset downloaded with :func:`nilearn.datasets.fetch_adhd` with the MSDL atlas downloaded via - :func:`nilearn.datasets.fetch_atlas_msdl` + :func:`nilearn.datasets.fetch_atlas_msdl`. - **Hint:** The example above has the solution + **Hint:** The example above has the solution. A functional connectome: a graph of interactions -==================================================== +================================================ A square matrix, such as a correlation matrix, can also be seen as a -`"graph" `_: a set +`"graph" `_: a set of "nodes", connected by "edges". When these nodes are brain regions, and the edges capture interactions between them, this graph is a "functional connectome". @@ -200,7 +203,10 @@ function that take the matrix, and coordinates of the nodes in MNI space. In the case of the MSDL atlas (:func:`nilearn.datasets.fetch_atlas_msdl`), the CSV file readily comes with MNI coordinates for each region (see for instance example: -:ref:`sphx_glr_auto_examples_connectivity_plot_probabilistic_atlas_extraction.py`). +:ref:`sphx_glr_auto_examples_03_connectivity_plot_probabilistic_atlas_extraction.py`). + +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_002.png + :target: ../auto_examples/03_connectivity/plot_probabilistic_atlas_extraction.html .. For doctesting @@ -208,31 +214,35 @@ with MNI coordinates for each region (see for instance example: >>> from nilearn import datasets >>> atlas_filename = datasets.fetch_atlas_msdl().maps # doctest: +SKIP -For another atlas this information can be computed for each region with -the :func:`nilearn.plotting.find_xyz_cut_coords` function -(see example: -:ref:`sphx_glr_auto_examples_connectivity_plot_multi_subject_connectome.py`):: +As you can see, the correlation matrix gives a very "full" graph: every +node is connected to every other one. This is because it also captures +indirect connections. In the next section we will see how to focus on +direct connections only. - >>> from nilearn import image, plotting - >>> atlas_region_coords = [plotting.find_xyz_cut_coords(img) for img in image.iter_img(atlas_filename)] # doctest: +SKIP +A functional connectome: extracting coordinates of regions +========================================================== +For atlases without readily available label coordinates, center coordinates +can be computed for each region on hard parcellation or probabilistic atlases. + * For hard parcellation atlases (eg. :func:`nilearn.datasets.fetch_atlas_destrieux_2009`), + use the :func:`nilearn.plotting.find_parcellation_cut_coords` + function. See example: + :ref:`sphx_glr_auto_examples_03_connectivity_plot_atlas_comparison.py` + * For probabilistic atlases (eg. 
+A functional connectome: extracting coordinates of regions
+==========================================================
+
+For atlases without readily available label coordinates, center coordinates
+can be computed for each region of hard parcellation or probabilistic atlases.
+
+ * For hard parcellation atlases (e.g. :func:`nilearn.datasets.fetch_atlas_destrieux_2009`),
+   use the :func:`nilearn.plotting.find_parcellation_cut_coords`
+   function. See example:
+   :ref:`sphx_glr_auto_examples_03_connectivity_plot_atlas_comparison.py`
+
+ * For probabilistic atlases (e.g. :func:`nilearn.datasets.fetch_atlas_msdl`), use the
+   :func:`nilearn.plotting.find_probabilistic_atlas_cut_coords` function.
+   See example: :ref:`sphx_glr_auto_examples_03_connectivity_plot_multi_subject_connectome.py`::

-.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_002.png
-   :target: ../auto_examples/connectivity/plot_probabilistic_atlas_extraction.html

+   >>> from nilearn import plotting
+   >>> atlas_region_coords = plotting.find_probabilistic_atlas_cut_coords(atlas_filename) # doctest: +SKIP

-As you can see, the correlation matrix gives a very "full" graph: every
-node is connected to every other one. This is because is also captures
-indirect connections. In the next section we will see how to focus on
-only direct connections.

|

.. topic:: **References**

-   * `Zalesky NeuroImage 2012 "On the use of correlation as a measure of
-     network connectivity" `_
+   * `Zalesky et al., NeuroImage 2012, "On the use of correlation as a measure of
+     network connectivity" `_.

-   * `Varoquaux NeuroImage 2013, Learning and comparing functional
-     connectomes across subjects,
-     `_
+   * `Varoquaux et al., NeuroImage 2013, "Learning and comparing functional
+     connectomes across subjects" `_.

diff --git a/doc/connectivity/index.rst b/doc/connectivity/index.rst
index 71f2a99cf9..ce88855ea0 100644
--- a/doc/connectivity/index.rst
+++ b/doc/connectivity/index.rst
@@ -22,6 +22,6 @@ and networks, via resting-state networks or parcellations.
    functional_connectomes.rst
    connectome_extraction.rst
    resting_state_networks.rst
-   parcellating.rst
    region_extraction.rst
+   parcellating.rst

diff --git a/doc/connectivity/parcellating.rst b/doc/connectivity/parcellating.rst
index cdcd732481..406b4dc2ee 100644
--- a/doc/connectivity/parcellating.rst
+++ b/doc/connectivity/parcellating.rst
@@ -1,51 +1,71 @@
.. _parcellating_brain:

-==================================
-Parcellating the brain in regions
-==================================
+==============================================
+Clustering to parcellate the brain in regions
+==============================================

-.. topic:: **Page summary**
+This page discusses how clustering can be used to parcellate the brain
+into homogeneous regions from functional imaging data.

-   This page demonstrates how clustering can be used to parcellate the
-   brain into homogeneous regions from resting-state time series.
+|

+.. topic:: **Reference**

-A resting-state dataset
-========================
+   A big-picture reference on the use of clustering for brain
+   parcellations:
+
+   Thirion et al., `"Which fMRI clustering gives good brain
+   parcellations?"
+   `_,
+   Frontiers in Neuroscience 8.167 (2014): 13.

+Data loading: Resting-state data
+=================================

.. currentmodule:: nilearn.datasets

-Here, we use a `resting-state `_
-dataset from test-retest study performed at NYU. Details on the data
-can be found in the documentation for the downloading function
-:func:`fetch_nyu_rest`.
+Clustering is commonly applied to resting-state data, but any brain
+functional data will give rise to a functional parcellation, capturing
+intrinsic brain architecture in the case of resting-state data.
+In the examples, we use resting-state data downloaded with the function
+:func:`fetch_adhd` (see :ref:`loading_data`), as sketched below.
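+A minimal sketch of this loading step (illustrative, one subject only)::
+
+    >>> from nilearn import datasets
+    >>> adhd = datasets.fetch_adhd(n_subjects=1)  # doctest: +SKIP
+    >>> func_filename = adhd.func[0]  # path to a 4D functional image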
+
+Applying clustering
+====================

-Preprocessing: loading and masking
-==================================

+.. topic:: **Which clustering to use**

-We fetch the data from Internet and load it with a dedicated function
-(see :ref:`loading_data`):
+   The question of which clustering method to use is in itself subject
+   to debate. There are many clustering methods; their computational
+   cost will vary, as well as their results. A `well-cited empirical
+   comparison paper, Thirion et al. 2014
+   `_
+   suggests that:

-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
-   :start-after: ### Load nyu_rest dataset #####################################################
-   :end-before: ### Ward ######################################################################
+   * For a large number of clusters, it is preferable to use Ward
+     agglomerative clustering with spatial constraints

-No mask is given with the data so we let the masker compute one.
-The result is a niimg from which we extract a numpy array that is
-used to mask our original images.
+   * For a small number of clusters, it is preferable to use Kmeans
+     clustering after spatially-smoothing the data.

-Applying Ward clustering
-==========================
+   Both clustering algorithms (as well as many others) are provided by
+   the :class:`nilearn.regions.Parcellations` object, and a full
+   code example can be found
+   :ref:`here`.
+   Ward clustering is the easiest to use, as it can be done with the Feature
+   agglomeration object. It is also quite fast. We detail it below.
+
+|

**Compute a connectivity matrix**

Before applying Ward's method, we compute a spatial neighborhood matrix,
aka connectivity matrix. This is useful to constrain clusters to form
contiguous parcels (see `the scikit-learn documentation
-`_)
+`_)
+
+This is done from the mask computed by the masker: a niimg from which we
+extract a numpy array and then the connectivity matrix.

-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
-   :start-after: # Compute connectivity matrix: which voxel is connected to which
-   :end-before: # Computing the ward for the first time, this is long...

**Ward clustering principle**

Ward's algorithm is a hierarchical clustering algorithm: it
@@ -62,81 +82,63 @@ the *memory* parameter is used to cache the computed component tree. You
can give it either a *joblib.Memory* instance or the name of a directory
used for caching.

-Running the Ward algorithm
---------------------------
-Here we simply launch Ward's algorithm to find 1000 clusters and we time it.
+.. note::

-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
-   :start-after: # Computing the ward for the first time, this is long...
-   :end-before: # Compute the ward with more clusters, should be faster
+   Ward clustering computing 1000 parcels typically runs in about 10
+   seconds. Admittedly, this is very fast.

-This runs in about 10 seconds (depending on your computer configuration). Now,
-we are not satisfied of the result and we want to cluster the picture in 2000
-elements.
+.. note::

-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
-   :start-after: # Compute the ward with more clusters, should be faster
-   :end-before: ### Show result ###############################################################
+   The steps detailed above, such as computing the connectivity matrix for
+   Ward, caching and clustering, are all implemented within the
+   :class:`nilearn.regions.Parcellations` object, as sketched below.

-Now that the component tree has been computed, computation is must faster
-thanks to caching. You should have the result in less than 1 second.
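+For concreteness, a hedged sketch of this object on the data loaded above
+(parameter values are purely illustrative)::
+
+    >>> from nilearn.regions import Parcellations  # doctest: +SKIP
+    >>> ward = Parcellations(method='ward', n_parcels=1000,
+    ...                      standardize=False, smoothing_fwhm=2.,
+    ...                      memory='nilearn_cache', memory_level=1)  # doctest: +SKIP
+    >>> ward.fit(func_filename)  # doctest: +SKIP
+    >>> # Hard parcellation, as a 3D label image
+    >>> labels_img = ward.labels_img_  # doctest: +SKIP
+    >>> # Reduced signals (one average time course per parcel), and back-projection
+    >>> fmri_reduced = ward.transform(func_filename)  # doctest: +SKIP
+    >>> fmri_compressed = ward.inverse_transform(fmri_reduced)  # doctest: +SKIP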
+.. seealso::

-Post-Processing and visualizing the parcels
-============================================
+   * A function :func:`nilearn.regions.connected_label_regions`, which can be useful to
+     break down connected components into separate regions. This is needed, for
+     instance, for clusters defined using KMeans, whereas it is not necessary for
+     Ward clustering, due to its spatial connectivity constraints.

-Unmasking
----------
-After applying the ward, we must unmask the data. This can be done simply :
+Using and visualizing the resulting parcellation
+==================================================

-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
-   :start-after: # Unmask data
-   :end-before: # Display the labels
+.. currentmodule:: nilearn.input_data

-You can see that masked data is filled with -1 values. This is done for the
-sake of visualization. In fact, clusters are labeled from 0 to
-(n_clusters - 1). By putting every background value to -1, we assure that
-they will not mess with the visualization.
+Visualizing the parcellation
+-----------------------------

-Label visualization
--------------------
+The labels of the parcellation are found in the `labels_img_` attribute of
+the :class:`nilearn.regions.Parcellations` object after fitting it to the data
+using *ward.fit*. We use the result directly for visualization.

To visualize the clusters, we assign a random color to each cluster in the
label image.

-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
-   :start-after: ### Show result ###############################################################
-   :end-before: # Display the original data

-.. figure:: ../auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_001.png
-   :target: ../auto_examples/connectivity/plot_rest_clustering.html
+.. figure:: ../auto_examples/03_connectivity/images/sphx_glr_plot_rest_parcellations_001.png
+   :target: ../auto_examples/03_connectivity/plot_rest_parcellations.html
   :align: center
   :scale: 80

-Compressed picture
-------------------
+Compressed representation
+--------------------------

-By transforming a picture in a new one in which the value of each voxel
-is the mean value of the cluster it belongs to, we are creating a
-compressed version of the original picture. We can obtain this
-representation thanks to a two-step procedure :
+The clustering can be used to transform the data into a smaller
+representation, taking the average on each parcel:

- call *ward.transform* to obtain the mean value of each cluster
  (for each scan)
- call *ward.inverse_transform* on the previous result to turn it back into
  the masked picture shape

-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
-   :start-after: # Display the original data

-.. |left_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_002.png
-   :target: ../auto_examples/connectivity/plot_rest_clustering.html
+.. |left_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_rest_parcellations_002.png
+   :target: ../auto_examples/03_connectivity/plot_rest_parcellations.html
   :width: 49%

-.. |right_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_003.png
-   :target: ../auto_examples/connectivity/plot_rest_clustering.html
+.. 
|right_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_rest_parcellations_003.png + :target: ../auto_examples/03_connectivity/plot_rest_parcellations.html :width: 49% |left_img| |right_img| @@ -144,3 +146,12 @@ representation thanks to a two-step procedure : We can see that using only 2000 parcels, the original image is well approximated. +| + +.. topic:: **Example code** + + All the steps discussed in this section can be seen implemented in + :ref:`a full code example + `. + + diff --git a/doc/connectivity/region_extraction.rst b/doc/connectivity/region_extraction.rst index 3b5ccf9881..1b51e04ee2 100644 --- a/doc/connectivity/region_extraction.rst +++ b/doc/connectivity/region_extraction.rst @@ -6,10 +6,10 @@ Region Extraction for better brain parcellations .. topic:: **Page summary** - This section shows how to use Region Extractor to extract each connected - brain regions/components into a separate brain activation regions and also + This section shows how to use Region Extractor to extract brain connected + regions/components into a separate brain activation region and also shows how to learn functional connectivity interactions between each - separate regions. + separate region. .. contents:: **Contents** :local: @@ -34,50 +34,50 @@ which is already preprocessed and publicly available at datasets. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py - :start-after: # utilities +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py + :start-after: # We use nilearn's datasets downloading utilities :end-before: ################################################################################ .. currentmodule:: nilearn.decomposition -Data decomposition using Canonical ICA -====================================== +Brain maps using Dictionary Learning +==================================== -Here, we use :class:`CanICA`, a multi subject model to decompose previously -fetched multi subjects datasets. We do this by setting the parameters in the -object and calling fit on the functional filenames without necessarily -converting each filename to Nifti1Image object. +Here, we use object :class:`DictLearning`, a multi subject model to decompose multi +subjects fMRI datasets into functionally defined maps. We do this by setting +the parameters and calling the object fit on the filenames of datasets without +necessarily converting each file to Nifti1Image object. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py - :start-after: # decomposition module - :end-before: # Visualization +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py + :start-after: # object and fit the model to the functional datasets + :end-before: # Visualization of resting state networks .. currentmodule:: nilearn.plotting -Visualization of Canonical ICA maps -=================================== +Visualization of Dictionary Learning maps +========================================= -Showing ICA maps stored in components_img using nilearn plotting utilities. +Showing maps stored in components_img using nilearn plotting utilities. Here, we use :func:`plot_prob_atlas` for easy visualization of 4D atlas maps -onto the anatomical standard template. Each ICA map is displayed in different +onto the anatomical standard template. Each map is displayed in different color and colors are random and automatically picked. -.. 
literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py - :start-after: # Show ICA maps by using plotting utilities +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py + :start-after: # Show networks using plotting utilities :end-before: ################################################################################ -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_001.png - :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_001.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html :scale: 60 .. currentmodule:: nilearn.regions -Region Extraction with CanICA maps -================================== +Region Extraction with Dictionary Learning maps +=============================================== We use object :class:`RegionExtractor` for extracting brain connected regions -from ICA maps into separated brain activation regions with automatic +from dictionary maps into separated brain activation regions with automatic thresholding strategy selected as thresholding_strategy='ratio_n_voxels'. We use thresholding strategy to first get foreground information present in the maps and then followed by robust region extraction on foreground information using @@ -93,9 +93,9 @@ regions. We control the small spurious regions size by thresholding in voxel uni to adapt well to the resolution of the image. Please see the documentation of nilearn.regions.connected_regions for more details. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py - :start-after: # regions, both can be done by importing Region Extractor from regions module - :end-before: # Visualization +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py + :start-after: # maps, less the threshold means that more intense non-voxels will be survived. + :end-before: # Visualization of region extraction results .. currentmodule:: nilearn.plotting @@ -107,12 +107,12 @@ for visualizing extracted regions on a standard template. Each extracted brain region is assigned a color and as you can see that visual cortex area is extracted quite nicely into each hemisphere. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py - :start-after: # Show region extraction results +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py + :start-after: # Visualization of region extraction results :end-before: ################################################################################ -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_002.png - :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_002.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html :scale: 60 .. currentmodule:: nilearn.connectome @@ -133,9 +133,9 @@ shape=(176, 23) where 176 is the length of time series and 23 is the number of extracted regions. Likewise, we have a total of 20 subject specific time series signals. The third step, we compute the mean correlation across all subjects. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py +.. 
literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
   :start-after: # To estimate correlation matrices we import connectome utilities from nilearn
-   :end-before: # Visualization
+   :end-before: #################################################################
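+In spirit, the included code does something like the following hedged
+sketch, where ``subjects_timeseries`` stands for the list of per-subject
+region-signal arrays extracted above::
+
+    >>> from nilearn.connectome import ConnectivityMeasure  # doctest: +SKIP
+    >>> connectome_measure = ConnectivityMeasure(kind='correlation')  # doctest: +SKIP
+    >>> # One (n_regions, n_regions) matrix per subject, stacked into an array
+    >>> correlation_matrices = connectome_measure.fit_transform(subjects_timeseries)  # doctest: +SKIP
+    >>> mean_correlation_matrix = correlation_matrices.mean(axis=0)  # doctest: +SKIP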
.. currentmodule:: nilearn.plotting

@@ -148,16 +148,16 @@ automatically the coordinates required, for plotting connectome relations.
The left image shows the correlations in matrix form, and the right image shows
the connectivity relations between brain regions, plotted using :func:`plot_connectome`.

-.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py
-   :start-after: # Import image utilities in utilising to operate on 4th dimension
+.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
+   :start-after: # Plot resulting connectomes
   :end-before: ################################################################################

-.. |matrix| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_003.png
-   :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html
+.. |matrix| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_003.png
+   :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
   :scale: 60

-.. |connectome| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_004.png
-   :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html
+.. |connectome| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_004.png
+   :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
   :scale: 60

.. centered:: |matrix| |connectome|

@@ -165,22 +165,22 @@ connectivity relations to brain regions plotted using :func:`plot_connectome`

Validating results
==================

-Showing only Default Mode Network (DMN) regions before and after region
-extraction by manually identifying the index of DMN in ICA decomposed maps.
+Showing the regions of one specific network, before and after region extraction.

-Left image displays the DMN regions without region extraction and right image
-displays the DMN regions after region extraction. Here, we can validate that
-the DMN regions are nicely separated displaying each extracted region in different color.
+The left image displays the regions of one specific resting network without region
+extraction, and the right image displays the same regions split apart after region
+extraction. Here, we can validate that the regions are nicely separated, with each
+extracted region identified by a different color.

-.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py
-   :start-after: # First we plot DMN without region extraction, interested in only index=[3]
+.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
+   :start-after: # First, we plot a network of index=4 without region extraction

-.. |dmn| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_005.png
-   :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html
+.. |dmn| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_005.png
+   :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
   :scale: 50

-.. |dmn_reg| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_006.png
-   :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html
+.. |dmn_reg| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_006.png
+   :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
   :scale: 50

.. centered:: |dmn| |dmn_reg|

@@ -188,4 +188,4 @@ the DMN regions are nicely separated displaying each extracted region in differe
.. seealso::

    The full code can be found as an example:
-   :ref:`sphx_glr_auto_examples_connectivity_plot_extract_regions_canica_maps.py`
+   :ref:`sphx_glr_auto_examples_03_connectivity_plot_extract_regions_dictlearning_maps.py`

diff --git a/doc/connectivity/resting_state_networks.rst b/doc/connectivity/resting_state_networks.rst
index ab40765733..b4c6ffb104 100644
--- a/doc/connectivity/resting_state_networks.rst
+++ b/doc/connectivity/resting_state_networks.rst
@@ -34,9 +34,9 @@ functions to fetch data from Internet and get the filenames (:ref:`more on
data loading `):

-.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py
+.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py
   :start-after: # First we load the ADHD200 data
-   :end-before: # Here we apply CanICA on the data
+   :end-before: ####################################################################

Applying CanICA
---------------
@@ -47,12 +47,19 @@ perform a multi-subject ICA decomposition following the CanICA model.
As with every object in nilearn, we give its parameters at construction,
and then fit it on the data.

-.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py
+.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py
   :start-after: # Here we apply CanICA on the data
-   :end-before: # To visualize we plot the outline of all components on one figure
+   :end-before: ####################################################################

-The components estimated are found as the `components_` attribute of the
-object.
+The estimated components are found as the `components_img_` attribute
+of the object; it is a 4D Nifti image.
+
+.. note::
+   The `components_img_` attribute is available from version 0.4.1 onwards,
+   and can be visualized directly, without any additional unmasking step.
+   With older versions, a components image can be obtained by unmasking the
+   `components_` attribute. See :ref:`section Inverse transform:
+   unmasking data `.

Visualizing the results
-----------------------
@@ -61,23 +68,23 @@ We can visualize the components as in the previous examples. The first
plot shows a map generated from all the components. Then we plot an axial
cut for each component separately.

-.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py
+.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py
   :start-after: # To visualize we plot the outline of all components on one figure
-   :end-before: # Finally, we plot the map for each ICA component separately
+   :end-before: ####################################################################

-.. figure:: ../auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_001.png
+.. 
figure:: ../auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_001.png :align: center - :target: ../auto_examples/connectivity/plot_canica_resting_state.html + :target: ../auto_examples/03_connectivity/plot_canica_resting_state.html Finally, we can plot the map for different ICA components separately: -.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py +.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py :start-after: # Finally, we plot the map for each ICA component separately -.. |left_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_003.png +.. |left_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_003.png :width: 23% -.. |right_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_004.png +.. |right_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_004.png :width: 23% .. centered:: |left_img| |right_img| @@ -85,7 +92,7 @@ Finally, we can plot the map for different ICA components separately: .. seealso:: The full code can be found as an example: - :ref:`sphx_glr_auto_examples_connectivity_plot_canica_resting_state.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_canica_resting_state.py` .. note:: @@ -106,47 +113,46 @@ good extracted maps. .. topic:: **References** - * Gael Varoquaux et al. `Multi-subject dictionary learning to segment an atlas of brain spontaneous activity `_, - IPMI 2011, pp. 562-573, Lecture - Notes in Computer Science + * Arthur Mensch et al. `Compressed online dictionary learning for fast resting-state fMRI decomposition `_, + ISBI 2016, Lecture Notes in Computer Science Applying DictLearning --------------------- -:class:'DictLearning' is a ready-to-use class with the same interface as CanICA. +:class:`DictLearning` is a ready-to-use class with the same interface as CanICA. Sparsity of output map is controlled by a parameter alpha: using a larger alpha yields sparser maps. -.. literalinclude:: ../../examples/connectivity/plot_compare_resting_state_decomposition.py +.. literalinclude:: ../../examples/03_connectivity/plot_compare_resting_state_decomposition.py :start-after: # Dictionary learning - :end-before: # CanICA + :end-before: ############################################################################### We can fit both estimators to compare them -.. literalinclude:: ../../examples/connectivity/plot_compare_resting_state_decomposition.py +.. literalinclude:: ../../examples/03_connectivity/plot_compare_resting_state_decomposition.py :start-after: # Fit both estimators - :end-before: # Visualize the results + :end-before: ############################################################################### Visualizing the results ----------------------- 4D plotting offers an efficient way to compare both resulting outputs -.. literalinclude:: ../../examples/connectivity/plot_compare_resting_state_decomposition.py +.. literalinclude:: ../../examples/03_connectivity/plot_compare_resting_state_decomposition.py :start-after: # Visualize the results -.. |left_img_decomp| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_001.png - :target: ../auto_examples/plot_compare_resting_state_decomposition.html +.. 
|left_img_decomp| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_001.png + :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html :width: 50% -.. |right_img_decomp| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_003.png - :target: ../auto_examples/plot_compare_resting_state_decomposition.html +.. |right_img_decomp| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_003.png + :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html :width: 50% -.. |left_img_decomp_single| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_002.png - :target: ../auto_examples/plot_compare_resting_state_decomposition.html +.. |left_img_decomp_single| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_002.png + :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html :width: 50% -.. |right_img_decomp_single| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_004.png - :target: ../auto_examples/plot_compare_resting_state_decomposition.html +.. |right_img_decomp_single| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_004.png + :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html :width: 50% @@ -163,4 +169,4 @@ classification tasks. .. seealso:: The full code can be found as an example: - :ref:`sphx_glr_auto_examples_connectivity_plot_compare_resting_state_decomposition.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_compare_resting_state_decomposition.py` diff --git a/doc/contributing.rst b/doc/contributing.rst new file mode 100644 index 0000000000..e582053ea0 --- /dev/null +++ b/doc/contributing.rst @@ -0,0 +1 @@ +.. include:: ../CONTRIBUTING.rst diff --git a/doc/decoding/decoding_intro.rst b/doc/decoding/decoding_intro.rst new file mode 100644 index 0000000000..4b92f5ff76 --- /dev/null +++ b/doc/decoding/decoding_intro.rst @@ -0,0 +1,528 @@ +.. for doctests to run, we need to define variables that are define in + the literal includes + >>> import numpy as np + >>> from sklearn import datasets + >>> iris = datasets.load_iris() + >>> fmri_masked = iris.data + >>> target = iris.target + >>> session = np.ones_like(target) + >>> n_samples = len(target) + +.. Remove doctest: +SKIP at LDA while dropping support for sklearn older than + versions 0.17 + +.. _decoding_intro: + +============================= +An introduction to decoding +============================= + +This section gives an introduction to the main concept of decoding: +predicting from brain images. + +The discussion and examples are articulated on the analysis of the Haxby +2001 dataset, showing how to predict from fMRI images the stimuli that +the subject is viewing. However the process is the same in other settings +predicting from other brain imaging modalities, for instance predicting +phenotype or diagnostic status from VBM (Voxel Based Morphometry) maps +(as illustrated in :ref:`a more complex example +`), or from FA maps +to capture diffusion mapping. + + +.. 
contents:: **Contents**
+    :local:
+    :depth: 1
+
+
+Loading and preparing the data
+===============================
+
+The Haxby 2001 experiment
+-------------------------
+
+In the Haxby experiment,
+subjects were presented visual stimuli from different categories. We are
+going to predict which category the subject is seeing from the fMRI
+activity recorded in masks of the ventral stream. Significant prediction
+shows that the signal in the region contains information on the
+corresponding category.
+
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_stimuli_004.png
+   :target: ../auto_examples/02_decoding/plot_haxby_stimuli.html
+   :scale: 30
+   :align: left
+
+   Face stimuli
+
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_stimuli_002.png
+   :target: ../auto_examples/02_decoding/plot_haxby_stimuli.html
+   :scale: 30
+   :align: left
+
+   Cat stimuli
+
+.. figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_haxby_masks_001.png
+   :target: ../auto_examples/01_plotting/plot_haxby_masks.html
+   :scale: 30
+   :align: left
+
+   Masks
+
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_full_analysis_001.png
+   :target: ../auto_examples/02_decoding/plot_haxby_full_analysis.html
+   :scale: 35
+   :align: left
+
+   Decoding scores per mask
+
+_____
+
+.. topic:: **fMRI: using beta maps of a first-level analysis**
+
+   The Haxby experiment is unusual because the experimental paradigm is
+   made of many blocks of continuous stimulation. Most cognitive
+   experiments have a more complex temporal structure, with rich sequences
+   of events.
+
+   The standard approach to decoding consists in fitting a first-level
+   GLM to retrieve one response map (a beta map) per trial. This is
+   sometimes known as "beta-series regressions" (see Mumford et al,
+   *Deconvolving bold activation in event-related designs for multivoxel
+   pattern classification analyses*, NeuroImage 2012). These maps can
+   then be input to the decoder as below, predicting the conditions
+   associated with each trial.
+
+   For simplicity, we will work on the raw time-series of the data.
+   However, **it is strongly recommended that you fit a first-level model to
+   include an HRF model and isolate the responses from various
+   confounds**.
+
+
+Loading the data into nilearn
+-----------------------------
+
+.. topic:: **Full code example**
+
+   The documentation here just gives the big idea. A full code example,
+   with explanation, can be found in
+   :ref:`sphx_glr_auto_examples_plot_decoding_tutorial.py`.
+
+* **Starting an environment**: Launch IPython via "ipython --matplotlib"
+  in a terminal, or use the Jupyter notebook.
+
+* **Retrieving the data**: In the tutorial, we load the data using the nilearn
+  data downloading function, :func:`nilearn.datasets.fetch_haxby`.
+  However, all this function does is download the data and return
+  paths to the files downloaded on the disk. To input your own data to
+  nilearn, you can pass in the paths to your own files
+  (:ref:`more on data input `).
+
+* **Loading the behavioral labels**: Behavioral information is often stored
+  in a text file such as a CSV, and must be loaded with
+  **numpy.recfromcsv** or `pandas `_.
+
+* **Extracting the fMRI data**: we then use the
+  :class:`nilearn.input_data.NiftiMasker`: we extract only the voxels on
+  the mask of the ventral temporal cortex that comes with the data,
+  applying the `mask_vt` mask to
+  the 4D fMRI data. The resulting data is then a matrix of shape
+  (n_timepoints, n_voxels)
+  (see :ref:`mask_4d_2_3d` for a discussion on using masks); a minimal
+  sketch of this step follows the list below.
+
+* **Sample mask**: Masking some of the time points may be useful to
+  restrict to a specific pair of conditions (*e.g.* cats versus faces).
+
+.. note::
+
+   Seemingly minor data preparation can matter a lot on the final score,
+   for instance standardizing the data.
+
+
+.. seealso::
+
+   * :ref:`loading_data`
+   * :ref:`masking`
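+In code, the loading steps above look roughly as follows (a minimal,
+illustrative sketch; the file names come from the ``haxby_dataset`` bunch
+returned by :func:`nilearn.datasets.fetch_haxby`)::
+
+    >>> from nilearn import datasets
+    >>> from nilearn.input_data import NiftiMasker
+    >>> haxby_dataset = datasets.fetch_haxby()  # doctest: +SKIP
+    >>> masker = NiftiMasker(mask_img=haxby_dataset.mask_vt[0], standardize=True)  # doctest: +SKIP
+    >>> # fmri_masked is a 2D array of shape (n_timepoints, n_voxels)
+    >>> fmri_masked = masker.fit_transform(haxby_dataset.func[0])  # doctest: +SKIP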
+
+
+Performing a simple decoding analysis
+=======================================
+
+The prediction engine
+---------------------
+
+An estimator object
+...................
+
+To perform decoding we need to use an estimator from the `scikit-learn
+` machine-learning library. This object can
+predict a condition label **y** given a set **X** of imaging data.
+
+A simple yet performant choice is the `Support Vector Classifier
+`_ (or SVC) with a
+linear kernel. The corresponding class, :class:`sklearn.svm.SVC`, needs
+to be imported from scikit-learn.
+
+Note that the documentation of the object details all parameters. In
+IPython, it can be displayed as follows::
+
+    In [10]: svc?
+    Type:             SVC
+    Base Class:       
+    String Form:
+    SVC(kernel=linear, C=1.0, probability=False, degree=3, coef0=0.0, tol=0.001,
+      cache_size=200, shrinking=True, gamma=0.0)
+    Namespace:        Interactive
+    Docstring:
+        C-Support Vector Classification.
+        Parameters
+        ----------
+        C : float, optional (default=1.0)
+            penalty parameter C of the error term.
+    ...
+
+.. seealso::
+
+   the `scikit-learn documentation on SVMs
+   `_
+
+
+Applying it to data: fit (train) and predict (test)
+...................................................
+
+The prediction objects have two important methods:
+
+- a `fit` function that "learns" the parameters of the model from the data.
+  Thus, we need to give some training data to `fit`.
+- a `predict` function that "predicts" a target from new data.
+  Here, we just have to give the new set of images (as the target should be
+  unknown):
+
+.. warning::
+
+    **Do not predict on data used by the fit: this would yield misleadingly optimistic scores.**
+
+.. for doctests (smoke testing):
+   >>> from sklearn.svm import SVC
+   >>> svc = SVC()
+
+Measuring prediction performance
+--------------------------------
+
+Cross-validation
+................
+
+We cannot measure a prediction error on the same set of data that we have
+used to fit the estimator: it would be much easier than on new data, and
+the result would be meaningless. We need to use a technique called
+*cross-validation* to split the data into different sets, called "folds",
+in a `K-Fold strategy
+`_.
+
+.. for doctests:
+   >>> cv = 2
+
+There is a specific function,
+:func:`sklearn.model_selection.cross_val_score`, that computes for you
+the score for the different folds of cross-validation::
+
+    >>> from sklearn.model_selection import cross_val_score  # doctest: +SKIP
+    >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=5)  # doctest: +SKIP
+
+`cv=5` stipulates a 5-fold cross-validation. Note that this function lives
+in `sklearn.model_selection` in recent versions of scikit-learn (it used
+to be in `sklearn.cross_validation`).
+
+You can speed up the computation by using n_jobs=-1, which will spread
+the computation equally across all processors (but might not work under
+Windows)::
+
+    >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=5, n_jobs=-1, verbose=10) #doctest: +SKIP
+
+**Prediction accuracy**: We can take a look at the results of the
+`cross_val_score` function::
+
+    >>> print(cv_scores)  # doctest: +SKIP
+    [0.72727272727272729, 0.46511627906976744, 0.72093023255813948, 0.58139534883720934, 0.7441860465116279]
+
+This is simply the prediction score for each fold, i.e. the fraction of
+correct predictions on the left-out data.
+
+Choosing a good cross-validation strategy
+.........................................
+
+There are many cross-validation strategies possible, including K-Fold or
+leave-one-out. When choosing a strategy, keep in mind that:
+
+* The test set should be as little correlated as possible with the train
+  set
+* The test set needs to have enough samples to enable a good measure of
+  the prediction error (a rule of thumb is to use 10 to 20% of the data).
+
+In these regards, leave-one-out is often one of the worst options (see
+Varoquaux et al, *Assessing and tuning brain decoders: cross-validation,
+caveats, and guidelines*, NeuroImage 2017).
+
+Here, in the Haxby example, we are going to leave a session out, in order
+to have a test set independent from the train set. For this, we are going
+to use the session label, present in the behavioral data file, and
+:class:`sklearn.model_selection.LeaveOneGroupOut`, as sketched below.
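+A hedged sketch of this session-wise cross-validation (``session`` stands
+for the session label array read from the behavioral file; the names are
+illustrative)::
+
+    >>> from sklearn.model_selection import LeaveOneGroupOut  # doctest: +SKIP
+    >>> cv = LeaveOneGroupOut()  # doctest: +SKIP
+    >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv, groups=session)  # doctest: +SKIP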
+
+.. note::
+
+   Full code for the above can be found on
+   :ref:`sphx_glr_auto_examples_plot_decoding_tutorial.py`.
+
+|
+
+.. topic:: **Exercise**
+   :class: green
+
+   Compute the mean prediction accuracy using `cv_scores`.
+
+.. topic:: Solution
+
+   >>> classification_accuracy = np.mean(cv_scores)  # doctest: +SKIP
+   >>> classification_accuracy  # doctest: +SKIP
+   0.76851...
+
+For discriminating human faces from cats, we measure a total prediction
+accuracy of *77%* across the different sessions.
+
+Choice of the prediction accuracy measure
+.........................................
+
+The default metric used for measuring errors is the accuracy score, i.e.
+the number of total errors. It is not always a sensible metric,
+especially in the case of very imbalanced classes, as in such situations
+choosing the dominant class can achieve a low number of errors.
+
+Other metrics, such as the AUC (Area Under the Curve, for the ROC: the
+Receiver Operating Characteristic), can be used::
+
+    >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv, scoring='roc_auc')  # doctest: +SKIP
+
+.. seealso::
+
+   the `list of scoring options
+   `_
+
+Measuring the chance level
+..........................
+
+**Dummy estimators**: The simplest way to measure prediction performance
+at chance is to use a *"dummy"* classifier,
+:class:`sklearn.dummy.DummyClassifier` (purely random)::
+
+    >>> from sklearn.dummy import DummyClassifier
+    >>> null_cv_scores = cross_val_score(DummyClassifier(), fmri_masked, target, cv=cv)  # doctest: +SKIP
+
+**Permutation testing**: A more controlled way, but slower, is to do
+permutation testing on the labels, with
+:func:`sklearn.model_selection.permutation_test_score`::
+
+    >>> from sklearn.model_selection import permutation_test_score
+    >>> null_cv_scores = permutation_test_score(svc, fmri_masked, target, cv=cv)  # doctest: +SKIP
+
+|
+
+.. topic:: **Putting it all together**
+
+   The :ref:`ROI-based decoding example
+   ` does a decoding analysis per
+   mask, giving the f1-score of the prediction for each object.
+
+   It uses all the notions presented above, with a ``for`` loop to iterate
+   over masks and categories, and Python dictionaries to store the
+   scores.
+
+
+.. figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_haxby_masks_001.png
+   :target: ../auto_examples/01_plotting/plot_haxby_masks.html
+   :scale: 55
+   :align: left
+
+   Masks
+
+
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_full_analysis_001.png
+   :target: ../auto_examples/02_decoding/plot_haxby_full_analysis.html
+   :scale: 70
+   :align: left
+
+
+
+Visualizing the decoder's weights
+---------------------------------
+
+We can visualize the weights of the decoder:
+
+- we first invert the masking operation, to retrieve a 3D brain volume
+  of the SVC's weights;
+- we then create a figure and plot the first EPI image as a background;
+- finally, we plot the SVC's weights after masking the zero values.
+
+
+.. figure:: ../auto_examples/images/sphx_glr_plot_decoding_tutorial_002.png
+   :target: ../auto_examples/plot_decoding_tutorial.html
+   :scale: 65
+
+.. note::
+
+   Full code for the above can be found on
+   :ref:`sphx_glr_auto_examples_plot_decoding_tutorial.py`.
+
+
+.. seealso::
+
+   * :ref:`plotting`
+
+
+Decoding without a mask: Anova-SVM
+==================================
+
+Dimension reduction with feature selection
+------------------------------------------
+
+If we do not start from a mask of the relevant regions, there is a very
+large number of voxels and not all are useful for
+face vs cat prediction. We thus add a `feature selection
+`_
+procedure. The idea is to select the `k` voxels most correlated to the
+task.
+
+For this, we need to import the :mod:`sklearn.feature_selection` module and use
+:func:`sklearn.feature_selection.f_classif`, a simple F-score
+based feature selection (a.k.a.
+`Anova `_),
+that we will put before the SVC in a `pipeline`
+(:class:`sklearn.pipeline.Pipeline`):
+
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_anova_svm.py
+   :start-after: # Build the decoder
+   :end-before: # Visualize the results
+
+
+We can use our ``anova_svc`` object exactly as we were using our ``svc``
+object previously.
+
+Visualizing the results
+-----------------------
+
+To visualize the results, we need to:
+
+- first get the support vectors of the SVC and invert the feature
+  selection mechanism;
+- then, as before, invert the masking process to retrieve the weights
+  and plot them.
+
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_anova_svm.py
+   :start-after: # Visualize the results
+   :end-before: # Saving the results as a Nifti file may also be important
+
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_anova_svm_001.png
+   :target: ../auto_examples/02_decoding/plot_haxby_anova_svm.html
+   :scale: 65
+
+.. seealso::
+
+   * :ref:`plotting`
+
+
+.. topic:: **Final script**
+
+   The complete script to do an SVM-Anova analysis can be found as
+   :ref:`an example `.
+
+
+.. seealso::
+
+   * :ref:`space_net`
+   * :ref:`searchlight`
+
+
+Going further with scikit-learn
+===============================
+
+We have seen a very simple analysis with scikit-learn, but it may be
+interesting to explore the `wide variety of supervised learning
+algorithms in the scikit-learn
+`_.
+
+Changing the prediction engine
+------------------------------
+
+.. 
for doctest: + >>> from sklearn.feature_selection import SelectKBest, f_classif + >>> from sklearn.svm import LinearSVC + >>> feature_selection = SelectKBest(f_classif, k=4) + + +We now see how one can easily change the prediction engine, if needed. +We can try Fisher's `Linear Discriminant Analysis (LDA) +`_ + +Import the module:: + + >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis # doctest: +SKIP + +Construct the new estimator object and use it in a pipeline:: + + >>> from sklearn.pipeline import Pipeline + >>> lda = LinearDiscriminantAnalysis() # doctest: +SKIP + >>> anova_lda = Pipeline([('anova', feature_selection), ('LDA', lda)]) # doctest: +SKIP + +.. note:: + Import Linear Discriminant Analysis method in "sklearn.lda.LDA" if you are using + scikit-learn older than version 0.17. + +and recompute the cross-validation score:: + + >>> cv_scores = cross_val_score(anova_lda, fmri_masked, target, cv=cv, verbose=1) # doctest: +SKIP + >>> classification_accuracy = np.mean(cv_scores) # doctest: +SKIP + >>> n_conditions = len(set(target)) # number of target classes + >>> print("Classification accuracy: %.4f / Chance Level: %.4f" % \ + ... (classification_accuracy, 1. / n_conditions)) # doctest: +SKIP + Classification accuracy: 0.7846 / Chance level: 0.5000 + + +Changing the feature selection +------------------------------ +Let's start by defining a linear SVM as a first classifier:: + + >>> clf = LinearSVC() + + +Let's say that you want a more sophisticated feature selection, for example a +`Recursive Feature Elimination (RFE) +`_ + +Import the module:: + + >>> from sklearn.feature_selection import RFE + +Construct your new fancy selection:: + + >>> rfe = RFE(SVC(kernel='linear', C=1.), 50, step=0.25) + +and create a new pipeline, composing the two classifiers `rfe` and `clf`:: + + >>> rfe_svc = Pipeline([('rfe', rfe), ('svc', clf)]) + +and recompute the cross-validation score:: + + >>> cv_scores = cross_val_score(rfe_svc, fmri_masked, target, cv=cv, + ... n_jobs=-1, verbose=1) # doctest: +SKIP + +But, be aware that this can take *A WHILE*... + +| + +.. seealso:: + + * The `scikit-learn documentation `_ + has very detailed explanations on a large variety of estimators and + machine learning techniques. To become better at decoding, you need + to study it. diff --git a/doc/decoding/decoding_simulated.rst b/doc/decoding/decoding_simulated.rst deleted file mode 100644 index 3377824fcd..0000000000 --- a/doc/decoding/decoding_simulated.rst +++ /dev/null @@ -1,113 +0,0 @@ -.. _decoding_simulated: - -========================== -Decoding on simulated data -========================== - -.. topic:: Objectives - - 1. Understand linear estimators (SVM, elastic net, ridge) - 2. Use the scikit-learn's linear models - -Simple NeuroImaging-like simulations -===================================== - -We simulate data as in -`Michel et al. 2012 `_ : -a linear model with a random design matrix **X**: - -.. math:: - - \mathbf{y} = \mathbf{X} \mathbf{w} + \mathbf{e} - -* **w**: the weights of the linear model correspond to the predictive - brain regions. Here, in the simulations, they form a 3D image with 5, four - of which in opposite corners and one in the middle. - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_001.png - :target: auto_examples/decoding/plot_simulated_data.html - :align: center - :scale: 90 - -* **X**: the design matrix corresponds to the observed fMRI data. Here - we simulate random normal variables and smooth them as in Gaussian - fields. 
- -* **e** is random normal noise. - -We provide a black-box function to create the data in the -:ref:`example script `. - - -Running various estimators -=========================== - -We can now run different estimators and look at their prediction score, -as well as the feature maps that they recover. Namely, we will use - -* A support vector regression (`SVM - `_) - -* An `elastic-net - `_ - -* A *Bayesian* ridge estimator, i.e. a ridge estimator that sets its - parameter according to a metaprior - -* A ridge estimator that set its parameter by cross-validation - -Note that the `RidgeCV` and the `ElasticNetCV` have names ending in `CV` -that stands for `cross-validation`: in the list of possible `alpha` -values that they are given, they choose the best by cross-validation. - -As the estimators expose a fairly consistent API, we can all fit them in -a for loop: they all have a `fit` method for fitting the data, a `score` -method to retrieve the prediction score, and because they are all linear -models, a `coef_` attribute that stores the coefficients **w** estimated -(see the :ref:`code of the simulation -`). - -.. note:: All parameters estimated from the data end with an underscore - -.. |estimator1| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_002.png - :target: ../auto_examples/decoding/plot_simulated_data.html - :scale: 60 - -.. |estimator2| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_003.png - :target: ../auto_examples/decoding/plot_simulated_data.html - :scale: 60 - -.. |estimator3| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_004.png - :target: ../auto_examples/decoding/plot_simulated_data.html - :scale: 60 - -.. |estimator4| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_005.png - :target: ../auto_examples/decoding/plot_simulated_data.html - :scale: 60 - -|estimator1| |estimator2| |estimator3| |estimator4| - -.. topic:: **Exercise** - :class: green - - Use recursive feature elimination (RFE) with the SVM:: - - >>> from sklearn.feature_selection import RFE - - Read the object's documentation to find out how to use RFE. - - **Performance tip**: increase the `step` parameter, or it will be very - slow. - - -.. topic:: **Source code to run the simulation** - - The full file to run the simulation can be found in - :ref:`sphx_glr_auto_examples_decoding_plot_simulated_data.py` - -.. seealso:: - - * :ref:`space_net` - * :ref:`searchlight` - - diff --git a/doc/decoding/decoding_tutorial.rst b/doc/decoding/decoding_tutorial.rst deleted file mode 100644 index 72b434cb83..0000000000 --- a/doc/decoding/decoding_tutorial.rst +++ /dev/null @@ -1,510 +0,0 @@ -.. for doctests to run, we need to define variables that are define in - the literal includes - >>> import numpy as np - >>> from sklearn import datasets - >>> iris = datasets.load_iris() - >>> fmri_masked = iris.data - >>> target = iris.target - >>> session = np.ones_like(target) - >>> n_samples = len(target) - -.. _decoding_tutorial: - -===================== -A decoding tutorial -===================== - -This page is a decoding tutorial articulated on the analysis of the Haxby -2001 dataset. It shows how to predict from brain activity images the -stimuli that the subject is viewing. - - -.. contents:: **Contents** - :local: - :depth: 1 - - -Data loading and preparation -================================ - -The Haxby 2001 experiment -------------------------- - -Subjects are presented visual stimuli from different categories. 
We are -going to predict which category the subject is seeing from the fMRI -activity recorded in masks of the ventral stream. Significant prediction -shows that the signal in the region contains information on the -corresponding category. - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_stimuli_004.png - :target: ../auto_examples/decoding/plot_haxby_stimuli.html - :scale: 30 - :align: left - - Face stimuli - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_stimuli_002.png - :target: ../auto_examples/decoding/plot_haxby_stimuli.html - :scale: 30 - :align: left - - Cat stimuli - -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_haxby_masks_001.png - :target: ../auto_examples/manipulating_visualizing/plot_haxby_masks.html - :scale: 30 - :align: left - - Masks - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_full_analysis_001.png - :target: ../auto_examples/decoding/plot_haxby_full_analysis.html - :scale: 35 - :align: left - - Decoding scores per mask - - -Loading the data into Python ------------------------------ - -Launch IPython:: - - ipython --matplotlib - -First, load the data using nilearn data downloading function, -:func:`nilearn.datasets.fetch_haxby`: - -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # Retrieve and load the Haxby dataset - :end-before: # Load the behavioral labels - -The ``haxby_dataset`` object has several entries that contain paths to the files -downloaded on the disk:: - - >>> print(haxby_dataset) # doctest: +SKIP - {'anat': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/anat.nii.gz'], - 'func': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/bold.nii.gz'], - 'mask_face': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/mask8b_face_vt.nii.gz'], - 'mask_face_little': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/mask8_face_vt.nii.gz'], - 'mask_house': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/mask8b_house_vt.nii.gz'], - 'mask_house_little': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/mask8_house_vt.nii.gz'], - 'mask_vt': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/mask4_vt.nii.gz'], - 'session_target': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/labels.txt']} - - -We load the behavioral labels from the corresponding text file and limit -our analysis to the `face` and `cat` conditions: - -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # Load the behavioral labels - :end-before: # Prepare the data: apply the mask - -.. currentmodule:: nilearn.input_data - -Then we prepare the fMRI data: we use the :class:`NiftiMasker` to apply the -`mask_vt` mask to the 4D fMRI data, so that its shape becomes (n_samples, -n_features) (see :ref:`mask_4d_2_3d` for a discussion on using masks). - - -.. note:: - - seemingly minor data preparation can matter a lot on the final score, - for instance standardizing the data. - - -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # Prepare the data: apply the mask - :end-before: # The decoding - -.. seealso:: - - * :ref:`loading_data` - * :ref:`masking` - - - -Performing the decoding analysis -==================================== - -The prediction engine ------------------------ - -An estimator object -.................... - -To perform decoding we construct an estimator, predicting a condition -label **y** given a set **X** of images. 
- -We use here a simple `Support Vector Classification -`_ (or SVC) with a -linear kernel. We first import the correct module from scikit-learn and we -define the classifier, :class:`sklearn.svm.SVC`: - -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # Here we use a Support Vector Classification, with a linear kernel - :end-before: # And we run it - - -The documentation of the object details all parameters. In IPython, it -can be displayed as follows:: - - In [10]: svc? - Type: SVC - Base Class: - String Form: - SVC(kernel=linear, C=1.0, probability=False, degree=3, coef0=0.0, eps=0.001, - cache_size=100.0, shrinking=True, gamma=0.0) - Namespace: Interactive - Docstring: - C-Support Vector Classification. - Parameters - ---------- - C : float, optional (default=1.0) - penalty parameter C of the error term. - ... - -.. seealso:: - - the `scikit-learn documentation on SVMs - `_ - - -Applying it to data: fit (train) and predict (test) -.................................................... - -In scikit-learn, the prediction objects have two important methods: - -- a *fit* function that "learns" the parameters of the model from the data. - Thus, we need to give some training data to *fit*. -- a *predict* function that "predicts" a target from new data. - Here, we just have to give the new set of images (as the target should be - unknown): - -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # And we run it - :end-before: # Compute prediction scores using cross-validation - -.. warning:: - - **Do not predict on data used by the fit:** the prediction that we obtain here - is to good to be true (see next paragraph). Here we are just doing a sanity - check. - -.. for doctests (smoke testing): - >>> from sklearn.svm import SVC - >>> svc = SVC() - -Measuring prediction performance ---------------------------------- - -Cross-validation -................. - -However, the last analysis is *wrong*, as we have learned and tested on -the same set of data. We need to use a cross-validation to split the data -into different sets, called "folds", in a `K-Fold strategy -`_. - -We use a cross-validation object, -:class:`sklearn.cross_validation.KFold`, that simply generates the -indices of the folds within a loop. - -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # Compute prediction scores using cross-validation - :end-before: print(cv_scores) - - -.. for doctests: - >>> cv = 2 - -There is a specific function, -:func:`sklearn.cross_validation.cross_val_score` that computes for you -the score for the different folds of cross-validation:: - - >>> from sklearn.cross_validation import cross_val_score - >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv) # doctest: +SKIP - -You can speed up the computation by using n_jobs=-1, which will spread -the computation equally across all processors (but will probably not work -under Windows):: - - >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv, n_jobs=-1, verbose=10) #doctest: +SKIP - -**Prediction accuracy**: We can take a look at the results of the -*cross_val_score* function:: - - >>> print(cv_scores) # doctest: +SKIP - [0.72727272727272729, 0.46511627906976744, 0.72093023255813948, 0.58139534883720934, 0.7441860465116279] - -This is simply the prediction score for each fold, i.e. the fraction of -correct predictions on the left-out data. - -Choosing a good cross-validation strategy -......................................... 
- -There are many cross-validation strategies possible, including K-Fold or -leave-one-out. When choosing a strategy, keep in mind that: - -* The test set should be as litte correlated as possible with the train - set -* The test set needs to have enough samples to enable a good measure of - the prediction error (a rule of thumb is to use 10 to 20% of the data). - -In these regards, leave one out is often one of the worst options. - -Here, in the Haxby example, we are going to leave a session out, in order -to have a test set independent from the train set. For this, we are going -to use the session label, present in the behavioral data file, and -:class:`sklearn.cross_validation.LeaveOneLabelOut`:: - - >>> from sklearn.cross_validation import LeaveOneLabelOut - >>> session_label = labels['chunks'] # doctest: +SKIP - >>> # We need to remember to remove the rest conditions - >>> session_label = session_label[condition_mask] # doctest: +SKIP - >>> cv = LeaveOneLabelOut(labels=session_label) # doctest: +SKIP - >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv) # doctest: +SKIP - >>> print(cv_scores) # doctest: +SKIP - [ 1. 0.61111111 0.94444444 0.88888889 0.88888889 0.94444444 - 0.72222222 0.94444444 0.5 0.72222222 0.5 0.55555556] - -.. topic:: **Exercise** - :class: green - - Compute the mean prediction accuracy using *cv_scores* - -.. topic:: Solution - - >>> classification_accuracy = np.mean(cv_scores) # doctest: +SKIP - >>> classification_accuracy # doctest: +SKIP - 0.76851851851851849 - -We have a total prediction accuracy of 77% across the different sessions. - -Choice of the prediction accuracy measure -.......................................... - -The default metric used for measuring errors is the accuracy score, i.e. -the number of total errors. It is not always a sensible metric, -especially in the case of very imbalanced classes, as in such situations -choosing the dominant class can achieve a low number of errors. - -Other metrics, such as the f1-score, can be used:: - - >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv, scoring='f1') # doctest: +SKIP - -.. seealso:: - - the `list of scoring options - `_ - -Measuring the chance level -........................... - -**Dummy estimators**: The simplest way to measure prediction performance -at chance, is to use a dummy classifier, -:class:`sklearn.dummy.DummyClassifier`:: - - >>> from sklearn.dummy import DummyClassifier - >>> null_cv_scores = cross_val_score(DummyClassifier(), fmri_masked, target, cv=cv) # doctest: +SKIP - -**Permutation testing**: A more controlled way, but slower, is to do -permutation testing on the labels, with -:func:`sklearn.cross_validation.permutation_test_score`:: - - >>> from sklearn.cross_validation import permutation_test_score - >>> null_cv_scores = permutation_test_score(svc, fmri_masked, target, cv=cv) # doctest: +SKIP - -| - -.. topic:: **Putting it all together** - - The :ref:`ROI-based decoding example - ` does a decoding analysis per - mask, giving the f1-score of the prediction for each object. - - It uses all the notions presented above, with ``for`` loop to iterate - over masks and categories and Python dictionnaries to store the - scores. - - -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_haxby_masks_001.png - :target: ../auto_examples/manipulating_visualizing/plot_haxby_masks.html - :scale: 55 - :align: left - - Masks - - -.. 
figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_full_analysis_001.png - :target: ../auto_examples/decoding/plot_haxby_full_analysis.html - :scale: 70 - :align: left - - - -Visualizing the decoder's weights ---------------------------------- - -We can visualize the weights of the decoder: - -- we first inverse the masking operation, to retrieve a 3D brain volume - of the SVC's weights. -- we then create a figure and plot as a background the first EPI image -- finally we plot the SVC's weights after masking the zero values - -.. literalinclude:: ../../examples/plot_haxby_simple.py - :start-after: # Retrieve the discriminating weights and save them - :end-before: # Visualize the discriminating weights over the mean EPI - -.. figure:: ../auto_examples/images/sphx_glr_plot_haxby_simple_001.png - :target: ../auto_examples/plot_haxby_simple.html - :scale: 65 - - -.. seealso:: - - * :ref:`plotting` - - -Decoding without a mask: Anova-SVM -=================================== - -Dimension reduction with feature selection -------------------------------------------- - -If we do not start from a mask of the relevant regions, there is a very -large number of voxels and not all are useful for -face vs cat prediction. We thus add a `feature selection -`_ -procedure. The idea is to select the `k` voxels most correlated to the -task. - -For this, we need to import the :mod:`sklearn.feature_selection` module and use -:func:`sklearn.feature_selection.f_classif`, a simple F-score -based feature selection (a.k.a. -`Anova `_), -that we will put before the SVC in a `pipeline` -(:class:`sklearn.pipeline.Pipeline`): - -.. literalinclude:: ../../examples/decoding/plot_haxby_anova_svm.py - :start-after: # Build the decoder - :end-before: # Visualize the results - - - -We can use our ``anova_svc`` object exactly as we were using our ``svc`` -object previously. - -Visualizing the results -------------------------- - -To visualize the results, we need to: - -- first get the support vectors of the SVC and inverse the feature - selection mechanism -- then, as before, inverse the masking process to retrieve the weights - and plot them. - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_anova_svm_001.png - :target: ../auto_examples/decoding/plot_haxby_anova_svm.html - :align: right - :scale: 65 - -.. literalinclude:: ../../examples/decoding/plot_haxby_anova_svm.py - :start-after: # Visualize the results - :end-before: # Obtain prediction scores via cross validation - -.. seealso:: - - * :ref:`plotting` - - -.. topic:: **Final script** - - The complete script to do an SVM-Anova analysis can be found as - :ref:`an example `. - - -.. seealso:: - - * :ref:`decoding_simulated` - * :ref:`space_net` - * :ref:`searchlight` - - -Going further with scikit-learn -=================================== - -We have seen a very simple analysis with scikit-learn, but it may be -interesting to explore the `wide variety of supervised learning -algorithms in the scikit-learn -`_. - -Changing the prediction engine --------------------------------- - -.. for doctest: - >>> from sklearn.feature_selection import SelectKBest, f_classif - >>> from sklearn.svm import LinearSVC - >>> feature_selection = SelectKBest(f_classif, k=4) - >>> clf = LinearSVC() - -We now see how one can easily change the prediction engine, if needed. 
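Before swapping components in and out, here is a hedged sketch of the baseline Anova + SVC pipeline that the variants below modify. ``X`` and ``y`` stand for the masked data and the labels, and ``k=500`` is an arbitrary illustrative value::

    >>> from sklearn.feature_selection import SelectKBest, f_classif
    >>> from sklearn.pipeline import Pipeline
    >>> from sklearn.svm import SVC
    >>> feature_selection = SelectKBest(f_classif, k=500)  # keep the k voxels with highest F-score
    >>> anova_svc = Pipeline([('anova', feature_selection),
    ...                       ('svc', SVC(kernel='linear'))])
    >>> anova_svc.fit(X, y)            # doctest: +SKIP
    >>> y_pred = anova_svc.predict(X)  # doctest: +SKIP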
-We can try Fisher's `Linear Discriminant Analysis (LDA) -`_ - -Import the module:: - - >>> from sklearn.lda import LDA - -Construct the new estimator object and use it in a pipeline:: - - >>> from sklearn.pipeline import Pipeline - >>> lda = LDA() - >>> anova_lda = Pipeline([('anova', feature_selection), ('LDA', lda)]) - -and recompute the cross-validation score:: - - >>> cv_scores = cross_val_score(anova_lda, X, y, cv=cv, verbose=1) # doctest: +SKIP - >>> classification_accuracy = np.mean(cv_scores) # doctest: +SKIP - >>> print("Classification accuracy: %.4f / Chance Level: %.4f" % \ - ... (classification_accuracy, 1. / n_conditions)) # doctest: +SKIP - Classification accuracy: 1.0000 / Chance level: 0.5000 - - -Changing the feature selection ------------------------------- - -Let's say that you want a more sophisticated feature selection, for example a -`Recursive Feature Elimination (RFE) -`_ - -Import the module:: - - >>> from sklearn.feature_selection import RFE - -Construct your new fancy selection:: - - >>> rfe = RFE(SVC(kernel='linear', C=1.), 50, step=0.25) - -and create a new pipeline:: - - >>> rfe_svc = Pipeline([('rfe', rfe), ('svc', clf)]) - -and recompute the cross-validation score:: - - >>> cv_scores = cross_val_score(rfe_svc, X, y, cv=cv, n_jobs=-1, - ... verbose=1) # doctest: +SKIP - -But, be aware that this can take A WHILE... - -| - -.. seealso:: - - * The `scikit-learn documentation `_ - has very detailed explanations on a large variety of estimators and - machine learning techniques. To become better at decoding, you need - to study it. - diff --git a/doc/decoding/estimator_choice.rst b/doc/decoding/estimator_choice.rst index 23181b794b..78490a0f6b 100644 --- a/doc/decoding/estimator_choice.rst +++ b/doc/decoding/estimator_choice.rst @@ -10,7 +10,7 @@ It is slightly oriented towards a *decoding* application, that is the prediction of external variables such as behavior or clinical traits from brain images. For a didactic introduction to decoding with nilearn, see the :ref:`dedicated section of the nilearn documentation -`. +`. .. contents:: **Contents** :local: @@ -68,7 +68,7 @@ There are two noteworthy strategies: The "One vs One" strategy is more computationally costly than the "One vs All". The former scales as the square of the number of classes, -whereas the former is linear with the number of classes. +whereas the latter is linear with the number of classes. .. seealso:: @@ -81,18 +81,18 @@ whereas the former is linear with the number of classes. :func:`sklearn.metrics.confusion_matrix` is a useful tool to understand the classifier's errors in a multiclass problem. -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_multiclass_001.png - :target: ../auto_examples/decoding/plot_haxby_multiclass.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_multiclass_001.png + :target: ../auto_examples/02_decoding/plot_haxby_multiclass.html :align: left :scale: 60 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_multiclass_002.png - :target: ../auto_examples/decoding/plot_haxby_multiclass.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_multiclass_002.png + :target: ../auto_examples/02_decoding/plot_haxby_multiclass.html :align: left :scale: 40 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_multiclass_003.png - :target: ../auto_examples/decoding/plot_haxby_multiclass.html +.. 
figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_multiclass_003.png + :target: ../auto_examples/02_decoding/plot_haxby_multiclass.html :align: left :scale: 40 @@ -109,13 +109,13 @@ will have bumps and peaks due to this noise. These will not generalize to new data and chances are that the corresponding choice of parameter will not perform as well on new data. -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_grid_search_001.png - :target: ../auto_examples/decoding/plot_haxby_grid_search.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_grid_search_001.png + :target: ../auto_examples/02_decoding/plot_haxby_grid_search.html :align: center :scale: 60 With scikit-learn nested cross-validation is done via -:class:`sklearn.grid_search.GridSearchCV`. It is unfortunately time +:class:`sklearn.model_selection.GridSearchCV`. It is unfortunately time consuming, but the ``n_jobs`` argument can spread the load on multiple CPUs. @@ -125,7 +125,7 @@ CPUs. * `The scikit-learn documentation on parameter selection `_ - * The example :ref:`sphx_glr_auto_examples_decoding_plot_haxby_grid_search.py` + * The example :ref:`sphx_glr_auto_examples_02_decoding_plot_haxby_grid_search.py` Different linear models ======================= @@ -163,8 +163,8 @@ Here we apply a few linear models to fMRI data: in every situation. -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_001.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_001.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :align: center :scale: 80 @@ -181,52 +181,50 @@ the other, although the prediction scores are fairly similar. In other terms, a well-performing estimator in terms of prediction error gives us little guarantee on the brain maps. -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_007.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html - :align: left +.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_007.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :scale: 70 - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_008.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html - :align: left +.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_008.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :scale: 70 - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_005.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html - :align: left +.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_005.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :scale: 70 - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_006.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html - :align: left +.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_006.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :scale: 70 - -.. 
figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_004.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html - :align: left +.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_004.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :scale: 70 - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_002.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html - :align: left +.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_002.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :scale: 70 - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_003.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html - :align: left +.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_003.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :scale: 70 - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_009.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html - :align: left +.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_009.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :scale: 70 - -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_010.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html - :align: left +.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_010.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :scale: 70 + .. seealso:: * :ref:`space_net` +| + +.. topic:: **Decoding on simulated data** + + Simple simulations may be useful to understand the behavior of a given + decoder on data. In particular, simulations enable us to set the true + weight maps and compare them to the ones retrieved by decoders. A full + example running simulations and discussing them can be found in + :ref:`sphx_glr_auto_examples_02_decoding_plot_simulated_data.py` + Simulated data cannot easily mimic all properties of brain data. An + important aspect, however, is its spatial structure, that we create in + the simulations. + + diff --git a/doc/decoding/index.rst b/doc/decoding/index.rst index d11838fb5a..7a3b371b22 100644 --- a/doc/decoding/index.rst +++ b/doc/decoding/index.rst @@ -21,9 +21,8 @@ predicting an output value. .. toctree:: - decoding_tutorial.rst + decoding_intro.rst estimator_choice.rst - decoding_simulated.rst space_net.rst searchlight.rst diff --git a/doc/decoding/searchlight.rst b/doc/decoding/searchlight.rst index bc4267ab95..a5a8ddba97 100644 --- a/doc/decoding/searchlight.rst +++ b/doc/decoding/searchlight.rst @@ -25,7 +25,7 @@ Loading Fetching the data from internet and loading it can be done with the provided functions (see :ref:`loading_data`): -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. 
literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # Load Haxby dataset :end-before: # Restrict to faces and houses @@ -37,9 +37,9 @@ For this example we need: - to put X in the form *n_samples* x *n_features* - compute a mean image for visualization background - limit our analysis to the `face` and `house` conditions - (like in the :ref:`decoding tutorial `) + (like in the :ref:`introduction to decoding `) -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # Restrict to faces and houses :end-before: # Prepare masks @@ -62,7 +62,7 @@ be used here : back of the brain. *mask_img* will ensure that no value outside the brain is taken into account when iterating with the sphere. -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # brain to speed up computation) :end-before: # Searchlight computation @@ -99,7 +99,7 @@ validation method that does not take too much time. *K*-Fold along with *K* = 4 is a good compromise between running time and quality. -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # set once and the others as learning sets :end-before: import nilearn.decoding @@ -109,11 +109,11 @@ Running Searchlight Running :class:`SearchLight` is straightforward now that everything is set. The only parameter left is the radius of the ball that will run through the data. -Kriegskorte et al. use a 4mm radius because it yielded the best detection +Kriegskorte et al. use a 5.6mm radius because it yielded the best detection performance in their simulation. -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py - :start-after: import nilearn.decoding +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py + :start-after: cv = KFold(n_splits=4) :end-before: # F-scores computation Visualization @@ -127,12 +127,12 @@ background. We can see here that voxels in the visual cortex contains information to distinguish pictures showed to the volunteers, which was the expected result. -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # Visualization :end-before: # F_score results -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_searchlight_001.png - :target: ../auto_examples/decoding/plot_haxby_searchlight.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_searchlight_001.png + :target: ../auto_examples/02_decoding/plot_haxby_searchlight.html :align: center :scale: 80 @@ -149,11 +149,11 @@ parametric tests (F-tests ot t-tests). Here we compute the *p-values* of the voxels [1]_. To display the results, we use the negative log of the p-value. -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # F_score results -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_searchlight_002.png - :target: ../auto_examples/decoding/plot_haxby_searchlight.html +.. 
figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_searchlight_002.png + :target: ../auto_examples/02_decoding/plot_haxby_searchlight.html :align: center :scale: 80 @@ -185,7 +185,7 @@ is its associated p-value. The :func:`nilearn.mass_univariate.permuted_ols` function returns the p-values computed with a permutation test. -.. literalinclude:: ../../examples/manipulating_visualizing/plot_haxby_mass_univariate.py +.. literalinclude:: ../../examples/05_advanced/plot_haxby_mass_univariate.py :start-after: # Perform massively univariate analysis with permuted OLS :end-before: neg_log_pvals_unmasked @@ -206,8 +206,8 @@ every voxel so that the F-statistics are comparable. This correction strategy is applied in nilearn :func:`nilearn.mass_univariate.permuted_ols` function. -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_haxby_mass_univariate_001.png - :target: ../auto_examples/manipulating_visualizing/plot_haxby_searchlight.html +.. figure:: ../auto_examples/05_advanced/images/sphx_glr_plot_haxby_mass_univariate_001.png + :target: ../auto_examples/05_advanced/plot_haxby_mass_univariate.html :align: center :scale: 60 @@ -227,7 +227,7 @@ on the original (non-permuted) data. Thus, we can perform two one-sided tests (a given contrast and its opposite) for the price of one single run. The example results can be interpreted as follows: viewing faces significantly activates the Fusiform Face Area as compared to viewing houses, while viewing -houses does not reveals significant supplementary activations as compared to +houses does not reveal significant supplementary activations as compared to viewing faces. diff --git a/doc/decoding/space_net.rst b/doc/decoding/space_net.rst index 5f6e7a3660..8c5aebd714 100644 --- a/doc/decoding/space_net.rst +++ b/doc/decoding/space_net.rst @@ -10,24 +10,26 @@ .. _space_net: -===================================== -Multivariate decoding with SpaceNet -===================================== +========================================================== +SpaceNet: decoding with spatial structure for better maps +========================================================== The SpaceNet decoder --------------------- -SpaceNet implements a suite of multi-variate priors which for improved -brain decoding. It uses priors like TV (Total Variation) `[Michel et -al. 2011] `_, TV-L1 -`[Baldassarre et al. 2012] -`_, -`[Gramfort et al. 2013] `_ -(option: penalty="tvl1"), and Graph-Net `[Hebiri et al. 2011] -`_ (known -as GraphNet in neuroimaging `[Grosenick et al. 2013] -`_) (option: -penalty="graph-net") to regularize classification and regression -problems in brain imaging. The result are brain maps which are both +===================== + +SpaceNet implements spatial penalties which improve brain decoding power as well as decoder maps: + +* penalty="tvl1": priors inspired from TV (Total Variation) `[Michel et + al. 2011] `_, TV-L1 + `[Baldassarre et al. 2012] + `_, + `[Gramfort et al. 2013] `_ (option: ), + +* penalty="graph-net": GraphNet prior `[Grosenick et al. 2013] + `_) + +These regularize classification and regression +problems in brain imaging. The results are brain maps which are both sparse (i.e regression coefficients are zero everywhere, except at predictive voxels) and structured (blobby). The superiority of TV-L1 over methods without structured priors like the Lasso, SVM, ANOVA, @@ -35,18 +37,11 @@ Ridge, etc. for yielding more interpretable maps and improved prediction scores is now well established `[Baldassarre et al. 
2012] `_, `[Gramfort et al. 2013] `_, -`[Grosenick et al. 2013] `_. +`[Grosenick et al. 2013] `_. -The following table summarizes the parameter(s) used to activate a -given penalty: - -- TV-L1: `penalty="tv-l1"` -- Graph-Net: `penalty="graph-net"` (this is the default prior in - SpaceNet) - -Note that TV-L1 prior leads to a hard optimization problem, and so can -be slow to run. Under the hood, a few heuristics are used to make +Note that TV-L1 prior leads to a difficult optimization problem, and so +can be slow to run. Under the hood, a few heuristics are used to make things a bit faster. These include: - Feature preprocessing, where an F-test is used to eliminate @@ -55,7 +50,7 @@ things a bit faster. These include: - Continuation is used along the regularization path, where the solution of the optimization problem for a given value of the regularization parameter `alpha` is used as initialization - of for next the regularization (smaller) value on the regularization + for the next regularization (smaller) value on the regularization grid. **Implementation:** See `[Dohmatob et al. 2015 (PRNI)] @@ -63,40 +58,44 @@ things a bit faster. These include: et al. 2014 (PRNI)] `_ for technical details regarding the implementation of SpaceNet. -Mixed gambles -............. +Empirical comparisons +===================== + + +Comparison on mixed gambles study +---------------------------------- -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_mixed_gambles_space_net_001.png +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_mixed_gambles_space_net_001.png :align: right :scale: 60 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_mixed_gambles_space_net_002.png +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_mixed_gambles_space_net_002.png :scale: 60 .. topic:: **Code** The complete script can be found - :ref:`here `. + :ref:`here `. -Haxby -..... +Comparison on Haxby study +-------------------------- -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_space_net_001.png +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_space_net_001.png :align: right :scale: 60 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_space_net_002.png +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_space_net_002.png :scale: 60 .. topic:: **Code** The complete script can be found - :ref:`here `. + :ref:`here `. .. seealso:: - * :ref:`Age prediction on OASIS dataset with SpaceNet `. + * :ref:`Age prediction on OASIS dataset with SpaceNet `. 
* The `scikit-learn documentation `_ has very detailed explanations on a large variety of estimators and diff --git a/doc/images/papaya_stat_map_plot_screenshot.png b/doc/images/papaya_stat_map_plot_screenshot.png new file mode 100644 index 0000000000..7950348745 Binary files /dev/null and b/doc/images/papaya_stat_map_plot_screenshot.png differ diff --git a/doc/images/papaya_stat_map_plot_screenshot_notebook.png b/doc/images/papaya_stat_map_plot_screenshot_notebook.png new file mode 100644 index 0000000000..b703dc597b Binary files /dev/null and b/doc/images/papaya_stat_map_plot_screenshot_notebook.png differ diff --git a/doc/images/plotly_connectome_plot.png b/doc/images/plotly_connectome_plot.png new file mode 100644 index 0000000000..e56d9b47eb Binary files /dev/null and b/doc/images/plotly_connectome_plot.png differ diff --git a/doc/images/plotly_markers_plot.png b/doc/images/plotly_markers_plot.png new file mode 100644 index 0000000000..be0e34d3cb Binary files /dev/null and b/doc/images/plotly_markers_plot.png differ diff --git a/doc/images/plotly_surface_atlas_plot.png b/doc/images/plotly_surface_atlas_plot.png new file mode 100644 index 0000000000..44058d0f66 Binary files /dev/null and b/doc/images/plotly_surface_atlas_plot.png differ diff --git a/doc/images/plotly_surface_plot.png b/doc/images/plotly_surface_plot.png new file mode 100644 index 0000000000..3a9b357009 Binary files /dev/null and b/doc/images/plotly_surface_plot.png differ diff --git a/doc/images/plotly_surface_plot_notebook_screenshot.png b/doc/images/plotly_surface_plot_notebook_screenshot.png new file mode 100644 index 0000000000..38f72c8c3c Binary files /dev/null and b/doc/images/plotly_surface_plot_notebook_screenshot.png differ diff --git a/doc/index.rst b/doc/index.rst index 69004cdfe0..4d5683b53e 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -15,29 +15,32 @@ .. Here we are building the carrousel -.. |glass_brain| image:: auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_glass_brain_002.png - :target: auto_examples/manipulating_visualizing/plot_demo_glass_brain.html +.. |glass_brain| image:: auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_002.png + :target: auto_examples/01_plotting/plot_demo_glass_brain.html -.. |connectome| image:: auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png - :target: auto_examples/connectivity/plot_inverse_covariance_connectome.html +.. |connectome| image:: auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png + :target: auto_examples/03_connectivity/plot_inverse_covariance_connectome.html -.. |haxby_weights| image:: auto_examples/images/sphx_glr_plot_haxby_simple_001.png - :target: auto_examples/plot_haxby_simple.html +.. |surface_plot| image:: auto_examples/01_plotting/images/sphx_glr_plot_3d_map_to_surface_projection_001.png + :target: auto_examples/01_plotting/plot_3d_map_to_surface_projection.html -.. |oasis_weights| image:: auto_examples/decoding/images/sphx_glr_plot_oasis_vbm_002.png - :target: auto_examples/decoding/plot_oasis_vbm.html +.. |haxby_weights| image:: auto_examples/images/sphx_glr_plot_decoding_tutorial_002.png + :target: auto_examples/plot_decoding_tutorial.html -.. |rest_clustering| image:: auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_001.png - :target: auto_examples/connectivity/plot_rest_clustering.html +.. 
|oasis_weights| image:: auto_examples/02_decoding/images/sphx_glr_plot_oasis_vbm_002.png + :target: auto_examples/02_decoding/plot_oasis_vbm.html -.. |canica| image:: auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_011.png - :target: auto_examples/connectivity/plot_canica_resting_state.html +.. |rest_parcellations| image:: auto_examples/03_connectivity/images/sphx_glr_plot_rest_parcellations_001.png + :target: auto_examples/03_connectivity/plot_rest_parcellations.html -.. |tvl1_haxby| image:: auto_examples/decoding/images/sphx_glr_plot_haxby_space_net_002.png - :target: auto_examples/decoding/plot_haxby_space_net.html +.. |canica| image:: auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_011.png + :target: auto_examples/03_connectivity/plot_canica_resting_state.html -.. |searchlight| image:: auto_examples/decoding/images/sphx_glr_plot_haxby_searchlight_001.png - :target: auto_examples/decoding/plot_haxby_searchlight.html +.. |tvl1_haxby| image:: auto_examples/02_decoding/images/sphx_glr_plot_haxby_space_net_002.png + :target: auto_examples/02_decoding/plot_haxby_space_net.html + +.. |searchlight| image:: auto_examples/02_decoding/images/sphx_glr_plot_haxby_searchlight_001.png + :target: auto_examples/02_decoding/plot_haxby_searchlight.html .. raw:: html @@ -58,29 +61,31 @@ * |glass_brain| -* |haxby_weights| +* |surface_plot| * |oasis_weights| * |connectome| -* |rest_clustering| +* |rest_parcellations| * |canica| * |tvl1_haxby| +* |haxby_weights| + * |searchlight| .. raw:: html - + - +


@@ -92,10 +97,10 @@ .. toctree:: :hidden: - AUTHORS.rst + authors.rst user_guide.rst auto_examples/index.rst whats_new.rst + contributing.rst Nilearn is part of the `NiPy ecosystem `_. - diff --git a/doc/install_doc_component.html b/doc/install_doc_component.html index 82f8ec85f5..a81b5869f5 100644 --- a/doc/install_doc_component.html +++ b/doc/install_doc_component.html @@ -43,16 +43,17 @@ Anaconda


We recommend that you install a complete scientific Python - distribution like 64 bit Anaconda - . Since it meets all the requirements of nilearn, it will save +

We recommend that you install a complete + 64 bit scientific Python + distribution like Anaconda + . Since it meets all the requirements of nilearn, it will save you time and trouble. You could also check PythonXY + href="http://python-xy.github.io/" target="_blank">PythonXY as an alternative.

Nilearn requires a Python installation and the following - dependencies: ipython, scikit-learn, matplotlib and nibabel

+ dependencies: ipython, scikit-learn, matplotlib and nibabel.
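As an optional sanity check (a suggestion here, not part of the official instructions), you can verify from a Python prompt that these dependencies are importable and check their versions:

    >>> import sklearn, matplotlib, nibabel
    >>> print(sklearn.__version__, matplotlib.__version__, nibabel.__version__)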

Second: open a Command Prompt

(Press "Win-R", type "cmd" and press "Enter". This will open @@ -74,14 +75,15 @@ reference external" href="https://store.continuum.io/cshop/anaconda/" target="_blank">Anaconda

-

We recommend that you install a complete scientific Python - distribution like 64 bit We recommend that you install a complete + 64 bit scientific + Python distribution like - Anaconda. Since it meets all the requirements of nilearn, + Anaconda. Since it meets all the requirements of nilearn, it will save you time and trouble.

Nilearn requires a Python installation and the following - dependencies: ipython, scikit-learn, matplotlib and nibabel

+ dependencies: ipython, scikit-learn, matplotlib and nibabel.

Second: open a Terminal

(Navigate to /Applications/Utilities and double-click on @@ -103,7 +105,7 @@

If you are using Ubuntu or Debian and you have access to - Neurodebian, then simply install the + Neurodebian, then simply install the python-nilearn package through Neurodebian.

@@ -113,15 +115,16 @@ packages using the distribution package manager: ipython , scikit-learn (sometimes called sklearn, or python-sklearn), matplotlib (sometimes - called python-matplotlib) and nibabel - (sometimes called python-nibabel)

+ called python-matplotlib) and nibabel + (sometimes called python-nibabel).

If you do not have access to the package manager we recommend - that you install a complete scientific Python distribution like 64 bit + that you install a complete 64 bit + scientific Python distribution like - Anaconda. Since it meets all the requirements of nilearn, - it will save you time and trouble..

+ Anaconda
. Since it meets all the requirements of nilearn, + it will save you time and trouble.

Second: open a Terminal

(Press ctrl+alt+t and a Terminal console will pop up)
diff --git a/doc/introduction.rst b/doc/introduction.rst index aa3a46b248..a9f354b468 100644 --- a/doc/introduction.rst +++ b/doc/introduction.rst @@ -1,3 +1,10 @@ +.. for doc tests to run with recent NumPy 1.14, we need to set print options + to older versions. See issue #1593 for more details + >>> import numpy as np + >>> from distutils.version import LooseVersion + >>> if LooseVersion(np.__version__) >= LooseVersion('1.14'): + ... np.set_printoptions(legacy='1.13') + ===================================== Introduction: nilearn in a nutshell ===================================== @@ -22,9 +29,9 @@ What is nilearn: MVPA, decoding, predictive models, functional connectivity :ref:`brain parcellations `, :ref:`connectomes `. - Nilearn can readily be used on :ref:`task fMRI `, + Nilearn can readily be used on :ref:`task fMRI `, :ref:`resting-state `, or - :ref:`VBM ` data. + :ref:`VBM ` data. For a machine-learning expert, the value of nilearn can be seen as domain-specific **feature engineering** construction, that is, shaping @@ -201,11 +208,11 @@ the file name:: The filename could be given as "~/t_map000.nii' as nilearn expands "~" to the home directory. - :ref:`See more on file name matchings `. + :ref:`See more on file name matchings `. -.. image:: auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_glass_brain_001.png - :target: auto_examples/manipulating_visualizing/plot_demo_glass_brain.html +.. image:: auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_001.png + :target: auto_examples/01_plotting/plot_demo_glass_brain.html :align: center :scale: 60 @@ -264,10 +271,20 @@ To loop over each individual volume of a 4D image, use :func:`image.iter_img`:: * To perform a for loop in Python, you can use the "range" function * The solution can be found :ref:`here - ` + ` | + +.. topic:: **Warm up examples** + + The two following examples may be useful to get familiar with data + representation in nilearn: + + * :ref:`sphx_glr_auto_examples_plot_nilearn_101.py` + + * :ref:`sphx_glr_auto_examples_plot_3d_and_4d_niimg.py` + ____ Now, if you want out-of-the-box methods to process neuroimaging data, jump @@ -450,4 +467,4 @@ Finding help * For machine-learning and scikit-learn questions, expertise can be found on the scikit-learn mailing list: - https://lists.sourceforge.net/lists/listinfo/scikit-learn-general + https://mail.python.org/mailman/listinfo/scikit-learn diff --git a/doc/logos/digi-saclay-logo-small.png b/doc/logos/digi-saclay-logo-small.png new file mode 100644 index 0000000000..2190fc5a51 Binary files /dev/null and b/doc/logos/digi-saclay-logo-small.png differ diff --git a/doc/logos/nilearn-logo-tagline.svg b/doc/logos/nilearn-logo-tagline.svg new file mode 100644 index 0000000000..1bbe279035 --- /dev/null +++ b/doc/logos/nilearn-logo-tagline.svg @@ -0,0 +1,117 @@ + + + +image/svg+xmlni +Easier machine learningfor neuroimaging + \ No newline at end of file diff --git a/doc/logos/nilearn_logo_tagline.png b/doc/logos/nilearn_logo_tagline.png new file mode 100644 index 0000000000..c0c381cbd2 Binary files /dev/null and b/doc/logos/nilearn_logo_tagline.png differ diff --git a/doc/manipulating_images/index.rst b/doc/manipulating_images/index.rst new file mode 100644 index 0000000000..28870327fe --- /dev/null +++ b/doc/manipulating_images/index.rst @@ -0,0 +1,22 @@ +.. include:: ../tune_toc.rst + + +.. 
_image_manipulation:
+
+========================================
+Manipulating brain volumes with nilearn
+========================================
+
+In this section, we detail the general tools for manipulating
+brain images with nilearn.
+
+|
+
+.. include:: ../includes/big_toc_css.rst
+
+
+.. toctree::
+
+   input_output.rst
+   manipulating_images.rst
+   masker_objects.rst
diff --git a/doc/manipulating_images/input_output.rst b/doc/manipulating_images/input_output.rst
new file mode 100644
index 0000000000..0254e0711d
--- /dev/null
+++ b/doc/manipulating_images/input_output.rst
@@ -0,0 +1,264 @@
+.. _extracting_data:
+
+=====================================================
+Input and output: neuroimaging data representation
+=====================================================
+
+.. contents:: **Contents**
+    :local:
+    :depth: 1
+
+|
+
+.. currentmodule:: nilearn.image
+
+.. _loading_data:
+
+Inputting data: file names or image objects
+===========================================
+
+File names and objects, 3D and 4D images
+-----------------------------------------
+
+All Nilearn functions accept file names as arguments::
+
+   >>> from nilearn import image
+   >>> smoothed_img = image.smooth_img('/home/user/t_map001.nii') # doctest: +SKIP
+
+Nilearn can operate on either file names or `NiftiImage objects
+`_. The latter represents the
+data loaded in memory. In the example above, the
+function :func:`smooth_img` returns a Nifti1Image object, which can then
+be readily passed to other nilearn functions.
+
+In nilearn, we often use the term *"niimg"* as an abbreviation that denotes
+either a file name or a `NiftiImage object
+`_.
+
+Niimgs can be 3D or 4D. A 4D niimg may for instance represent a time
+series of 3D images. It can be **a list of file names**, if these contain
+3D information::
+
+   >>> # dataset folder contains subject1.nii and subject2.nii
+   >>> from nilearn.image import smooth_img
+   >>> result_img = smooth_img(['dataset/subject1.nii', 'dataset/subject2.nii']) # doctest: +SKIP
+
+``result_img`` is a 4D in-memory image, containing the data of both
+subjects.
+
+
+.. _filename_matching:
+
+File name matching: "globbing" and user path expansion
+------------------------------------------------------
+
+You can specify files with *wildcard* matching patterns (as in Unix
+shell):
+
+ * **Matching multiple files**: suppose the dataset folder contains
+   subject_01.nii, subject_02.nii, and subject_03.nii;
+   ``dataset/subject_*.nii`` is a glob expression matching all filenames::
+
+    >>> # Example with a smoothing process:
+    >>> from nilearn.image import smooth_img
+    >>> result_img = smooth_img("dataset/subject_*.nii") # doctest: +SKIP
+
+   Note that the result is a single 4D image.
+
+ * **Expanding the home directory**: ``~`` is expanded to your home
+   directory::
+
+    >>> result_img = smooth_img("~/dataset/subject_01.nii") # doctest: +SKIP
+
+   Using ``~`` rather than specifying the details of the path is good
+   practice, as it will make it more likely that your script works on
+   different computers.
+
+
+.. topic:: **Python globbing**
+
+    For more complicated use cases, Python also provides functions to work
+    with file paths, in particular, :func:`glob.glob`.
+
+    .. warning::
+
+        Unlike nilearn's path expansion, the result of :func:`glob.glob` is
+        not sorted and, depending on the computer you are running it on,
+        it might not be in alphabetical order. We advise you to rely on
+        nilearn's path expansion.
+
+    To load data with globbing, we suggest that you use
+    :func:`nilearn.image.load_img`.
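For the more manual route just mentioned, a short hedged sketch combining :func:`glob.glob` with :func:`nilearn.image.load_img` (the ``dataset/`` path is an assumed example, and ``sorted`` compensates for the arbitrary ordering of :func:`glob.glob`)::

    >>> import glob
    >>> from nilearn.image import load_img
    >>> files = sorted(glob.glob('dataset/subject_*.nii'))  # explicit, sorted list of files
    >>> img_4d = load_img(files)  # one in-memory 4D image   # doctest: +SKIP

+
+
+..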
currentmodule:: nilearn.datasets + +.. _datasets: + +Fetching open datasets from Internet +===================================== + +Nilearn provides dataset fetching function that +automatically downloads reference +datasets and atlases. They can be imported from +:mod:`nilearn.datasets`:: + + >>> from nilearn import datasets + >>> haxby_dataset = datasets.fetch_haxby() # doctest: +SKIP + +They return a data structure that contains different pieces of +information on the retrieved dataset, including the +file names on hard disk:: + + >>> # The different files + >>> print(sorted(list(haxby_dataset.keys()))) # doctest: +SKIP + ['anat', 'description', 'func', 'mask', 'mask_face', 'mask_face_little', + 'mask_house', 'mask_house_little', 'mask_vt', 'session_target'] + >>> # Path to first functional file + >>> print(haxby_dataset.func[0]) # doctest: +SKIP + /.../nilearn_data/haxby2001/subj1/bold.nii.gz + +Explanation and further resources of the dataset at hand can be retrieved as +follows:: + + >>> print(haxby_dataset.description) # doctest: +SKIP + Haxby 2001 results + + + Notes + ----- + Results from a classical fMRI study that... + +| + +.. seealso:: + + For a list of all the data fetching functions in nilearn, see + :ref:`datasets_ref`. + +| + +.. topic:: **nilearn_data: Where is the downloaded data stored?** + + The fetching functions download the reference datasets to the disk. + They save it locally for future use, in one of the + following directories (in order of priority, if present): + + * the folder specified by `data_dir` parameter in the fetching function + * the global environment variable `NILEARN_SHARED_DATA` + * the user environment variable `NILEARN_DATA` + * the `nilearn_data` folder in the user home folder + + The two different environment variables (NILEARN_SHARED_DATA and + NILEARN_DATA) are provided for multi-user systems, to distinguish a + global dataset repository that may be read-only at the user-level. + Note that you can copy that folder to another user's computers to + avoid the initial dataset download on the first fetching call. + + You can check in which directory nilearn will store the data with the + function :func:`nilearn.datasets.get_data_dirs`. + + +| + +Understanding neuroimaging data +=============================== + +Nifti and Analyze data +----------------------- + +For volumetric data, nilearn works with data stored as in the Nifti +structure (via the nibabel_ package). + +The `NifTi `_ data structure (also used in +Analyze files) is the standard way of sharing data in neuroimaging +research. Three main components are: + +:data: + raw scans in form of a numpy array: ``data = img.get_data()`` +:affine: + returns the transformation matrix that maps + from voxel indices of the numpy array to actual real-world + locations of the brain: + ``affine = img.affine`` +:header: + low-level informations about the data (slice duration, etc.): + ``header = img.header`` + +If you need to load the data without using nilearn, read the nibabel_ +documentation. + +Note: For older versions of nibabel_, affine and header can be retrieved +with ``get_affine()`` and ``get_header()``. + + +.. topic:: **Dataset formatting: data shape** + + It is important to appreciate two main representations for + storing and accessing more than one Nifti images, that is sets + of MRI scans: + + - a big 4D matrix representing (3D MRI + 1D for time), stored in a single + Nifti file. + `FSL `_ users tend to + prefer this format. 
+ - several 3D matrices representing each time point (single 3D volume) of the + session, stored in set of 3D Nifti or analyse files. + `SPM `_ users tend + to prefer this format. + +.. _niimg: + +Niimg-like objects +------------------- + +Nilearn functions take as input argument what we call "Niimg-like +objects": + +**Niimg:** A Niimg-like object can be one of the following: + + * A string with a file path to a Nifti or Analyse image + * An ``SpatialImage`` from nibabel, ie an object exposing ``get_data()`` + method and ``affine`` attribute, typically a ``Nifti1Image`` from nibabel_. + +**Niimg-4D:** Similarly, some functions require 4D Nifti-like +data, which we call Niimgs or Niimg-4D. Accepted input arguments are: + + * A path to a 4D Nifti image + * List of paths to 3D Nifti images + * 4D Nifti-like object + * List of 3D Nifti-like objects + +.. topic:: **Image affines** + + If you provide a sequence of Nifti images, all of them must have the same + affine. + +Text files: phenotype or behavior +---------------------------------- + +Phenotypic or behavioral data are often provided as text or CSV +(Comma Separated Values) file. They +can be loaded with `pd.read_csv` but you may have to specify some options +(typically `sep` if fields aren't delimited with a comma). + +For the Haxby datasets, we can load the categories of the images +presented to the subject:: + + >>> from nilearn import datasets + >>> haxby_dataset = datasets.fetch_haxby() # doctest: +SKIP + >>> import pandas as pd # doctest: +SKIP + >>> labels = pd.read_csv(haxby_dataset.session_target[0], sep=" ") # doctest: +SKIP + >>> stimuli = labels['labels'] # doctest: +SKIP + >>> print(stimuli.unique()) # doctest: +SKIP + ['bottle' 'cat' 'chair' 'face' 'house' 'rest' 'scissors' 'scrambledpix' + 'shoe'] + +.. topic:: **Reading CSV with pandas** + + `Pandas `_ is a powerful package to read + data from CSV files and manipulate them. + +| + +.. _nibabel: http://nipy.sourceforge.net/nibabel/ diff --git a/doc/manipulating_images/manipulating_images.rst b/doc/manipulating_images/manipulating_images.rst new file mode 100644 index 0000000000..57993de8ad --- /dev/null +++ b/doc/manipulating_images/manipulating_images.rst @@ -0,0 +1,274 @@ +.. _data_manipulation: + +===================================================================== +Manipulating images: resampling, smoothing, masking, ROIs... +===================================================================== + +This chapter discusses how nilearn can be used to do simple operations on +brain images. + + +.. contents:: **Chapters contents** + :local: + :depth: 1 + +.. _preprocessing_functions: + +Functions for data preparation and image transformation +========================================================= + +Nilearn comes with many simple functions for simple data preparation and +transformation. Note that if you want to perform these operations while +loading the data into a data matrix, most are also integrated in the +:ref:`masker objects `. + +.. 
currentmodule:: nilearn + + +* Computing the mean of images (along the time/4th dimension): + :func:`nilearn.image.mean_img` +* Applying numpy functions on an image or a list of images: + :func:`nilearn.image.math_img` +* Swapping voxels of both hemisphere (e.g., useful to homogenize masks + inter-hemispherically): + :func:`nilearn.image.swap_img_hemispheres` +* Smoothing: :func:`nilearn.image.smooth_img` +* Cleaning signals (e.g., linear detrending, standardization, + confound removal, low/high pass filtering): + :func:`nilearn.image.clean_img` + + .. seealso:: + + To apply this cleaning on signal matrices rather than images: + :func:`nilearn.signal.clean` + +.. _resampling: + +Resampling images +================= + +Resampling one image to match another one +------------------------------------------ + +:func:`nilearn.image.resample_to_img` resamples an image to a reference +image. + +.. topic:: **Example** + + * :ref:`sphx_glr_auto_examples_04_manipulating_images_plot_resample_to_template.py` + +.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_resample_to_template_001.png + :target: ../auto_examples/04_manipulating_images/plot_resample_to_template.html + :width: 45% +.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_resample_to_template_002.png + :target: ../auto_examples/04_manipulating_images/plot_resample_to_template.html + :width: 45% + +This can be useful to display two images as overlays in some +viewers (e.g., FSLView) that require all images to be on the same grid. + +Resampling to a specific target affine, shape, or resolution +------------------------------------------------------------- + +:func:`nilearn.image.resample_img` specifies the resampling in terms of +the `target_affine` to match the spatial configuration defined by the new +affine. + +Additionally, a `target_shape` can be used to resize images +(i.e., cropping or padding with zeros) to match an expected data +image dimensions (shape composed of x, y, and z). + +Resampling can be useful to downsample images to increase processing +speed and lower memory consumption. + +On an advanced note, automatic computation of offset and bounding box +can be performed by specifying a 3x3 matrix instead of the 4x4 affine. +In this case, nilearn computes automatically the translation part +of the transformation matrix (i.e., affine). + +.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_affine_transformation_002.png + :target: ../auto_examples/04_manipulating_images/plot_affine_transformation.html + :width: 30% +.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_affine_transformation_004.png + :target: ../auto_examples/04_manipulating_images/plot_affine_transformation.html + :width: 30% +.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_affine_transformation_003.png + :target: ../auto_examples/04_manipulating_images/plot_affine_transformation.html + :width: 30% + + +.. topic:: **Special case: resampling to a given voxel size** + + Specifying a 3x3 matrix that is diagonal as a target_affine fixes the + voxel size. For instance to resample to 3x3x3 mm voxels:: + + >>> import numpy as np + >>> target_affine = np.diag((3, 3, 3)) + +.. seealso:: + + :ref:`An example illustrating affine transforms on data and bounding boxes ` + +Accessing individual volumes in 4D images +=========================================== + +* :func:`nilearn.image.index_img`: selects one or more volumes in a 4D + image. 
+ +* :func:`nilearn.image.iter_img`: loops over all the volumes of a 4D + image. + +.. seealso:: + + * :func:`nilearn.image.concat_imgs`: merge multiple 3D (or 4D) images + into one 4D image by concatenation along the 4th (time) axis + + * :func:`nilearn.image.load_img`: load an image into memory. The + benefit of this function is that it will convert various + representations, such as filename, list of filenames, wildcards, + list of in-memory objects, to an in-memory NiftiImage. + + * :func:`nilearn.image.new_img_like`: given data in a numpy array, + creates a new image using an existing reference image for the + metadata. + +| + +.. topic:: **Examples** + + * :ref:`sphx_glr_auto_examples_plot_3d_and_4d_niimg.py` + + * :ref:`sphx_glr_auto_examples_01_plotting_plot_overlay.py` + +Computing and applying spatial masks +===================================== + +Relevant functions: + +* compute a mask from EPI images: :func:`nilearn.masking.compute_epi_mask` +* compute a grey-matter mask using the MNI template: + :func:`nilearn.masking.compute_gray_matter_mask`. +* compute a mask from images with a flat background: + :func:`nilearn.masking.compute_background_mask` +* compute for multiple sessions/subjects: + :func:`nilearn.masking.compute_multi_epi_mask` + :func:`nilearn.masking.compute_multi_background_mask` +* apply: :func:`nilearn.masking.apply_mask` +* intersect several masks (useful for multi sessions/subjects): :func:`nilearn.masking.intersect_masks` +* unmasking: :func:`nilearn.masking.unmask` + + +Extracting a brain mask +------------------------ + +If we do not have a spatial mask of the target regions, a brain mask +can be computed from the data: + +- :func:`nilearn.masking.compute_background_mask` for brain images where + the brain stands out of a constant background. This is typically the + case when working on statistic maps output after a brain extraction +- :func:`nilearn.masking.compute_epi_mask` for EPI images +- :func:`nilearn.masking.compute_gray_matter_mask` to compute a + gray-matter mask using the MNI template. + + +.. literalinclude:: ../../examples/01_plotting/plot_visualization.py + :start-after: # Simple computation of a mask from the fMRI data + :end-before: # Applying the mask to extract the corresponding time series + +.. figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_visualization_002.png + :target: ../auto_examples/01_plotting/plot_visualization.html + :scale: 50% + + +.. _mask_4d_2_3d: + +Masking data: from 4D Nifti images to 2D data arrays +--------------------------------------------------------------- + +fMRI data is usually represented as a 4D block of data: 3 spatial +dimensions and one time dimension. In practice, we are usually +interested in working on the voxel time-series in the +brain. It is thus convenient to apply a brain mask in order to convert the +4D brain images representation into a restructured 2D data representation, +`voxel` **x** `time`, as depicted below: + +.. image:: ../images/masking.jpg + :align: center + :width: 100% + +Note that in an analysis pipeline, this operation is best done using the +:ref:`masker objects `. For completness, we give code to +do it manually below: + +.. literalinclude:: ../../examples/01_plotting/plot_visualization.py + :start-after: # Applying the mask to extract the corresponding time series + +.. 
figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_visualization_003.png + :target: ../auto_examples/01_plotting/plot_visualization.html + :align: center + :scale: 40 + + + +Image operations: creating a ROI mask manually +=============================================== + +A region of interest (ROI) mask can be computed for instance with a +statistical test. This requires a chain of image +operations on the input data. Here is a possible recipe for computing an +ROI mask: + + * **Smoothing**: Before a statistical test, it is often use to smooth a bit + the image using :func:`nilearn.image.smooth_img`, typically fwhm=6 for + fMRI. + + * **Selecting voxels**: Given the smoothed data, we can select voxels + with a statistical test (eg opposing face and house experimental + conditions), for instance with a simple Student's t-test using scipy + function :func:`scipy.stats.ttest_ind`. + + * **Thresholding**: Then we need threshold the statistical map to have + better representation of voxels of interest. + + * **Mask intersection and dilation**: Post-processing the results with + simple morphological operations, mask intersection and dilation. + + * we can use another mask, such as a grey-matter mask, to select + only the voxels which are common in both masks. + + * we can do `morphological dilation + `_ to achieve + more compact blobs with more regular boundaries. The function is + used from :func:`scipy.ndimage.binary_dilation`. + + * **Extracting connected components**: We end with splitting the connected + ROIs into two separate regions (ROIs), one in each hemisphere. The + function :func:`scipy.ndimage.label` from the scipy library is used. + + * **Saving the result**: The final voxel mask is saved to disk using + the 'to_filename' method of the image object. + (or **nibabel.save**). + + +.. seealso:: + + For extracting connected components: + + * A function :func:`nilearn.regions.connected_regions` can be used readily + on probabilistic atlas Nifti-like images whereas + + * A function :func:`nilearn.regions.connected_label_regions` can be used on + atlases denoted as labels. For instance, atlases labelled using KMeans. + +.. _nibabel: http://nipy.sourceforge.net/nibabel/ + +.. topic:: **Code** + + A complete script of above steps with full description can be found :ref:`here + `. + +.. seealso:: + + * :ref:`Automatic region extraction on 4D atlas images + `. diff --git a/doc/manipulating_images/masker_objects.rst b/doc/manipulating_images/masker_objects.rst new file mode 100644 index 0000000000..d968254b4b --- /dev/null +++ b/doc/manipulating_images/masker_objects.rst @@ -0,0 +1,423 @@ +.. _masker_objects: + +===================================================================== +From neuroimaging volumes to data matrices: the masker objects +===================================================================== + +This chapter introduces the maskers: objects that go from +neuroimaging volumes, on the disk or in memory, to data matrices, eg of +time series. + +.. contents:: **Chapters contents** + :local: + :depth: 1 + + +The concept of "masker" objects +=============================== + +In any analysis, the first step is to load the data. 
diff --git a/doc/manipulating_images/masker_objects.rst b/doc/manipulating_images/masker_objects.rst
new file mode 100644
index 0000000000..d968254b4b
--- /dev/null
+++ b/doc/manipulating_images/masker_objects.rst
@@ -0,0 +1,423 @@
+.. _masker_objects:
+
+=====================================================================
+From neuroimaging volumes to data matrices: the masker objects
+=====================================================================
+
+This chapter introduces the maskers: objects that go from
+neuroimaging volumes, on the disk or in memory, to data matrices, e.g. of
+time series.
+
+.. contents:: **Chapter contents**
+    :local:
+    :depth: 1
+
+
+The concept of "masker" objects
+===============================
+
+In any analysis, the first step is to load the data.
+It is often convenient to apply some basic data
+transformations and to turn the data into a 2D (samples x features) matrix,
+where the samples could be different time points, and the features derived
+from different voxels (e.g., restrict analysis to the ventral visual stream),
+regions of interest (e.g., extract local signals from spheres/cubes), or
+pre-specified networks (e.g., look at data from all voxels of a set of
+network nodes). Think of masker objects as Swiss Army knives for shaping
+the raw neuroimaging data in 3D space into the units of observation
+relevant for the research questions at hand.
+
+
+.. |niimgs| image:: ../images/niimgs.jpg
+    :scale: 50%
+
+.. |arrays| image:: ../images/feature_array.jpg
+    :scale: 35%
+
+.. |arrow| raw:: html
+
+
+
+.. centered:: |niimgs| |arrow| |arrays|
+
+
+
+"masker" objects (found in the module :mod:`nilearn.input_data`)
+simplify these "data folding" steps that often precede the
+statistical analysis.
+
+Note that the masker objects may not cover all the image transformations
+for specific tasks. Users who need some specific processing may
+have to call :ref:`specific functions `
+(modules :mod:`nilearn.signal`, :mod:`nilearn.masking`).
+
+|
+
+.. topic:: **Advanced: Design philosophy of "Maskers"**
+
+   The design of these classes is similar to `scikit-learn
+   `_\ 's transformers. First, objects are
+   initialized with some parameters guiding the transformation
+   (unrelated to the data). Then the `fit()` method should be called,
+   possibly specifying some data-related information (such as number of
+   images to process), to perform some initial computation (e.g.,
+   fitting a mask based on the data). Finally, `transform()` can be
+   called, with the data as argument, to perform some computation on
+   the data itself (e.g., extracting time series from images).
+
+
+.. currentmodule:: nilearn.input_data
+
+.. _nifti_masker:
+
+:class:`NiftiMasker`: applying a mask to load time-series
+==========================================================
+
+:class:`NiftiMasker` is a powerful tool to load images and
+extract voxel signals in the area defined by the mask.
+It applies some basic preprocessing
+steps with commonly used parameters as defaults.
+But it is *very important* to look at your data to see the effects
+of the preprocessing and validate it.
+
+.. topic:: **Advanced: scikit-learn Pipelines**
+
+   :class:`NiftiMasker` is a `scikit-learn
+   `_ compliant
+   transformer so that you can directly plug it into a `scikit-learn
+   pipeline `_.
+
+
+Custom data loading: loading only the first 100 time points
+------------------------------------------------------------
+
+Suppose we want to restrict a dataset to the first 100 frames. Below, we load
+a resting-state dataset with :func:`fetch_adhd()
+`, restrict it to 100 frames and
+build a new niimg object that we can give to the masker. Although
+possible, there is no need to save your data to a file to pass it to a
+:class:`NiftiMasker`. Simply use :func:`nilearn.image.index_img` to apply a
+slice and create a :ref:`Niimg ` in memory:
+
+
+.. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py
+    :start-after: Load ADHD resting-state dataset
+    :end-before: # To display the background
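+
+A minimal sketch of the same idea on an arbitrary 4D image (the file name is
+a placeholder)::
+
+    >>> from nilearn.image import index_img
+    >>> # Keep only the first 100 volumes of a 4D image
+    >>> first_100 = index_img('rest.nii.gz', slice(0, 100))  # doctest: +SKIP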
+
+Controlling how the mask is computed from the data
+--------------------------------------------------
+
+In this section, we show how the masker object can compute a mask
+automatically for subsequent statistical analysis.
+On some datasets, the default algorithm may however perform poorly.
+This is why it is very important to
+**always look at your data** before and after feature
+engineering using masker objects.
+
+.. note::
+
+   The full example described in this section can be found here:
+   :doc:`plot_mask_computation.py <../auto_examples/04_manipulating_images/plot_mask_computation>`.
+   It is also related to this example:
+   :doc:`plot_nifti_simple.py <../auto_examples/04_manipulating_images/plot_nifti_simple>`.
+
+
+Visualizing the computed mask
+..............................
+
+If a mask is not specified as an argument, :class:`NiftiMasker` will try to
+compute one from the provided neuroimaging data.
+It is *very important* to verify the quality of the generated mask by
+visualization: this allows you to see whether it is suitable for your data
+and intended analyses. If it is not, the mask computation parameters can
+be modified. See the :class:`NiftiMasker` documentation for a complete list of
+mask computation parameters.
+
+The mask can be retrieved and visualized from the `mask_img_` attribute
+of the masker:
+
+.. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py
+    :start-after: # We need to specify an 'epi' mask_strategy, as this is raw EPI data
+    :end-before: # Generate mask with strong opening
+
+
+.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_004.png
+    :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html
+    :scale: 50%
+
+Different masking strategies
+.............................
+
+The `mask_strategy` argument controls how the mask is computed:
+
+* `background`: detects a continuous background
+* `epi`: suitable for EPI images
+* `template`: uses an MNI grey-matter template
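+
+For instance, a masker tuned for raw EPI data could be set up as follows
+(a sketch with a placeholder file name)::
+
+    >>> from nilearn.input_data import NiftiMasker
+    >>> masker = NiftiMasker(mask_strategy='epi')  # doctest: +SKIP
+    >>> masker.fit('epi.nii.gz')                   # doctest: +SKIP
+    >>> mask_img = masker.mask_img_                # doctest: +SKIP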
+
+Extra mask parameters: opening, cutoff...
+..........................................
+
+The underlying function is :func:`nilearn.masking.compute_epi_mask`,
+called using the `mask_args` argument of the :class:`NiftiMasker`.
+Controlling these arguments sets the finer aspects of the mask. See the
+function's documentation, or :doc:`the NiftiMasker example
+<../auto_examples/04_manipulating_images/plot_mask_computation>`.
+
+.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_005.png
+    :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html
+    :scale: 50%
+
+.. _masker_preprocessing_steps:
+
+Common data preparation steps: smoothing, filtering, resampling
+----------------------------------------------------------------
+
+:class:`NiftiMasker` comes with many parameters that enable data
+preparation::
+
+   >>> from nilearn import input_data
+   >>> masker = input_data.NiftiMasker()
+   >>> masker
+   NiftiMasker(detrend=False, dtype=None, high_pass=None, low_pass=None,
+        mask_args=None, mask_img=None, mask_strategy='background',
+        memory=Memory(cachedir=None), memory_level=1, sample_mask=None,
+        sessions=None, smoothing_fwhm=None, standardize=False, t_r=None,
+        target_affine=None, target_shape=None, verbose=0)
+
+The meaning of each parameter is described in the documentation of
+:class:`NiftiMasker` (click on the name :class:`NiftiMasker`); here we
+comment on the most important ones.
+
+.. topic:: **`dtype` argument**
+
+   Forcing your data to have a `dtype` of **float32** can help
+   save memory and is often a good-enough numerical precision.
+   You can force this cast by choosing `dtype` to be 'auto'.
+   In the future this cast will be the default behaviour.
+
+
+.. seealso::
+
+   If you do not want to use the :class:`NiftiMasker` to perform these
+   simple operations on data, note that they can also be manually
+   accessed in nilearn such as in
+   :ref:`corresponding functions `.
+
+Smoothing
+.........
+
+:class:`NiftiMasker` can apply Gaussian spatial smoothing to the
+neuroimaging data, useful to fight noise and to compensate for
+inter-individual differences in neuroanatomy. It is achieved by specifying
+the full-width half maximum (FWHM; in millimeters) with the `smoothing_fwhm`
+parameter. Anisotropic filtering is also possible by passing 3 scalars
+``(x, y, z)``, the FWHM along the x, y, and z directions.
+
+The underlying function properly handles non-cubic voxels by scaling the
+given widths appropriately.
+
+.. seealso::
+
+   :func:`nilearn.image.smooth_img`
+
+.. _temporal_filtering:
+
+Temporal Filtering and confound removal
+........................................
+
+:class:`NiftiMasker` can also improve temporal properties of the data,
+before conversion to voxel signals.
+
+- **Standardization**. Parameter ``standardize``: Signals can be
+  standardized (scaled to unit variance).
+
+- **Frequency filtering**. Low-pass and high-pass filters can be used to
+  remove artifacts. Parameters: ``high_pass`` and ``low_pass``, specified
+  in Hz (note that you must specify the sampling interval in seconds with
+  the ``t_r`` parameter: ``low_pass=.5, t_r=2.1``).
+
+- **Confound removal**. Two ways of removing confounds are provided: simple
+  detrending or using prespecified confounds, such as behavioral or movement
+  information.
+
+  * Linear trends can be removed by activating the `detrend` parameter.
+    This accounts for slow (as opposed to abrupt or transient) changes
+    in voxel values along a series of brain images that are unrelated to the
+    signal of interest (e.g., the neural correlates of cognitive tasks).
+    It is not activated by default in :class:`NiftiMasker` but is recommended
+    in almost all scenarios.
+
+  * More complex confounds, measured during the acquisition, can be removed
+    by passing them to :meth:`NiftiMasker.transform`. If the dataset
+    provides a confounds file, just pass its path to the masker.
+
+.. topic:: **Exercise**
+   :class: green
+
+   You can, more as a training than as an exercise, try to play with
+   the parameters in
+   :ref:`sphx_glr_auto_examples_plot_decoding_tutorial.py`.
+   Try to enable detrending and run the script:
+   does it have a big impact on the result?
+
+
+.. seealso::
+
+   :func:`nilearn.signal.clean`
+
+
+
+
+Resampling: resizing and changing resolutions of images
+.......................................................
+
+:class:`NiftiMasker` and many similar classes enable resampling
+(recasting of images into different resolutions and transformations of
+brain voxel data). Two parameters control resampling:
+
+* `target_affine` to resample (resize, rotate...) images in order to match
+  the spatial configuration defined by the new affine (i.e., matrix
+  transforming from voxel space into world space).
+
+* Additionally, a `target_shape` can be used to resize images
+  (i.e., cropping or padding with zeros) to match expected data
+  image dimensions (shape composed of x, y, and z).
+
+How to combine these parameters to obtain a specific resampling
+is explained in detail in :ref:`resampling`.
+
+.. seealso::
+
+   :func:`nilearn.image.resample_img`, :func:`nilearn.image.resample_to_img`
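+
+As a summary, a masker combining several of the preparation steps above might
+be set up as follows (parameter values are arbitrary examples and the file
+name is a placeholder)::
+
+    >>> from nilearn.input_data import NiftiMasker
+    >>> masker = NiftiMasker(smoothing_fwhm=6, standardize=True, detrend=True,
+    ...                      high_pass=0.01, t_r=2.5)        # doctest: +SKIP
+    >>> time_series = masker.fit_transform('func.nii.gz')    # doctest: +SKIP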
+
+.. _unmasking_step:
+
+Inverse transform: unmasking data
+---------------------------------
+
+Once voxel signals have been processed, the result can be visualized as
+images after unmasking (masked-reduced data transformed back into
+the original whole-brain space). This step is present in many
+:ref:`examples ` provided in nilearn. Below you will find
+an excerpt of :ref:`the example performing Anova-SVM on the Haxby data
+`:
+
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_anova_svm.py
+    :start-after: # Look at the SVC's discriminating weights
+    :end-before: # Use the mean image as a background
+
+|
+
+.. topic:: **Examples to better understand the NiftiMasker**
+
+   * :ref:`sphx_glr_auto_examples_04_manipulating_images_plot_nifti_simple.py`
+
+   * :ref:`sphx_glr_auto_examples_04_manipulating_images_plot_mask_computation.py`
+
+|
+
+.. _region:
+
+Extraction of signals from regions:\ :class:`NiftiLabelsMasker`, :class:`NiftiMapsMasker`
+==========================================================================================
+
+The purpose of :class:`NiftiLabelsMasker` and :class:`NiftiMapsMasker` is to
+compute signals from regions containing many voxels. They make it easy to get
+these signals once you have an atlas or a parcellation into brain regions.
+
+Regions definition
+------------------
+
+Nilearn understands two different ways of defining regions, which are called
+labels and maps, handled by :class:`NiftiLabelsMasker` and
+:class:`NiftiMapsMasker`, respectively.
+
+- labels: a single region is defined as the set of all the voxels that have a
+  common label (e.g., anatomical brain region definitions as integers)
+  in the region definition array. The set of
+  regions is defined by a single 3D array, containing a voxel-wise
+  dictionary of label numbers that denote what
+  region a given voxel belongs to. This technique has a big advantage: the
+  required memory load is independent of the number of regions, allowing
+  for a large number of regions. On the other hand, there are
+  several disadvantages: regions cannot spatially overlap
+  and are represented in a binary present/nonpresent coding (no weighting).
+
+- maps: a single region is defined as the set of all the voxels that have a
+  non-zero weight. A set of regions is thus defined by a set of 3D images (or a
+  single 4D image), one 3D image per region (as opposed to all regions in a
+  single 3D image such as for labels, cf. above).
+  While these defined weighted regions can exhibit spatial
+  overlap (as opposed to labels), storage cost scales linearly with the
+  number of regions. Handling a large number (e.g., thousands)
+  of regions will prove difficult with this data transformation of
+  whole-brain voxel data into weighted region-wise data.
+
+.. note::
+
+   These usages are illustrated in the section :ref:`functional_connectomes`.
+
+:class:`NiftiLabelsMasker` Usage
+--------------------------------
+
+Usage of :class:`NiftiLabelsMasker` is similar to that of
+:class:`NiftiMapsMasker`. The main difference is that it requires a labels image
+instead of a set of maps as input.
+
+The `background_label` keyword of :class:`NiftiLabelsMasker` deserves
+some explanation. The voxels that correspond to the brain or a region
+of interest in an fMRI image do not fill the entire image.
+Consequently, in the labels image, there must be a label value that corresponds
+to "outside" the brain (for which no signal should be extracted).
+By default, this label is set to zero in nilearn (referred to as "background").
+Should some non-zero value encoding be necessary, it is possible
+to change the background value with the `background_label` keyword.
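+
+In practice, extracting one signal per labeled region from a 4D image might
+look like this (the atlas and data file names are placeholders)::
+
+    >>> from nilearn.input_data import NiftiLabelsMasker
+    >>> masker = NiftiLabelsMasker(labels_img='atlas_labels.nii.gz',
+    ...                            standardize=True)           # doctest: +SKIP
+    >>> # 2D array of shape (n_time_points, n_regions)
+    >>> signals = masker.fit_transform('func.nii.gz')          # doctest: +SKIP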
+
+.. topic:: **Examples**
+
+   * :ref:`sphx_glr_auto_examples_03_connectivity_plot_signal_extraction.py`
+
+:class:`NiftiMapsMasker` Usage
+------------------------------
+
+This masker takes regions defined as maps. The path to the corresponding
+file is given in the `maps_img` argument.
+
+One important thing that happens transparently during the execution of
+:meth:`NiftiMapsMasker.fit_transform` is resampling. Initially, the images
+and the atlas typically do not have the same shape or the same affine.
+Casting them into the same format is required for successful signal extraction.
+The keyword argument `resampling_target` specifies which format
+(i.e., dimensions and affine) the data should be resampled to.
+See the reference documentation for :class:`NiftiMapsMasker` for every
+possible option.
+
+.. topic:: **Examples**
+
+   * :ref:`sphx_glr_auto_examples_03_connectivity_plot_probabilistic_atlas_extraction.py`
+
+Extraction of signals from seeds:\ :class:`NiftiSpheresMasker`
+===============================================================
+
+The purpose of :class:`NiftiSpheresMasker` is to compute signals from
+seeds containing voxels in spheres. It makes it easy to get these signals once
+you have a list of coordinates.
+A single seed is a sphere defined by the radius (in millimeters) and the
+coordinates (typically MNI or TAL) of its center.
+
+Using :class:`NiftiSpheresMasker` requires a list of coordinates. The
+`seeds` argument takes a list of 3D coordinates (tuples) of the sphere centers;
+they should be in the same space as the images.
+Seeds can overlap spatially and are represented in a binary present/nonpresent
+coding (no weighting).
+Below is an example of a coordinates list of four seeds from the default mode network::
+
+    >>> dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (0, 50, -5)]
+
+`radius` is an optional argument that takes a real value in millimeters.
+If no value is given for the `radius` argument, the single voxel at the given
+seed position is used.
+
+.. topic:: **Examples**
+
+   * :ref:`sphx_glr_auto_examples_03_connectivity_plot_adhd_spheres.py`
diff --git a/doc/manipulating_visualizing/data_preparation.rst b/doc/manipulating_visualizing/data_preparation.rst
deleted file mode 100644
index 7ea3dc1081..0000000000
--- a/doc/manipulating_visualizing/data_preparation.rst
+++ /dev/null
@@ -1,463 +0,0 @@
-.. _extracting_data:
-
-=========================================================
-Data preparation: loading and basic signal extraction
-=========================================================
-
-.. contents:: **Contents**
-    :local:
-    :depth: 1
-
-|
-
-.. topic:: **File names as arguments**
-
-   Nilearn functions and objects accept file names as arguments::
-
-     >>> from nilearn import image
-     >>> smoothed_img = image.smooth_img('/home/user/t_map001.nii') # doctest: +SKIP
-
-   Nilearn can operate on either file names or `NiftiImage objects
-   `_. The later represent
-   the specified nifti files loaded in memory.
-
-   In nilearn, we often use the term 'niimg' as abbreviation that denotes
-   either a file name or a NiftiImage object. In the example above, the
-   function smooth_img returns a NiftiImage object, which can then be
-   readily passed to any other nilearn function that accepts niimg
-   arguments.
-
-   Niimgs can be 3D or 4D, and a 4D niimg can be a list of file names, or
-   even a *wildcard* matching patterns. 
The '~' symbol is also expanded to the - user home folder.For instance, to retrieve a 4D volume of - all t maps smoothed:: - - >>> smoothed_imgs = image.smooth_img('~/t_map*.nii') # doctest: +SKIP - - -| - -The concept of "masker" objects -================================= - -In any analysis, the first step is to load the data. -It is often convenient to apply some basic data -transformations and to turn the data in a 2D (samples x features) matrix, -where the samples could be different time points, and the features derived -from different voxels (e.g., restrict analysis to the ventral visual stream), -regions of interest (e.g., extract local signals from spheres/cubes), or -prespecified networks (e.g., look at data from all voxels of a set of -network nodes). Think of masker objects as swiss army knifes for shaping -the raw neuroimaging data in 3D space into the units of observation -relevant for the research questions at hand. - - -.. |niimgs| image:: ../images/niimgs.jpg - :scale: 50% - -.. |arrays| image:: ../images/feature_array.jpg - :scale: 35% - -.. |arrow| raw:: html - - - -.. centered:: |niimgs| |arrow| |arrays| - - - -"masker" objects (found in modules :mod:`nilearn.input_data`) aim at -simplifying these "data folding" steps that often preceed the actual -statistical analysis. - -On an advanced note, -the underlying philosophy of these classes is similar to `scikit-learn -`_\ 's -transformers. First, objects are initialized with some parameters guiding -the transformation (unrelated to the data). Then the fit() method -should be called, possibly specifying some data-related -information (such as number of images to process), to perform some -initial computation (e.g., fitting a mask based on the data). Finally, -transform() can be called, with the data as argument, to perform some -computation on data themselves (e.g. extracting time series from images). - -Note that the masker objects may not cover all the image transformations -for specific tasks. Users who want to make some specific processing may -have to call low-level functions (see e.g. :mod:`nilearn.signal`, -:mod:`nilearn.masking`). - -.. currentmodule:: nilearn.input_data - -.. _nifti_masker: - -:class:`NiftiMasker`: loading, masking and filtering -========================================================= - -This section details how to use the :class:`NiftiMasker` class. -:class:`NiftiMasker` is a -powerful tool to load images and extract voxel signals in the area -defined by the mask. It is designed to apply some basic preprocessing -steps by default with commonly used parameters as defaults. But it is -*very important* to look at your data to see the effects of the -preprocessings and validate them. - -In particular, :class:`NiftiMasker` is a `scikit-learn -`_ compliant -transformer so that you can directly plug it into a `scikit-learn -pipeline `_. - -Custom data loading --------------------- - -Sometimes, some custom preprocessing of data is necessary. For instance -we can restrict a dataset to the first 100 frames. Below, we load -a resting-state dataset with :func:`fetch_fetch_nyu_rest() -`, restrict it to 100 frames and -build a brand new Nifti-like object to give it to the masker. Although -possible, there is no need to save your data to a file to pass it to a -:class:`NiftiMasker`. Simply use `nibabel -`_ to create a :ref:`Niimg ` -in memory: - - -.. 
literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py - :start-after: Load NYU resting-state dataset - :end-before: # To display the background - -Controlling how the mask is computed from the data ------------------------------------------------------ - -In this tutorial, we show how the masker object can compute a mask -automatically for subsequent statistical analysis. -On some datasets, the default algorithm may however perform poorly. -This is why it is very important to -**always look at your data** before and after feature -engineering using masker objects. - -Computing the mask -................... - -.. note:: - - The full example described in this section can be found here: - :doc:`plot_mask_computation.py <../auto_examples/manipulating_visualizing/plot_mask_computation>`. - It is also related to this example: - :doc:`plot_nifti_simple.py <../auto_examples/manipulating_visualizing/plot_nifti_simple>`. - -If a mask is not specified as an argument, -:class:`NiftiMasker` will try to compute -one from the provided neuroimaging data. -It is *very important* to verify the quality of the generated mask by -visualization. This allows to see whether it -is suitable for your data and intended analyses. -Alternatively, the mask computation parameters can still be modified. See the -:class:`NiftiMasker` documentation for a complete list of mask computation -parameters. - -As a first example, we will now automatically build a mask from a dataset. -We will here use the Haxby dataset because it provides the original mask that -we can compare the data-derived mask against. - -The first step is to generate a mask with default parameters and visualize it. - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py - :start-after: # Simple mask extraction from EPI images - :end-before: # Generate mask with strong opening - - -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_mask_computation_002.png - :target: ../auto_examples/plot_mask_computation.html - :scale: 50% - - -We can then fine-tune the outline of the mask by increasing the number of -opening steps (*opening=10*) using the `mask_args` argument of the -:class:`NiftiMasker`. This effectively performs erosion and dilation operations -on the outer voxel layers of the mask, which can for example remove remaining -skull parts in the image. - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py - :start-after: # Generate mask with strong opening - :end-before: # Generate mask with a high lower cutoff - - -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_mask_computation_003.png - :target: ../auto_examples/plot_mask_computation.html - :scale: 50% - - -Looking at the :func:`nilearn.masking.compute_epi_mask` called by the -:class:`NiftiMasker` object, we see two interesting parameters: -*lower_cutoff* and *upper_cutoff*. These set the grey-value bounds in -which the masking algorithm will search for its threshold -(0 being the minimum of the image and 1 the maximum). We will here increase -the lower cutoff to enforce selection of those -voxels that appear as bright in the EPI image. - - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py - :start-after: # Generate mask with a high lower cutoff - :end-before: ################################################################################ - - -.. 
figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_mask_computation_004.png - :target: ../auto_examples/plot_mask_computation.html - :scale: 50% - - - - -Common data preparation steps: resampling, smoothing, filtering ------------------------------------------------------------------ - -.. seealso:: - - If you do not want to use the :class:`NiftiMasker` to perform these - simple operations on data, note that they can also be manually - accessed in nilearn such as in - :ref:`corresponding functions `. - -.. _resampling: - -Resampling -.......... - -:class:`NiftiMasker` and many similar classes enable resampling -(recasting of images into different resolutions and transformations of -brain voxel data). The resampling procedure takes as input the -*target_affine* to resample (resize, rotate...) images in order to match -the spatial configuration defined by the new affine (i.e., matrix -transforming from voxel space into world space). Additionally, a -*target_shape* can be used to resize images (i.e., cropping or padding -with zeros) to match an expected data image dimensions (shape composed of -x, y, and z). - -As a common use case, resampling can be a viable means to -downsample image quality on purpose to increase processing speed -and lower memory consumption of an analysis pipeline. -In fact, certain image viewers (e.g., FSLView) also require images to be -resampled to display overlays. - -On an advanced note, -automatic computation of offset and bounding box can be performed by -specifying a 3x3 matrix instead of the 4x4 affine. -In this case, nilearn -computes automatically the translation part of the transformation -matrix (i.e., affine). - -.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_affine_transformation_002.png - :target: ../auto_examples/plot_affine_transformation.html - :scale: 33% -.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_affine_transformation_004.png - :target: ../auto_examples/plot_affine_transformation.html - :scale: 33% -.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_affine_transformation_003.png - :target: ../auto_examples/plot_affine_transformation.html - :scale: 33% - - -.. topic:: **Special case: resampling to a given voxel size** - - Specifying a 3x3 matrix that is diagonal as a target_affine fixes the - voxel size. For instance to resample to 3x3x3 mm voxels:: - - >>> import numpy as np - >>> target_affine = np.diag((3, 3, 3)) - -| - -.. seealso:: - - :func:`nilearn.image.resample_img` - - -Smoothing -......... - -:class:`NiftiMasker` can further be used for local spatial filtering of -the neuroimaging data to make the data more homogeneous and thus account -for inter-individual differences in neuroanatomy. -It is achieved by passing the full-width -half maximum (FWHM; in millimeter scale) -along the x, y, and z image axes by specifying the `smoothing_fwhm` parameter. -For an isotropic filtering, passing a scalar is also possible. The underlying -function handles properly the tricky case of non-cubic voxels by scaling the -given widths appropriately. - -.. seealso:: - - :func:`nilearn.image.smooth_img` - - -.. _temporal_filtering: - -Temporal Filtering -.................. - -Rather than optimizing spatial properties of the neuroimaging data, -the user may want to improve aspects of temporal data properties, -before conversion to voxel signals. -:class:`NiftiMasker` can also process voxel signals. Here are the possibilities: - -- Confound removal. 
Two ways of removing confounds are provided. Any linear - trend can be removed by activating the `detrend` option. - This accounts for slow (as opposed to abrupt or transient) changes - in voxel values along a series of brain images that are unrelated to the - signal of interest (e.g., the neural correlates of cognitive tasks). - It is not activated - by default in :class:`NiftiMasker` but is recommended in almost all scenarios. - More complex confounds can - be removed by passing them to :meth:`NiftiMasker.transform`. If the - dataset provides a confounds file, just pass its path to the masker. - -- Linear filtering. Low-pass and high-pass filters can be used to remove artifacts. - It simply removes all voxel values lower or higher than the specified - parameters, respectively. - Care has been taken to automatically - apply this processing to confounds if it appears necessary. - -- Normalization. Signals can be normalized (scaled to unit variance) before - returning them. This is performed by default. - -.. topic:: **Exercise** - - You can, more as a training than as an exercise, try to play with - the parameters in :ref:`sphx_glr_auto_examples_plot_haxby_simple.py`. Try to enable detrending - and run the script: does it have a big impact on the result? - - -.. seealso:: - - :func:`nilearn.signal.clean` - - -Inverse transform: unmasking data ----------------------------------- - -Once voxel signals have been processed, the result can be visualized as -images after unmasking (masked-reduced data transformed back into -the original whole-brain space). This step is present in almost all -the :ref:`examples ` provided in nilearn. Below you will find -an excerpt of :ref:`the example performing Anova-SVM on the Haxby data -`): - -.. literalinclude:: ../../examples/decoding/plot_haxby_anova_svm.py - :start-after: # Look at the SVC's discriminating weights - :end-before: # Create the figure - -| - -.. topic:: **Examples to better understand the NiftiMasker** - - * :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_nifti_simple.py` - - * :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_mask_computation.py` - - -.. _region: - -Extraction of signals from regions:\ :class:`NiftiLabelsMasker`, :class:`NiftiMapsMasker`. -=========================================================================================== - -The purpose of :class:`NiftiLabelsMasker` and :class:`NiftiMapsMasker` is to -compute signals from regions containing many voxels. They make it easy to get -these signals once you have an atlas or a parcellation into brain regions. - -Regions definition ------------------- - -Nilearn understands two different ways of defining regions, which are called -labels and maps, handled by :class:`NiftiLabelsMasker` and -:class:`NiftiMapsMasker`, respectively. - -- labels: a single region is defined as the set of all the voxels that have a - common label (e.g., anatomical brain region definitions as integers) - in the region definition array. The set of - regions is defined by a single 3D array, containing a voxel-wise - dictionary of label numbers that denote what - region a given voxel belongs to. This technique has a big advantage: the - required memory load is independent of the number of regions, allowing - for a large number of regions. On the other hand, there are - several disadvantages: regions cannot spatially overlap - and are represented in a binary present-nonpresent coding (no weighting). 
-- maps: a single region is defined as the set of all the voxels that have a - non-zero weight. A set of regions is thus defined by a set of 3D images (or a - single 4D image), one 3D image per region (as opposed to all regions in a - single 3D image such as for labels, cf. above). - While these defined weighted regions can exhibit spatial - overlap (as opposed to labels), storage cost scales linearly with the - number of regions. Handling a large number (e.g., thousands) - of regions will prove - difficult with this data transformation of whole-brain voxel data - into weighted region-wise data. - -.. note:: - - These usage are illustrated in the section :ref:`functional_connectomes` - -:class:`NiftiLabelsMasker` Usage ---------------------------------- - -Usage of :class:`NiftiLabelsMasker` is similar to that of -:class:`NiftiMapsMasker`. The main difference is that it requires a labels image -instead of a set of maps as input. - -The `background_label` keyword of :class:`NiftiLabelsMasker` deserves -some explanation. The voxels that correspond to the brain or a region -of interest in an fMRI image do not fill the entire -image. Consequently, in the labels image, there must be a label value that -corresponds to "outside" the brain (for which no signal should be -extracted). By default, this label is set to zero in nilearn -(refered to as "background"). -Should some non-zero value encoding be necessary, it is -possible to change the background value with the `background_label` -keyword. - -.. topic:: **Examples** - - * :ref:`sphx_glr_auto_examples_connectivity_plot_signal_extraction.py` - -:class:`NiftiMapsMasker` Usage ------------------------------- - -This atlas defines its regions using maps. The path to the corresponding -file is given in the "maps_img" argument. - -One important thing that happens transparently during the execution of -:meth:`NiftiMasker.fit_transform` is resampling. Initially, the images -and the atlas do typically not have the same shape nor the same affine. Casting -them into the same format is required for successful signal extraction -The keyword argument `resampling_target` specifies which format (i.e., -dimensions and affine) the data should be resampled to. -See the reference documentation for :class:`NiftiMapsMasker` for every -possible option. - -.. topic:: **Examples** - - * :ref:`sphx_glr_auto_examples_connectivity_plot_probabilistic_atlas_extraction.py` - -Extraction of signals from seeds:\ :class:`NiftiSpheresMasker`. -================================================================== - -The purpose of :class:`NiftiSpheresMasker` is to compute signals from -seeds containing voxels in spheres. It makes it easy to get these signals once -you have a list of coordinates. -A single seed is a sphere defined by the radius (in millimeters) and the -coordinates (typically MNI or TAL) of its center. - -Using :class:`NiftiSpheresMasker` needs to define a list of coordinates. -"seeds" argument takes a list of 3D coordinates (tuples) of the spheres centers, -they should be in the same space as the images. -Seeds can overlap spatially and are represented in a binary present-nonpresent -coding (no weighting). -Below is an example of a coordinates list of four seeds from the default mode network:: - - >>> dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (0, 50, -5)] - -"radius" is an optional argument that takes a real value in millimeters. -If no value is given for the "radius" argument, the single voxel at the given -seed position is used. - -.. 
topic:: **Examples** - - * :ref:`sphx_glr_auto_examples_connectivity_plot_adhd_spheres.py` diff --git a/doc/manipulating_visualizing/index.rst b/doc/manipulating_visualizing/index.rst deleted file mode 100644 index e06891257e..0000000000 --- a/doc/manipulating_visualizing/index.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. include:: ../tune_toc.rst - - -.. _manipulation_visualization: - -============================================ -Image manipulation and visualization -============================================ - -In this section, we detail the general tools to manipulation and -visualize neuroimaging volume with nilearn. - -| - -.. include:: ../includes/big_toc_css.rst - - -.. toctree:: - - plotting.rst - data_preparation.rst - manipulating_images.rst - diff --git a/doc/manipulating_visualizing/manipulating_images.rst b/doc/manipulating_visualizing/manipulating_images.rst deleted file mode 100644 index 476c6b5b28..0000000000 --- a/doc/manipulating_visualizing/manipulating_images.rst +++ /dev/null @@ -1,453 +0,0 @@ -.. _data_manipulation: - -===================================================================== -Manipulating brain volume: input/output, masking, ROIs, smoothing... -===================================================================== - -This chapter introduces the data structure of brain images and tools to -manipulation these. - - -.. contents:: **Chapters contents** - :local: - :depth: 1 - - - -.. _loading_data: - -Loading data -============ - -.. currentmodule:: nilearn.datasets - -.. _datasets: - -Fetching open datasets ----------------------- - -The nilearn package provides a dataset fetching utility that -automatically downloads reference -datasets and atlases. Dataset fetching functions can be imported from -:mod:`nilearn.datasets`:: - - >>> from nilearn import datasets - >>> haxby_files = datasets.fetch_haxby(n_subjects=1) # doctest: +SKIP - -They return a data structure that contains different pieces of -information on the retrieved dataset, including the -file names on hard disk:: - - >>> # The different files - >>> print(sorted(list(haxby_files.keys()))) # doctest: +SKIP - ['anat', 'description', 'func', 'mask_face', 'mask_face_little', 'mask_house', 'mask_house_little', 'mask_vt', 'session_target'] - >>> # Path to first functional file - >>> print(haxby_files.func[0]) # doctest: +ELLIPSIS +SKIP - /.../nilearn_data/haxby2001/subj1/bold.nii.gz - >>> # Provide information on the dataset - >>> print(haxby_files.description) # doctest: +ELLIPSIS +SKIP - Haxby 2001 results - - - Notes - ----- - Results from a classical fMRI study that... - -| - -Explanation and further resources of the dataset at hand can be -retrieved as follows: - - >>> print haxby_dataset['description'] # doctest: +SKIP - -For a list of all the data fetching functions in nilearn, see :ref:`datasets_ref`. - -Besides convenient downloading of openly accessible reference datasets -including important meta-data (e.g., stimulus characteristics and -participant information for confound removal), the fetching functions -perform data downloads only once and return the locally saved data upon -any later function calls. 
-The locally stored data can be found in one of the -following directories (in order of priority, if present): - - * default system paths used by third party software that may already - provide the data (e.g., the Harvard-Oxford atlas - is provided by the FSL software suite) - * the folder specified by `data_dir` parameter in the fetching function - * the global environment variable `NILEARN_SHARED_DATA` - * the user environment variable `NILEARN_DATA` - * the `nilearn_data` folder in the user home folder - -Two different environment variables are provided to distinguish a global dataset -repository that may be read-only at the user-level. -Note that you can copy that folder to another user's computers to avoid -the initial dataset download on the first fetching call. - - -Loading your own data ---------------------- - -Using your own data images in nilearn is as simple as creating a list of -file name strings :: - - # dataset folder contains subject1.nii and subject2.nii - my_data = ['dataset/subject1.nii', 'dataset/subject2.nii'] - -Nilearn also provides a "wildcard" pattern to list many files with one -expression: - -:: - - >>> # dataset folder contains subject_01.nii to subject_03.nii - >>> # dataset/subject_*.nii is a glob expression matching all filenames. - >>> # Example with a smoothing process: - >>> from nilearn.image import smooth_img - >>> result_img = smooth_img("dataset/subject_*") # doctest: +SKIP - -.. topic:: **Python globbing** - - For more complicated use cases, Python also provides functions to work - with file paths, in particular, :func:`glob.glob`. - - .. warning:: - - Unlike nilearn's path expansion, the result of :func:`glob.glob` is - not sorted and depending on the computer you are running they - might not be in alphabetic order. We advise you to rely on - nilearn's path expansion. - -Understanding neuroimaging data -=============================== - -Nifti and Analyze files ------------------------ - -.. topic:: **NIfTI and Analyze file structures** - - `NifTi `_ files (or Analyze files) are - the standard way of sharing data in neuroimaging research. - Three main components are: - - :data: - raw scans in form of a numpy array: ``data = img.get_data()`` - :affine: - returns the transformation matrix that maps - from voxel indices of the numpy array to actual real-world - locations of the brain: - ``affine = img.get_affine()`` - :header: - low-level informations about the data (slice duration, etc.): - ``header = img.get_header()`` - - -Neuroimaging data can be loaded in a simple way thanks to nibabel_. -A Nifti file on disk can be loaded with a single line. - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_visualization.py - :start-after: # Fetch data - :end-before: # Visualization - -.. topic:: **Dataset formatting: data shape** - - It is important to appreciate two main representations for - storing and accessing more than one Nifti images, that is sets - of MRI scans: - - - a big 4D matrix representing (3D MRI + 1D for time), stored in a single - Nifti file. - `FSL `_ users tend to - prefer this format. - - several 3D matrices representing each time point (single 3D volume) of the - session, stored in set of 3D Nifti or analyse files. - `SPM `_ users tend - to prefer this format. - -.. 
_niimg: - -Niimg-like objects -------------------- - -As a baseline, nilearn functions take as input argument what we call -"Niimg-like objects": - -**Niimg:** A Niimg-like object can be one of the following: - - * A string variable with a file path to a Nifti or Analyse image - * Any object exposing ``get_data()`` and ``get_affine()`` methods, typically - a ``Nifti1Image`` from nibabel_. - -**Niimg-4D:** Similarly, some functions require 4D Nifti-like -data, which we call Niimgs or Niimg-4D. Accepted input arguments are: - - * A path to a 4D Nifti image - * List of paths to 3D Nifti images - * 4D Nifti-like object - * List of 3D Nifti-like objects - -.. note:: **Image affines** - - If you provide a sequence of Nifti images, all of them must have the same - affine. - -Text files: phenotype or behavior ----------------------------------- - -Phenotypic or behavioral data are often provided as text or CSV -(Comma Separated Values) file. They -can be loaded with `numpy.genfromtxt` but you may have to specify some options -(typically `skip_header` ignores column titles if needed). - -For the Haxby datasets, we can load the categories of the images -presented to the subject:: - - >>> from nilearn import datasets - >>> haxby_dataset = datasets.fetch_haxby(n_subjects=1) # doctest: +SKIP - >>> import numpy as np - >>> labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ") # doctest: +SKIP - >>> stimuli = labels['labels'] # doctest: +SKIP - >>> print(np.unique(stimuli)) # doctest: +SKIP - ['bottle' 'cat' 'chair' 'face' 'house' 'rest' 'scissors' 'scrambledpix' - 'shoe'] - -| - -Masking data manually -===================== - -Extracting a brain mask ------------------------- - -If we do not have a spatial mask of the target regions, a brain mask -can be easily extracted from the fMRI data by the -:func:`nilearn.masking.compute_epi_mask` function: - -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_visualization_002.png - :target: ../auto_examples/manipulating_visualizing/plot_visualization.html - :align: right - :scale: 50% - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_visualization.py - :start-after: # Extracting a brain mask - :end-before: # Applying the mask to extract the corresponding time series - - -.. _mask_4d_2_3d: - -From 4D Nifti images to 2D data arrays --------------------------------------- - -fMRI data is usually represented as a 4D block of data: 3 spatial -dimensions and one time dimension. In practice, we are usually -interested in working on the voxel time-series in the -brain. It is thus convenient to apply a brain mask in order to convert the -4D brain images representation into a restructured 2D data representation, -`voxel` **x** `time`, as depicted below: - -.. image:: ../images/masking.jpg - :align: center - :width: 100% - - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_visualization.py - :start-after: # Applying the mask to extract the corresponding time series - :end-before: # Find voxels of interest - -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_visualization_003.png - :target: ../auto_examples/manipulating_visualizing/plot_visualization.html - :align: center - :scale: 50 - -.. _preprocessing_functions: - -Functions for data preparation steps -===================================== - -.. currentmodule:: nilearn.input_data - -The :class:`NiftiMasker` can automatically perform important data preparation -steps. 
These steps are also available as independent functions if you want to -set up your own data preparation procedure: - -.. currentmodule:: nilearn - -* Resampling: :func:`nilearn.image.resample_img`. See the example - :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_affine_transformation.py` to - see the effect of affine transforms on data and bounding boxes. -* Computing the mean of images (along the time/4th dimension): - :func:`nilearn.image.mean_img` -* Swapping voxels of both hemisphere (e.g., useful to homogenize masks - inter-hemispherically): - :func:`nilearn.image.swap_img_hemispheres` -* Smoothing: :func:`nilearn.image.smooth_img` -* Masking: - - * compute from EPI images: :func:`nilearn.masking.compute_epi_mask` - * compute from images with a flat background: - :func:`nilearn.masking.compute_background_mask` - * compute for multiple sessions/subjects: - :func:`nilearn.masking.compute_multi_epi_mask` - :func:`nilearn.masking.compute_multi_background_mask` - * apply: :func:`nilearn.masking.apply_mask` - * intersect several masks (useful for multi sessions/subjects): :func:`nilearn.masking.intersect_masks` - * unmasking: :func:`nilearn.masking.unmask` - -* Cleaning signals (e.g., linear detrending, standardization, - confound removal, low/high pass filtering): :func:`nilearn.signal.clean` - - -Image operations: creating a ROI mask manually -=============================================== - -This section shows manual steps to create and further modify a ROI -(region of interest) spatial mask. They represent a means for "data folding", -that is, extracting and later analyzing data from a subset of voxels rather -than the entire brain images. As a convenient side effect, this can help -alleviate the curse of dimensionality (i.e., statistical problems that -arise in the context of high-dimensional input variables). - -Smoothing ---------- - -Functional MRI data have a low signal-to-noise ratio (yet much better -than EEG or MEG measurements). -When using simple methods -that are not robust to noise, it is useful to apply a spatial filtering -kernel on the data. Such data smoothing is -usually applied using a Gaussian function with 4mm to 12mm full-width at -half-maximum (this is where the FWHM comes from). -The function :func:`nilearn.image.smooth_img` accounts for potential -anisotropy in the image affine (i.e., non-identical voxel size in all -the three dimensions). Analogous to the majority of nilearn functions, -it can also use file names as input parameters. - - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py - :start-after: # Smooth the data - :end-before: # Run a T-test for face and houses - -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_001.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html - :align: center - :scale: 50% - -Selecting features ------------------- - -Functional MRI data can be considered "high dimensional" given the -p-versus-n ratio (e.g., p=~50,000-200,000 voxels for n=1000 samples). -In this setting, machine-learning -algorithms can perform poorly (i.e., curse-of-dimensionality problem). -However, simple means from the realms of classical statistics can help -reducing the number of voxels. - -The Student's t-test (:func:`scipy.stats.ttest_ind`) is an established -method to determine whether two -distributions are statistically different. 
It can be used to compare voxel -time-series from two different experimental conditions -(e.g., when houses or faces are shown to individuals during brain scanning). -If the time-series distribution is similar in the two conditions, then the -voxel is not very interesting to discriminate the condition. - -This test returns p-values that represent probabilities that the two -time-series had been drawn from the same distribution. The lower is the p-value, the -more discriminative is the voxel in distinguishing the two conditions. - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py - :start-after: # Run a T-test for face and houses - :end-before: # Build a mask from this statistical map - -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_002.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html - :align: center - :scale: 50% - -This feature selection method is available in the scikit-learn Python -package, where it has been -extended to several classes, using the -:func:`sklearn.feature_selection.f_classif` function. - -Thresholding ------------- - -Voxels with better p-values are kept as voxels of interest. -Applying a threshold to an array -is easy thanks to numpy indexing à la Matlab. - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py - :start-after: # Thresholding - :end-before: # Binarization and intersection with VT mask - -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_003.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html - :align: center - :scale: 50% - -Mask intersection ------------------ - -We now want to restrict our investigation to the ventral temporal area. The -corresponding spatial mask is provided in `haxby.mask_vt`. -We want to compute the -intersection of this provided mask with our self-computed mask. -The first step is to load it with -nibabel's **nibabel.load**. We can then use a logical "and" operation --- **numpy.logical_and** -- to keep only voxels -that have been selected in both masks. In neuroimaging jargon, this is -called an "AND conjunction." - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py - :start-after: # Binarization and intersection with VT mask - :end-before: # Dilation - -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_004.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html - :align: center - :scale: 50% - -Mask dilation -------------- - -Tresholded functional brain images often contain scattered voxels -across the brain. -To consolidate such brain images towards more -compact shapes, we use a `morphological dilation `_. This is a common step to be sure -not to forget voxels located on the edge of a ROI. -Put differently, such operations can fill "holes" in masked voxel -representations. - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py - :start-after: # Dilation - :end-before: # Identification of connected components - -.. 
figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_005.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html - :align: center - :scale: 50% - -Extracting connected components -------------------------------- - -The function **scipy.ndimage.label** from the scipy Python library -identifies immediately neighboring -voxels in our voxels mask. It assigns a separate integer label to each -one of them. - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py - :start-after: # Identification of connected components - :end-before: # Use the new ROIs to extract data maps in both ROIs - -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_006.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html - :align: center - :scale: 50% - -Saving the result ------------------ - -The final voxel mask is saved using nibabel for further inspection -with a software such as FSLView. - -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py - :start-after: # save the ROI 'atlas' to a single output Nifti - -.. _nibabel: http://nipy.sourceforge.net/nibabel/ diff --git a/doc/manipulating_visualizing/plotting.rst b/doc/manipulating_visualizing/plotting.rst deleted file mode 100644 index 09227e452e..0000000000 --- a/doc/manipulating_visualizing/plotting.rst +++ /dev/null @@ -1,267 +0,0 @@ -.. _plotting: - -====================== -Plotting brain images -====================== - -Nilearn comes with plotting function to display brain maps coming from -Nifti-like images, in the :mod:`nilearn.plotting` module. - -.. currentmodule:: nilearn.plotting - -Different plotting functions -============================= - -Nilearn has a set of plotting functions to plot brain volumes that are -fined tuned to specific applications. Amongst other things, they use -different heuristics to find cutting coordinates. - -.. |plot_stat_map| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_001.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html - :scale: 50 - -.. |plot_glass_brain| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_glass_brain_extensive_001.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_glass_brain_extensive.html - :scale: 50 - -.. |plot_connectome| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png - :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html - :scale: 50 - -.. |plot_anat| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_003.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html - :scale: 50 - -.. |plot_roi| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_004.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html - :scale: 50 - -.. |plot_epi| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_005.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html - :scale: 50 - -.. |plot_prob_atlas| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_prob_atlas_003.png - :target: ../auto_examples/manipulating_visualizing/plot_prob_atlas.html - :scale: 50 - -.. A temporary hack to avoid a sphinx bug -.. |hack| raw:: html - -
- - -=================== ========================================================= -=================== ========================================================= -|plot_anat| :func:`plot_anat` - |hack| - Plotting an anatomical image - -|plot_epi| :func:`plot_epi` - |hack| - Plotting an EPI, or T2* image - -|plot_glass_brain| :func:`plot_glass_brain` - |hack| - Glass brain visualization. By default plots maximum - intensity projection of the absolute values. To plot - positive and negative values set plot_abs parameter to - False. - -|plot_stat_map| :func:`plot_stat_map` - |hack| - Plotting a statistical map, like a T-map, a Z-map, or - an ICA, with an optional background - -|plot_roi| :func:`plot_roi` - |hack| - Plotting ROIs, or a mask, with an optional background - -|plot_connectome| :func:`plot_connectome` - |hack| - Plotting a connectome - -|plot_prob_atlas| :func:`plot_prob_atlas` - |hack| - Plotting 4D probabilistic atlas maps - -**plot_img** :func:`plot_img` - |hack| - General-purpose function, with no specific presets -=================== ========================================================= - - -.. warning:: **Opening too many figures without closing** - - Each call to a plotting function creates a new figure by default. When - used in non-interactive settings, such as a script or a program, these - are not displayed, but still accumulate and eventually lead to slowing - the execution and running out of memory. - - To avoid this, you must close the plot as follow:: - - >>> from nilearn import plotting - >>> display = plotting.plot_stat_map(img) # doctest: +SKIP - >>> display.close() # doctest: +SKIP - -.. seealso:: - - :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_dim_plotting.py` - -Different display modes -======================== - -.. |plot_ortho| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_001.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_z_many| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_002.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html - :scale: 30 - -.. |plot_x| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_003.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_x_small| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_004.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_z_small| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_005.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_xz| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_006.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_yx| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_007.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html - :scale: 50 - -.. 
|plot_yz| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_008.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html - :scale: 50 - - -================= ========================================================= -================= ========================================================= -|plot_ortho| `display_mode='ortho', cut_coords=(36, -27, 60)` - |hack| - Ortho slicer: 3 cuts along the x, y, z directions - -|plot_z_many| `display_mode='z', cut_coords=5` - |hack| - Cutting in the z direction, specifying the number of - cuts - -|plot_x| `display_mode='x', cut_coords=(-36, 36)` - |hack| - Cutting in the x direction, specifying the exact - cuts - -|plot_x_small| `display_mode='x', cut_coords=1` - |hack| - Cutting in the x direction, with only 1 cut, that is - automatically positionned - -|plot_z_small| `display_mode='z', cut_coords=1, colorbar=False` - |hack| - Cutting in the z direction, with only 1 cut, that is - automatically positionned - -|plot_xz| `display_mode='xz', cut_coords=(36, 60)` - |hack| - Cutting in the x and z direction, with cuts manually - positionned - -|plot_yx| `display_mode='yx', cut_coords=(-27, 36)` - |hack| - Cutting in the y and x direction, with cuts manually - positionned - -|plot_yz| `display_mode='yz', cut_coords=(-27, 60)` - |hack| - Cutting in the y and z direction, with cuts manually - positionned - - -================= ========================================================= - -Adding overlays, edges and contours -==================================== - -To add overlays, contours, or edges, use the return value of the plotting -functions. Indeed, these return a display object, such as the -:class:`nilearn.plotting.displays.OrthoSlicer`. This object represents the -plot, and has methods to add overlays, contours or edge maps:: - - display = plotting.plot_epi(...) - -.. |plot_edges| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_009.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_contours| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_010.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html - :scale: 50 - -================= ========================================================= -================= ========================================================= -|plot_edges| `display.add_edges(img)` - |hack| - Add a plot of the edges of `img`, where edges are - extracted using a Canny edge-detection routine. This - is typically useful to check registration. Note that - `img` should have some visible sharp edges. Typically - an EPI img does not, but a T1 does. - -|plot_contours| `display.add_contours(img, levels=[.5], colors='r')` - |hack| - Add a plot of the contours of `img`, where contours - are computed for constant values, specified in - 'levels'. This is typically useful to outline a mask, - or ROI on top of another map. 
- |hack| - **Example:** :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_haxby_masks.py` - - -**add_overlay** `display.add_overlay(img, cmap=plotting.cm.purple_green, threshold=3)` - |hack| - Add a new overlay on the existing figure - |hack| - **Example:** :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_overlay.py` - - -================= ========================================================= - -Displaying or saving to an image file -===================================== - -To display the figure when running a script, you need to call -:func:`nilearn.plotting.show`: (this is just an alias to -:func:`matplotlib.pyplot.show`):: - - >>> from nilearn import plotting - >>> plotting.show() # doctest: +SKIP - -The simplest way to output an image file from the plotting functions is -to specify the `output_file` argument:: - - >>> from nilearn import plotting - >>> plotting.plot_stat_map(img, output_file='pretty_brain.png') # doctest: +SKIP - -In this case, the display is closed automatically and the plotting -function returns None. - -| - -The display object returned by the plotting function has a savefig method -that can be used to save the plot to an image file:: - - >>> from nilearn import plotting - >>> display = plotting.plot_stat_map(img) # doctest: +SKIP - >>> display.savefig('pretty_brain.png') # doctest: +SKIP - # Don't forget to close the display - >>> display.close() # doctest: +SKIP - - diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index 336d21e00c..a3b0f3856e 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -27,7 +27,7 @@ uses. .. autosummary:: :toctree: generated/ :template: class.rst - + ConnectivityMeasure GroupSparseCovariance GroupSparseCovarianceCV @@ -41,7 +41,11 @@ uses. :template: function.rst sym_to_vec + sym_matrix_to_vec + vec_to_sym_matrix group_sparse_covariance + cov_to_corr + prec_to_partial .. _datasets_ref: @@ -66,20 +70,38 @@ uses. fetch_atlas_destrieux_2009 fetch_atlas_harvard_oxford fetch_atlas_msdl - fetch_atlas_power_2011 + fetch_coords_power_2011 fetch_atlas_smith_2009 fetch_atlas_yeo_2011 fetch_atlas_aal + fetch_atlas_basc_multiscale_2015 + fetch_atlas_allen_2011 + fetch_atlas_pauli_2017 + fetch_coords_dosenbach_2010 fetch_abide_pcp fetch_adhd fetch_haxby - fetch_haxby_simple fetch_icbm152_2009 + fetch_icbm152_brain_gm_mask + fetch_localizer_button_task fetch_localizer_contrasts fetch_localizer_calculation_task fetch_miyawaki2008 fetch_nyu_rest + fetch_surf_nki_enhanced + fetch_surf_fsaverage + fetch_atlas_surf_destrieux + fetch_atlas_talairach fetch_oasis_vbm + fetch_megatrawls_netmats + fetch_cobre + fetch_neurovault + fetch_neurovault_ids + fetch_neurovault_auditory_computation_task + fetch_neurovault_motor_task + get_data_dirs + load_mni152_template + load_mni152_brain_mask .. _decoding_ref: @@ -139,14 +161,21 @@ uses. :toctree: generated/ :template: function.rst - crop_img + clean_img + concat_imgs + coord_transform copy_img + crop_img + high_variance_confounds index_img iter_img - high_variance_confounds + largest_connected_component_img + load_img + math_img mean_img new_img_like resample_img + resample_to_img reorder_img smooth_img swap_img_hemispheres @@ -198,6 +227,8 @@ uses. compute_epi_mask compute_multi_epi_mask + compute_gray_matter_mask + compute_multi_gray_matter_mask compute_background_mask compute_multi_background_mask intersect_masks @@ -220,6 +251,7 @@ uses. 
:template: function.rst connected_regions + connected_label_regions img_to_signals_labels signals_to_img_labels img_to_signals_maps @@ -233,7 +265,8 @@ uses. :toctree: generated/ :template: class.rst - RegionExtractor + RegionExtractor + Parcellations :mod:`nilearn.mass_univariate`: Mass-univariate analysis @@ -257,6 +290,7 @@ uses. .. _plotting_ref: + :mod:`nilearn.plotting`: Plotting brain data ================================================ @@ -276,14 +310,25 @@ uses. find_cut_slices find_xyz_cut_coords + find_parcellation_cut_coords + find_probabilistic_atlas_cut_coords plot_anat plot_img plot_epi + plot_matrix plot_roi plot_stat_map plot_glass_brain plot_connectome plot_prob_atlas + plot_surf + plot_surf_roi + plot_surf_stat_map + view_surf + view_img_on_surf + view_connectome + view_markers + view_stat_map show **Classes**: @@ -320,4 +365,23 @@ uses. high_variance_confounds +:mod:`nilearn.surface`: Manipulating surface data +=================================================== + +.. automodule:: nilearn.surface + :no-members: + :no-inherited-members: + +.. No relevant user manual section yet. + +**Functions**: + +.. currentmodule:: nilearn.surface + +.. autosummary:: + :toctree: generated/ + :template: function.rst + load_surf_data + load_surf_mesh + vol_to_surf diff --git a/doc/plotting/index.rst b/doc/plotting/index.rst new file mode 100644 index 0000000000..3684fa9ad6 --- /dev/null +++ b/doc/plotting/index.rst @@ -0,0 +1,506 @@ +.. _plotting: + +====================== +Plotting brain images +====================== + +In this section, we detail the general tools to visualize +neuroimaging volumes with nilearn. + +Nilearn comes with plotting functions to display brain maps coming from +Nifti-like images, in the :mod:`nilearn.plotting` module. + +.. contents:: **Contents** + :local: + :depth: 1 + +.. topic:: **Code examples** + + Nilearn has a whole section of the example gallery on plotting. + + A small tour of the plotting functions can be found in the example + :ref:`sphx_glr_auto_examples_01_plotting_plot_demo_plotting.py`. + + Finally, note that, as always in the nilearn documentation, clicking + on a figure will take you to the code that generates it. + +.. currentmodule:: nilearn.plotting + +Different plotting functions +============================= + +Nilearn has a set of plotting functions to plot brain volumes that are +fine-tuned to specific applications. Amongst other things, they use +different heuristics to find cutting coordinates. + +.. |plot_stat_map| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_001.png + :target: ../auto_examples/01_plotting/plot_demo_plotting.html + :scale: 50 + +.. |plot_glass_brain| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_extensive_001.png + :target: ../auto_examples/01_plotting/plot_demo_glass_brain_extensive.html + :scale: 50 + +.. |plot_connectome| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_adhd_spheres_003.png + :target: ../auto_examples/03_connectivity/plot_adhd_spheres.html + :scale: 50 + +.. |plot_anat| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_003.png + :target: ../auto_examples/01_plotting/plot_demo_plotting.html + :scale: 50 + +.. |plot_roi| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_004.png + :target: ../auto_examples/01_plotting/plot_demo_plotting.html + :scale: 50 +
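+For instance, a minimal sketch of the shared calling convention, assuming
+``img.nii.gz`` is a hypothetical filename standing for any Nifti-like
+statistical image (the table below lists the available functions)::
+
+    >>> from nilearn import plotting
+    >>> # Every volume plotting function accepts an image object or a filename
+    >>> plotting.plot_stat_map('img.nii.gz', threshold=3) # doctest: +SKIP
+    >>> # plot_glass_brain follows the same convention
+    >>> plotting.plot_glass_brain('img.nii.gz', threshold=3) # doctest: +SKIP
+
+.. 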
|plot_epi| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_005.png + :target: ../auto_examples/01_plotting/plot_demo_plotting.html + :scale: 50 + +.. |plot_prob_atlas| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_prob_atlas_003.png + :target: ../auto_examples/01_plotting/plot_prob_atlas.html + :scale: 50 + +.. A temporary hack to avoid a sphinx bug +.. |hack| raw:: html + +
+ + +=================== ========================================================= +=================== ========================================================= +|plot_anat| :func:`plot_anat` + |hack| + Plotting an anatomical image + +|plot_epi| :func:`plot_epi` + |hack| + Plotting an EPI, or T2* image + +|plot_glass_brain| :func:`plot_glass_brain` + |hack| + Glass brain visualization. By default plots maximum + intensity projection of the absolute values. To plot + positive and negative values set plot_abs parameter to + False. + +|plot_stat_map| :func:`plot_stat_map` + |hack| + Plotting a statistical map, like a T-map, a Z-map, or + an ICA, with an optional background + +|plot_roi| :func:`plot_roi` + |hack| + Plotting ROIs, or a mask, with an optional background + +|plot_connectome| :func:`plot_connectome` + |hack| + Plotting a connectome + + Functions for automatic extraction of coords based on + brain parcellations useful for :func:`plot_connectome` + are demonstrated in + **Example:** :ref:`sphx_glr_auto_examples_03_connectivity_plot_atlas_comparison.py` + +|plot_prob_atlas| :func:`plot_prob_atlas` + |hack| + Plotting 4D probabilistic atlas maps + +**plot_img** :func:`plot_img` + |hack| + General-purpose function, with no specific presets +=================== ========================================================= + + +.. warning:: **Opening too many figures without closing** + + Each call to a plotting function creates a new figure by default. When + used in non-interactive settings, such as a script or a program, these + are not displayed, but still accumulate and eventually lead to slowing + the execution and running out of memory. + + To avoid this, you must close the plot as follow:: + + >>> from nilearn import plotting + >>> display = plotting.plot_stat_map(img) # doctest: +SKIP + >>> display.close() # doctest: +SKIP + +| + +.. seealso:: + + :ref:`sphx_glr_auto_examples_01_plotting_plot_dim_plotting.py` + +Different display modes +======================== + +.. |plot_ortho| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_001.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_z_many| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_002.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 30 + +.. |plot_x| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_003.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_y_small| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_004.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_z_small| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_005.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_xz| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_006.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_yx| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_007.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_yz| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_008.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 + +.. 
+.. |plot_lzr| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_extensive_006.png + :target: ../auto_examples/01_plotting/plot_demo_glass_brain_extensive.html + :scale: 50 + +.. |plot_lyrz| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_extensive_007.png + :target: ../auto_examples/01_plotting/plot_demo_glass_brain_extensive.html + :scale: 50 + + +================= ========================================================= +================= ========================================================= +|plot_ortho| `display_mode='ortho', cut_coords=[36, -27, 60]` + |hack| + Ortho slicer: 3 cuts along the x, y, z directions + +|plot_z_many| `display_mode='z', cut_coords=5` + |hack| + Cutting in the z direction, specifying the number of + cuts + +|plot_x| `display_mode='x', cut_coords=[-36, 36]` + |hack| + Cutting in the x direction, specifying the exact + cuts + +|plot_y_small| `display_mode='y', cut_coords=1` + |hack| + Cutting in the y direction, with only 1 cut, that is + automatically positioned + +|plot_z_small| `display_mode='z', cut_coords=1, colorbar=False` + |hack| + Cutting in the z direction, with only 1 cut, that is + automatically positioned + +|plot_xz| `display_mode='xz', cut_coords=[36, 60]` + |hack| + Cutting in the x and z direction, with cuts manually + positioned + +|plot_yx| `display_mode='yx', cut_coords=[-27, 36]` + |hack| + Cutting in the y and x direction, with cuts manually + positioned + +|plot_yz| `display_mode='yz', cut_coords=[-27, 60]` + |hack| + Cutting in the y and z direction, with cuts manually + positioned + +|plot_lzr| `Glass brain display_mode='lzr'` + |hack| + Glass brain and Connectome provide additional display modes + due to the possibility of doing hemispheric projections. + Check out: 'l', 'r', 'lr', 'lzr', 'lyr', 'lzry', 'lyrz'. + +|plot_lyrz| `Glass brain display_mode='lyrz'` + |hack| + Glass brain and Connectome provide additional display modes + due to the possibility of doing hemispheric projections. + Check out: 'l', 'r', 'lr', 'lzr', 'lyr', 'lzry', 'lyrz'. + + +================= ========================================================= + +Available Colormaps +=================== + +The Nilearn plotting library ships with a set of extra colormaps, shown in +the image below. + +.. image:: ../auto_examples/01_plotting/images/sphx_glr_plot_colormaps_001.png + :target: ../auto_examples/01_plotting/plot_colormaps.html + :scale: 50 + +These colormaps can be used like any other matplotlib colormap. + +.. image:: ../auto_examples/01_plotting/images/sphx_glr_plot_colormaps_002.png + :target: ../auto_examples/01_plotting/plot_colormaps.html + :scale: 50 + + +.. _display_modules: + +Adding overlays, edges, contours, contour fillings and markers +============================================================== + +To add overlays, contours, or edges, use the return value of the plotting +functions. Indeed, these return a display object, such as the +:class:`nilearn.plotting.displays.OrthoSlicer`. This object represents the +plot, and has methods to add overlays, contours or edge maps:: + + display = plotting.plot_epi(...) + +.. |plot_edges| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_009.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_contours| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_010.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 +
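+For instance, a minimal sketch of outlining a mask over a mean EPI image,
+assuming hypothetical files ``mean_epi.nii.gz`` and ``mask.nii.gz``; the
+available methods are summarized in the table below::
+
+    >>> from nilearn import plotting
+    >>> display = plotting.plot_epi('mean_epi.nii.gz') # doctest: +SKIP
+    >>> # Outline the mask in red on top of the existing display
+    >>> display.add_contours('mask.nii.gz', levels=[.5], colors='r') # doctest: +SKIP
+    >>> display.close() # doctest: +SKIP
+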
+.. |plot_fill| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_011.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_markers| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_012.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_overlay| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_overlay_002.png + :target: ../auto_examples/01_plotting/plot_overlay.html + :scale: 50 + +================= ========================================================= +================= ========================================================= +|plot_edges| `display.add_edges(img)` + |hack| + Add a plot of the edges of `img`, where edges are + extracted using a Canny edge-detection routine. This + is typically useful to check registration. Note that + `img` should have some visible sharp edges. Typically + an EPI img does not, but a T1 does. + +|plot_contours| `display.add_contours(img, levels=[.5], colors='r')` + |hack| + Add a plot of the contours of `img`, where contours + are computed for constant values, specified in + 'levels'. This is typically useful to outline a mask, + or ROI on top of another map. + |hack| + **Example:** :ref:`sphx_glr_auto_examples_01_plotting_plot_haxby_masks.py` + +|plot_fill| `display.add_contours(img, filled=True, alpha=0.7, levels=[0.5], colors='b')` + |hack| + Add a plot of `img` with contours filled with colors + +|plot_overlay| `display.add_overlay(img, cmap=plotting.cm.purple_green, threshold=3)` + |hack| + Add a new overlay on the existing figure + |hack| + **Example:** :ref:`sphx_glr_auto_examples_01_plotting_plot_overlay.py` + +|plot_markers| `display.add_markers(coords, marker_color='y', marker_size=100)` + |hack| + Add seed-based MNI coordinates as spheres on top of a + statistical or EPI image. This is useful for seed-based, + region-specific interpretation of brain images. + |hack| + **Example:** :ref:`sphx_glr_auto_examples_03_connectivity_plot_seed_to_voxel_correlation.py` + +================= ========================================================= + +Displaying or saving to an image file +===================================== + +To display the figure when running a script, you need to call +:func:`nilearn.plotting.show` (this is just an alias to +:func:`matplotlib.pyplot.show`):: + + >>> from nilearn import plotting + >>> plotting.show() # doctest: +SKIP + +The simplest way to output an image file from the plotting functions is +to specify the `output_file` argument:: + + >>> from nilearn import plotting + >>> plotting.plot_stat_map(img, output_file='pretty_brain.png') # doctest: +SKIP + +In this case, the display is closed automatically and the plotting +function returns None. + +| + +The display object returned by the plotting function has a `savefig` method +that can be used to save the plot to an image file:: + + >>> from nilearn import plotting + >>> display = plotting.plot_stat_map(img) # doctest: +SKIP + >>> display.savefig('pretty_brain.png') # doctest: +SKIP + # Don't forget to close the display + >>> display.close() # doctest: +SKIP + +.. _surface-plotting: + +Surface plotting +================ + +Nilearn also provides functions to plot surface data or statistical maps +on a brain surface. + +.. versionadded:: 0.3 + +.. |plot_surf_roi| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_surf_atlas_001.png + :target: ../auto_examples/01_plotting/plot_surf_atlas.html + :scale: 50 +
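+A minimal sketch, using the fsaverage mesh and the Destrieux surface atlas
+available through nilearn's dataset fetchers::
+
+    >>> from nilearn import datasets, plotting # doctest: +SKIP
+    >>> fsaverage = datasets.fetch_surf_fsaverage() # doctest: +SKIP
+    >>> destrieux = datasets.fetch_atlas_surf_destrieux() # doctest: +SKIP
+    >>> plotting.plot_surf_roi(fsaverage['infl_left'], # doctest: +SKIP
+    ...                        roi_map=destrieux['map_left'], # doctest: +SKIP
+    ...                        hemi='left', view='lateral') # doctest: +SKIP
+
+.. 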
|plot_surf_stat_map| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_surf_stat_map_001.png + :target: ../auto_examples/01_plotting/plot_surf_stat_map.html + :scale: 50 + +===================== =================================================================== +===================== =================================================================== +|plot_surf_roi| :func:`plot_surf_roi` + |hack| + Plotting surface atlases on a brain surface + |hack| + **Example:** + :ref:`sphx_glr_auto_examples_01_plotting_plot_surf_atlas.py` + +|plot_surf_stat_map| :func:`plot_surf_stat_map` + |hack| + Plotting statistical maps onto a brain surface + |hack| + **Example:** + :ref:`sphx_glr_auto_examples_01_plotting_plot_surf_stat_map.py` + +===================== =================================================================== + + +.. _interactive-plotting: + +Interactive plots +================= + +Nilearn also has functions for making interactive plots that can be +seen in a web browser. + +.. versionadded:: 0.5 + + Interactive plotting is new in nilearn 0.5 + +For 3D surface plots of statistical maps or surface atlases, use +:func:`view_img_on_surf` and :func:`view_surf`. Both produce a 3D plot on the +cortical surface. The difference is that :func:`view_surf` takes as input a +surface map and a cortical mesh, whereas :func:`view_img_on_surf` takes as input +a volume statistical map, and projects it on the cortical surface before making +the plot. + +For 3D plots of a connectome, use :func:`view_connectome`. To see only markers, +use :func:`view_markers`. + + +.. _interactive-surface-plotting: + +3D Plots of statistical maps or atlases on the cortical surface +--------------------------------------------------------------- + +:func:`view_img_on_surf`: Surface plot using a 3D statistical map:: + + >>> from nilearn import plotting, datasets # doctest: +SKIP + >>> img = datasets.fetch_localizer_button_task()['tmaps'][0] # doctest: +SKIP + >>> view = plotting.view_img_on_surf(img, threshold='90%', surf_mesh='fsaverage') # doctest: +SKIP + +If you are running a notebook, displaying ``view`` will embed an interactive +plot (this is the case for all interactive plots produced by nilearn's "view" +functions): + +.. image:: ../images/plotly_surface_plot_notebook_screenshot.png + +If you are not using a notebook, you can open the plot in a browser like this:: + + >>> view.open_in_browser() # doctest: +SKIP + +This will open this 3D plot in your web browser: + +.. image:: ../images/plotly_surface_plot.png + + +Or you can save it to an html file:: + + >>> view.save_as_html("surface_plot.html") # doctest: +SKIP + + +:func:`view_surf`: Surface plot using a surface map and a cortical mesh:: + + >>> from nilearn import plotting, datasets # doctest: +SKIP + >>> destrieux = datasets.fetch_atlas_surf_destrieux() # doctest: +SKIP + >>> fsaverage = datasets.fetch_surf_fsaverage() # doctest: +SKIP + >>> view = plotting.view_surf(fsaverage['infl_left'], destrieux['map_left'], # doctest: +SKIP + ... cmap='gist_ncar', symmetric_cmap=False) # doctest: +SKIP + ... + >>> view.open_in_browser() # doctest: +SKIP + + +.. image:: ../images/plotly_surface_atlas_plot.png + +.. _interactive-connectome-plotting: + +3D Plots of connectomes +----------------------- + +:func:`view_connectome`: 3D plot of a connectome:: + + >>> view = plotting.view_connectome(correlation_matrix, coords, threshold='90%') # doctest: +SKIP + >>> view.open_in_browser() # doctest: +SKIP + + +.. image:: ../images/plotly_connectome_plot.png + + +.. 
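+Note that ``correlation_matrix`` and ``coords`` are not computed by the
+plotting function: they are inputs you build beforehand. A minimal sketch of
+one way to obtain them, assuming ``time_series`` is an already-extracted
+array of shape (n_timepoints, n_regions)::
+
+    >>> import numpy as np # doctest: +SKIP
+    >>> from nilearn import datasets # doctest: +SKIP
+    >>> from nilearn.connectome import ConnectivityMeasure # doctest: +SKIP
+    >>> # Region coordinates of the MSDL atlas, one (x, y, z) triplet per region
+    >>> msdl = datasets.fetch_atlas_msdl() # doctest: +SKIP
+    >>> coords = np.array(msdl.region_coords) # doctest: +SKIP
+    >>> correlation_measure = ConnectivityMeasure(kind='correlation') # doctest: +SKIP
+    >>> correlation_matrix = correlation_measure.fit_transform([time_series])[0] # doctest: +SKIP
+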
+.. _interactive-markers-plotting: + +3D Plots of markers +------------------- + +:func:`view_markers`: showing markers (e.g. seed locations) in 3D:: + + >>> from nilearn import plotting # doctest: +SKIP + >>> dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)] # doctest: +SKIP + >>> view = plotting.view_markers( # doctest: +SKIP + ... dmn_coords, ['red', 'cyan', 'magenta', 'orange'], marker_size=10) # doctest: +SKIP + >>> view.open_in_browser() # doctest: +SKIP + + + +.. image:: ../images/plotly_markers_plot.png + + +.. _interactive-stat-map-plotting: + +Interactive visualization of statistical map slices +--------------------------------------------------- + +:func:`view_stat_map`: open a stat map in a Papaya viewer (https://github.com/rii-mango/Papaya):: + + >>> from nilearn import plotting, datasets # doctest: +SKIP + >>> img = datasets.fetch_localizer_button_task()['tmaps'][0] # doctest: +SKIP + >>> view = plotting.view_stat_map(img, threshold=2, vmax=4) # doctest: +SKIP + +In a Jupyter notebook, you can view the image like this: + +.. image:: ../images/papaya_stat_map_plot_screenshot_notebook.png + +Or you can open a viewer in your web browser if you are not in the +notebook:: + + >>> view.open_in_browser() # doctest: +SKIP diff --git a/doc/sphinxext/numpydoc/__init__.py b/doc/sphinxext/numpydoc/__init__.py new file mode 100644 index 0000000000..0fce2cf747 --- /dev/null +++ b/doc/sphinxext/numpydoc/__init__.py @@ -0,0 +1,3 @@ +from __future__ import division, absolute_import, print_function + +from .numpydoc import setup diff --git a/doc/sphinxext/numpydoc/comment_eater.py b/doc/sphinxext/numpydoc/comment_eater.py new file mode 100644 index 0000000000..8cddd3305f --- /dev/null +++ b/doc/sphinxext/numpydoc/comment_eater.py @@ -0,0 +1,169 @@ +from __future__ import division, absolute_import, print_function + +import sys +if sys.version_info[0] >= 3: + from io import StringIO +else: + # io.StringIO only accepts unicode on Python 2; the classic StringIO + # module also handles plain byte strings, as fed in by process_file. + from StringIO import StringIO + +import compiler +import inspect +import textwrap +import tokenize + +from .compiler_unparse import unparse + + +class Comment(object): + """ A comment block. + """ + is_comment = True + def __init__(self, start_lineno, end_lineno, text): + # int : The first line number in the block. 1-indexed. + self.start_lineno = start_lineno + # int : The last line number. Inclusive! + self.end_lineno = end_lineno + # str : The text block including '#' character but not any leading spaces. + self.text = text + + def add(self, string, start, end, line): + """ Add a new comment line. + """ + self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + self.text += string + + def __repr__(self): + return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno, self.text) + + +class NonComment(object): + """ A non-comment block of code. + """ + is_comment = False + def __init__(self, start_lineno, end_lineno): + self.start_lineno = start_lineno + self.end_lineno = end_lineno + + def add(self, string, start, end, line): + """ Add lines to the block. + """ + if string.strip(): + # Only add if not entirely whitespace. + self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + + def __repr__(self): + return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno) + + +class CommentBlocker(object): + """ Pull out contiguous comment blocks. + """ + def __init__(self): + # Start with a dummy. 
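+        # The dummy guarantees that process_token() always has a
+        # current_block to extend, so the first real token in the file
+        # needs no special-casing.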
+ self.current_block = NonComment(0, 0) + + # All of the blocks seen so far. + self.blocks = [] + + # The index mapping lines of code to their associated comment blocks. + self.index = {} + + def process_file(self, file): + """ Process a file object. + """ + if sys.version_info[0] >= 3: + nxt = file.__next__ + else: + nxt = file.next + for token in tokenize.generate_tokens(nxt): + self.process_token(*token) + self.make_index() + + def process_token(self, kind, string, start, end, line): + """ Process a single token. + """ + if self.current_block.is_comment: + if kind == tokenize.COMMENT: + self.current_block.add(string, start, end, line) + else: + self.new_noncomment(start[0], end[0]) + else: + if kind == tokenize.COMMENT: + self.new_comment(string, start, end, line) + else: + self.current_block.add(string, start, end, line) + + def new_noncomment(self, start_lineno, end_lineno): + """ We are transitioning from a noncomment to a comment. + """ + block = NonComment(start_lineno, end_lineno) + self.blocks.append(block) + self.current_block = block + + def new_comment(self, string, start, end, line): + """ Possibly add a new comment. + + Only adds a new comment if this comment is the only thing on the line. + Otherwise, it extends the noncomment block. + """ + prefix = line[:start[1]] + if prefix.strip(): + # Oops! Trailing comment, not a comment block. + self.current_block.add(string, start, end, line) + else: + # A comment block. + block = Comment(start[0], end[0], string) + self.blocks.append(block) + self.current_block = block + + def make_index(self): + """ Make the index mapping lines of actual code to their associated + prefix comments. + """ + for prev, block in zip(self.blocks[:-1], self.blocks[1:]): + if not block.is_comment: + self.index[block.start_lineno] = prev + + def search_for_comment(self, lineno, default=None): + """ Find the comment block just before the given line number. + + Returns None (or the specified default) if there is no such block. + """ + if not self.index: + self.make_index() + block = self.index.get(lineno, None) + text = getattr(block, 'text', default) + return text + + +def strip_comment_marker(text): + """ Strip # markers at the front of a block of comment text. + """ + lines = [] + for line in text.splitlines(): + lines.append(line.lstrip('#')) + text = textwrap.dedent('\n'.join(lines)) + return text + + +def get_class_traits(klass): + """ Yield all of the documentation for trait definitions on a class object. + """ + # FIXME: gracefully handle errors here or in the caller? + source = inspect.getsource(klass) + cb = CommentBlocker() + cb.process_file(StringIO(source)) + mod_ast = compiler.parse(source) + class_ast = mod_ast.node.nodes[0] + for node in class_ast.code.nodes: + # FIXME: handle other kinds of assignments? + if isinstance(node, compiler.ast.Assign): + name = node.nodes[0].name + rhs = unparse(node.expr).strip() + doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) + yield name, rhs, doc + diff --git a/doc/sphinxext/numpydoc/compiler_unparse.py b/doc/sphinxext/numpydoc/compiler_unparse.py new file mode 100644 index 0000000000..8933a83db3 --- /dev/null +++ b/doc/sphinxext/numpydoc/compiler_unparse.py @@ -0,0 +1,865 @@ +""" Turn compiler.ast structures back into executable python code. + + The unparse method takes a compiler.ast tree and transforms it back into + valid python code. It is incomplete and currently only works for + import statements, function calls, function definitions, assignments, and + basic expressions. 
+ + Inspired by python-2.5-svn/Demo/parser/unparse.py + + fixme: We may want to move to using _ast trees because the compiler for + them is about 6 times faster than compiler.compile. +""" +from __future__ import division, absolute_import, print_function + +import sys +from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +def unparse(ast, single_line_functions=False): + s = StringIO() + UnparseCompilerAst(ast, s, single_line_functions) + return s.getvalue().lstrip() + +op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, + 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } + +class UnparseCompilerAst: + """ Methods in this class recursively traverse an AST and + output source code for the abstract syntax; original formatting + is disregarged. + """ + + ######################################################################### + # object interface. + ######################################################################### + + def __init__(self, tree, file = sys.stdout, single_line_functions=False): + """ Unparser(tree, file=sys.stdout) -> None. + + Print the source for tree to file. + """ + self.f = file + self._single_func = single_line_functions + self._do_indent = True + self._indent = 0 + self._dispatch(tree) + self._write("\n") + self.f.flush() + + ######################################################################### + # Unparser private interface. + ######################################################################### + + ### format, output, and dispatch methods ################################ + + def _fill(self, text = ""): + "Indent a piece of text, according to the current indentation level" + if self._do_indent: + self._write("\n"+" "*self._indent + text) + else: + self._write(text) + + def _write(self, text): + "Append a piece of text to the current line." + self.f.write(text) + + def _enter(self): + "Print ':', and increase the indentation." + self._write(": ") + self._indent += 1 + + def _leave(self): + "Decrease the indentation level." + self._indent -= 1 + + def _dispatch(self, tree): + "_dispatcher function, _dispatching tree type T to method _T." + if isinstance(tree, list): + for t in tree: + self._dispatch(t) + return + meth = getattr(self, "_"+tree.__class__.__name__) + if tree.__class__.__name__ == 'NoneType' and not self._do_indent: + return + meth(tree) + + + ######################################################################### + # compiler.ast unparsing methods. + # + # There should be one method per concrete grammar type. They are + # organized in alphabetical order. + ######################################################################### + + def _Add(self, t): + self.__binary_op(t, '+') + + def _And(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") and (") + self._write(")") + + def _AssAttr(self, t): + """ Handle assigning an attribute of an object + """ + self._dispatch(t.expr) + self._write('.'+t.attrname) + + def _Assign(self, t): + """ Expression Assignment such as "a = 1". + + This only handles assignment in expressions. Keyword assignment + is handled separately. + """ + self._fill() + for target in t.nodes: + self._dispatch(target) + self._write(" = ") + self._dispatch(t.expr) + if not self._do_indent: + self._write('; ') + + def _AssName(self, t): + """ Name on left hand side of expression. 
+ + Treat just like a name on the right side of an expression. + """ + self._Name(t) + + def _AssTuple(self, t): + """ Tuple on left hand side of an expression. + """ + + # _write each elements, separated by a comma. + for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + def _AugAssign(self, t): + """ +=,-=,*=,/=,**=, etc. operations + """ + + self._fill() + self._dispatch(t.node) + self._write(' '+t.op+' ') + self._dispatch(t.expr) + if not self._do_indent: + self._write(';') + + def _Bitand(self, t): + """ Bit and operation. + """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" & ") + + def _Bitor(self, t): + """ Bit or operation + """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" | ") + + def _CallFunc(self, t): + """ Function call. + """ + self._dispatch(t.node) + self._write("(") + comma = False + for e in t.args: + if comma: self._write(", ") + else: comma = True + self._dispatch(e) + if t.star_args: + if comma: self._write(", ") + else: comma = True + self._write("*") + self._dispatch(t.star_args) + if t.dstar_args: + if comma: self._write(", ") + else: comma = True + self._write("**") + self._dispatch(t.dstar_args) + self._write(")") + + def _Compare(self, t): + self._dispatch(t.expr) + for op, expr in t.ops: + self._write(" " + op + " ") + self._dispatch(expr) + + def _Const(self, t): + """ A constant value such as an integer value, 3, or a string, "hello". + """ + self._dispatch(t.value) + + def _Decorators(self, t): + """ Handle function decorators (eg. @has_units) + """ + for node in t.nodes: + self._dispatch(node) + + def _Dict(self, t): + self._write("{") + for i, (k, v) in enumerate(t.items): + self._dispatch(k) + self._write(": ") + self._dispatch(v) + if i < len(t.items)-1: + self._write(", ") + self._write("}") + + def _Discard(self, t): + """ Node for when return value is ignored such as in "foo(a)". + """ + self._fill() + self._dispatch(t.expr) + + def _Div(self, t): + self.__binary_op(t, '/') + + def _Ellipsis(self, t): + self._write("...") + + def _From(self, t): + """ Handle "from xyz import foo, bar as baz". + """ + # fixme: Are From and ImportFrom handled differently? 
+ self._fill("from ") + self._write(t.modname) + self._write(" import ") + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Function(self, t): + """ Handle function definitions + """ + if t.decorators is not None: + self._fill("@") + self._dispatch(t.decorators) + self._fill("def "+t.name + "(") + defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) + for i, arg in enumerate(zip(t.argnames, defaults)): + self._write(arg[0]) + if arg[1] is not None: + self._write('=') + self._dispatch(arg[1]) + if i < len(t.argnames)-1: + self._write(', ') + self._write(")") + if self._single_func: + self._do_indent = False + self._enter() + self._dispatch(t.code) + self._leave() + self._do_indent = True + + def _Getattr(self, t): + """ Handle getting an attribute of an object + """ + if isinstance(t.expr, (Div, Mul, Sub, Add)): + self._write('(') + self._dispatch(t.expr) + self._write(')') + else: + self._dispatch(t.expr) + + self._write('.'+t.attrname) + + def _If(self, t): + self._fill() + + for i, (compare,code) in enumerate(t.tests): + if i == 0: + self._write("if ") + else: + self._write("elif ") + self._dispatch(compare) + self._enter() + self._fill() + self._dispatch(code) + self._leave() + self._write("\n") + + if t.else_ is not None: + self._write("else") + self._enter() + self._fill() + self._dispatch(t.else_) + self._leave() + self._write("\n") + + def _IfExp(self, t): + self._dispatch(t.then) + self._write(" if ") + self._dispatch(t.test) + + if t.else_ is not None: + self._write(" else (") + self._dispatch(t.else_) + self._write(")") + + def _Import(self, t): + """ Handle "import xyz.foo". + """ + self._fill("import ") + + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Keyword(self, t): + """ Keyword value assignment within function calls and definitions. 
+ """ + self._write(t.name) + self._write("=") + self._dispatch(t.expr) + + def _List(self, t): + self._write("[") + for i,node in enumerate(t.nodes): + self._dispatch(node) + if i < len(t.nodes)-1: + self._write(", ") + self._write("]") + + def _Module(self, t): + if t.doc is not None: + self._dispatch(t.doc) + self._dispatch(t.node) + + def _Mul(self, t): + self.__binary_op(t, '*') + + def _Name(self, t): + self._write(t.name) + + def _NoneType(self, t): + self._write("None") + + def _Not(self, t): + self._write('not (') + self._dispatch(t.expr) + self._write(')') + + def _Or(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") or (") + self._write(")") + + def _Pass(self, t): + self._write("pass\n") + + def _Printnl(self, t): + self._fill("print ") + if t.dest: + self._write(">> ") + self._dispatch(t.dest) + self._write(", ") + comma = False + for node in t.nodes: + if comma: self._write(', ') + else: comma = True + self._dispatch(node) + + def _Power(self, t): + self.__binary_op(t, '**') + + def _Return(self, t): + self._fill("return ") + if t.value: + if isinstance(t.value, Tuple): + text = ', '.join([ name.name for name in t.value.asList() ]) + self._write(text) + else: + self._dispatch(t.value) + if not self._do_indent: + self._write('; ') + + def _Slice(self, t): + self._dispatch(t.expr) + self._write("[") + if t.lower: + self._dispatch(t.lower) + self._write(":") + if t.upper: + self._dispatch(t.upper) + #if t.step: + # self._write(":") + # self._dispatch(t.step) + self._write("]") + + def _Sliceobj(self, t): + for i, node in enumerate(t.nodes): + if i != 0: + self._write(":") + if not (isinstance(node, Const) and node.value is None): + self._dispatch(node) + + def _Stmt(self, tree): + for node in tree.nodes: + self._dispatch(node) + + def _Sub(self, t): + self.__binary_op(t, '-') + + def _Subscript(self, t): + self._dispatch(t.expr) + self._write("[") + for i, value in enumerate(t.subs): + if i != 0: + self._write(",") + self._dispatch(value) + self._write("]") + + def _TryExcept(self, t): + self._fill("try") + self._enter() + self._dispatch(t.body) + self._leave() + + for handler in t.handlers: + self._fill('except ') + self._dispatch(handler[0]) + if handler[1] is not None: + self._write(', ') + self._dispatch(handler[1]) + self._enter() + self._dispatch(handler[2]) + self._leave() + + if t.else_: + self._fill("else") + self._enter() + self._dispatch(t.else_) + self._leave() + + def _Tuple(self, t): + + if not t.nodes: + # Empty tuple. + self._write("()") + else: + self._write("(") + + # _write each elements, separated by a comma. 
+ for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + self._write(")") + + def _UnaryAdd(self, t): + self._write("+") + self._dispatch(t.expr) + + def _UnarySub(self, t): + self._write("-") + self._dispatch(t.expr) + + def _With(self, t): + self._fill('with ') + self._dispatch(t.expr) + if t.vars: + self._write(' as ') + self._dispatch(t.vars.name) + self._enter() + self._dispatch(t.body) + self._leave() + self._write('\n') + + def _int(self, t): + self._write(repr(t)) + + def __binary_op(self, t, symbol): + # Check if parenthesis are needed on left side and then dispatch + has_paren = False + left_class = str(t.left.__class__) + if (left_class in op_precedence.keys() and + op_precedence[left_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.left) + if has_paren: + self._write(')') + # Write the appropriate symbol for operator + self._write(symbol) + # Check if parenthesis are needed on the right side and then dispatch + has_paren = False + right_class = str(t.right.__class__) + if (right_class in op_precedence.keys() and + op_precedence[right_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.right) + if has_paren: + self._write(')') + + def _float(self, t): + # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' + # We prefer str here. + self._write(str(t)) + + def _str(self, t): + self._write(repr(t)) + + def _tuple(self, t): + self._write(str(t)) + + ######################################################################### + # These are the methods from the _ast modules unparse. + # + # As our needs to handle more advanced code increase, we may want to + # modify some of the methods below so that they work for compiler.ast. + ######################################################################### + +# # stmt +# def _Expr(self, tree): +# self._fill() +# self._dispatch(tree.value) +# +# def _Import(self, t): +# self._fill("import ") +# first = True +# for a in t.names: +# if first: +# first = False +# else: +# self._write(", ") +# self._write(a.name) +# if a.asname: +# self._write(" as "+a.asname) +# +## def _ImportFrom(self, t): +## self._fill("from ") +## self._write(t.module) +## self._write(" import ") +## for i, a in enumerate(t.names): +## if i == 0: +## self._write(", ") +## self._write(a.name) +## if a.asname: +## self._write(" as "+a.asname) +## # XXX(jpe) what is level for? 
+## +# +# def _Break(self, t): +# self._fill("break") +# +# def _Continue(self, t): +# self._fill("continue") +# +# def _Delete(self, t): +# self._fill("del ") +# self._dispatch(t.targets) +# +# def _Assert(self, t): +# self._fill("assert ") +# self._dispatch(t.test) +# if t.msg: +# self._write(", ") +# self._dispatch(t.msg) +# +# def _Exec(self, t): +# self._fill("exec ") +# self._dispatch(t.body) +# if t.globals: +# self._write(" in ") +# self._dispatch(t.globals) +# if t.locals: +# self._write(", ") +# self._dispatch(t.locals) +# +# def _Print(self, t): +# self._fill("print ") +# do_comma = False +# if t.dest: +# self._write(">>") +# self._dispatch(t.dest) +# do_comma = True +# for e in t.values: +# if do_comma:self._write(", ") +# else:do_comma=True +# self._dispatch(e) +# if not t.nl: +# self._write(",") +# +# def _Global(self, t): +# self._fill("global") +# for i, n in enumerate(t.names): +# if i != 0: +# self._write(",") +# self._write(" " + n) +# +# def _Yield(self, t): +# self._fill("yield") +# if t.value: +# self._write(" (") +# self._dispatch(t.value) +# self._write(")") +# +# def _Raise(self, t): +# self._fill('raise ') +# if t.type: +# self._dispatch(t.type) +# if t.inst: +# self._write(", ") +# self._dispatch(t.inst) +# if t.tback: +# self._write(", ") +# self._dispatch(t.tback) +# +# +# def _TryFinally(self, t): +# self._fill("try") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# self._fill("finally") +# self._enter() +# self._dispatch(t.finalbody) +# self._leave() +# +# def _excepthandler(self, t): +# self._fill("except ") +# if t.type: +# self._dispatch(t.type) +# if t.name: +# self._write(", ") +# self._dispatch(t.name) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _ClassDef(self, t): +# self._write("\n") +# self._fill("class "+t.name) +# if t.bases: +# self._write("(") +# for a in t.bases: +# self._dispatch(a) +# self._write(", ") +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _FunctionDef(self, t): +# self._write("\n") +# for deco in t.decorators: +# self._fill("@") +# self._dispatch(deco) +# self._fill("def "+t.name + "(") +# self._dispatch(t.args) +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _For(self, t): +# self._fill("for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# def _While(self, t): +# self._fill("while ") +# self._dispatch(t.test) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# # expr +# def _Str(self, tree): +# self._write(repr(tree.s)) +## +# def _Repr(self, t): +# self._write("`") +# self._dispatch(t.value) +# self._write("`") +# +# def _Num(self, t): +# self._write(repr(t.n)) +# +# def _ListComp(self, t): +# self._write("[") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write("]") +# +# def _GeneratorExp(self, t): +# self._write("(") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write(")") +# +# def _comprehension(self, t): +# self._write(" for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# for if_clause in t.ifs: +# self._write(" if ") +# self._dispatch(if_clause) +# +# def _IfExp(self, t): +# 
self._dispatch(t.body) +# self._write(" if ") +# self._dispatch(t.test) +# if t.orelse: +# self._write(" else ") +# self._dispatch(t.orelse) +# +# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} +# def _UnaryOp(self, t): +# self._write(self.unop[t.op.__class__.__name__]) +# self._write("(") +# self._dispatch(t.operand) +# self._write(")") +# +# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", +# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", +# "FloorDiv":"//", "Pow": "**"} +# def _BinOp(self, t): +# self._write("(") +# self._dispatch(t.left) +# self._write(")" + self.binop[t.op.__class__.__name__] + "(") +# self._dispatch(t.right) +# self._write(")") +# +# boolops = {_ast.And: 'and', _ast.Or: 'or'} +# def _BoolOp(self, t): +# self._write("(") +# self._dispatch(t.values[0]) +# for v in t.values[1:]: +# self._write(" %s " % self.boolops[t.op.__class__]) +# self._dispatch(v) +# self._write(")") +# +# def _Attribute(self,t): +# self._dispatch(t.value) +# self._write(".") +# self._write(t.attr) +# +## def _Call(self, t): +## self._dispatch(t.func) +## self._write("(") +## comma = False +## for e in t.args: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## for e in t.keywords: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## if t.starargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("*") +## self._dispatch(t.starargs) +## if t.kwargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("**") +## self._dispatch(t.kwargs) +## self._write(")") +# +# # slice +# def _Index(self, t): +# self._dispatch(t.value) +# +# def _ExtSlice(self, t): +# for i, d in enumerate(t.dims): +# if i != 0: +# self._write(': ') +# self._dispatch(d) +# +# # others +# def _arguments(self, t): +# first = True +# nonDef = len(t.args)-len(t.defaults) +# for a in t.args[0:nonDef]: +# if first:first = False +# else: self._write(", ") +# self._dispatch(a) +# for a,d in zip(t.args[nonDef:], t.defaults): +# if first:first = False +# else: self._write(", ") +# self._dispatch(a), +# self._write("=") +# self._dispatch(d) +# if t.vararg: +# if first:first = False +# else: self._write(", ") +# self._write("*"+t.vararg) +# if t.kwarg: +# if first:first = False +# else: self._write(", ") +# self._write("**"+t.kwarg) +# +## def _keyword(self, t): +## self._write(t.arg) +## self._write("=") +## self._dispatch(t.value) +# +# def _Lambda(self, t): +# self._write("lambda ") +# self._dispatch(t.args) +# self._write(": ") +# self._dispatch(t.body) + + + diff --git a/doc/sphinxext/numpy_ext/docscrape.py b/doc/sphinxext/numpydoc/docscrape.py similarity index 73% rename from doc/sphinxext/numpy_ext/docscrape.py rename to doc/sphinxext/numpydoc/docscrape.py index e9670c05f5..2b1719db5c 100644 --- a/doc/sphinxext/numpy_ext/docscrape.py +++ b/doc/sphinxext/numpydoc/docscrape.py @@ -1,13 +1,15 @@ """Extract reference documentation from the NumPy source tree. """ +from __future__ import division, absolute_import, print_function import inspect import textwrap import re import pydoc -from StringIO import StringIO from warnings import warn +import collections +import sys class Reader(object): @@ -22,10 +24,10 @@ def __init__(self, data): String with lines separated by '\n'. 
""" - if isinstance(data, list): + if isinstance(data,list): self._str = data else: - self._str = data.split('\n') # store string as list of lines + self._str = data.split('\n') # store string as list of lines self.reset() @@ -33,7 +35,7 @@ def __getitem__(self, n): return self._str[n] def reset(self): - self._l = 0 # current line nr + self._l = 0 # current line nr def read(self): if not self.eof(): @@ -60,12 +62,11 @@ def read_to_condition(self, condition_func): return self[start:self._l] self._l += 1 if self.eof(): - return self[start:self._l + 1] + return self[start:self._l+1] return [] def read_to_next_empty_line(self): self.seek_next_non_empty_line() - def is_empty(line): return not line.strip() return self.read_to_condition(is_empty) @@ -75,7 +76,7 @@ def is_unindented(line): return (line.strip() and (len(line.lstrip()) == len(line))) return self.read_to_condition(is_unindented) - def peek(self, n=0): + def peek(self,n=0): if self._l + n < len(self._str): return self[self._l + n] else: @@ -111,11 +112,11 @@ def __init__(self, docstring, config={}): self._parse() - def __getitem__(self, key): + def __getitem__(self,key): return self._parsed_data[key] - def __setitem__(self, key, val): - if not self._parsed_data.has_key(key): + def __setitem__(self,key,val): + if key not in self._parsed_data: warn("Unknown section %s" % key) else: self._parsed_data[key] = val @@ -131,27 +132,25 @@ def _is_at_section(self): if l1.startswith('.. index::'): return True - l2 = self._doc.peek(1).strip() # ---------- or ========== - return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1)) + l2 = self._doc.peek(1).strip() # ---------- or ========== + return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) - def _strip(self, doc): + def _strip(self,doc): i = 0 j = 0 - for i, line in enumerate(doc): - if line.strip(): - break + for i,line in enumerate(doc): + if line.strip(): break - for j, line in enumerate(doc[::-1]): - if line.strip(): - break + for j,line in enumerate(doc[::-1]): + if line.strip(): break - return doc[i:len(doc) - j] + return doc[i:len(doc)-j] def _read_to_next_section(self): section = self._doc.read_to_next_empty_line() while not self._is_at_section() and not self._doc.eof(): - if not self._doc.peek(-1).strip(): # previous line was empty + if not self._doc.peek(-1).strip(): # previous line was empty section += [''] section += self._doc.read_to_next_empty_line() @@ -163,14 +162,14 @@ def _read_sections(self): data = self._read_to_next_section() name = data[0].strip() - if name.startswith('..'): # index section + if name.startswith('..'): # index section yield name, data[1:] elif len(data) < 2: yield StopIteration else: yield name, self._strip(data[2:]) - def _parse_param_list(self, content): + def _parse_param_list(self,content): r = Reader(content) params = [] while not r.eof(): @@ -183,13 +182,13 @@ def _parse_param_list(self, content): desc = r.read_to_next_unindented_line() desc = dedent_lines(desc) - params.append((arg_name, arg_type, desc)) + params.append((arg_name,arg_type,desc)) return params + _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) - def _parse_see_also(self, content): """ func_name : Descriptive text @@ -222,8 +221,7 @@ def push_item(name, rest): rest = [] for line in content: - if not line.strip(): - continue + if not line.strip(): continue m = self._name_rgx.match(line) if m and line[m.end():].strip().startswith(':'): @@ -237,7 +235,8 @@ def push_item(name, rest): current_func = None if ',' in line: 
for func in line.split(','): - push_item(func, []) + if func.strip(): + push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: @@ -269,13 +268,17 @@ def _parse_summary(self): if self._is_at_section(): return - summary = self._doc.read_to_next_empty_line() - summary_str = " ".join([s.strip() for s in summary]).strip() - if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): - self['Signature'] = summary_str - if not self._is_at_section(): - self['Summary'] = self._doc.read_to_next_empty_line() - else: + # If several signatures present, take the last one + while True: + summary = self._doc.read_to_next_empty_line() + summary_str = " ".join([s.strip() for s in summary]).strip() + if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): + self['Signature'] = summary_str + if not self._is_at_section(): + continue + break + + if summary is not None: self['Summary'] = summary if not self._is_at_section(): @@ -285,12 +288,11 @@ def _parse(self): self._doc.reset() self._parse_summary() - for (section, content) in self._read_sections(): + for (section,content) in self._read_sections(): if not section.startswith('..'): - section = ' '.join([s.capitalize() - for s in section.split(' ')]) - if section in ('Parameters', 'Attributes', 'Methods', - 'Returns', 'Raises', 'Warns'): + section = ' '.join([s.capitalize() for s in section.split(' ')]) + if section in ('Parameters', 'Returns', 'Raises', 'Warns', + 'Other Parameters', 'Attributes', 'Methods'): self[section] = self._parse_param_list(content) elif section.startswith('.. index::'): self['index'] = self._parse_index(section, content) @@ -302,17 +304,17 @@ def _parse(self): # string conversion routines def _str_header(self, name, symbol='-'): - return [name, len(name) * symbol] + return [name, len(name)*symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: - out += [' ' * indent + line] + out += [' '*indent + line] return out def _str_signature(self): if self['Signature']: - return [self['Signature'].replace('*', '\*')] + [''] + return [self['Signature'].replace('*','\*')] + [''] else: return [''] @@ -332,8 +334,11 @@ def _str_param_list(self, name): out = [] if self[name]: out += self._str_header(name) - for param, param_type, desc in self[name]: - out += ['%s : %s' % (param, param_type)] + for param,param_type,desc in self[name]: + if param_type: + out += ['%s : %s' % (param, param_type)] + else: + out += [param] out += self._str_indent(desc) out += [''] return out @@ -347,8 +352,7 @@ def _str_section(self, name): return out def _str_see_also(self, func_role): - if not self['See Also']: - return [] + if not self['See Also']: return [] out = [] out += self._str_header("See Also") last_had_desc = True @@ -375,8 +379,8 @@ def _str_see_also(self, func_role): def _str_index(self): idx = self['index'] out = [] - out += ['.. index:: %s' % idx.get('default', '')] - for section, references in idx.iteritems(): + out += ['.. 
index:: %s' % idx.get('default','')] + for section, references in idx.items(): if section == 'default': continue out += [' :%s: %s' % (section, ', '.join(references))] @@ -387,11 +391,12 @@ def __str__(self, func_role=''): out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Raises'): + for param_list in ('Parameters', 'Returns', 'Other Parameters', + 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_section('Warnings') out += self._str_see_also(func_role) - for s in ('Notes', 'References', 'Examples'): + for s in ('Notes','References','Examples'): out += self._str_section(s) for param_list in ('Attributes', 'Methods'): out += self._str_param_list(param_list) @@ -399,27 +404,25 @@ def __str__(self, func_role=''): return '\n'.join(out) -def indent(str, indent=4): - indent_str = ' ' * indent +def indent(str,indent=4): + indent_str = ' '*indent if str is None: return indent_str lines = str.split('\n') return '\n'.join(indent_str + l for l in lines) - def dedent_lines(lines): """Deindent a list of lines maximally""" return textwrap.dedent("\n".join(lines)).split("\n") - def header(text, style='-'): - return text + '\n' + style * len(text) + '\n' + return text + '\n' + style*len(text) + '\n' class FunctionDoc(NumpyDocString): def __init__(self, func, role='func', doc=None, config={}): self._f = func - self._role = role # e.g. "func" or "meth" + self._role = role # e.g. "func" or "meth" if doc is None: if func is None: @@ -431,11 +434,14 @@ def __init__(self, func, role='func', doc=None, config={}): func, func_name = self.get_func() try: # try to read signature - argspec = inspect.getargspec(func) + if sys.version_info[0] >= 3: + argspec = inspect.getfullargspec(func) + else: + argspec = inspect.getargspec(func) argspec = inspect.formatargspec(*argspec) - argspec = argspec.replace('*', '\*') + argspec = argspec.replace('*','\*') signature = '%s%s' % (func_name, argspec) - except TypeError, e: + except TypeError as e: signature = '%s()' % func_name self['Signature'] = signature @@ -457,9 +463,9 @@ def __str__(self): 'meth': 'method'} if self._role: - if not roles.has_key(self._role): - print "Warning: invalid role %s" % self._role - out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''), + if self._role not in roles: + print("Warning: invalid role %s" % self._role) + out += '.. 
%s:: %s\n \n\n' % (roles.get(self._role,''), func_name) out += super(FunctionDoc, self).__str__(func_role=self._role) @@ -467,8 +473,11 @@ def __str__(self): class ClassDoc(NumpyDocString): + + extra_public_methods = ['__call__'] + def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, - config=None): + config={}): if not inspect.isclass(cls) and cls is not None: raise ValueError("Expected a class or None, but got %r" % cls) self._cls = cls @@ -484,24 +493,39 @@ def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, NumpyDocString.__init__(self, doc) - if config is not None and config.get('show_class_members', True): - if not self['Methods']: - self['Methods'] = [(name, '', '') - for name in sorted(self.methods)] - if not self['Attributes']: - self['Attributes'] = [(name, '', '') - for name in sorted(self.properties)] + if config.get('show_class_members', True): + def splitlines_x(s): + if not s: + return [] + else: + return s.splitlines() + + for field, items in [('Methods', self.methods), + ('Attributes', self.properties)]: + if not self[field]: + doc_list = [] + for name in sorted(items): + try: + doc_item = pydoc.getdoc(getattr(self._cls, name)) + doc_list.append((name, '', splitlines_x(doc_item))) + except AttributeError: + pass # method doesn't exist + self[field] = doc_list @property def methods(self): if self._cls is None: return [] - return [name for name, func in inspect.getmembers(self._cls) - if not name.startswith('_') and callable(func)] + return [name for name,func in inspect.getmembers(self._cls) + if ((not name.startswith('_') + or name in self.extra_public_methods) + and isinstance(func, collections.Callable))] @property def properties(self): if self._cls is None: return [] - return [name for name, func in inspect.getmembers(self._cls) - if not name.startswith('_') and func is None] + return [name for name,func in inspect.getmembers(self._cls) + if not name.startswith('_') and + (func is None or isinstance(func, property) or + inspect.isgetsetdescriptor(func))] diff --git a/doc/sphinxext/numpy_ext/docscrape_sphinx.py b/doc/sphinxext/numpydoc/docscrape_sphinx.py similarity index 65% rename from doc/sphinxext/numpy_ext/docscrape_sphinx.py rename to doc/sphinxext/numpydoc/docscrape_sphinx.py index bcf7e70731..cdc2a37d17 100644 --- a/doc/sphinxext/numpy_ext/docscrape_sphinx.py +++ b/doc/sphinxext/numpydoc/docscrape_sphinx.py @@ -1,18 +1,24 @@ -import re -import inspect -import textwrap -import pydoc +from __future__ import division, absolute_import, print_function + +import sys, re, inspect, textwrap, pydoc import sphinx -from docscrape import NumpyDocString -from docscrape import FunctionDoc -from docscrape import ClassDoc +import collections +from .docscrape import NumpyDocString, FunctionDoc, ClassDoc + +if sys.version_info[0] >= 3: + sixu = lambda s: s +else: + sixu = lambda s: unicode(s, 'unicode_escape') class SphinxDocString(NumpyDocString): - def __init__(self, docstring, config=None): - config = {} if config is None else config - self.use_plots = config.get('use_plots', False) + def __init__(self, docstring, config={}): NumpyDocString.__init__(self, docstring, config=config) + self.load_config(config) + + def load_config(self, config): + self.use_plots = config.get('use_plots', False) + self.class_members_toctree = config.get('class_members_toctree', True) # string conversion routines def _str_header(self, name, symbol='`'): @@ -24,7 +30,7 @@ def _str_field_list(self, name): def _str_indent(self, doc, indent=4): out = [] for line in 
doc: - out += [' ' * indent + line] + out += [' '*indent + line] return out def _str_signature(self): @@ -40,16 +46,37 @@ def _str_summary(self): def _str_extended_summary(self): return self['Extended Summary'] + [''] + def _str_returns(self): + out = [] + if self['Returns']: + out += self._str_field_list('Returns') + out += [''] + for param, param_type, desc in self['Returns']: + if param_type: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + else: + out += self._str_indent([param.strip()]) + if desc: + out += [''] + out += self._str_indent(desc, 8) + out += [''] + return out + def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: - out += self._str_indent(['**%s** : %s' % (param.strip(), - param_type)]) - out += [''] - out += self._str_indent(desc, 8) + if param_type: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + else: + out += self._str_indent(['**%s**' % param.strip()]) + if desc: + out += [''] + out += self._str_indent(desc, 8) out += [''] return out @@ -79,28 +106,36 @@ def _str_member_list(self, name): others = [] for param, param_type, desc in self[name]: param = param.strip() - if not self._obj or hasattr(self._obj, param): + + # Check if the referenced member can have a docstring or not + param_obj = getattr(self._obj, param, None) + if not (callable(param_obj) + or isinstance(param_obj, property) + or inspect.isgetsetdescriptor(param_obj)): + param_obj = None + + if param_obj and (pydoc.getdoc(param_obj) or not desc): + # Referenced object has a docstring autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: - # GAEL: Toctree commented out below because it creates - # hundreds of sphinx warnings - # out += ['.. autosummary::', ' :toctree:', ''] - out += ['.. autosummary::', ''] - out += autosum + out += ['.. autosummary::'] + if self.class_members_toctree: + out += [' :toctree:'] + out += [''] + autosum if others: - maxlen_0 = max([len(x[0]) for x in others]) - maxlen_1 = max([len(x[1]) for x in others]) - hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10 - fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) - n_indent = maxlen_0 + maxlen_1 + 4 - out += [hdr] + maxlen_0 = max(3, max([len(x[0]) for x in others])) + hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10 + fmt = sixu('%%%ds %%s ') % (maxlen_0,) + out += ['', hdr] for param, param_type, desc in others: - out += [fmt % (param.strip(), param_type)] - out += self._str_indent(desc, n_indent) + desc = sixu(" ").join(x.strip() for x in desc).strip() + if param_type: + desc = "(%s) %s" % (param_type, desc) + out += [fmt % (param.strip(), desc)] out += [hdr] out += [''] return out @@ -136,8 +171,8 @@ def _str_index(self): if len(idx) == 0: return out - out += ['.. index:: %s' % idx.get('default', '')] - for section, references in idx.iteritems(): + out += ['.. index:: %s' % idx.get('default','')] + for section, references in idx.items(): if section == 'default': continue elif section == 'refguide': @@ -157,9 +192,9 @@ def _str_references(self): # Latex collects all references to a separate bibliography, # so we need to insert links to it if sphinx.__version__ >= "0.6": - out += ['.. only:: latex', ''] + out += ['.. only:: latex',''] else: - out += ['.. latexonly::', ''] + out += ['.. latexonly::',''] items = [] for line in self['References']: m = re.match(r'.. 
\[([a-z0-9._-]+)\]', line, re.I) @@ -188,7 +223,9 @@ def __str__(self, indent=0, func_role="obj"): out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Raises'): + out += self._str_param_list('Parameters') + out += self._str_returns() + for param_list in ('Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) @@ -197,35 +234,32 @@ def __str__(self, indent=0, func_role="obj"): out += self._str_examples() for param_list in ('Attributes', 'Methods'): out += self._str_member_list(param_list) - out = self._str_indent(out, indent) + out = self._str_indent(out,indent) return '\n'.join(out) - class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): - self.use_plots = config.get('use_plots', False) + self.load_config(config) FunctionDoc.__init__(self, obj, doc=doc, config=config) - class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): - self.use_plots = config.get('use_plots', False) + self.load_config(config) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) - class SphinxObjDoc(SphinxDocString): - def __init__(self, obj, doc=None, config=None): + def __init__(self, obj, doc=None, config={}): self._f = obj + self.load_config(config) SphinxDocString.__init__(self, doc, config=config) - def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' - elif callable(obj): + elif isinstance(obj, collections.Callable): what = 'function' else: what = 'object' diff --git a/doc/sphinxext/numpydoc/linkcode.py b/doc/sphinxext/numpydoc/linkcode.py new file mode 100644 index 0000000000..1ad3ab82cb --- /dev/null +++ b/doc/sphinxext/numpydoc/linkcode.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +""" + linkcode + ~~~~~~~~ + + Add external links to module code in Python object descriptions. + + :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import collections + +warnings.warn("This extension has been accepted to Sphinx upstream. 
" + "Use the version from there (Sphinx >= 1.2) " + "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode", + FutureWarning, stacklevel=1) + + +from docutils import nodes + +from sphinx import addnodes +from sphinx.locale import _ +from sphinx.errors import SphinxError + +class LinkcodeError(SphinxError): + category = "linkcode error" + +def doctree_read(app, doctree): + env = app.builder.env + + resolve_target = getattr(env.config, 'linkcode_resolve', None) + if not isinstance(env.config.linkcode_resolve, collections.Callable): + raise LinkcodeError( + "Function `linkcode_resolve` is not given in conf.py") + + domain_keys = dict( + py=['module', 'fullname'], + c=['names'], + cpp=['names'], + js=['object', 'fullname'], + ) + + for objnode in doctree.traverse(addnodes.desc): + domain = objnode.get('domain') + uris = set() + for signode in objnode: + if not isinstance(signode, addnodes.desc_signature): + continue + + # Convert signode to a specified format + info = {} + for key in domain_keys.get(domain, []): + value = signode.get(key) + if not value: + value = '' + info[key] = value + if not info: + continue + + # Call user code to resolve the link + uri = resolve_target(domain, info) + if not uri: + # no source + continue + + if uri in uris or not uri: + # only one link per name, please + continue + uris.add(uri) + + onlynode = addnodes.only(expr='html') + onlynode += nodes.reference('', '', internal=False, refuri=uri) + onlynode[0] += nodes.inline('', _('[source]'), + classes=['viewcode-link']) + signode += onlynode + +def setup(app): + app.connect('doctree-read', doctree_read) + app.add_config_value('linkcode_resolve', None, '') diff --git a/doc/sphinxext/numpy_ext/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc.py similarity index 63% rename from doc/sphinxext/numpy_ext/numpydoc.py rename to doc/sphinxext/numpydoc/numpydoc.py index 62adb56ae7..4861aa90ed 100644 --- a/doc/sphinxext/numpy_ext/numpydoc.py +++ b/doc/sphinxext/numpydoc/numpydoc.py @@ -10,52 +10,65 @@ - Convert Parameters etc. sections to field lists. - Convert See Also section to a See also entry. - Renumber references. -- Extract the signature from the docstring, if it can't be determined - otherwise. +- Extract the signature from the docstring, if it can't be determined otherwise. -.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard +.. 
[1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ +from __future__ import division, absolute_import, print_function -import os -import re -import pydoc -from docscrape_sphinx import get_doc_object -from docscrape_sphinx import SphinxDocString -from sphinx.util.compat import Directive +import os, sys, re, pydoc +import sphinx import inspect +import collections + +if sphinx.__version__ < '1.0.1': + raise RuntimeError("Sphinx 1.0.1 or newer is required") + +from .docscrape_sphinx import get_doc_object, SphinxDocString + +if sys.version_info[0] >= 3: + sixu = lambda s: s +else: + sixu = lambda s: unicode(s, 'unicode_escape') def mangle_docstrings(app, what, name, obj, options, lines, reference_offset=[0]): cfg = dict(use_plots=app.config.numpydoc_use_plots, - show_class_members=app.config.numpydoc_show_class_members) + show_class_members=app.config.numpydoc_show_class_members, + class_members_toctree=app.config.numpydoc_class_members_toctree, + ) if what == 'module': # Strip top title - title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', - re.I | re.S) - lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") + title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'), + re.I|re.S) + lines[:] = title_re.sub(sixu(''), sixu("\n").join(lines)).split(sixu("\n")) else: - doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg) - lines[:] = unicode(doc).split(u"\n") + doc = get_doc_object(obj, what, sixu("\n").join(lines), config=cfg) + if sys.version_info[0] >= 3: + doc = str(doc) + else: + doc = unicode(doc) + lines[:] = doc.split(sixu("\n")) if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ obj.__name__: if hasattr(obj, '__module__'): - v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) + v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) - lines += [u'', u'.. htmlonly::', ''] - lines += [u' %s' % x for x in + lines += [sixu(''), sixu('.. htmlonly::'), sixu('')] + lines += [sixu(' %s') % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() - m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I) + m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I) if m: references.append(m.group(1)) @@ -64,38 +77,36 @@ def mangle_docstrings(app, what, name, obj, options, lines, if references: for i, line in enumerate(lines): for r in references: - if re.match(ur'^\d+$', r): - new_r = u"R%d" % (reference_offset[0] + int(r)) + if re.match(sixu('^\\d+$'), r): + new_r = sixu("R%d") % (reference_offset[0] + int(r)) else: - new_r = u"%s%d" % (r, reference_offset[0]) - lines[i] = lines[i].replace(u'[%s]_' % r, - u'[%s]_' % new_r) - lines[i] = lines[i].replace(u'.. [%s]' % r, - u'.. [%s]' % new_r) + new_r = sixu("%s%d") % (r, reference_offset[0]) + lines[i] = lines[i].replace(sixu('[%s]_') % r, + sixu('[%s]_') % new_r) + lines[i] = lines[i].replace(sixu('.. [%s]') % r, + sixu('.. 
[%s]') % new_r) reference_offset[0] += len(references) - -def mangle_signature(app, what, name, obj, - options, sig, retann): +def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` if (inspect.isclass(obj) and (not hasattr(obj, '__init__') or 'initializes x; see ' in pydoc.getdoc(obj.__init__))): return '', '' - if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): - return - if not hasattr(obj, '__doc__'): - return + if not (isinstance(obj, collections.Callable) or hasattr(obj, '__argspec_is_invalid_')): return + if not hasattr(obj, '__doc__'): return doc = SphinxDocString(pydoc.getdoc(obj)) if doc['Signature']: - sig = re.sub(u"^[^(]*", u"", doc['Signature']) - return sig, u'' - + sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature']) + return sig, sixu('') def setup(app, get_doc_object_=get_doc_object): + if not hasattr(app, 'add_config_value'): + return # probably called by nose, better bail out + global get_doc_object get_doc_object = get_doc_object_ @@ -104,20 +115,20 @@ def setup(app, get_doc_object_=get_doc_object): app.add_config_value('numpydoc_edit_link', None, False) app.add_config_value('numpydoc_use_plots', None, False) app.add_config_value('numpydoc_show_class_members', True, True) + app.add_config_value('numpydoc_class_members_toctree', True, True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) -#----------------------------------------------------------------------------- +#------------------------------------------------------------------------------ # Docstring-mangling domains -#----------------------------------------------------------------------------- +#------------------------------------------------------------------------------ from docutils.statemachine import ViewList from sphinx.domains.c import CDomain from sphinx.domains.python import PythonDomain - class ManglingDomainBase(object): directive_mangling_map = {} @@ -126,11 +137,10 @@ def __init__(self, *a, **kw): self.wrap_mangling_directives() def wrap_mangling_directives(self): - for name, objtype in self.directive_mangling_map.items(): + for name, objtype in list(self.directive_mangling_map.items()): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype) - class NumpyPythonDomain(ManglingDomainBase, PythonDomain): name = 'np' directive_mangling_map = { @@ -142,7 +152,7 @@ class NumpyPythonDomain(ManglingDomainBase, PythonDomain): 'staticmethod': 'function', 'attribute': 'attribute', } - + indices = [] class NumpyCDomain(ManglingDomainBase, CDomain): name = 'np-c' @@ -154,7 +164,6 @@ class NumpyCDomain(ManglingDomainBase, CDomain): 'var': 'object', } - def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): diff --git a/doc/sphinxext/numpydoc/phantom_import.py b/doc/sphinxext/numpydoc/phantom_import.py new file mode 100644 index 0000000000..9a60b4a35b --- /dev/null +++ b/doc/sphinxext/numpydoc/phantom_import.py @@ -0,0 +1,167 @@ +""" +============== +phantom_import +============== + +Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar +extensions to use docstrings loaded from an XML file. + +This extension loads an XML file in the Pydocweb format [1] and +creates a dummy module that contains the specified docstrings. This +can be used to get the current docstrings from a Pydocweb instance +without needing to rebuild the documented module. + +.. 
[1] http://code.google.com/p/pydocweb + +""" +from __future__ import division, absolute_import, print_function + +import imp, sys, compiler, types, os, inspect, re + +def setup(app): + app.connect('builder-inited', initialize) + app.add_config_value('phantom_import_file', None, True) + +def initialize(app): + fn = app.config.phantom_import_file + if (fn and os.path.isfile(fn)): + print("[numpydoc] Phantom importing modules from", fn, "...") + import_phantom_module(fn) + +#------------------------------------------------------------------------------ +# Creating 'phantom' modules from an XML description +#------------------------------------------------------------------------------ +def import_phantom_module(xml_file): + """ + Insert a fake Python module to sys.modules, based on a XML file. + + The XML file is expected to conform to Pydocweb DTD. The fake + module will contain dummy objects, which guarantee the following: + + - Docstrings are correct. + - Class inheritance relationships are correct (if present in XML). + - Function argspec is *NOT* correct (even if present in XML). + Instead, the function signature is prepended to the function docstring. + - Class attributes are *NOT* correct; instead, they are dummy objects. + + Parameters + ---------- + xml_file : str + Name of an XML file to read + + """ + import lxml.etree as etree + + object_cache = {} + + tree = etree.parse(xml_file) + root = tree.getroot() + + # Sort items so that + # - Base classes come before classes inherited from them + # - Modules come before their contents + all_nodes = dict([(n.attrib['id'], n) for n in root]) + + def _get_bases(node, recurse=False): + bases = [x.attrib['ref'] for x in node.findall('base')] + if recurse: + j = 0 + while True: + try: + b = bases[j] + except IndexError: break + if b in all_nodes: + bases.extend(_get_bases(all_nodes[b])) + j += 1 + return bases + + type_index = ['module', 'class', 'callable', 'object'] + + def base_cmp(a, b): + x = cmp(type_index.index(a.tag), type_index.index(b.tag)) + if x != 0: return x + + if a.tag == 'class' and b.tag == 'class': + a_bases = _get_bases(a, recurse=True) + b_bases = _get_bases(b, recurse=True) + x = cmp(len(a_bases), len(b_bases)) + if x != 0: return x + if a.attrib['id'] in b_bases: return -1 + if b.attrib['id'] in a_bases: return 1 + + return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) + + nodes = root.getchildren() + nodes.sort(base_cmp) + + # Create phantom items + for node in nodes: + name = node.attrib['id'] + doc = (node.text or '').decode('string-escape') + "\n" + if doc == "\n": doc = "" + + # create parent, if missing + parent = name + while True: + parent = '.'.join(parent.split('.')[:-1]) + if not parent: break + if parent in object_cache: break + obj = imp.new_module(parent) + object_cache[parent] = obj + sys.modules[parent] = obj + + # create object + if node.tag == 'module': + obj = imp.new_module(name) + obj.__doc__ = doc + sys.modules[name] = obj + elif node.tag == 'class': + bases = [object_cache[b] for b in _get_bases(node) + if b in object_cache] + bases.append(object) + init = lambda self: None + init.__doc__ = doc + obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) + obj.__name__ = name.split('.')[-1] + elif node.tag == 'callable': + funcname = node.attrib['id'].split('.')[-1] + argspec = node.attrib.get('argspec') + if argspec: + argspec = re.sub('^[^(]*', '', argspec) + doc = "%s%s\n\n%s" % (funcname, argspec, doc) + obj = lambda: 0 + obj.__argspec_is_invalid_ = True + if 
sys.version_info[0] >= 3: + obj.__name__ = funcname + else: + obj.func_name = funcname + obj.__name__ = name + obj.__doc__ = doc + if inspect.isclass(object_cache[parent]): + obj.__objclass__ = object_cache[parent] + else: + class Dummy(object): pass + obj = Dummy() + obj.__name__ = name + obj.__doc__ = doc + if inspect.isclass(object_cache[parent]): + obj.__get__ = lambda: None + object_cache[name] = obj + + if parent: + if inspect.ismodule(object_cache[parent]): + obj.__module__ = parent + setattr(object_cache[parent], name.split('.')[-1], obj) + + # Populate items + for node in root: + obj = object_cache.get(node.attrib['id']) + if obj is None: continue + for ref in node.findall('ref'): + if node.tag == 'class': + if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) + else: + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) diff --git a/doc/sphinxext/numpydoc/plot_directive.py b/doc/sphinxext/numpydoc/plot_directive.py new file mode 100644 index 0000000000..2014f85707 --- /dev/null +++ b/doc/sphinxext/numpydoc/plot_directive.py @@ -0,0 +1,642 @@ +""" +A special directive for generating a matplotlib plot. + +.. warning:: + + This is a hacked version of plot_directive.py from Matplotlib. + It's very much subject to change! + + +Usage +----- + +Can be used like this:: + + .. plot:: examples/example.py + + .. plot:: + + import matplotlib.pyplot as plt + plt.plot([1,2,3], [4,5,6]) + + .. plot:: + + A plotting example: + + >>> import matplotlib.pyplot as plt + >>> plt.plot([1,2,3], [4,5,6]) + +The content is interpreted as doctest formatted if it has a line starting +with ``>>>``. + +The ``plot`` directive supports the options + + format : {'python', 'doctest'} + Specify the format of the input + + include-source : bool + Whether to display the source code. Default can be changed in conf.py + +and the ``image`` directive options ``alt``, ``height``, ``width``, +``scale``, ``align``, ``class``. + +Configuration options +--------------------- + +The plot directive has the following configuration options: + + plot_include_source + Default value for the include-source option + + plot_pre_code + Code that should be executed before each plot. + + plot_basedir + Base directory, to which plot:: file names are relative to. + (If None or empty, file names are relative to the directoly where + the file containing the directive is.) + + plot_formats + File formats to generate. List of tuples or strings:: + + [(suffix, dpi), suffix, ...] + + that determine the file format and the DPI. For entries whose + DPI was omitted, sensible defaults are chosen. + + plot_html_show_formats + Whether to show links to the files in HTML. + +TODO +---- + +* Refactor Latex output; now it's plain images, but it would be nice + to make them appear side-by-side, or in floats. 
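Example
-------

A minimal ``conf.py`` sketch wiring up the options above (the extension
path given here is an assumption; a plot_directive also ships with
matplotlib as ``matplotlib.sphinxext.plot_directive``)::

    extensions = ['numpydoc.plot_directive']
    plot_include_source = True
    plot_pre_code = 'import numpy as np'
    plot_formats = [('png', 80), 'pdf']
    plot_html_show_formats = True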
+ +""" +from __future__ import division, absolute_import, print_function + +import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback +import sphinx + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from io import StringIO + +import warnings +warnings.warn("A plot_directive module is also available under " + "matplotlib.sphinxext; expect this numpydoc.plot_directive " + "module to be deprecated after relevant features have been " + "integrated there.", + FutureWarning, stacklevel=2) + + +#------------------------------------------------------------------------------ +# Registration hook +#------------------------------------------------------------------------------ + +def setup(app): + setup.app = app + setup.config = app.config + setup.confdir = app.confdir + + app.add_config_value('plot_pre_code', '', True) + app.add_config_value('plot_include_source', False, True) + app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) + app.add_config_value('plot_basedir', None, True) + app.add_config_value('plot_html_show_formats', True, True) + + app.add_directive('plot', plot_directive, True, (0, 1, False), + **plot_directive_options) + +#------------------------------------------------------------------------------ +# plot:: directive +#------------------------------------------------------------------------------ +from docutils.parsers.rst import directives +from docutils import nodes + +def plot_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + return run(arguments, content, options, state_machine, state, lineno) +plot_directive.__doc__ = __doc__ + +def _option_boolean(arg): + if not arg or not arg.strip(): + # no argument given, assume used as a flag + return True + elif arg.strip().lower() in ('no', '0', 'false'): + return False + elif arg.strip().lower() in ('yes', '1', 'true'): + return True + else: + raise ValueError('"%s" unknown boolean' % arg) + +def _option_format(arg): + return directives.choice(arg, ('python', 'lisp')) + +def _option_align(arg): + return directives.choice(arg, ("top", "middle", "bottom", "left", "center", + "right")) + +plot_directive_options = {'alt': directives.unchanged, + 'height': directives.length_or_unitless, + 'width': directives.length_or_percentage_or_unitless, + 'scale': directives.nonnegative_int, + 'align': _option_align, + 'class': directives.class_option, + 'include-source': _option_boolean, + 'format': _option_format, + } + +#------------------------------------------------------------------------------ +# Generating output +#------------------------------------------------------------------------------ + +from docutils import nodes, utils + +try: + # Sphinx depends on either Jinja or Jinja2 + import jinja2 + def format_template(template, **kw): + return jinja2.Template(template).render(**kw) +except ImportError: + import jinja + def format_template(template, **kw): + return jinja.from_string(template, **kw) + +TEMPLATE = """ +{{ source_code }} + +{{ only_html }} + + {% if source_link or (html_show_formats and not multi_image) %} + ( + {%- if source_link -%} + `Source code <{{ source_link }}>`__ + {%- endif -%} + {%- if html_show_formats and not multi_image -%} + {%- for img in images -%} + {%- for fmt in img.formats -%} + {%- if source_link or not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + {%- endfor -%} + {%- endif -%} + ) + {% endif %} + + {% for img in images %} + .. 
figure:: {{ build_dir }}/{{ img.basename }}.png + {%- for option in options %} + {{ option }} + {% endfor %} + + {% if html_show_formats and multi_image -%} + ( + {%- for fmt in img.formats -%} + {%- if not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + ) + {%- endif -%} + {% endfor %} + +{{ only_latex }} + + {% for img in images %} + .. image:: {{ build_dir }}/{{ img.basename }}.pdf + {% endfor %} + +""" + +class ImageFile(object): + def __init__(self, basename, dirname): + self.basename = basename + self.dirname = dirname + self.formats = [] + + def filename(self, format): + return os.path.join(self.dirname, "%s.%s" % (self.basename, format)) + + def filenames(self): + return [self.filename(fmt) for fmt in self.formats] + +def run(arguments, content, options, state_machine, state, lineno): + if arguments and content: + raise RuntimeError("plot:: directive can't have both args and content") + + document = state_machine.document + config = document.settings.env.config + + options.setdefault('include-source', config.plot_include_source) + + # determine input + rst_file = document.attributes['source'] + rst_dir = os.path.dirname(rst_file) + + if arguments: + if not config.plot_basedir: + source_file_name = os.path.join(rst_dir, + directives.uri(arguments[0])) + else: + source_file_name = os.path.join(setup.confdir, config.plot_basedir, + directives.uri(arguments[0])) + code = open(source_file_name, 'r').read() + output_base = os.path.basename(source_file_name) + else: + source_file_name = rst_file + code = textwrap.dedent("\n".join(map(str, content))) + counter = document.attributes.get('_plot_counter', 0) + 1 + document.attributes['_plot_counter'] = counter + base, ext = os.path.splitext(os.path.basename(source_file_name)) + output_base = '%s-%d.py' % (base, counter) + + base, source_ext = os.path.splitext(output_base) + if source_ext in ('.py', '.rst', '.txt'): + output_base = base + else: + source_ext = '' + + # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames + output_base = output_base.replace('.', '-') + + # is it in doctest format? 
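# Doctest-style content (any line starting with >>>) is detected
# automatically below; an explicit :format: option on the directive
# overrides that guess in either direction.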
+ is_doctest = contains_doctest(code) + if 'format' in options: + if options['format'] == 'python': + is_doctest = False + else: + is_doctest = True + + # determine output directory name fragment + source_rel_name = relpath(source_file_name, setup.confdir) + source_rel_dir = os.path.dirname(source_rel_name) + while source_rel_dir.startswith(os.path.sep): + source_rel_dir = source_rel_dir[1:] + + # build_dir: where to place output files (temporarily) + build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), + 'plot_directive', + source_rel_dir) + if not os.path.exists(build_dir): + os.makedirs(build_dir) + + # output_dir: final location in the builder's directory + dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, + source_rel_dir)) + + # how to link to files from the RST file + dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), + source_rel_dir).replace(os.path.sep, '/') + build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') + source_link = dest_dir_link + '/' + output_base + source_ext + + # make figures + try: + results = makefig(code, source_file_name, build_dir, output_base, + config) + errors = [] + except PlotError as err: + reporter = state.memo.reporter + sm = reporter.system_message( + 2, "Exception occurred in plotting %s: %s" % (output_base, err), + line=lineno) + results = [(code, [])] + errors = [sm] + + # generate output restructuredtext + total_lines = [] + for j, (code_piece, images) in enumerate(results): + if options['include-source']: + if is_doctest: + lines = [''] + lines += [row.rstrip() for row in code_piece.split('\n')] + else: + lines = ['.. code-block:: python', ''] + lines += [' %s' % row.rstrip() + for row in code_piece.split('\n')] + source_code = "\n".join(lines) + else: + source_code = "" + + opts = [':%s: %s' % (key, val) for key, val in list(options.items()) + if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] + + only_html = ".. only:: html" + only_latex = ".. 
only:: latex" + + if j == 0: + src_link = source_link + else: + src_link = None + + result = format_template( + TEMPLATE, + dest_dir=dest_dir_link, + build_dir=build_dir_link, + source_link=src_link, + multi_image=len(images) > 1, + only_html=only_html, + only_latex=only_latex, + options=opts, + images=images, + source_code=source_code, + html_show_formats=config.plot_html_show_formats) + + total_lines.extend(result.split("\n")) + total_lines.extend("\n") + + if total_lines: + state_machine.insert_input(total_lines, source=source_file_name) + + # copy image files to builder's output directory + if not os.path.exists(dest_dir): + os.makedirs(dest_dir) + + for code_piece, images in results: + for img in images: + for fn in img.filenames(): + shutil.copyfile(fn, os.path.join(dest_dir, + os.path.basename(fn))) + + # copy script (if necessary) + if source_file_name == rst_file: + target_name = os.path.join(dest_dir, output_base + source_ext) + f = open(target_name, 'w') + f.write(unescape_doctest(code)) + f.close() + + return errors + + +#------------------------------------------------------------------------------ +# Run code and capture figures +#------------------------------------------------------------------------------ + +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import matplotlib.image as image +from matplotlib import _pylab_helpers + +import exceptions + +def contains_doctest(text): + try: + # check if it's valid Python as-is + compile(text, '', 'exec') + return False + except SyntaxError: + pass + r = re.compile(r'^\s*>>>', re.M) + m = r.search(text) + return bool(m) + +def unescape_doctest(text): + """ + Extract code from a piece of text, which contains either Python code + or doctests. + + """ + if not contains_doctest(text): + return text + + code = "" + for line in text.split("\n"): + m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line) + if m: + code += m.group(2) + "\n" + elif line.strip(): + code += "# " + line.strip() + "\n" + else: + code += "\n" + return code + +def split_code_at_show(text): + """ + Split code at plt.show() + + """ + + parts = [] + is_doctest = contains_doctest(text) + + part = [] + for line in text.split("\n"): + if (not is_doctest and line.strip() == 'plt.show()') or \ + (is_doctest and line.strip() == '>>> plt.show()'): + part.append(line) + parts.append("\n".join(part)) + part = [] + else: + part.append(line) + if "\n".join(part).strip(): + parts.append("\n".join(part)) + return parts + +class PlotError(RuntimeError): + pass + +def run_code(code, code_path, ns=None): + # Change the working directory to the directory of the example, so + # it can get at its data files, if any. 
+ pwd = os.getcwd() + old_sys_path = list(sys.path) + if code_path is not None: + dirname = os.path.abspath(os.path.dirname(code_path)) + os.chdir(dirname) + sys.path.insert(0, dirname) + + # Redirect stdout + stdout = sys.stdout + sys.stdout = StringIO() + + # Reset sys.argv + old_sys_argv = sys.argv + sys.argv = [code_path] + + try: + try: + code = unescape_doctest(code) + if ns is None: + ns = {} + if not ns: + exec(setup.config.plot_pre_code, ns) + exec(code, ns) + except (Exception, SystemExit) as err: + raise PlotError(traceback.format_exc()) + finally: + os.chdir(pwd) + sys.argv = old_sys_argv + sys.path[:] = old_sys_path + sys.stdout = stdout + return ns + + +#------------------------------------------------------------------------------ +# Generating figures +#------------------------------------------------------------------------------ + +def out_of_date(original, derived): + """ + Returns True if derivative is out-of-date wrt original, + both of which are full file paths. + """ + return (not os.path.exists(derived) + or os.stat(derived).st_mtime < os.stat(original).st_mtime) + + +def makefig(code, code_path, output_dir, output_base, config): + """ + Run a pyplot script *code* and save the images under *output_dir* + with file names derived from *output_base* + + """ + + # -- Parse format list + default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50} + formats = [] + for fmt in config.plot_formats: + if isinstance(fmt, str): + formats.append((fmt, default_dpi.get(fmt, 80))) + elif type(fmt) in (tuple, list) and len(fmt)==2: + formats.append((str(fmt[0]), int(fmt[1]))) + else: + raise PlotError('invalid image format "%r" in plot_formats' % fmt) + + # -- Try to determine if all images already exist + + code_pieces = split_code_at_show(code) + + # Look for single-figure output files first + all_exists = True + img = ImageFile(output_base, output_dir) + for format, dpi in formats: + if out_of_date(code_path, img.filename(format)): + all_exists = False + break + img.formats.append(format) + + if all_exists: + return [(code, [img])] + + # Then look for multi-figure output files + results = [] + all_exists = True + for i, code_piece in enumerate(code_pieces): + images = [] + for j in range(1000): + img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) + for format, dpi in formats: + if out_of_date(code_path, img.filename(format)): + all_exists = False + break + img.formats.append(format) + + # assume that if we have one, we have them all + if not all_exists: + all_exists = (j > 0) + break + images.append(img) + if not all_exists: + break + results.append((code_piece, images)) + + if all_exists: + return results + + # -- We didn't find the files, so build them + + results = [] + ns = {} + + for i, code_piece in enumerate(code_pieces): + # Clear between runs + plt.close('all') + + # Run code + run_code(code_piece, code_path, ns) + + # Collect images + images = [] + fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() + for j, figman in enumerate(fig_managers): + if len(fig_managers) == 1 and len(code_pieces) == 1: + img = ImageFile(output_base, output_dir) + else: + img = ImageFile("%s_%02d_%02d" % (output_base, i, j), + output_dir) + images.append(img) + for format, dpi in formats: + try: + figman.canvas.figure.savefig(img.filename(format), dpi=dpi) + except exceptions.BaseException as err: + raise PlotError(traceback.format_exc()) + img.formats.append(format) + + # Results + results.append((code_piece, images)) + + return results + + 
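To make the two-level numbering above concrete, here is a small usage
sketch (hypothetical input; ``split_code_at_show`` is the helper defined
earlier in this module). Each ``plt.show()`` ends one code piece, and
``makefig`` emits one image list per piece::

    code = "plt.plot([1, 2])\nplt.show()\nplt.plot([3, 4])"
    pieces = split_code_at_show(code)
    # two pieces: the first ends at the plt.show() line,
    # the second holds the remaining plotting code
    assert len(pieces) == 2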
+#------------------------------------------------------------------------------ +# Relative pathnames +#------------------------------------------------------------------------------ + +try: + from os.path import relpath +except ImportError: + # Copied from Python 2.7 + if 'posix' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir + + if not path: + raise ValueError("no path specified") + + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) + + # Work out how much of the filepath is shared by start and path. + i = len(commonprefix([start_list, path_list])) + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + elif 'nt' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir, splitunc + + if not path: + raise ValueError("no path specified") + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) + if start_list[0].lower() != path_list[0].lower(): + unc_path, rest = splitunc(path) + unc_start, rest = splitunc(start) + if bool(unc_path) ^ bool(unc_start): + raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" + % (path, start)) + else: + raise ValueError("path is on drive %s, start on drive %s" + % (path_list[0], start_list[0])) + # Work out how much of the filepath is shared by start and path. + for i in range(min(len(start_list), len(path_list))): + if start_list[i].lower() != path_list[i].lower(): + break + else: + i += 1 + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + else: + raise RuntimeError("Unsupported platform (no relpath available!)") diff --git a/doc/sphinxext/numpydoc/traitsdoc.py b/doc/sphinxext/numpydoc/traitsdoc.py new file mode 100644 index 0000000000..596c54eb38 --- /dev/null +++ b/doc/sphinxext/numpydoc/traitsdoc.py @@ -0,0 +1,142 @@ +""" +========= +traitsdoc +========= + +Sphinx extension that handles docstrings in the Numpy standard format, [1] +and support Traits [2]. + +This extension can be used as a replacement for ``numpydoc`` when support +for Traits is required. + +.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard +.. [2] http://code.enthought.com/projects/traits/ + +""" +from __future__ import division, absolute_import, print_function + +import inspect +import os +import pydoc +import collections + +from . import docscrape +from . import docscrape_sphinx +from .docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString + +from . import numpydoc + +from . import comment_eater + +class SphinxTraitsDoc(SphinxClassDoc): + def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): + if not inspect.isclass(cls): + raise ValueError("Initialise using a class. Got %r" % cls) + self._cls = cls + + if modulename and not modulename.endswith('.'): + modulename += '.' 
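# keep a trailing dot so the module prefix joins cleanly with member names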
+ self._mod = modulename + self._name = cls.__name__ + self._func_doc = func_doc + + docstring = pydoc.getdoc(cls) + docstring = docstring.split('\n') + + # De-indent paragraph + try: + indent = min(len(s) - len(s.lstrip()) for s in docstring + if s.strip()) + except ValueError: + indent = 0 + + for n,line in enumerate(docstring): + docstring[n] = docstring[n][indent:] + + self._doc = docscrape.Reader(docstring) + self._parsed_data = { + 'Signature': '', + 'Summary': '', + 'Description': [], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Traits': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'References': '', + 'Example': '', + 'Examples': '', + 'index': {} + } + + self._parse() + + def _str_summary(self): + return self['Summary'] + [''] + + def _str_extended_summary(self): + return self['Description'] + self['Extended Summary'] + [''] + + def __str__(self, indent=0, func_role="func"): + out = [] + out += self._str_signature() + out += self._str_index() + [''] + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Traits', 'Methods', + 'Returns','Raises'): + out += self._str_param_list(param_list) + out += self._str_see_also("obj") + out += self._str_section('Notes') + out += self._str_references() + out += self._str_section('Example') + out += self._str_section('Examples') + out = self._str_indent(out,indent) + return '\n'.join(out) + +def looks_like_issubclass(obj, classname): + """ Return True if the object has a class or superclass with the given class + name. + + Ignores old-style classes. + """ + t = obj + if t.__name__ == classname: + return True + for klass in t.__mro__: + if klass.__name__ == classname: + return True + return False + +def get_doc_object(obj, what=None, config=None): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif isinstance(obj, collections.Callable): + what = 'function' + else: + what = 'object' + if what == 'class': + doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) + if looks_like_issubclass(obj, 'HasTraits'): + for name, trait, comment in comment_eater.get_class_traits(obj): + # Exclude private traits. 
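# (any trait whose name starts with an underscore is skipped)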
+ if not name.startswith('_'): + doc['Traits'].append((name, trait, comment.splitlines())) + return doc + elif what in ('function', 'method'): + return SphinxFunctionDoc(obj, '', config=config) + else: + return SphinxDocString(pydoc.getdoc(obj), config=config) + +def setup(app): + # init numpydoc + numpydoc.setup(app, get_doc_object) + diff --git a/doc/sphinxext/sphinx_gallery/__init__.py b/doc/sphinxext/sphinx_gallery/__init__.py index 247d21aebe..e113f97d2a 100644 --- a/doc/sphinxext/sphinx_gallery/__init__.py +++ b/doc/sphinxext/sphinx_gallery/__init__.py @@ -1,11 +1,10 @@ """ -============== Sphinx Gallery ============== """ import os -__version__ = '0.0.11' +__version__ = '0.1.11' def glr_path_static(): diff --git a/doc/sphinxext/sphinx_gallery/_static/broken_example.png b/doc/sphinxext/sphinx_gallery/_static/broken_example.png new file mode 100644 index 0000000000..4fea24e7df Binary files /dev/null and b/doc/sphinxext/sphinx_gallery/_static/broken_example.png differ diff --git a/doc/sphinxext/sphinx_gallery/_static/gallery.css b/doc/sphinxext/sphinx_gallery/_static/gallery.css index 623003ee25..37047a9b91 100644 --- a/doc/sphinxext/sphinx_gallery/_static/gallery.css +++ b/doc/sphinxext/sphinx_gallery/_static/gallery.css @@ -1,106 +1,192 @@ -div.sphx-glr-thumbContainer { +/* +Sphinx-Gallery has compatible CSS to fix default sphinx themes +Tested for Sphinx 1.3.1 for all themes: default, alabaster, sphinxdoc, +scrolls, agogo, traditional, nature, haiku, pyramid +Tested for Read the Docs theme 0.1.7 */ +.sphx-glr-thumbcontainer { + background: #fff; + border: solid #fff 1px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + border-radius: 5px; box-shadow: none; - background: #FFF; + float: left; margin: 5px; - padding-top: 5px; min-height: 230px; - border: solid white 1px; + padding-top: 5px; + position: relative; +} +.sphx-glr-thumbcontainer:hover { + border: solid #b4ddfc 1px; + box-shadow: 0 0 15px rgba(142, 176, 202, 0.5); +} +.sphx-glr-thumbcontainer a.internal { + bottom: 0; + display: block; + left: 0; + padding: 150px 10px 0; + position: absolute; + right: 0; + top: 0; +} +/* Next one is to avoid Sphinx traditional theme to cover all the +thumbnail with its default link Background color */ +.sphx-glr-thumbcontainer a.internal:hover { + background-color: transparent; +} + +.sphx-glr-thumbcontainer p { + margin: 0 0 .1em 0; +} +.sphx-glr-thumbcontainer .figure { + margin: 10px; + width: 160px; +} +.sphx-glr-thumbcontainer img { + display: inline; + max-height: 160px; + width: 160px; +} +.sphx-glr-thumbcontainer[tooltip]:hover:after { + background: rgba(0, 0, 0, 0.8); -webkit-border-radius: 5px; -moz-border-radius: 5px; border-radius: 5px; - float: left; - position: relative; } - div.sphx-glr-thumbContainer:hover { - box-shadow: 0 0 15px rgba(142, 176, 202, 0.5); - border: solid #B4DDFC 1px; } - div.sphx-glr-thumbContainer a.internal { - display: block; - position: absolute; - padding: 150px 10px 0px 10px; - top: 0px; - right: 0px; - bottom: 0px; - left: 0px; } - div.sphx-glr-thumbContainer p { - margin: 0 0 .1em 0; } - div.sphx-glr-thumbContainer .figure { - margin: 10px; - width: 160px; } - div.sphx-glr-thumbContainer img { - max-width: 100%; - max-height: 160px; - display: inline; } - div.sphx-glr-thumbContainer[tooltip]:hover:after { - background: rgba(0, 0, 0, 0.8); - -webkit-border-radius: 5px; - -moz-border-radius: 5px; - border-radius: 5px; - color: white; - content: attr(tooltip); - left: 95%; - padding: 5px 15px; - position: absolute; - z-index: 98; - width: 
220px; - bottom: 52%; } - div.sphx-glr-thumbContainer[tooltip]:hover:before { - content: ""; - position: absolute; - z-index: 99; - border: solid; - border-color: #333 transparent; - border-width: 18px 0px 0px 20px; - left: 85%; - bottom: 58%; } - -div.sphx-glr-script-out div.highlight { - background-color: transparent; + color: #fff; + content: attr(tooltip); + left: 95%; + padding: 5px 15px; + position: absolute; + z-index: 98; + width: 220px; + bottom: 52%; +} +.sphx-glr-thumbcontainer[tooltip]:hover:before { + border: solid; + border-color: #333 transparent; + border-width: 18px 0 0 20px; + bottom: 58%; + content: ''; + left: 85%; + position: absolute; + z-index: 99; } -p.sphx-glr-script-out { - margin: -.9ex 0ex; - color: #888; +.highlight-pytb pre { + background-color: #ffe4e4; + border: 1px solid #f66; + margin-top: 10px; + padding: 7px; } -.sphx-glr-script-out pre { - overflow: auto; - word-break: break-word; +.sphx-glr-script-out { + color: #888; + margin: 0; +} +.sphx-glr-script-out .highlight { + background-color: transparent; + margin-left: 2.5em; + margin-top: -1.4em; +} +.sphx-glr-script-out .highlight pre { + background-color: #fafae2; + border: 0; max-height: 30em; - background-color: #FAFAE2; - border: none; - margin-left: 1ex; - margin-top: 0px; + overflow: auto; padding-left: 1ex; + margin: 0px; + word-break: break-word; } - -p.sphx-glr-horizontal { - margin-bottom: 0px; +.sphx-glr-script-out + p { + margin-top: 1.8em; +} +blockquote.sphx-glr-script-out { + margin-left: 0pt; } -/* Paragraph following an output are a bit more indented */ -blockquote.sphx-glr-script-out+p { - margin-top: 1.8em; +div.sphx-glr-footer { + text-align: center; } div.sphx-glr-download { - padding: 1ex; - margin: 1em auto 1ex auto; - border-radius: 4px; - max-width: 45ex; - background-color: #ffc; - border: 1px solid #C2C22D; + display: inline-block; + margin: 1em auto 1ex 2ex; + vertical-align: middle; } div.sphx-glr-download a { - color: #4B4600; + background-color: #ffc; + background-image: linear-gradient(to bottom, #FFC, #d5d57e); + border-radius: 4px; + border: 1px solid #c2c22d; + color: #000; + display: inline-block; + /* Not valid in old browser, hence we keep the line above to override */ + display: table-caption; + font-weight: bold; + padding: 1ex; + text-align: center; +} + +/* The last child of a download button is the file name */ +div.sphx-glr-download a span:last-child { + font-size: smaller; +} + +@media (min-width: 20em) { + div.sphx-glr-download a { + min-width: 10em; + } +} + +@media (min-width: 30em) { + div.sphx-glr-download a { + min-width: 13em; + } +} + +@media (min-width: 40em) { + div.sphx-glr-download a { + min-width: 16em; + } +} + + +div.sphx-glr-download code.download { + display: inline-block; + white-space: normal; + word-break: normal; + overflow-wrap: break-word; + /* border and background are given by the enclosing 'a' */ + border: none; + background: none; +} + +div.sphx-glr-download a:hover { + box-shadow: inset 0 1px 0 rgba(255,255,255,.1), 0 1px 5px rgba(0,0,0,.25); + text-decoration: none; + background-image: none; + background-color: #d5d57e; } ul.sphx-glr-horizontal { - padding: 0px; - list-style: none; } - ul.sphx-glr-horizontal li { - display: inline; } - ul.sphx-glr-horizontal img { - height: auto !important; } + list-style: none; + padding: 0; +} +ul.sphx-glr-horizontal li { + display: inline; +} +ul.sphx-glr-horizontal img { + height: auto !important; +} -/*# sourceMappingURL=gallery.css.map */ +p.sphx-glr-signature a.reference.external { + 
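/* render the gallery signature link as a small right-aligned rounded badge */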
-moz-border-radius: 5px; + -webkit-border-radius: 5px; + border-radius: 5px; + padding: 3px; + font-size: 75%; + text-align: right; + margin-left: auto; + display: table; +} diff --git a/doc/sphinxext/sphinx_gallery/backreferences.py b/doc/sphinxext/sphinx_gallery/backreferences.py index 4df5d3df61..32e4dd913f 100644 --- a/doc/sphinxext/sphinx_gallery/backreferences.py +++ b/doc/sphinxext/sphinx_gallery/backreferences.py @@ -2,11 +2,10 @@ # Author: Óscar Nájera # License: 3-clause BSD """ -======================== Backreferences Generator ======================== -Reviews generated example files in order to keep track of used modules +Parses example file code in order to keep track of used functions """ from __future__ import print_function @@ -75,7 +74,7 @@ def get_short_module_name(module_name, obj_name): short_name = '.'.join(parts[:i]) try: exec('from %s import %s' % (short_name, obj_name)) - except ImportError: + except Exception: # libraries can throw all sorts of exceptions... # get the last working module name short_name = '.'.join(parts[:(i + 1)]) break @@ -97,13 +96,22 @@ def identify_names(code): e.HelloWorld HelloWorld d d """ finder = NameFinder() - finder.visit(ast.parse(code)) + try: + finder.visit(ast.parse(code)) + except SyntaxError: + return {} example_code_obj = {} for name, full_name in finder.get_mapping(): # name is as written in file (e.g. np.asarray) # full_name includes resolved import path (e.g. numpy.asarray) - module, attribute = full_name.rsplit('.', 1) + splitted = full_name.rsplit('.', 1) + if len(splitted) == 1: + # module without attribute. This is not useful for + # backreferences + continue + + module, attribute = splitted # get shortened module name module_short = get_short_module_name(module, attribute) cobj = {'name': attribute, 'module': module, @@ -130,7 +138,7 @@ def scan_used_functions(example_file, gallery_conf): THUMBNAIL_TEMPLATE = """ .. raw:: html -
    <div class="sphx-glr-thumbContainer" tooltip="{snippet}">
+    <div class="sphx-glr-thumbcontainer" tooltip="{snippet}">
.. only:: html @@ -154,6 +162,10 @@ def _thumbnail_div(full_dir, fname, snippet, is_backref=False): """Generates RST to place a thumbnail in a gallery""" thumb = os.path.join(full_dir, 'images', 'thumb', 'sphx_glr_%s_thumb.png' % fname[:-3]) + + # Inside rst files forward slash defines paths + thumb = thumb.replace(os.sep, "/") + ref_name = os.path.join(full_dir, fname).replace(os.path.sep, '_') template = BACKREF_THUMBNAIL_TEMPLATE if is_backref else THUMBNAIL_TEMPLATE @@ -164,10 +176,15 @@ def write_backreferences(seen_backrefs, gallery_conf, target_dir, fname, snippet): """Writes down back reference files, which include a thumbnail list of examples using a certain module""" + if gallery_conf['backreferences_dir'] is None: + return + example_file = os.path.join(target_dir, fname) + build_target_dir = os.path.relpath(target_dir, gallery_conf['src_dir']) backrefs = scan_used_functions(example_file, gallery_conf) for backref in backrefs: - include_path = os.path.join(gallery_conf['mod_example_dir'], + include_path = os.path.join(gallery_conf['src_dir'], + gallery_conf['backreferences_dir'], '%s.examples' % backref) seen = backref in seen_backrefs with open(include_path, 'a' if seen else 'w') as ex_file: @@ -175,6 +192,6 @@ def write_backreferences(seen_backrefs, gallery_conf, heading = '\n\nExamples using ``%s``' % backref ex_file.write(heading + '\n') ex_file.write('^' * len(heading) + '\n') - ex_file.write(_thumbnail_div(target_dir, fname, snippet, + ex_file.write(_thumbnail_div(build_target_dir, fname, snippet, is_backref=True)) seen_backrefs.add(backref) diff --git a/doc/sphinxext/sphinx_gallery/docs_resolv.py b/doc/sphinxext/sphinx_gallery/docs_resolv.py index fb596fdb1f..762298cbe2 100644 --- a/doc/sphinxext/sphinx_gallery/docs_resolv.py +++ b/doc/sphinxext/sphinx_gallery/docs_resolv.py @@ -1,8 +1,10 @@ # -*- coding: utf-8 -*- # Author: Óscar Nájera # License: 3-clause BSD -############################################################################### -# Documentation link resolver objects +""" +Link resolver objects +===================== +""" from __future__ import print_function import gzip import os @@ -10,21 +12,25 @@ import re import shelve import sys +from distutils.version import LooseVersion + +import sphinx +from sphinx.util.console import fuchsia # Try Python 2 first, otherwise load from Python 3 try: - from StringIO import StringIO import cPickle as pickle import urllib2 as urllib from urllib2 import HTTPError, URLError except ImportError: - from io import StringIO import pickle import urllib.request import urllib.error import urllib.parse from urllib.error import HTTPError, URLError +from io import StringIO + def _get_data(url): """Helper function to get data over http or from a local file""" @@ -232,22 +238,34 @@ def _get_link(self, cobj): fname_idx = value[cobj['name']][0] if fname_idx is not None: - fname = self._searchindex['filenames'][fname_idx] + '.html' - - if self._is_windows: - fname = fname.replace('/', '\\') - link = os.path.join(self.doc_url, fname) - else: - link = posixpath.join(self.doc_url, fname) - - if hasattr(link, 'decode'): - link = link.decode('utf-8', 'replace') - - if link in self._page_cache: - html = self._page_cache[link] + fname = self._searchindex['filenames'][fname_idx] + # In 1.5+ Sphinx seems to have changed from .rst.html to only + # .html extension in converted files. 
But URLs could be + # built with < 1.5 or >= 1.5 regardless of what we're currently + # building with, so let's just check both :( + fnames = [fname + '.html', os.path.splitext(fname)[0] + '.html'] + for fname in fnames: + try: + if self._is_windows: + fname = fname.replace('/', '\\') + link = os.path.join(self.doc_url, fname) + else: + link = posixpath.join(self.doc_url, fname) + + if hasattr(link, 'decode'): + link = link.decode('utf-8', 'replace') + + if link in self._page_cache: + html = self._page_cache[link] + else: + html = get_data(link, self.gallery_dir) + self._page_cache[link] = html + except (HTTPError, URLError, IOError): + pass + else: + break else: - html = get_data(link, self.gallery_dir) - self._page_cache[link] = html + raise # test if cobj appears in page comb_names = [cobj['module_short'] + '.' + cobj['name']] @@ -279,7 +297,7 @@ def resolve(self, cobj, this_url): cobj : dict Dict with information about the "code object" for which we are resolving a link. - cobi['name'] : function or class name (str) + cobj['name'] : function or class name (str) cobj['module_short'] : shortened module name (str) cobj['module'] : module name (str) this_url: str @@ -319,16 +337,17 @@ def _embed_code_links(app, gallery_conf, gallery_dir): # Add resolvers for the packages for which we want to show links doc_resolvers = {} + src_gallery_dir = os.path.join(app.builder.srcdir, gallery_dir) for this_module, url in gallery_conf['reference_url'].items(): try: if url is None: doc_resolvers[this_module] = SphinxDocLinkResolver( app.builder.outdir, - gallery_dir, + src_gallery_dir, relative=True) else: doc_resolvers[this_module] = SphinxDocLinkResolver(url, - gallery_dir) + src_gallery_dir) except HTTPError as e: print("The following HTTP Error has occurred:\n") @@ -345,64 +364,82 @@ def _embed_code_links(app, gallery_conf, gallery_dir): gallery_dir)) # patterns for replacement - link_pattern = '%s' + link_pattern = ('%s') orig_pattern = '%s' period = '.' - for dirpath, _, filenames in os.walk(html_gallery_dir): - for fname in filenames: - print('\tprocessing: %s' % fname) - full_fname = os.path.join(html_gallery_dir, dirpath, fname) - subpath = dirpath[len(html_gallery_dir) + 1:] - pickle_fname = os.path.join(gallery_dir, subpath, - fname[:-5] + '_codeobj.pickle') - - if os.path.exists(pickle_fname): - # we have a pickle file with the objects to embed links for - with open(pickle_fname, 'rb') as fid: - example_code_obj = pickle.load(fid) - fid.close() - str_repl = {} - # generate replacement strings with the links - for name, cobj in example_code_obj.items(): - this_module = cobj['module'].split('.')[0] - - if this_module not in doc_resolvers: - continue - - try: - link = doc_resolvers[this_module].resolve(cobj, - full_fname) - except (HTTPError, URLError) as e: - print("The following error has occurred:\n") - print(repr(e)) - continue - - if link is not None: - parts = name.split('.') - name_html = period.join(orig_pattern % part - for part in parts) - str_repl[name_html] = link_pattern % (link, name_html) - # do the replacement in the html file - - # ensure greediness - names = sorted(str_repl, key=len, reverse=True) - expr = re.compile(r'(? 
0: - with open(full_fname, 'rb') as fid: - lines_in = fid.readlines() - with open(full_fname, 'wb') as fid: - for line in lines_in: - line = line.decode('utf-8') - line = expr.sub(substitute_link, line) - fid.write(line.encode('utf-8')) - print('[done]') + # This could be turned into a generator if necessary, but should be okay + flat = [[dirpath, filename] + for dirpath, _, filenames in os.walk(html_gallery_dir) + for filename in filenames] + if LooseVersion(sphinx.__version__) >= LooseVersion('1.6'): + # It will be removed once upgraded to new sphinx-gallery version + from sphinx.util import status_iterator + iterator = status_iterator( + flat, os.path.basename(html_gallery_dir), color='fuchsia', + length=len(flat), stringify_func=lambda x: os.path.basename(x[1])) + else: + iterator = app.status_iterator( + flat, os.path.basename(html_gallery_dir), colorfunc=fuchsia, + length=len(flat), stringify_func=lambda x: os.path.basename(x[1])) + + for dirpath, fname in iterator: + full_fname = os.path.join(html_gallery_dir, dirpath, fname) + subpath = dirpath[len(html_gallery_dir) + 1:] + pickle_fname = os.path.join(src_gallery_dir, subpath, + fname[:-5] + '_codeobj.pickle') + + if os.path.exists(pickle_fname): + # we have a pickle file with the objects to embed links for + with open(pickle_fname, 'rb') as fid: + example_code_obj = pickle.load(fid) + fid.close() + str_repl = {} + # generate replacement strings with the links + for name, cobj in example_code_obj.items(): + this_module = cobj['module'].split('.')[0] + + if this_module not in doc_resolvers: + continue + + try: + link = doc_resolvers[this_module].resolve(cobj, + full_fname) + except (HTTPError, URLError) as e: + if isinstance(e, HTTPError): + extra = e.code + else: + extra = e.reason + print("\n\t\tError resolving %s.%s: %r (%s)" + % (cobj['module'], cobj['name'], e, extra)) + continue + + if link is not None: + parts = name.split('.') + name_html = period.join(orig_pattern % part + for part in parts) + full_function_name = '%s.%s' % ( + cobj['module'], cobj['name']) + str_repl[name_html] = link_pattern % ( + link, full_function_name, name_html) + # do the replacement in the html file + + # ensure greediness + names = sorted(str_repl, key=len, reverse=True) + regex_str = '|'.join(re.escape(name) for name in names) + regex = re.compile(regex_str) + + def substitute_link(match): + return str_repl[match.group()] + + if len(str_repl) > 0: + with open(full_fname, 'rb') as fid: + lines_in = fid.readlines() + with open(full_fname, 'wb') as fid: + for line in lines_in: + line = line.decode('utf-8') + line = regex.sub(substitute_link, line) + fid.write(line.encode('utf-8')) def embed_code_links(app, exception): diff --git a/doc/sphinxext/sphinx_gallery/downloads.py b/doc/sphinxext/sphinx_gallery/downloads.py new file mode 100644 index 0000000000..6b5b3df17f --- /dev/null +++ b/doc/sphinxext/sphinx_gallery/downloads.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +r""" +Utilities for downloadable items +================================ + +""" +# Author: Óscar Nájera +# License: 3-clause BSD + +from __future__ import absolute_import, division, print_function + +import os +import zipfile + +CODE_DOWNLOAD = """ +\n.. container:: sphx-glr-footer + +\n .. container:: sphx-glr-download + + :download:`Download Python source code: {0} <{0}>`\n + +\n .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: {1} <{1}>`\n""" + +CODE_ZIP_DOWNLOAD = """ +\n.. container:: sphx-glr-footer + +\n .. 
container:: sphx-glr-download + + :download:`Download all examples in Python source code: {0} `\n + +\n .. container:: sphx-glr-download + + :download:`Download all examples in Jupyter notebooks: {2} `\n""" + + +def python_zip(file_list, gallery_path, extension='.py'): + """Stores all files in file_list into an zip file + + Parameters + ---------- + file_list : list of strings + Holds all the file names to be included in zip file + gallery_path : string + path to where the zipfile is stored + extension : str + '.py' or '.ipynb' In order to deal with downloads of python + sources and jupyter notebooks the file extension from files in + file_list will be removed and replace with the value of this + variable while generating the zip file + Returns + ------- + zipname : string + zip file name, written as `target_dir_{python,jupyter}.zip` + depending on the extension + """ + zipname = os.path.basename(gallery_path) + zipname += '_python' if extension == '.py' else '_jupyter' + zipname = os.path.join(gallery_path, zipname + '.zip') + + zipf = zipfile.ZipFile(zipname, mode='w') + for fname in file_list: + file_src = os.path.splitext(fname)[0] + extension + zipf.write(file_src, os.path.relpath(file_src, gallery_path)) + zipf.close() + + return zipname + + +def list_downloadable_sources(target_dir): + """Returns a list of python source files is target_dir + + Parameters + ---------- + target_dir : string + path to the directory where python source file are + Returns + ------- + list + list of paths to all Python source files in `target_dir` + """ + return [os.path.join(target_dir, fname) + for fname in os.listdir(target_dir) + if fname.endswith('.py')] + + +def generate_zipfiles(gallery_dir): + """ + Collects all Python source files and Jupyter notebooks in + gallery_dir and makes zipfiles of them + + Parameters + ---------- + gallery_dir : string + path of the gallery to collect downloadable sources + + Return + ------ + download_rst: string + RestructuredText to include download buttons to the generated files + """ + + listdir = list_downloadable_sources(gallery_dir) + for directory in sorted(os.listdir(gallery_dir)): + if os.path.isdir(os.path.join(gallery_dir, directory)): + target_dir = os.path.join(gallery_dir, directory) + listdir.extend(list_downloadable_sources(target_dir)) + + py_zipfile = python_zip(listdir, gallery_dir) + jy_zipfile = python_zip(listdir, gallery_dir, ".ipynb") + + def rst_path(filepath): + return filepath.replace(os.sep, '/') + + dw_rst = CODE_ZIP_DOWNLOAD.format(os.path.basename(py_zipfile), + rst_path(py_zipfile), + os.path.basename(jy_zipfile), + rst_path(jy_zipfile)) + return dw_rst diff --git a/doc/sphinxext/sphinx_gallery/gen_gallery.py b/doc/sphinxext/sphinx_gallery/gen_gallery.py index 8e58b62362..3cfb028576 100644 --- a/doc/sphinxext/sphinx_gallery/gen_gallery.py +++ b/doc/sphinxext/sphinx_gallery/gen_gallery.py @@ -2,7 +2,6 @@ # Author: Óscar Nájera # License: 3-clause BSD """ -======================== Sphinx-Gallery Generator ======================== @@ -12,13 +11,39 @@ from __future__ import division, print_function, absolute_import +import copy +import re import os + from . 
import glr_path_static -from .gen_rst import generate_dir_rst +from .gen_rst import generate_dir_rst, SPHX_GLR_SIG from .docs_resolv import embed_code_links +from .downloads import generate_zipfiles + +try: + FileNotFoundError +except NameError: + # Python2 + FileNotFoundError = IOError + +DEFAULT_GALLERY_CONF = { + 'filename_pattern': re.escape(os.sep) + 'plot', + 'examples_dirs': os.path.join('..', 'examples'), + 'gallery_dirs': 'auto_examples', + 'backreferences_dir': None, + 'doc_module': (), + 'reference_url': {}, + # build options + 'plot_gallery': True, + 'download_all_examples': True, + 'abort_on_example_error': False, + 'failing_examples': {}, + 'expected_failing_examples': set(), +} def clean_gallery_out(build_dir): + """Deletes images under the sphx_glr namespace in the build directory""" # Sphinx hack: sphinx copies generated images to the build directory # each time the docs are made. If the desired image name already # exists, it appends a digit to prevent overwrites. The problem is, @@ -29,10 +54,11 @@ def clean_gallery_out(build_dir): # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html # # The following is a hack that prevents this behavior by clearing the - # image build directory each time the docs are built. If sphinx - # changes their layout between versions, this will not work (though - # it should probably not cause a crash). Tested successfully - # on Sphinx 1.0.7 + # image build directory from gallery images each time the docs are built. + # If sphinx changes their layout between versions, this will not + # work (though it should probably not cause a crash). + # Tested successfully on Sphinx 1.0.7 + build_image_dir = os.path.join(build_dir, '_images') if os.path.exists(build_image_dir): filelist = os.listdir(build_image_dir) @@ -41,28 +67,66 @@ def clean_gallery_out(build_dir): os.remove(os.path.join(build_image_dir, filename)) -def generate_gallery_rst(app): - """Generate the Main examples gallery reStructuredText - - Start the sphinx-gallery configuration and recursively scan the examples - directories in order to populate the examples gallery - """ +def parse_config(app): + """Process the Sphinx Gallery configuration""" + # TODO: Test this behavior. try: plot_gallery = eval(app.builder.config.plot_gallery) except TypeError: plot_gallery = bool(app.builder.config.plot_gallery) + gallery_conf = copy.deepcopy(DEFAULT_GALLERY_CONF) gallery_conf.update(app.config.sphinx_gallery_conf) + gallery_conf.update(plot_gallery=plot_gallery) + gallery_conf.update( + abort_on_example_error=app.builder.config.abort_on_example_error) + gallery_conf['src_dir'] = app.builder.srcdir + + backreferences_warning = """\n======== +Sphinx-Gallery now requires you to set the configuration variable +'backreferences_dir' in your config to activate the +backreferences. That is mini galleries clustered by the functions used +in the example scripts. Have a look at it in sphinx-gallery + +https://sphinx-gallery.readthedocs.io/en/stable/index.html#examples-using-numpy-linspace +""" + + if gallery_conf.get("mod_example_dir", False): + update_msg = """\nFor a quick fix try replacing 'mod_example_dir' +by 'backreferences_dir' in your conf.py file. 
If that does not solve the +present issue read carefully how to update in the online documentation + +https://sphinx-gallery.readthedocs.io/en/latest/advanced_configuration.html#references-to-examples""" + + gallery_conf['backreferences_dir'] = gallery_conf['mod_example_dir'] + app.warn("Old configuration for backreferences detected \n" + "using the configuration variable `mod_example_dir`\n" + + backreferences_warning + + update_msg, prefix="DeprecationWarning: ") + + elif gallery_conf['backreferences_dir'] is None: + no_care_msg = """ +If you don't care about this features set in your conf.py +'backreferences_dir': False\n""" + + app.warn(backreferences_warning + no_care_msg) + + gallery_conf['backreferences_dir'] = os.path.join( + 'modules', 'generated') + app.warn("using old default 'backreferences_dir':'{}'.\n" + " This will be disabled in future releases\n".format( + gallery_conf['backreferences_dir']), + prefix="DeprecationWarning: ") # this assures I can call the config in other places app.config.sphinx_gallery_conf = gallery_conf app.config.html_static_path.append(glr_path_static()) - if not plot_gallery: - return + return gallery_conf - clean_gallery_out(app.builder.outdir) +def _prepare_sphx_glr_dirs(gallery_conf, srcdir): + """Creates necessary folders for sphinx_gallery files """ examples_dirs = gallery_conf['examples_dirs'] gallery_dirs = gallery_conf['gallery_dirs'] @@ -71,56 +135,171 @@ def generate_gallery_rst(app): if not isinstance(gallery_dirs, list): gallery_dirs = [gallery_dirs] - mod_examples_dir = os.path.relpath(gallery_conf['mod_example_dir'], - app.builder.srcdir) + if bool(gallery_conf['backreferences_dir']): + backreferences_dir = os.path.join( + srcdir, gallery_conf['backreferences_dir']) + if not os.path.exists(backreferences_dir): + os.makedirs(backreferences_dir) + + return examples_dirs, gallery_dirs + + +def generate_gallery_rst(app): + """Generate the Main examples gallery reStructuredText + + Start the sphinx-gallery configuration and recursively scan the examples + directories in order to populate the examples gallery + """ + print('Generating gallery') + gallery_conf = parse_config(app) + + clean_gallery_out(app.builder.outdir) + seen_backrefs = set() + computation_times = [] + examples_dirs, gallery_dirs = _prepare_sphx_glr_dirs(gallery_conf, + app.builder.srcdir) + for examples_dir, gallery_dir in zip(examples_dirs, gallery_dirs): - examples_dir = os.path.relpath(examples_dir, - app.builder.srcdir) - gallery_dir = os.path.relpath(gallery_dir, - app.builder.srcdir) + examples_dir = os.path.join(app.builder.srcdir, examples_dir) + gallery_dir = os.path.join(app.builder.srcdir, gallery_dir) - for workdir in [examples_dir, gallery_dir, mod_examples_dir]: + for workdir in [examples_dir, gallery_dir]: if not os.path.exists(workdir): os.makedirs(workdir) + # Here we don't use an os.walk, but we recurse only twice: flat is + # better than nested. + this_fhindex, this_computation_times = generate_dir_rst( + examples_dir, gallery_dir, gallery_conf, seen_backrefs) + if this_fhindex == "": + raise FileNotFoundError("Main example directory {0} does not " + "have a README.txt file. Please write " + "one to introduce your gallery." + .format(examples_dir)) + + computation_times += this_computation_times # we create an index.rst with all examples fhindex = open(os.path.join(gallery_dir, 'index.rst'), 'w') - # Here we don't use an os.walk, but we recurse only twice: flat is - # better than nested. 
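For orientation, the two-level recursion described in the comment above amounts to the following standalone sketch. It is illustrative only, not the sphinx-gallery API: scan_gallery and the (time, fname) bookkeeping are made-up names, and the real build executes each script instead of recording None.

    # Two-level scan: the root examples directory first, then each
    # immediate subdirectory; deeper nesting is deliberately ignored
    # ("flat is better than nested").
    import os

    def scan_gallery(examples_dir):
        # Mirrors the README.txt requirement enforced above.
        if not os.path.isfile(os.path.join(examples_dir, 'README.txt')):
            raise IOError('%s needs a README.txt to introduce the gallery'
                          % examples_dir)
        computation_times = []  # (seconds or None, script name) pairs
        dirs = [examples_dir] + sorted(
            os.path.join(examples_dir, d)
            for d in os.listdir(examples_dir)
            if os.path.isdir(os.path.join(examples_dir, d)))
        for directory in dirs:
            for fname in sorted(os.listdir(directory)):
                if fname.endswith('.py'):
                    # A real build executes the script here and records
                    # its run time; None marks scripts that were not run.
                    computation_times.append((None, fname))
        return computation_times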
-        fhindex.write(generate_dir_rst(examples_dir, gallery_dir, gallery_conf,
-                                       seen_backrefs))
+        # :orphan: to suppress "not included in TOCTREE" sphinx warnings
+        fhindex.write(":orphan:\n\n" + this_fhindex)
         for directory in sorted(os.listdir(examples_dir)):
             if os.path.isdir(os.path.join(examples_dir, directory)):
                 src_dir = os.path.join(examples_dir, directory)
                 target_dir = os.path.join(gallery_dir, directory)
-                fhindex.write(generate_dir_rst(src_dir, target_dir,
-                                               gallery_conf,
-                                               seen_backrefs))
+                this_fhindex, this_computation_times = generate_dir_rst(src_dir, target_dir, gallery_conf,
+                                                                        seen_backrefs)
+                fhindex.write(this_fhindex)
+                computation_times += this_computation_times
+
+        if gallery_conf['download_all_examples']:
+            download_fhindex = generate_zipfiles(gallery_dir)
+            fhindex.write(download_fhindex)
+
+        fhindex.write(SPHX_GLR_SIG)
         fhindex.flush()

+    if gallery_conf['plot_gallery']:
+        print("Computation time summary:")
+        for time_elapsed, fname in sorted(computation_times)[::-1]:
+            if time_elapsed is not None:
+                print("\t- %s : %.2g sec" % (fname, time_elapsed))
+            else:
+                print("\t- %s : not run" % fname)

-gallery_conf = {
-    'examples_dirs': '../examples',
-    'gallery_dirs': 'auto_examples',
-    'mod_example_dir': 'modules/generated',
-    'doc_module': (),
-    'reference_url': {},
-}
+
+def touch_empty_backreferences(app, what, name, obj, options, lines):
+    """Generate empty back-reference example files
+
+    This avoids inclusion errors/warnings if there are no gallery
+    examples for a class / module that is being parsed by autodoc"""
+
+    if not bool(app.config.sphinx_gallery_conf['backreferences_dir']):
+        return
+
+    examples_path = os.path.join(app.srcdir,
+                                 app.config.sphinx_gallery_conf[
+                                     "backreferences_dir"],
+                                 "%s.examples" % name)
+
+    if not os.path.exists(examples_path):
+        # touch file
+        open(examples_path, 'w').close()
+
+
+def sumarize_failing_examples(app, exception):
+    """Collects the list of failing examples during the build and prints them with their tracebacks
+
+    Raises ValueError if there were failing examples
+    """
+    if exception is not None:
+        return
+
+    # Under no-plot, examples are not run, so there is nothing to summarize
+    if not app.config.sphinx_gallery_conf['plot_gallery']:
+        return
+
+    gallery_conf = app.config.sphinx_gallery_conf
+    failing_examples = set(gallery_conf['failing_examples'].keys())
+    expected_failing_examples = set([os.path.normpath(os.path.join(app.srcdir, path))
+                                     for path in
+                                     gallery_conf['expected_failing_examples']])
+
+    examples_expected_to_fail = failing_examples.intersection(
+        expected_failing_examples)
+    expected_fail_msg = []
+    if examples_expected_to_fail:
+        expected_fail_msg.append("\n\nExamples failing as expected:")
+        for fail_example in examples_expected_to_fail:
+            expected_fail_msg.append(fail_example + ' failed leaving traceback:\n' +
+                                     gallery_conf['failing_examples'][fail_example] + '\n')
+        print("\n".join(expected_fail_msg))
+
+    examples_not_expected_to_fail = failing_examples.difference(
+        expected_failing_examples)
+    fail_msgs = []
+    if examples_not_expected_to_fail:
+        fail_msgs.append("Unexpected failing examples:")
+        for fail_example in examples_not_expected_to_fail:
+            fail_msgs.append(fail_example + ' failed leaving traceback:\n' +
+                             gallery_conf['failing_examples'][fail_example] + '\n')
+
+    examples_not_expected_to_pass = expected_failing_examples.difference(
+        failing_examples)
+    if examples_not_expected_to_pass:
+        fail_msgs.append("Examples expected to fail, but not failing:\n" +
+                         "Please remove these examples from\n" +
+
"sphinx_gallery_conf['expected_failing_examples']\n" + + "in your conf.py file" + "\n".join(examples_not_expected_to_pass)) + + if fail_msgs: + raise ValueError("Here is a summary of the problems encountered when " + "running the examples\n\n" + "\n".join(fail_msgs) + + "\n" + "-" * 79) + + +def get_default_config_value(key): + def default_getter(conf): + return conf['sphinx_gallery_conf'].get(key, DEFAULT_GALLERY_CONF[key]) + return default_getter def setup(app): """Setup sphinx-gallery sphinx extension""" - app.add_config_value('plot_gallery', True, 'html') - app.add_config_value('sphinx_gallery_conf', gallery_conf, 'html') + app.add_config_value('sphinx_gallery_conf', DEFAULT_GALLERY_CONF, 'html') + for key in ['plot_gallery', 'abort_on_example_error']: + app.add_config_value(key, get_default_config_value(key), 'html') + app.add_stylesheet('gallery.css') + # Sphinx < 1.6 calls it `_extensions`, >= 1.6 is `extensions`. + extensions_attr = '_extensions' if hasattr(app, '_extensions') else 'extensions' + if 'sphinx.ext.autodoc' in getattr(app, extensions_attr): + app.connect('autodoc-process-docstring', touch_empty_backreferences) + app.connect('builder-inited', generate_gallery_rst) + app.connect('build-finished', sumarize_failing_examples) app.connect('build-finished', embed_code_links) - - -def setup_module(): - # HACK: Stop nosetests running setup() above - pass diff --git a/doc/sphinxext/sphinx_gallery/gen_rst.py b/doc/sphinxext/sphinx_gallery/gen_rst.py index 48a04d1e82..c2a0b95545 100644 --- a/doc/sphinxext/sphinx_gallery/gen_rst.py +++ b/doc/sphinxext/sphinx_gallery/gen_rst.py @@ -2,7 +2,6 @@ # Author: Óscar Nájera # License: 3-clause BSD """ -================== RST file generator ================== @@ -12,44 +11,22 @@ Files that generate images should start with 'plot' """ +# Don't use unicode_literals here (be explicit with u"..." instead) otherwise +# tricky errors come up with exec(code_blocks, ...) calls from __future__ import division, print_function, absolute_import from time import time -import ast +import codecs +import hashlib import os import re import shutil -import traceback -import sys import subprocess +import sys +import traceback import warnings -from textwrap import dedent -from . import glr_path_static -from .backreferences import write_backreferences, _thumbnail_div # Try Python 2 first, otherwise load from Python 3 -try: - from StringIO import StringIO -except ImportError: - from io import StringIO - -try: - basestring -except NameError: - basestring = str - -try: - # make sure that the Agg backend is set before importing any - # matplotlib - import matplotlib - matplotlib.use('Agg') - import matplotlib.pyplot as plt -except ImportError: - # this script can be imported by nosetest to find tests to run: we should - # not impose the matplotlib requirement in that case. - pass - - try: # textwrap indent only exists in python 3 from textwrap import indent @@ -71,6 +48,44 @@ def prefixed_lines(): yield (prefix + line if predicate(line) else line) return ''.join(prefixed_lines()) +from io import StringIO + +# make sure that the Agg backend is set before importing any +# matplotlib +import matplotlib +matplotlib.use('agg') +matplotlib_backend = matplotlib.get_backend() + +if matplotlib_backend != 'agg': + mpl_backend_msg = ( + "Sphinx-Gallery relies on the matplotlib 'agg' backend to " + "render figures and write them to files. You are " + "currently using the {} backend. 
Sphinx-Gallery will " + "terminate the build now, because changing backends is " + "not well supported by matplotlib. We advise you to move " + "sphinx_gallery imports before any matplotlib-dependent " + "import. Moving sphinx_gallery imports at the top of " + "your conf.py file should fix this issue") + + raise ValueError(mpl_backend_msg.format(matplotlib_backend)) + +import matplotlib.pyplot as plt + +from . import glr_path_static +from .backreferences import write_backreferences, _thumbnail_div +from .downloads import CODE_DOWNLOAD +from .py_source_parser import (get_docstring_and_rest, + split_code_and_text_blocks) + +from .notebook import jupyter_notebook, save_notebook + +try: + basestring +except NameError: + basestring = str + unicode = str + + ############################################################################### @@ -89,14 +104,21 @@ def flush(self): self.file1.flush() self.file2.flush() + # When called from a local terminal seaborn needs it in Python3 + def isatty(self): + self.file1.isatty() -############################################################################### -CODE_DOWNLOAD = """**Total running time of the script:** -({0:.0f} minutes {1:.3f} seconds)\n\n -\n.. container:: sphx-glr-download - **Download Python source code:** :download:`{2} <{2}>`\n""" +class MixedEncodingStringIO(StringIO): + """Helper when both ASCII and unicode strings will be written""" + + def write(self, data): + if not isinstance(data, unicode): + data = data.decode('utf-8') + StringIO.write(self, data) + +############################################################################### # The following strings are used when we have several pictures: we use # an html div tag that our CSS uses to turn the lists into horizontal # lists. @@ -117,98 +139,43 @@ def flush(self): :align: center """ -CODE_OUTPUT = """.. rst-class:: sphx-glr-script-out - **Output**:\n +# This one could contain unicode +CODE_OUTPUT = u""".. rst-class:: sphx-glr-script-out - :: + Out:: {0}\n""" -def get_docstring_and_rest(filename): - """Separate `filename` content between docstring and the rest - - Strongly inspired from ast.get_docstring. - - Returns - ------- - docstring: str - docstring of `filename` - rest: str - `filename` content without the docstring - """ - with open(filename) as f: - content = f.read() - - node = ast.parse(content) - if not isinstance(node, ast.Module): - raise TypeError("This function only supports modules. " - "You provided {0}".format(node.__class__.__name__)) - if node.body and isinstance(node.body[0], ast.Expr) and \ - isinstance(node.body[0].value, ast.Str): - docstring_node = node.body[0] - docstring = docstring_node.value.s - # This get the content of the file after the docstring last line - # Note: 'maxsplit' argument is not a keyword argument in python2 - rest = content.split('\n', docstring_node.lineno)[-1] - return docstring, rest - else: - raise ValueError(('Could not find docstring in file "{0}". ' - 'A docstring is required by sphinx-gallery') - .format(filename)) - - -def split_code_and_text_blocks(source_file): - """Return list with source file separated into code and text blocks. - - Returns - ------- - blocks : list of (label, content) - List where each element is a tuple with the label ('text' or 'code'), - and content string of block. - """ - docstring, rest_of_content = get_docstring_and_rest(source_file) - - blocks = [('text', docstring)] - - pattern = re.compile( - r'(?P^#{20,}.*)\s(?P(?:^#.*\s)*)', - flags=re.M) +SPHX_GLR_SIG = """\n.. 
rst-class:: sphx-glr-signature - pos_so_far = 0 - for match in re.finditer(pattern, rest_of_content): - match_start_pos, match_end_pos = match.span() - code_block_content = rest_of_content[pos_so_far:match_start_pos] - text_content = match.group('text_content') - sub_pat = re.compile('^#', flags=re.M) - text_block_content = dedent(re.sub(sub_pat, '', text_content)) - if code_block_content.strip(): - blocks.append(('code', code_block_content)) - if text_block_content.strip(): - blocks.append(('text', text_block_content)) - pos_so_far = match_end_pos + `Generated by Sphinx-Gallery `_\n""" - remaining_content = rest_of_content[pos_so_far:] - if remaining_content.strip(): - blocks.append(('code', remaining_content)) - return blocks - - -def codestr2rst(codestr): +def codestr2rst(codestr, lang='python'): """Return reStructuredText code block from code string""" - code_directive = "\n.. code-block:: python\n\n" + code_directive = "\n.. code-block:: {0}\n\n".format(lang) indented_block = indent(codestr, ' ' * 4) return code_directive + indented_block -def text2string(content): - """Returns a string without the extra triple quotes""" - try: - return ast.literal_eval(content) + '\n' - except Exception: - return content +def extract_thumbnail_number(text): + """ Pull out the thumbnail image number specified in the docstring. """ + + # check whether the user has specified a specific thumbnail image + pattr = re.compile( + r"^\s*#\s*sphinx_gallery_thumbnail_number\s*=\s*([0-9]+)\s*$", + flags=re.MULTILINE) + match = pattr.search(text) + + if match is None: + # by default, use the first figure created + thumbnail_number = 1 + else: + thumbnail_number = int(match.groups()[0]) + + return thumbnail_number def extract_intro(filename): @@ -231,14 +198,29 @@ def extract_intro(filename): return first_paragraph -def _plots_are_current(src_file, image_file): - """Test existence of image file and later touch time to source script""" +def get_md5sum(src_file): + """Returns md5sum of file""" + + with open(src_file, 'rb') as src_data: + src_content = src_data.read() + + src_md5 = hashlib.md5(src_content).hexdigest() + return src_md5 + + +def md5sum_is_current(src_file): + """Checks whether src_file has the same md5 hash as the one on disk""" + + src_md5 = get_md5sum(src_file) + + src_md5_file = src_file + '.md5' + if os.path.exists(src_md5_file): + with open(src_md5_file, 'r') as file_checksum: + ref_md5 = file_checksum.read() + + return src_md5 == ref_md5 - first_image_file = image_file.format(1) - needs_replot = ( - not os.path.exists(first_image_file) or - os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime) - return not needs_replot + return False def save_figures(image_path, fig_count, gallery_conf): @@ -250,18 +232,22 @@ def save_figures(image_path, fig_count, gallery_conf): Path where plots are saved (format string which accepts figure number) fig_count : int Previous figure number count. Figure number add from this number + gallery_conf : dict + Contains the configuration of Sphinx-Gallery Returns ------- - list of strings containing the full path to each figure + images_rst : str + rst code to embed the images in the document + fig_num : int + number of figures saved """ figure_list = [] - fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers() - for fig_mngr in fig_managers: + for fig_num in plt.get_fignums(): # Set the fig_num figure as the current figure as we can't # save a figure that's not the current figure. 
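The md5 helpers introduced above are the whole caching story: an example is re-run only when its current checksum no longer matches the one stored beside it in a '<script>.md5' file. A condensed, self-contained restatement of the same logic (file layout as in the patch):

    import hashlib
    import os

    def get_md5sum(src_file):
        # Hash the raw bytes so the check is encoding-independent.
        with open(src_file, 'rb') as src_data:
            return hashlib.md5(src_data.read()).hexdigest()

    def md5sum_is_current(src_file):
        # Compare against the checksum written after the last good build.
        src_md5_file = src_file + '.md5'
        if os.path.exists(src_md5_file):
            with open(src_md5_file, 'r') as fid:
                return get_md5sum(src_file) == fid.read()
        return False  # no stored checksum yet: treat the example as stale

When the check returns True, generate_file_rst returns early and the previously generated rst and images are reused.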
- fig = plt.figure(fig_mngr.num) + fig = plt.figure(fig_num) kwargs = {} to_rgba = matplotlib.colors.colorConverter.to_rgba for attr in ['facecolor', 'edgecolor']: @@ -270,16 +256,16 @@ def save_figures(image_path, fig_count, gallery_conf): if to_rgba(fig_attr) != to_rgba(default_attr): kwargs[attr] = fig_attr - current_fig = image_path.format(fig_count + fig_mngr.num) + current_fig = image_path.format(fig_count + fig_num) fig.savefig(current_fig, **kwargs) figure_list.append(current_fig) if gallery_conf.get('find_mayavi_figures', False): from mayavi import mlab e = mlab.get_engine() - last_matplotlib_fig_num = len(figure_list) + last_matplotlib_fig_num = fig_count + len(figure_list) total_fig_num = last_matplotlib_fig_num + len(e.scenes) - mayavi_fig_nums = range(last_matplotlib_fig_num, total_fig_num) + mayavi_fig_nums = range(last_matplotlib_fig_num + 1, total_fig_num + 1) for scene, mayavi_fig_num in zip(e.scenes, mayavi_fig_nums): current_fig = image_path.format(mayavi_fig_num) @@ -289,7 +275,43 @@ def save_figures(image_path, fig_count, gallery_conf): figure_list.append(current_fig) mlab.close(all=True) - return figure_list + return figure_rst(figure_list, gallery_conf['src_dir']) + + +def figure_rst(figure_list, sources_dir): + """Given a list of paths to figures generate the corresponding rst + + Depending on whether we have one or more figures, we use a + single rst call to 'image' or a horizontal list. + + Parameters + ---------- + figure_list : list of str + Strings are the figures' absolute paths + sources_dir : str + absolute path of Sphinx documentation sources + + Returns + ------- + images_rst : str + rst code to embed the images in the document + fig_num : int + number of figures saved + """ + + figure_paths = [os.path.relpath(figure_path, sources_dir) + .replace(os.sep, '/').lstrip('/') + for figure_path in figure_list] + images_rst = "" + if len(figure_paths) == 1: + figure_name = figure_paths[0] + images_rst = SINGLE_IMAGE % figure_name + elif len(figure_paths) > 1: + images_rst = HLIST_HEADER + for figure_name in figure_paths: + images_rst += HLIST_IMAGE_TEMPLATE % figure_name + + return images_rst, len(figure_list) def scale_image(in_fname, out_fname, max_width, max_height): @@ -337,18 +359,28 @@ def scale_image(in_fname, out_fname, max_width, max_height): generated images') -def save_thumbnail(image_path, base_image_name, gallery_conf): +def save_thumbnail(image_path_template, src_file, gallery_conf): """Save the thumbnail image""" - first_image_file = image_path.format(1) - thumb_dir = os.path.join(os.path.dirname(first_image_file), 'thumb') + # read specification of the figure to display as thumbnail from main text + _, content = get_docstring_and_rest(src_file) + thumbnail_number = extract_thumbnail_number(content) + thumbnail_image_path = image_path_template.format(thumbnail_number) + + thumb_dir = os.path.join(os.path.dirname(thumbnail_image_path), 'thumb') if not os.path.exists(thumb_dir): os.makedirs(thumb_dir) + base_image_name = os.path.splitext(os.path.basename(src_file))[0] thumb_file = os.path.join(thumb_dir, 'sphx_glr_%s_thumb.png' % base_image_name) - if os.path.exists(first_image_file): - scale_image(first_image_file, thumb_file, 400, 280) + if src_file in gallery_conf['failing_examples']: + broken_img = os.path.join(glr_path_static(), 'broken_example.png') + scale_image(broken_img, thumb_file, 200, 140) + + elif os.path.exists(thumbnail_image_path): + scale_image(thumbnail_image_path, thumb_file, 400, 280) + elif not os.path.exists(thumb_file): # 
create something to replace the thumbnail default_thumb_file = os.path.join(glr_path_static(), 'no_image.png') @@ -365,29 +397,38 @@ def generate_dir_rst(src_dir, target_dir, gallery_conf, seen_backrefs): src_dir) print('Skipping this directory') print(80 * '_') - return "" # because string is an expected return type + return "", [] # because string is an expected return type + + with open(os.path.join(src_dir, 'README.txt')) as fid: + fhindex = fid.read() + # Add empty lines to avoid bug in issue #165 + fhindex += "\n\n" - fhindex = open(os.path.join(src_dir, 'README.txt')).read() if not os.path.exists(target_dir): os.makedirs(target_dir) sorted_listdir = [fname for fname in sorted(os.listdir(src_dir)) if fname.endswith('.py')] entries_text = [] + computation_times = [] + build_target_dir = os.path.relpath(target_dir, gallery_conf['src_dir']) for fname in sorted_listdir: - amount_of_code = generate_file_rst(fname, target_dir, src_dir, - gallery_conf) + amount_of_code, time_elapsed = \ + generate_file_rst(fname, target_dir, src_dir, gallery_conf) + computation_times.append((time_elapsed, fname)) new_fname = os.path.join(src_dir, fname) intro = extract_intro(new_fname) - write_backreferences(seen_backrefs, gallery_conf, - target_dir, fname, intro) - this_entry = _thumbnail_div(target_dir, fname, intro) + """ + this_entry = _thumbnail_div(build_target_dir, fname, intro) + """ .. toctree:: :hidden: - /%s/%s\n""" % (target_dir, fname[:-3]) + /%s\n""" % os.path.join(build_target_dir, fname[:-3]).replace(os.sep, '/') entries_text.append((amount_of_code, this_entry)) + if gallery_conf['backreferences_dir']: + write_backreferences(seen_backrefs, gallery_conf, + target_dir, fname, intro) + # sort to have the smallest entries in the beginning entries_text.sort() @@ -398,80 +439,118 @@ def generate_dir_rst(src_dir, target_dir, gallery_conf, seen_backrefs): fhindex += """.. raw:: html\n
\n\n""" - return fhindex + return fhindex, computation_times -def execute_script(code_block, example_globals, image_path, fig_count, - src_file, gallery_conf): +def execute_code_block(code_block, example_globals, + block_vars, gallery_conf): """Executes the code block of the example file""" time_elapsed = 0 stdout = '' - # We need to execute the code - print('plotting code blocks in %s' % src_file) + # If example is not suitable to run, skip executing its blocks + if not block_vars['execute_script']: + return stdout, time_elapsed plt.close('all') cwd = os.getcwd() # Redirect output to stdout and orig_stdout = sys.stdout + src_file = block_vars['src_file'] try: # First cd in the original example dir, so that any file # created by the example get created in this directory os.chdir(os.path.dirname(src_file)) - my_buffer = StringIO() + my_buffer = MixedEncodingStringIO() my_stdout = Tee(sys.stdout, my_buffer) sys.stdout = my_stdout t_start = time() + # don't use unicode_literals at the top of this file or you get + # nasty errors here on Py2.7 exec(code_block, example_globals) time_elapsed = time() - t_start sys.stdout = orig_stdout my_stdout = my_buffer.getvalue().strip().expandtabs() + # raise RuntimeError if my_stdout: - stdout = CODE_OUTPUT.format(indent(my_stdout, ' ' * 4)) + stdout = CODE_OUTPUT.format(indent(my_stdout, u' ' * 4)) os.chdir(cwd) - figure_list = save_figures(image_path, fig_count, gallery_conf) - - # Depending on whether we have one or more figures, we're using a - # horizontal list or a single rst call to 'image'. - if len(figure_list) == 1: - figure_name = figure_list[0] - image_list = SINGLE_IMAGE % figure_name.lstrip('/') - else: - image_list = HLIST_HEADER - for figure_name in figure_list: - image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/') + images_rst, fig_num = save_figures(block_vars['image_path'], + block_vars['fig_count'], gallery_conf) except Exception: - figure_list = [] - image_list = '%s is not compiling:' % src_file - print(80 * '_') - print(image_list) - traceback.print_exc() - print(80 * '_') + formatted_exception = traceback.format_exc() + + fail_example_warning = 80 * '_' + '\n' + \ + '%s failed to execute correctly:' % src_file + \ + formatted_exception + 80 * '_' + '\n' + warnings.warn(fail_example_warning) + + fig_num = 0 + images_rst = codestr2rst(formatted_exception, lang='pytb') + + # Breaks build on first example error + # XXX This check can break during testing e.g. if you uncomment the + # `raise RuntimeError` by the `my_stdout` call, maybe use `.get()`? 
+ if gallery_conf['abort_on_example_error']: + raise + # Stores failing file + gallery_conf['failing_examples'][src_file] = formatted_exception + block_vars['execute_script'] = False + finally: os.chdir(cwd) sys.stdout = orig_stdout - print(" - time elapsed : %.2g sec" % time_elapsed) - code_output = "\n{0}\n\n{1}\n\n".format(image_list, stdout) + code_output = u"\n{0}\n\n{1}\n\n".format(images_rst, stdout) + block_vars['fig_count'] += fig_num + + return code_output, time_elapsed + + +def clean_modules(): + """Remove "unload" seaborn from the name space + + After a script is executed it can load a variety of setting that one + does not want to influence in other examples in the gallery.""" + + # Horrible code to 'unload' seaborn, so that it resets + # its default when is load + # Python does not support unloading of modules + # https://bugs.python.org/issue9072 + for module in list(sys.modules.keys()): + if 'seaborn' in module: + del sys.modules[module] - return code_output, time_elapsed, fig_count + len(figure_list) + # Reset Matplotlib to default + plt.rcdefaults() def generate_file_rst(fname, target_dir, src_dir, gallery_conf): - """ Generate the rst file for a given example. + """Generate the rst file for a given example. - Returns the amout of code (in characters) of the corresponding - files. + Returns + ------- + amount_of_code : int + character count of the corresponding python script in file + time_elapsed : float + seconds required to run the script """ - src_file = os.path.join(src_dir, fname) + src_file = os.path.normpath(os.path.join(src_dir, fname)) example_file = os.path.join(target_dir, fname) shutil.copyfile(src_file, example_file) + script_blocks = split_code_and_text_blocks(src_file) + amount_of_code = sum([len(bcontent) + for blabel, bcontent in script_blocks + if blabel == 'code']) + + if md5sum_is_current(example_file): + return amount_of_code, 0 image_dir = os.path.join(target_dir, 'images') if not os.path.exists(image_dir): @@ -479,66 +558,84 @@ def generate_file_rst(fname, target_dir, src_dir, gallery_conf): base_image_name = os.path.splitext(fname)[0] image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png' - image_path = os.path.join(image_dir, image_fname) - - script_blocks = split_code_and_text_blocks(example_file) + build_image_dir = os.path.relpath(image_dir, gallery_conf['src_dir']) + image_path_template = os.path.join(image_dir, image_fname) - if _plots_are_current(src_file, image_path): - amount_of_code = sum([len(bcontent) - for blabel, bcontent in script_blocks - if blabel == 'code']) - return amount_of_code - - time_elapsed = 0 - - ref_fname = example_file.replace(os.path.sep, '_') + ref_fname = os.path.relpath(example_file, gallery_conf['src_dir']) + ref_fname = ref_fname.replace(os.path.sep, '_') example_rst = """\n\n.. _sphx_glr_{0}:\n\n""".format(ref_fname) - if not fname.startswith('plot'): - convert_func = dict(code=codestr2rst, text=text2string) - for blabel, bcontent in script_blocks: - example_rst += convert_func[blabel](bcontent) + '\n' - else: + filename_pattern = gallery_conf.get('filename_pattern') + execute_script = re.search(filename_pattern, src_file) and gallery_conf[ + 'plot_gallery'] + example_globals = { # A lot of examples contains 'print(__doc__)' for example in # scikit-learn so that running the example prints some useful # information. 
Because the docstring has been separated from # the code blocks in sphinx-gallery, __doc__ is actually # __builtin__.__doc__ in the execution context and we do not # want to print it - example_globals = {'__doc__': ''} - fig_count = 0 - # A simple example has two blocks: one for the - # example introduction/explanation and one for the code - is_example_notebook_like = len(script_blocks) > 2 - for blabel, bcontent in script_blocks: - if blabel == 'code': - code_output, rtime, fig_count = execute_script(bcontent, - example_globals, - image_path, - fig_count, - src_file, - gallery_conf) - - time_elapsed += rtime - - if is_example_notebook_like: - example_rst += codestr2rst(bcontent) + '\n' - example_rst += code_output - else: - example_rst += code_output - example_rst += codestr2rst(bcontent) + '\n' - + '__doc__': '', + # Examples may contain if __name__ == '__main__' guards + # for in example scikit-learn if the example uses multiprocessing + '__name__': '__main__', + # Don't ever support __file__: Issues #166 #212 + } + + # A simple example has two blocks: one for the + # example introduction/explanation and one for the code + is_example_notebook_like = len(script_blocks) > 2 + time_elapsed = 0 + block_vars = {'execute_script': execute_script, 'fig_count': 0, + 'image_path': image_path_template, 'src_file': src_file} + if block_vars['execute_script']: + print('Executing file %s' % src_file) + for blabel, bcontent in script_blocks: + if blabel == 'code': + code_output, rtime = execute_code_block(bcontent, + example_globals, + block_vars, + gallery_conf) + + time_elapsed += rtime + + if is_example_notebook_like: + example_rst += codestr2rst(bcontent) + '\n' + example_rst += code_output else: - example_rst += text2string(bcontent) + '\n' + example_rst += code_output + if 'sphx-glr-script-out' in code_output: + # Add some vertical space after output + example_rst += "\n\n|\n\n" + example_rst += codestr2rst(bcontent) + '\n' - amount_of_code = sum([len(bcontent) - for blabel, bcontent in script_blocks - if blabel == 'code']) + else: + example_rst += bcontent + '\n\n' + + clean_modules() - save_thumbnail(image_path, base_image_name, gallery_conf) + # Writes md5 checksum if example has build correctly + # not failed and was initially meant to run(no-plot shall not cache md5sum) + if block_vars['execute_script']: + with open(example_file + '.md5', 'w') as file_checksum: + file_checksum.write(get_md5sum(example_file)) + + save_thumbnail(image_path_template, src_file, gallery_conf) time_m, time_s = divmod(time_elapsed, 60) - with open(os.path.join(target_dir, base_image_name + '.rst'), 'w') as f: - example_rst += CODE_DOWNLOAD.format(time_m, time_s, fname) + example_nb = jupyter_notebook(script_blocks) + save_notebook(example_nb, example_file.replace('.py', '.ipynb')) + with codecs.open(os.path.join(target_dir, base_image_name + '.rst'), + mode='w', encoding='utf-8') as f: + example_rst += "**Total running time of the script:**" \ + " ({0: .0f} minutes {1: .3f} seconds)\n\n".format( + time_m, time_s) + example_rst += CODE_DOWNLOAD.format(fname, + fname.replace('.py', '.ipynb')) + example_rst += SPHX_GLR_SIG f.write(example_rst) - return amount_of_code + + if block_vars['execute_script']: + print("{0} ran in : {1:.2g} seconds\n".format(src_file, time_elapsed)) + + return amount_of_code, time_elapsed diff --git a/doc/sphinxext/sphinx_gallery/notebook.py b/doc/sphinxext/sphinx_gallery/notebook.py new file mode 100644 index 0000000000..a0cfdbd788 --- /dev/null +++ 
b/doc/sphinxext/sphinx_gallery/notebook.py @@ -0,0 +1,193 @@ +# -*- coding: utf-8 -*- +r""" +Parser for Jupyter notebooks +============================ + +Class that holds the Jupyter notebook information + +""" +# Author: Óscar Nájera +# License: 3-clause BSD + +from __future__ import division, absolute_import, print_function +from functools import partial +import argparse +import json +import re +import sys +from .py_source_parser import split_code_and_text_blocks + + +def jupyter_notebook_skeleton(): + """Returns a dictionary with the elements of a Jupyter notebook""" + py_version = sys.version_info + notebook_skeleton = { + "cells": [], + "metadata": { + "kernelspec": { + "display_name": "Python " + str(py_version[0]), + "language": "python", + "name": "python" + str(py_version[0]) + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": py_version[0] + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython" + str(py_version[0]), + "version": '{0}.{1}.{2}'.format(*sys.version_info[:3]) + } + }, + "nbformat": 4, + "nbformat_minor": 0 + } + return notebook_skeleton + + +def directive_fun(match, directive): + """Helper to fill in directives""" + directive_to_alert = dict(note="info", warning="danger") + return ('
<div class="alert alert-{0}"><h4>{1}</h4><p>{2}</p></div>
' + .format(directive_to_alert[directive], directive.capitalize(), + match.group(1).strip())) + + +def rst2md(text): + """Converts the RST text from the examples docstrigs and comments + into markdown text for the Jupyter notebooks""" + + top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M) + text = re.sub(top_heading, r'# \1', text) + + math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M) + text = re.sub(math_eq, + lambda match: r'\begin{{align}}{0}\end{{align}}'.format( + match.group(1).strip()), + text) + inline_math = re.compile(r':math:`(.+?)`', re.DOTALL) + text = re.sub(inline_math, r'$\1$', text) + + directives = ('warning', 'note') + for directive in directives: + directive_re = re.compile(r'^\.\. %s::((?:.+)?(?:\n+^ .+)*)' + % directive, flags=re.M) + text = re.sub(directive_re, + partial(directive_fun, directive=directive), text) + + links = re.compile(r'^ *\.\. _.*:.*$\n', flags=re.M) + text = re.sub(links, '', text) + + refs = re.compile(r':ref:`') + text = re.sub(refs, '`', text) + + contents = re.compile(r'^\s*\.\. contents::.*$(\n +:\S+: *$)*\n', + flags=re.M) + text = re.sub(contents, '', text) + + images = re.compile( + r'^\.\. image::(.*$)(?:\n *:alt:(.*$)\n)?(?: +:\S+:.*$\n)*', + flags=re.M) + text = re.sub( + images, lambda match: '![{1}]({0})\n'.format( + match.group(1).strip(), (match.group(2) or '').strip()), text) + + return text + + +def jupyter_notebook(script_blocks): + """Generate a Jupyter notebook file cell-by-cell + + Parameters + ---------- + script_blocks: list + script execution cells + """ + + work_notebook = jupyter_notebook_skeleton() + add_code_cell(work_notebook, "%matplotlib inline") + fill_notebook(work_notebook, script_blocks) + + return work_notebook + + +def add_code_cell(work_notebook, code): + """Add a code cell to the notebook + + Parameters + ---------- + code : str + Cell content + """ + + code_cell = { + "cell_type": "code", + "execution_count": None, + "metadata": {"collapsed": False}, + "outputs": [], + "source": [code.strip()] + } + work_notebook["cells"].append(code_cell) + + +def add_markdown_cell(work_notebook, text): + """Add a markdown cell to the notebook + + Parameters + ---------- + code : str + Cell content + """ + markdown_cell = { + "cell_type": "markdown", + "metadata": {}, + "source": [rst2md(text)] + } + work_notebook["cells"].append(markdown_cell) + + +def fill_notebook(work_notebook, script_blocks): + """Writes the Jupyter notebook cells + + Parameters + ---------- + script_blocks : list of tuples + """ + + for blabel, bcontent in script_blocks: + if blabel == 'code': + add_code_cell(work_notebook, bcontent) + else: + add_markdown_cell(work_notebook, bcontent + '\n') + + +def save_notebook(work_notebook, write_file): + """Saves the Jupyter work_notebook to write_file""" + with open(write_file, 'w') as out_nb: + json.dump(work_notebook, out_nb, indent=2) + + +############################################################################### +# Notebook shell utility + +def python_to_jupyter_cli(args=None, namespace=None): + """Exposes the jupyter notebook renderer to the command line + + Takes the same arguments as ArgumentParser.parse_args + """ + parser = argparse.ArgumentParser( + description='Sphinx-Gallery Notebook converter') + parser.add_argument('python_src_file', nargs='+', + help='Input Python file script to convert. ' + 'Supports multiple files and shell wildcards' + ' (e.g. 
*.py)') + args = parser.parse_args(args, namespace) + + for src_file in args.python_src_file: + blocks = split_code_and_text_blocks(src_file) + print('Converting {0}'.format(src_file)) + example_nb = jupyter_notebook(blocks) + save_notebook(example_nb, src_file.replace('.py', '.ipynb')) diff --git a/doc/sphinxext/sphinx_gallery/py_source_parser.py b/doc/sphinxext/sphinx_gallery/py_source_parser.py new file mode 100644 index 0000000000..d397087f99 --- /dev/null +++ b/doc/sphinxext/sphinx_gallery/py_source_parser.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +r""" +Parser for python source files +============================== +""" +# Created Sun Nov 27 14:03:07 2016 +# Author: Óscar Nájera + +from __future__ import division, absolute_import, print_function +import ast +import re +from textwrap import dedent + +SYNTAX_ERROR_DOCSTRING = """ +SyntaxError +=========== + +Example script with invalid Python syntax +""" + + +def get_docstring_and_rest(filename): + """Separate `filename` content between docstring and the rest + + Strongly inspired from ast.get_docstring. + + Returns + ------- + docstring: str + docstring of `filename` + rest: str + `filename` content without the docstring + """ + # can't use codecs.open(filename, 'r', 'utf-8') here b/c ast doesn't + # seem to work with unicode strings in Python2.7 + # "SyntaxError: encoding declaration in Unicode string" + with open(filename, 'rb') as fid: + content = fid.read() + # change from Windows format to UNIX for uniformity + content = content.replace(b'\r\n', b'\n') + + try: + node = ast.parse(content) + except SyntaxError: + return SYNTAX_ERROR_DOCSTRING, content.decode('utf-8') + + if not isinstance(node, ast.Module): + raise TypeError("This function only supports modules. " + "You provided {0}".format(node.__class__.__name__)) + if node.body and isinstance(node.body[0], ast.Expr) and \ + isinstance(node.body[0].value, ast.Str): + docstring_node = node.body[0] + docstring = docstring_node.value.s + if hasattr(docstring, 'decode'): # python2.7 + docstring = docstring.decode('utf-8') + # This get the content of the file after the docstring last line + # Note: 'maxsplit' argument is not a keyword argument in python2 + rest = content.decode('utf-8').split('\n', docstring_node.lineno)[-1] + return docstring, rest + else: + raise ValueError(('Could not find docstring in file "{0}". ' + 'A docstring is required by sphinx-gallery') + .format(filename)) + + +def split_code_and_text_blocks(source_file): + """Return list with source file separated into code and text blocks. + + Returns + ------- + blocks : list of (label, content) + List where each element is a tuple with the label ('text' or 'code'), + and content string of block. 
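As a concrete illustration of that contract, here is a standalone approximation of the splitting rule (not the parser itself; it follows the 20-hash separator pattern restored below):

    import re
    from textwrap import dedent

    # A run of 20 or more '#' characters starts a text block made of the
    # comment lines that follow it; everything else stays code.
    SEPARATOR = re.compile(r'(?P<header>^#{20,}.*)\s(?P<text>(?:^#.*\s)*)',
                           flags=re.M)

    def split_blocks(rest_of_content):
        blocks, pos = [], 0
        for match in SEPARATOR.finditer(rest_of_content):
            start, end = match.span()
            code = rest_of_content[pos:start]
            text = dedent(re.sub('(?m)^#', '',
                                 match.group('text'))).lstrip()
            if code.strip():
                blocks.append(('code', code))
            if text.strip():
                blocks.append(('text', text))
            pos = end
        if rest_of_content[pos:].strip():
            blocks.append(('code', rest_of_content[pos:]))
        return blocks

    sample = "a = 1\n" + "#" * 20 + "\n# A text block\nb = 2\n"
    assert [label for label, _ in split_blocks(sample)] == \
        ['code', 'text', 'code']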
+    """
+    docstring, rest_of_content = get_docstring_and_rest(source_file)
+    blocks = [('text', docstring)]
+
+    pattern = re.compile(
+        r'(?P<header_line>^#{20,}.*)\s(?P<text_content>(?:^#.*\s)*)',
+        flags=re.M)
+
+    pos_so_far = 0
+    for match in re.finditer(pattern, rest_of_content):
+        match_start_pos, match_end_pos = match.span()
+        code_block_content = rest_of_content[pos_so_far:match_start_pos]
+        text_content = match.group('text_content')
+        sub_pat = re.compile('^#', flags=re.M)
+        text_block_content = dedent(re.sub(sub_pat, '', text_content)).lstrip()
+        if code_block_content.strip():
+            blocks.append(('code', code_block_content))
+        if text_block_content.strip():
+            blocks.append(('text', text_block_content))
+        pos_so_far = match_end_pos
+
+    remaining_content = rest_of_content[pos_so_far:]
+    if remaining_content.strip():
+        blocks.append(('code', remaining_content))
+
+    return blocks
diff --git a/doc/themes/nilearn/layout.html b/doc/themes/nilearn/layout.html
index d4755755fc..d4c42eb9ff 100644
--- a/doc/themes/nilearn/layout.html
+++ b/doc/themes/nilearn/layout.html
@@ -22,7 +22,7 @@
  • User Guide
  • Examples
  • Reference
  • - + {% endblock %} @@ -105,10 +105,10 @@ for(i in sections){ if(sections[i] > pos){ break; - }; - if($('a.internal[href$="' + i + '"]').is(':visible')){ + } + if($('a.internal[href$="' + i + '"]').is(':visible')){ current_section = i; - }; + } } $('a.internal[href$="' + current_section + '"]').addClass('active'); }); @@ -156,7 +156,7 @@ ICA
  • - Nifti IO + Nifti IO
  • Datasets @@ -169,17 +169,17 @@

    Nilearn:

    Machine learning for Neuro-Imaging in Python

  • -
    - -
    @@ -195,9 +195,13 @@

    Machine learning for Neuro-Imaging in Python

    News

      -
    • Dec 13th 2015: Nilearn 0.2.1 released +

    • June 14th 2018: Nilearn 0.4.2 released +

    • +
    • March 12th 2018: Nilearn 0.4.1 released +

    • +
    • November 19th 2017: Nilearn 0.4 released

    • -
    • July 13th 2015: Nilearn 0.1.4 released +

    • June 20th 2017: Nilearn 0.3.1 released

    • March 2014: Development

    • Nilearn on GitHub

    • All material Free Software: BSD license (3 clause).

    • -
    • Authors

    • +
    • Authors

    • +
    • Contributing

    {% endif %}

    Giving credit

    {% endblock %} diff --git a/doc/themes/nilearn/static/copybutton.js b/doc/themes/nilearn/static/copybutton.js index aca108d938..925d44f743 100644 --- a/doc/themes/nilearn/static/copybutton.js +++ b/doc/themes/nilearn/static/copybutton.js @@ -3,7 +3,9 @@ $(document).ready(function() { * the >>> and ... prompts and the output and thus make the code * copyable. */ var div = $('.highlight-python .highlight,' + - '.highlight-python3 .highlight') + '.highlight-python3 .highlight,' + + '.highlight-pycon .highlight,' + + '.highlight-default .highlight'); var pre = div.find('pre'); // get the styles from the current theme @@ -12,21 +14,23 @@ $(document).ready(function() { var show_text = 'Show the prompts and output'; var border_width = pre.css('border-top-width'); var border_style = pre.css('border-top-style'); - var border_color = '#AAA'; + var border_color = pre.css('border-top-color'); var button_styles = { 'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0', 'border-color': border_color, 'border-style': border_style, 'border-width': border_width, 'color': border_color, 'text-size': '75%', - 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em' - } + 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em', + 'border-radius': '0 3px 0 0' + }; // create and add the button to all the code blocks that contain >>> div.each(function(index) { var jthis = $(this); if (jthis.find('.gp').length > 0) { var button = $('>>>'); - button.css(button_styles) + button.css(button_styles); button.attr('title', hide_text); + button.data('hidden', 'false'); jthis.prepend(button); } // tracebacks (.gt) contain bare text elements that need to be @@ -37,20 +41,24 @@ $(document).ready(function() { }); // define the behavior of the button when it's clicked - $('.copybutton').toggle( - function() { - var button = $(this); + $('.copybutton').click(function(e){ + e.preventDefault(); + var button = $(this); + if (button.data('hidden') === 'false') { + // hide the code output button.parent().find('.go, .gp, .gt').hide(); button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden'); button.css('text-decoration', 'line-through'); button.attr('title', show_text); - }, - function() { - var button = $(this); + button.data('hidden', 'true'); + } else { + // show the code output button.parent().find('.go, .gp, .gt').show(); button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible'); button.css('text-decoration', 'none'); button.attr('title', hide_text); - }); + button.data('hidden', 'false'); + } + }); }); diff --git a/doc/themes/nilearn/static/jquery.js b/doc/themes/nilearn/static/jquery.js deleted file mode 100644 index 16ad06c5ac..0000000000 --- a/doc/themes/nilearn/static/jquery.js +++ /dev/null @@ -1,4 +0,0 @@ -/*! 
-[... 3 more lines of minified jQuery 1.7.2 source elided ...]
\ No newline at end of file
diff --git a/doc/themes/nilearn/static/nature.css_t b/doc/themes/nilearn/static/nature.css_t
index 95e56cda17..2e1ded71cf 100644
--- a/doc/themes/nilearn/static/nature.css_t
+++ b/doc/themes/nilearn/static/nature.css_t
@@ -107,6 +107,30 @@ div.highlight-python pre {
     font-size: 90%;
 }

+div.highlight:hover span.copybutton {
+    background-color: #3F556B;
+}
+
+div.highlight:hover span.copybutton:hover {
+    background-color: #20252B;
+}
+
+@media (min-width: 1060px) {
+    div.highlight:hover span.copybutton:after {
+        background: #3F556B;
+        border-radius: 5px;
+        color: white;
+        content: attr(title);
+        left: 110%;
+        padding: 5px 15px;
+        position: absolute;
+        z-index: 98;
+        width: 140px;
+        top: -10px;
+    }
+}
+
+
 @media (max-width: 800px) {
     div.highlight-python pre {
         font-size: 85%;
@@ -1093,6 +1117,11 @@ div.body p {
     margin: 1.2em 0 .5em 0;
 }

+/* More vertical space after a code block */
+div.highlight-python + p {
+    margin-top: 1.5em;
+}
+
 .float-right {
     float: right;
 }
diff --git a/doc/user_guide.rst b/doc/user_guide.rst
index 86a8d7da8a..ae49064252 100644
--- a/doc/user_guide.rst
+++ b/doc/user_guide.rst
@@ -4,6 +4,10 @@ User guide: table of contents
 ==============================

+.. sidebar:: **Download for offline viewing**
+
+   Download the `user guide and examples
+   `_.

 .. include:: includes/big_toc_css.rst

@@ -17,7 +21,8 @@ User guide: table of contents
    introduction.rst
    decoding/index.rst
    connectivity/index.rst
-   manipulating_visualizing/index.rst
+   plotting/index.rst
+   manipulating_images/index.rst
    building_blocks/index.rst
    modules/reference.rst
-
+   auto_examples/index.rst
diff --git a/doc/whats_new.rst b/doc/whats_new.rst
index 3578b53837..2adb2c94da 100644
--- a/doc/whats_new.rst
+++ b/doc/whats_new.rst
@@ -1,3 +1,742 @@
+0.5.0 alpha
+===========
+
+This is an alpha release: to download it, you need to explicitly ask for
+the version number::
+
+    pip install nilearn==0.5.0a0
+
+Highlights
+----------
+
+ - **Minimum supported versions of packages have been bumped up.**
+    - scikit-learn -- v0.18
+    - scipy -- v0.17
+    - pandas -- v0.18
+    - numpy -- v1.11
+    - matplotlib -- v1.5.1
+
+ - New :ref:`interactive plotting functions `,
+   e.g. for use in a notebook.
+
+Enhancements
+------------
+
+ - All NiftiMaskers now have a `dtype` argument. For now the default behaviour
+   is to keep the same data type as the input data.
+
+ - New functions :func:`nilearn.plotting.view_surf` and
+   :func:`nilearn.plotting.view_img_on_surf` for interactive visualization of
+   maps on the cortical surface in a web browser.
+
+ - New functions :func:`nilearn.plotting.view_connectome` and
+   :func:`nilearn.plotting.view_markers` to visualize connectomes and
+   seed locations in 3D.
+
+ - New function :func:`nilearn.plotting.view_stat_map` for interactive
+   visualization of volumes with 3 orthogonal cuts.
+
+ - Add :func:`nilearn.datasets.fetch_surf_fsaverage` to download either
+   fsaverage or fsaverage5 (FreeSurfer cortical meshes).
+
+ - Added :func:`nilearn.datasets.fetch_atlas_pauli_2017` to download a
+   recent subcortical neuroimaging atlas.
+
+ - Added :func:`nilearn.plotting.find_parcellation_cut_coords` for
+   extraction of coordinates from brain parcellations denoted as labels.
+
+ - Added :func:`nilearn.plotting.find_probabilistic_atlas_cut_coords` for
+   extraction of coordinates from brain probabilistic maps.
+
+ - Added :func:`nilearn.datasets.fetch_neurovault_auditory_computation_task`
+   and :func:`nilearn.datasets.fetch_neurovault_motor_task` for simple example data.
+
+
+Changes
+-------
+
+ - `nilearn.datasets.fetch_surf_fsaverage5` is deprecated and will be
+   removed in a future release. Use :func:`nilearn.datasets.fetch_surf_fsaverage`
+   with the parameter mesh="fsaverage5" (the default) instead.
+
+ - fsaverage5 surface data files are now shipped directly with Nilearn.
+   See issue #1705 for discussion.
+
+ - `sklearn.cross_validation` and `sklearn.grid_search` have been
+   replaced by `sklearn.model_selection` in all the examples.
+
+
+0.4.2
+=====
+
+A few important bug fixes, released for the OHBM conference.
+
+Changes
+-------
+ - Default colormaps for surface plotting functions have changed to be more
+   consistent with slice plotting.
+   :func:`nilearn.plotting.plot_surf_stat_map` now uses "cold_hot", as
+   :func:`nilearn.plotting.plot_stat_map` does, and
+   :func:`nilearn.plotting.plot_surf_roi` now uses "gist_ncar", as
+   :func:`nilearn.plotting.plot_roi` does.
+
+ - Improved 3D surface plotting: the aspect ratio of the plots is locked
+   and the whitespace around the plots is reduced.
+
+Bug fixes
+---------
+
+ - Fix a bug where the input repetition time (TR) had no effect in signal
+   cleaning. Fixed by Pradeep Raamana.
+
+ - Fix issues with signal extraction on lists of 3D images in
+   :class:`nilearn.regions.Parcellations`.
+
+ - Fix issues with raising AttributeError rather than HTTPError in the
+   dataset-fetching utilities. By Jerome Dockes.
+
+ - Fix issues with uncompression of files in the dataset-testing function.
+   By Pierre Glaser.
+
+0.4.1
+=====
+
+This bug-fix release focuses on a few bug fixes and minor developments.
+
+Enhancements
+------------
+
+ - :class:`nilearn.decomposition.CanICA` and
+   :class:`nilearn.decomposition.DictLearning` have a new attribute
+   `components_img_`, providing the learned components directly as
+   a Nifti image. This avoids the unmasking step needed for the
+   `components_` attribute in older versions.
+
+ - New object :class:`nilearn.regions.Parcellations` for learning brain
+   parcellations on fMRI data.
+
+ - Add optional reordering of the matrix using an argument `reorder`
+   with :func:`nilearn.plotting.plot_matrix`.
+
+   .. note::
+       This feature is usable only if the SciPy version is >= 1.0.0
+
+Changes
+-------
+
+ - The output attribute `components_`, which contains the extracted
+   components, of :class:`nilearn.decomposition.CanICA` and
+   :class:`nilearn.decomposition.DictLearning` is deprecated and will
+   be removed in the next two releases. Use `components_img_` instead.
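For users, the `components_img_` change above boils down to one line of code. A minimal sketch, assuming `func_filenames` is a list of 4D fMRI images you already have (it is not defined in this changelog)::

    from nilearn.decomposition import CanICA

    canica = CanICA(n_components=20)
    canica.fit(func_filenames)

    # Old pattern (now deprecated): unmask the raw `components_` array by hand.
    components_img = canica.masker_.inverse_transform(canica.components_)

    # New pattern: the fitted estimator exposes the components directly
    # as a Nifti image.
    components_img = canica.components_img_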
+
+Bug fixes
+---------
+
+ - Fix issues using :func:`nilearn.plotting.plot_connectome` when a string is
+   passed in `node_color` with display modes left and right hemispheric cuts
+   in the glass brain.
+
+ - Fix a bug when plotting only coordinates using add_markers on the glass
+   brain. See issue #1595.
+
+ - Fix issues with estimators in the decomposition module when input images
+   are given as glob patterns.
+
+ - Fix a bug loading Nifti2Images.
+
+ - Fix a bug when adjusting the contrast of the background template in
+   :func:`nilearn.plotting.plot_prob_atlas`.
+
+ - Fix a colormap bug with recent matplotlib 2.2.0.
+
+0.4.0
+=====
+
+**Highlights**:
+
+ - :func:`nilearn.surface.vol_to_surf` to project volume data to the
+   surface.
+
+ - :func:`nilearn.plotting.plot_matrix` to display matrices, e.g. connectomes.
+
+Enhancements
+-------------
+
+ - New function :func:`nilearn.surface.vol_to_surf` to project a 3d or
+   4d brain volume on the cortical surface.
+
+ - New matrix plotting function, e.g. to display connectome matrices:
+   :func:`nilearn.plotting.plot_matrix`
+
+ - Expose :func:`nilearn.image.coord_transform` for end users. Useful
+   to transform coordinates (x, y, z) from one image space to
+   another space.
+
+ - :func:`nilearn.image.resample_img` now takes a linear resampling
+   option (implemented by Joe Necus)
+
+ - :func:`nilearn.datasets.fetch_atlas_talairach` to fetch the Talairach
+   atlas (http://talairach.org)
+
+ - Enhanced the new surface plotting functions: added parameters
+   "axes" and "figure" to accept user-specified instances in
+   :func:`nilearn.plotting.plot_surf`,
+   :func:`nilearn.plotting.plot_surf_stat_map` and
+   :func:`nilearn.plotting.plot_surf_roi`
+
+ - :class:`nilearn.decoding.SearchLight` has a new parameter "groups" to
+   do LeaveOneGroupOut-type cross-validation with the new scikit-learn
+   model selection module.
+
+ - Enhanced the glass brain plotting in the back view 'y' direction.
+
+ - New parameter "resampling_interpolation" added in the most-used
+   plotting functions, giving users control for faster visualizations.
+
+ - Upgraded to Sphinx-Gallery 0.1.11
+
+Bug fixes
+----------
+
+ - The dimming factor applied to the background image in plotting
+   functions via the "dim" parameter no longer accepts strings
+   ('-1'); an error is raised instead.
+
+ - Fixed issues with matplotlib 2.1.0.
+
+ - Fixed issues with SciPy 1.0.0.
+
+Changes
+---------
+
+ - **Backward incompatible change**: :func:`nilearn.plotting.find_xyz_cut_coords`
+   now takes a `mask_img` argument which is a niimg, rather than a `mask`
+   argument, which used to be a numpy array.
+
+ - The minimum required version for scipy is now 0.14
+
+ - Dropped support for Nibabel older than 2.0.2.
+
+ - :func:`nilearn.image.smooth_img` no longer accepts the smoothing
+   parameter fwhm as 0, in accordance with the issues affecting the
+   recent SciPy version 1.0.0.
+
+ - The "dim" factor range is slightly increased, from [-1, 1] to [-2, 2].
+   Values beyond -1, which increase contrast further, should be set
+   cautiously.
+
+ - New 'anterior' and 'posterior' views added to the plot_surf family of views.
+
+ - Using the argument `anat_img` to place a background image in
+   :func:`nilearn.plotting.plot_prob_atlas` is deprecated. Use the argument
+   `bg_img` instead.
+
+ - The examples now use pandas for the behavioral information.
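The 0.4.0 surface workflow named in the highlights above fits in a few lines. This is an illustrative sketch, not code from the release: `stat_img` is an assumed, user-supplied 3D statistical volume, and the mesh fetcher used here is the newer name (on 0.4.x it was `fetch_surf_fsaverage5`)::

    from nilearn import datasets, plotting, surface

    # Fetch fsaverage5 cortical meshes (pial, inflated, sulcal depth maps).
    fsaverage = datasets.fetch_surf_fsaverage()

    # Project the volume onto the left pial surface, then display the
    # resulting texture on the inflated mesh.
    texture = surface.vol_to_surf(stat_img, fsaverage.pial_left)
    plotting.plot_surf_stat_map(fsaverage.infl_left, texture, hemi='left')
    plotting.show()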
+
+Contributors
+-------------
+
+The following people contributed to this release::
+
+   127  Jerome Dockes
+    62  Gael Varoquaux
+    36  Kamalakar Daddy
+    11  Jeff Chiang
+     9  Elizabeth DuPre
+     9  Jona Sassenhagen
+     7  Sylvain Lan
+     6  J Necus
+     5  Pierre-Olivier Quirion
+     3  AnaLu
+     3  Jean Remi King
+     3  MADHYASTHA Meghana
+     3  Salma Bougacha
+     3  sfvnMAC
+     2  Eric Larson
+     2  Horea Christian
+     2  Moritz Boos
+     1  Alex Rothberg
+     1  Bertrand Thirion
+     1  Christophe Bedetti
+     1  John Griffiths
+     1  Mehdi Rahim
+     1  Sylvain LANNUZEL
+     1  Yaroslav Halchenko
+     1  clfs
+
+
+0.3.1
+=====
+
+This is a minor release for BrainHack.
+
+Highlights
+----------
+
+* **Dropped support for scikit-learn older than 0.15.** The minimum
+  supported version is now 0.15.
+
+Changelog
+---------
+
+ - The function sym_to_vec is deprecated and will be removed in
+   release 0.4. Use :func:`nilearn.connectome.sym_matrix_to_vec` instead.
+
+ - Added the argument `smoothing_fwhm` to
+   :class:`nilearn.regions.RegionExtractor` to control smoothing according
+   to the resolution of atlas images.
+
+Bug fix
+-------
+
+ - The helper function `largest_connected_component` now works with
+   inputs of non-native data dtypes.
+
+ - Fix plotting issues when non-finite values are present in the
+   background anatomical image.
+
+ - Added a workaround to handle non-native endianness in the Nifti
+   images passed to resampling.
+
+Enhancements
+-------------
+ - New data fetcher functions :func:`nilearn.datasets.fetch_neurovault` and
+   :func:`nilearn.datasets.fetch_neurovault_ids` help you download
+   statistical maps from the Neurovault (http://neurovault.org) platform.
+
+ - New function :func:`nilearn.connectome.vec_to_sym_matrix` reshapes
+   vectors to symmetric matrices. It acts as the reverse of
+   :func:`nilearn.connectome.sym_matrix_to_vec`.
+
+ - Added an option to vectorize connectivity matrices returned by the
+   "transform" method of :class:`nilearn.connectome.ConnectivityMeasure`.
+
+ - :class:`nilearn.connectome.ConnectivityMeasure` now exposes an
+   "inverse_transform" method, useful for going back from vectorized
+   connectivity coefficients to connectivity matrices. It also allows
+   recovering the covariance matrices for the "tangent" kind.
+
+ - Reworked and renamed the connectivity measures example, from
+   plot_connectivity_measures to plot_group_level_connectivity.
+
+ - Tighter bounding boxes when using add_contours for plotting.
+
+ - Function :func:`nilearn.image.largest_connected_component_img` to
+   directly extract the largest connected component from Nifti images.
+
+ - Improvements in the plotting, decoding and functional connectivity
+   examples.
+
+0.3.0
+======
+
+More details of this release are listed below. Please check the
+**0.3.0 beta** release section for the minimum supported versions of
+dependencies, latest updates, highlights, changelog and enhancements.
+
+Changelog
+---------
+
+ - Function :func:`nilearn.plotting.find_cut_slices` now accepts a
+   Nifti1Image as input for the argument `img`.
+
+ - The helper functions `_get_mask_volume` and `_adjust_screening_percentile`
+   have been moved to the param_validation file in the utilities module,
+   to be shared with the Decoder object.
+
+Bug fix
+--------
+
+ - Fix bug when uncompressing tar files in the dataset fetchers.
+
+ - Fixed a number of CircleCI documentation build failures.
+
+ - Fixed `set_axis_bgcolor` matplotlib deprecations in plotting
+   functions.
+
+ - Fixed a bug in the masking module: unmask now accepts a list of
+   arrays as input.
+
+Enhancements
+-------------
+
+ - The ANOVA SVM example on the Haxby dataset, `plot_haxby_anova_svm`, in
+   the Decoding section now uses `SelectPercentile` to select voxels
+   rather than `SelectKBest`.
+
+ - New `fast_svd` implementation in the base decomposition module, to
+   automatically switch between randomized and LAPACK SVD
+   (scikit-learn's heuristic).
+
+0.3.0 beta
+===========
+
+To install the beta version, use::
+
+    pip install --upgrade --pre nilearn
+
+Highlights
+----------
+
+* Simple surface plotting
+
+* A function to break a parcellation into its connected components
+
+* **Dropped support for scikit-learn older than 0.14.1** Minimum supported
+  version is now 0.14.1.
+
+* **Dropped support for Python 2.6**
+
+* The minimum required version of NiBabel is now 1.2.0, to support loading
+  annotated data with FreeSurfer.
+
+Changelog
+---------
+
+ - The helper function _safe_get_data, a nilearn utility, now safely
+   removes NaN values from images when given the argument
+   ensure_finite=True.
+
+ - Connectome functions :func:`nilearn.connectome.cov_to_corr` and
+   :func:`nilearn.connectome.prec_to_partial` can now be used.
+
+Bug fix
+--------
+
+ - Fix colormap issue with colorbar=True when using qualitative colormaps,
+   in accordance with the matplotlib 2.0 changes.
+
+ - Fix plotting functions to work with NaN values in the images.
+
+ - Fix bug when getting the dtype of images with nibabel's get_data().
+
+ - Fix bug in nilearn's clean_img.
+
+Enhancements
+............
+
+ - A new function :func:`nilearn.regions.connected_label_regions` to break
+   up connected components that share the same label into separate
+   regions, each assigned a unique label.
+
+ - New plotting modules for surface visualization. Matplotlib 1.3.1 or
+   newer is required for plotting surface data with these functions.
+
+ - Function :func:`nilearn.plotting.plot_surf` can be used for plotting
+   surface mesh data with an optional background.
+
+ - A function :func:`nilearn.plotting.plot_surf_stat_map` can be used for
+   plotting statistical maps on a brain surface with an optional
+   background.
+
+ - A function :func:`nilearn.plotting.plot_surf_roi` can be used for
+   plotting statistical maps or ROIs onto a brain surface.
+
+ - A function `nilearn.datasets.fetch_surf_fsaverage5` can be used to
+   fetch fsaverage5 surface data, e.g. to serve as a background map for
+   the above plotting functions.
+
+ - A new data fetcher function,
+   :func:`nilearn.datasets.fetch_atlas_surf_destrieux`,
+   provides the Destrieux et al. 2010 cortical atlas in fsaverage5
+   surface space.
+
+ - A new functional data fetcher function,
+   :func:`nilearn.datasets.fetch_surf_nki_enhanced`, provides resting-state
+   data preprocessed and projected to fsaverage5 surface space.
+
+ - Two examples in the plotting gallery show how to fetch the atlas and
+   NKI data and use them for plotting on the brain surface.
+
+ - Helper function `load_surf_mesh` in the surf_plotting module for
+   loading surface mesh data into two arrays, containing the (x, y, z)
+   coordinates of the mesh vertices and the indices of the mesh faces.
+
+ - Helper function `load_surf_data` in the surf_plotting module for
+   loading data into a numpy array to be represented on a surface mesh.
+
+ - Added a fetcher for the Allen et al. 2011 RSN atlas in
+   :func:`nilearn.datasets.fetch_atlas_allen_2011`.
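+
+   A minimal fetch-and-plot sketch (the `rsn28` key is the one used for
+   this atlas elsewhere in the documentation)::
+
+       from nilearn import datasets, plotting
+
+       allen = datasets.fetch_atlas_allen_2011()
+       # 28 resting-state networks, shown as a probabilistic atlas
+       plotting.plot_prob_atlas(allen.rsn28, title='Allen 2011 RSNs')
+       plotting.show()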
+
+ - The function :func:`nilearn.datasets.fetch_cobre` has been updated to
+   the new light release of the COBRE data (schizophrenia).
+
+ - A new example, in the section on manipulating images, shows how to
+   extract regions from a labels image.
+
+ - coveralls has been replaced with codecov.
+
+ - Upgraded to Sphinx-Gallery 0.1.7.
+
+ - An extensive plotting example shows how to use contours and filled
+   contours on the glass brain.
+
+0.2.6
+=====
+
+Changelog
+---------
+
+This release enhances the usage of several functions by fine-tuning their
+parameters. It allows selecting which Haxby subjects to fetch. It also
+refactors the documentation to make it easier to understand.
+Sphinx-gallery has been updated and nilearn is ready for the new nibabel
+2.1 version. Several bugs related to masks in SearchLight and ABIDE
+fetching have been resolved.
+
+Bug fix
+........
+
+ - Change the default dtype in :func:`nilearn.image.concat_imgs` to be the
+   original type of the data (see #1238).
+
+ - Fix SearchLight, which did not run without a process_mask or with a
+   one-voxel mask.
+
+ - Fix flipping of the left hemisphere when plotting the glass brain.
+
+ - Fix bug when downloading ABIDE timeseries.
+
+Enhancements
+............
+
+ - Sphinx-gallery updated to version 0.1.3.
+
+ - Refactoring of examples and documentation.
+
+ - Better ordering of regions in
+   :func:`nilearn.datasets.fetch_coords_dosenbach_2010`.
+
+ - Removed the outdated power atlas example.
+
+
+API changes summary
+...................
+
+ - The parameter 'n_subjects' is deprecated and will be removed in a
+   future release. Use 'subjects' instead in
+   :func:`nilearn.datasets.fetch_haxby`.
+
+ - The function :func:`nilearn.datasets.fetch_haxby` now accepts the
+   input given in 'subjects' as a list rather than an integer.
+
+ - Replaced `get_affine` by `affine` for recent versions of nibabel.
+
+0.2.5.1
+=======
+
+Changelog
+---------
+
+This is a bugfix release.
+The new minimum required version of scikit-learn is 0.14.1.
+
+API changes summary
+...................
+
+ - The default option for the `dim` argument in plotting functions that
+   use the MNI template as a background image has been changed to 'auto',
+   meaning that an automatic contrast setting is applied to the
+   background image by default.
+
+ - Scikit-learn validation tools have been imported and are now used to
+   check the consistency of input data, in SpaceNet for example.
+
+New features
+............
+
+ - Added an option to select only off-diagonal elements in sym_to_vec.
+   Also, the scaling of matrices is modified: we divide the diagonal by
+   sqrt(2) instead of multiplying the off-diagonal elements.
+
+ - Connectivity examples rely on
+   :class:`nilearn.connectome.ConnectivityMeasure`.
+
+Bug fix
+........
+
+ - SciPy 0.18 introduced a bug in a corner case of resampling. Nilearn
+   0.2.5 can give wrong results with SciPy 0.18, but this is fixed in
+   0.2.6.
+
+ - Broken links and references fixed in the docs.
+
+0.2.5
+=====
+
+Changelog
+---------
+
+The 0.2.5 release includes plotting for connectomes and glass brain with
+hemisphere-specific projection, as well as more didactic examples and
+improved documentation.
+
+New features
+............
+
+ - New display_mode options in :func:`nilearn.plotting.plot_glass_brain`
+   and :func:`nilearn.plotting.plot_connectome`. It is possible to plot
+   right and left hemisphere projections separately.
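+
+   A minimal sketch of the hemisphere-specific display modes (``stat_img``
+   stands for any statistical map; only the `display_mode` values come
+   from this release)::
+
+       from nilearn import plotting
+
+       # 'lr': separate projections for the left and right hemispheres
+       plotting.plot_glass_brain(stat_img, display_mode='lr', threshold=3)
+       plotting.show()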
+
+ - A function to load the canonical brain mask image in MNI template
+   space, :func:`nilearn.datasets.load_mni152_brain_mask`.
+
+ - A function to load the brain grey-matter mask image,
+   :func:`nilearn.datasets.fetch_icbm152_brain_gm_mask`.
+
+ - New function :func:`nilearn.image.load_img` loads data from a filename
+   or a list of filenames.
+
+ - New function :func:`nilearn.image.clean_img` applies the cleaning
+   function :func:`nilearn.signal.clean` on all voxels.
+
+ - New simple data downloader
+   :func:`nilearn.datasets.fetch_localizer_button_task` to simplify
+   some examples.
+
+ - The dataset function
+   :func:`nilearn.datasets.fetch_localizer_contrasts` can now download
+   a specific list of subjects rather than a range of subjects.
+
+ - New function :func:`nilearn.datasets.get_data_dirs` to check where
+   nilearn downloads data.
+
+Contributors
+-------------
+
+Contributors (from ``git shortlog -ns 0.2.4..0.2.5``)::
+
+    55  Gael Varoquaux
+    39  Alexandre Abraham
+    26  Martin Perez-Guevara
+    20  Kamalakar Daddy
+     8  amadeuskanaan
+     3  Alexandre Abadie
+     3  Arthur Mensch
+     3  Elvis Dohmatob
+     3  Loïc Estève
+     2  Jerome Dockes
+     1  Alexandre M. S
+     1  Bertrand Thirion
+     1  Ivan Gonzalez
+     1  robbisg
+
+0.2.4
+=====
+
+Changelog
+---------
+
+0.2.4 is a small release focused on documentation for teaching.
+
+New features
+............
+ - Paths given to the "memory" argument of objects now have their
+   "~" expanded to the home directory.
+
+ - Display objects created by plotting functions now uniformly expose an
+   "add_markers" method.
+
+ - Plotting with a colorbar is now implemented in
+   :func:`nilearn.plotting.plot_connectome`.
+
+ - New function :func:`nilearn.image.resample_to_img` to resample one
+   image on another one (just resampling / interpolation, no
+   coregistration).
+
+API changes summary
+...................
+ - The atlas fetcher :func:`nilearn.datasets.fetch_atlas_msdl` now directly
+   returns the region labels in the output variable 'labels', their
+   coordinates in 'region_coords' and their network types in 'networks'.
+ - The output variable name 'regions' has been changed to 'maps' in the
+   AAL atlas fetcher :func:`nilearn.datasets.fetch_atlas_aal`.
+ - The AAL atlas now directly returns its labels in the variable 'labels'
+   and its index values in the variable 'indices'.
+
+0.2.3
+=====
+
+Changelog
+---------
+
+0.2.3 is a small feature release for BrainHack 2016.
+
+New features
+............
+ - Mathematical formulas based on numpy functions can be applied to an
+   image or a list of images using :func:`nilearn.image.math_img`.
+ - Downloader for the COBRE dataset of 146 resting-state fMRI subjects,
+   :func:`nilearn.datasets.fetch_cobre`.
+ - Downloader for the Dosenbach atlas,
+   :func:`nilearn.datasets.fetch_coords_dosenbach_2010`.
+ - Fetcher for multiscale functional brain parcellations (BASC),
+   :func:`nilearn.datasets.fetch_atlas_basc_multiscale_2015`.
+
+Bug fixes
+.........
+ - Better dimming on white backgrounds for plotting.
+
+0.2.2
+======
+
+Changelog
+---------
+
+0.2.2 is a bugfix and dependency-update release (for sphinx-gallery). It
+aims at preparing a renewal of the tutorials.
+
+New features
+............
+ - Fetcher for the Megatrawl Netmats dataset.
+
+Enhancements
+............
+ - Flake8 is now run on pull requests.
+ - Reworking of the documentation organization.
+ - Sphinx-gallery updated to version 0.1.1.
+ - The default n_subjects=None in :func:`nilearn.datasets.fetch_adhd` is
+   now changed to n_subjects=30.
+
+Bug fixes
+.........
+ - Fix `symmetric_split` behavior in
+   :func:`nilearn.datasets.fetch_atlas_harvard_oxford`.
+ - Fix casting errors when providing integer data to
+   :func:`nilearn.image.high_variance_confounds`.
+ - Fix matplotlib 1.5.0 compatibility in
+   :func:`nilearn.plotting.plot_prob_atlas`.
+ - Fix matplotlib backend choice on Mac OS X.
+ - :func:`nilearn.plotting.find_xyz_cut_coords` raises a meaningful error
+   when 4D data is provided instead of 3D.
+ - :class:`nilearn.input_data.NiftiSpheresMasker` handles radii smaller
+   than the size of a voxel.
+ - :class:`nilearn.regions.RegionExtractor` handles data containing NaNs.
+ - Confound regression no longer systematically forces normalization of
+   the confounds.
+ - Force time series normalization in
+   :class:`nilearn.connectome.ConnectivityMeasure`
+   and check the dimensionality of the input.
+ - Fixed `nilearn._utils.numpy_conversions.csv_to_array`, which could
+   consider valid CSV files invalid.
+
+API changes summary
+...................
+ - Deprecated dataset downloading functions have been removed.
+ - The refresh rate of the download progress message has been lowered to
+   make CircleCI logs sparser.
+
+Contributors
+.............
+
+Contributors (from ``git shortlog -ns 0.2.1..0.2.2``)::
+
+    39  Kamalakar Daddy
+    22  Alexandre Abraham
+    21  Loïc Estève
+    19  Gael Varoquaux
+    12  Alexandre Abadie
+     7  Salma
+     3  Danilo Bzdok
+     1  Arthur Mensch
+     1  Ben Cipollini
+     1  Elvis Dohmatob
+     1  Óscar Nájera
+
 0.2.1
 ======
 
@@ -227,3 +966,4 @@ Contributors (from ``git shortlog -ns 0.1``)::
    1 Matthias Ekman
    1 Michael Waskom
    1 Vincent Michel
+
diff --git a/examples/.gitignore b/examples/.gitignore
index b2efbc834e..e5e77a320a 100644
--- a/examples/.gitignore
+++ b/examples/.gitignore
@@ -1,3 +1,3 @@
 *.nii
 *.nii.gz
-*.png
\ No newline at end of file
+*.png
diff --git a/examples/01_plotting/README.txt b/examples/01_plotting/README.txt
new file mode 100644
index 0000000000..eb0027b784
--- /dev/null
+++ b/examples/01_plotting/README.txt
@@ -0,0 +1,4 @@
+Visualization of brain images
+-----------------------------
+
+See :ref:`plotting` for more details.
diff --git a/examples/01_plotting/plot_3d_map_to_surface_projection.py b/examples/01_plotting/plot_3d_map_to_surface_projection.py
new file mode 100644
index 0000000000..c8d08a0e6e
--- /dev/null
+++ b/examples/01_plotting/plot_3d_map_to_surface_projection.py
@@ -0,0 +1,103 @@
+"""
+Making a surface plot of a 3D statistical map
+=============================================
+
+Project a 3D statistical map onto a cortical mesh using
+:func:`nilearn.surface.vol_to_surf`. Display a surface plot of the
+projected map using :func:`nilearn.plotting.plot_surf_stat_map`.
+ +""" + +############################################################################## +# Get a statistical map +# --------------------- + +from nilearn import datasets + +motor_images = datasets.fetch_neurovault_motor_task() +stat_img = motor_images.images[0] + + +############################################################################## +# Get a cortical mesh +# ------------------- + +fsaverage = datasets.fetch_surf_fsaverage() + +############################################################################## +# Sample the 3D data around each node of the mesh +# ----------------------------------------------- + +from nilearn import surface + +texture = surface.vol_to_surf(stat_img, fsaverage.pial_right) + +############################################################################## +# Plot the result +# --------------- + +from nilearn import plotting + +plotting.plot_surf_stat_map(fsaverage.infl_right, texture, hemi='right', + title='Surface right hemisphere', colorbar=True, + threshold=1., bg_map=fsaverage.sulc_right) + +############################################################################## +# Plot 3D image for comparison +# ---------------------------- + +plotting.plot_glass_brain(stat_img, display_mode='r', plot_abs=False, + title='Glass brain', threshold=2.) + +plotting.plot_stat_map(stat_img, display_mode='x', threshold=1., + cut_coords=range(0, 51, 10), title='Slices') + + +############################################################################## +# Plot with higher-resolution mesh +# -------------------------------- +# +# `fetch_surf_fsaverage` takes a "mesh" argument which specifies +# wether to fetch the low-resolution fsaverage5 mesh, or the high-resolution +# fsaverage mesh. using mesh="fsaverage" will result in more memory usage and +# computation time, but finer visualizations. + +big_fsaverage = datasets.fetch_surf_fsaverage('fsaverage') +big_texture = surface.vol_to_surf(stat_img, big_fsaverage.pial_right) + +plotting.plot_surf_stat_map(big_fsaverage.infl_right, + big_texture, hemi='right', colorbar=True, + title='Surface right hemisphere: fine mesh', + threshold=1., bg_map=big_fsaverage.sulc_right) + + +plotting.show() + + +############################################################################## +# 3D visualization in a web browser +# --------------------------------- +# An alternative to :func:`nilearn.plotting.plot_surf_stat_map` is to use +# :func:`nilearn.plotting.view_surf` or +# :func:`nilearn.plotting.view_img_on_surf` that give more interactive +# visualizations in a web browser. See :ref:`interactive-surface-plotting` for +# more details. 
+
+view = plotting.view_surf(fsaverage.infl_right, texture, threshold='90%',
+                          bg_map=fsaverage.sulc_right)
+# uncomment this to open the plot in a web browser:
+# view.open_in_browser()
+
+##############################################################################
+# In a Jupyter notebook, if ``view`` is the output of a cell, it will
+# be displayed below the cell.
+
+view
+
+##############################################################################
+# We don't need to do the projection ourselves; we can use
+# view_img_on_surf:
+
+view = plotting.view_img_on_surf(stat_img, threshold='90%')
+# view.open_in_browser()
+
+view
diff --git a/examples/manipulating_visualizing/plot_atlas.py b/examples/01_plotting/plot_atlas.py
similarity index 59%
rename from examples/manipulating_visualizing/plot_atlas.py
rename to examples/01_plotting/plot_atlas.py
index 0f3c944efc..18202b1567 100644
--- a/examples/manipulating_visualizing/plot_atlas.py
+++ b/examples/01_plotting/plot_atlas.py
@@ -5,13 +5,22 @@
 Plot the regions of a reference atlas (here the Harvard-Oxford atlas).
 """
 
+##########################################################################
+# Retrieving the atlas data
+# -------------------------
+
 from nilearn import datasets
-from nilearn import plotting
 
 dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
 atlas_filename = dataset.maps
 
 print('Atlas ROIs are located at: %s' % atlas_filename)
 
+###########################################################################
+# Visualizing the Harvard-Oxford atlas
+# ------------------------------------
+
+from nilearn import plotting
+
 plotting.plot_roi(atlas_filename, title="Harvard Oxford atlas")
 plotting.show()
diff --git a/examples/01_plotting/plot_colormaps.py b/examples/01_plotting/plot_colormaps.py
new file mode 100644
index 0000000000..a91332bdbc
--- /dev/null
+++ b/examples/01_plotting/plot_colormaps.py
@@ -0,0 +1,50 @@
+"""
+Matplotlib colormaps in Nilearn
+================================
+
+Visualize the HCP connectome workbench color maps shipped with Nilearn,
+which can be used for plotting brain images on the surface.
+
+See :ref:`surface-plotting` for surface plotting details.
+""" +import numpy as np +import matplotlib.pyplot as plt + +from nilearn.plotting.cm import _cmap_d as nilearn_cmaps + +########################################################################### +# Plot color maps +# ---------------- + +nmaps = len(nilearn_cmaps) +a = np.outer(np.arange(0, 1, 0.01), np.ones(10)) + +# Initialize the figure +plt.figure(figsize=(10, 4.2)) +plt.subplots_adjust(top=0.4, bottom=0.05, left=0.01, right=0.99) + +for index, cmap in enumerate(nilearn_cmaps): + plt.subplot(1, nmaps + 1, index + 1) + plt.imshow(a, cmap=nilearn_cmaps[cmap]) + plt.axis('off') + plt.title(cmap, fontsize=10, va='bottom', rotation=90) + +########################################################################### +# Plot matplotlib color maps +# -------------------------- +plt.figure(figsize=(10, 5)) +plt.subplots_adjust(top=0.8, bottom=0.05, left=0.01, right=0.99) +deprecated_cmaps = ['Vega10', 'Vega20', 'Vega20b', 'Vega20c', 'spectral'] +m_cmaps = [] +for m in plt.cm.datad: + if not m.endswith("_r") and m not in deprecated_cmaps: + m_cmaps.append(m) +m_cmaps.sort() + +for index, cmap in enumerate(m_cmaps): + plt.subplot(1, len(m_cmaps) + 1, index + 1) + plt.imshow(a, cmap=plt.get_cmap(cmap), aspect='auto') + plt.axis('off') + plt.title(cmap, fontsize=10, va='bottom', rotation=90) + +plt.show() diff --git a/examples/01_plotting/plot_demo_glass_brain.py b/examples/01_plotting/plot_demo_glass_brain.py new file mode 100644 index 0000000000..3535de189b --- /dev/null +++ b/examples/01_plotting/plot_demo_glass_brain.py @@ -0,0 +1,42 @@ +""" +Glass brain plotting in nilearn +=============================== + +See :ref:`plotting` for more plotting functionalities. +""" + + +############################################################################### +# Retrieve data from Internet +# --------------------------- + +from nilearn import datasets + +motor_images = datasets.fetch_neurovault_motor_task() +stat_img = motor_images.images[0] + +############################################################################### +# Glass brain plotting: whole brain sagittal cuts +# ----------------------------------------------- + +from nilearn import plotting + +plotting.plot_glass_brain(stat_img, threshold=3) + +############################################################################### +# Glass brain plotting: black backgrond +# ------------------------------------- +# On a black background (option "black_bg"), and with only the x and +# the z view (option "display_mode"). +plotting.plot_glass_brain( + stat_img, title='plot_glass_brain', + black_bg=True, display_mode='xz', threshold=3) + +############################################################################### +# Glass brain plotting: Hemispheric sagittal cuts +# ----------------------------------------------- +plotting.plot_glass_brain(stat_img, + title='plot_glass_brain with display_mode="lyrz"', + display_mode='lyrz', threshold=3) + +plotting.show() diff --git a/examples/01_plotting/plot_demo_glass_brain_extensive.py b/examples/01_plotting/plot_demo_glass_brain_extensive.py new file mode 100644 index 0000000000..8d752a6aa9 --- /dev/null +++ b/examples/01_plotting/plot_demo_glass_brain_extensive.py @@ -0,0 +1,213 @@ +""" +Glass brain plotting in nilearn (all options) +============================================= + +First part of this example goes through different options of the +:func:`nilearn.plotting.plot_glass_brain` function (including plotting +negative values). 
+
+The second part goes through a selection of the same options of the same
+glass brain function, but with the plots drawn as contours.
+
+See :ref:`plotting` for more plotting functionalities and
+:ref:`Section 4.3 ` for more details about display objects
+in Nilearn.
+
+Also, see :func:`nilearn.datasets.fetch_neurovault_motor_task` for details
+about the plotting data and associated meta-data.
+"""
+
+
+###############################################################################
+# Retrieve the data
+# ------------------
+#
+# Nilearn comes with a set of functions that download public data from the
+# Internet.
+#
+# Let us first see where the data will be downloaded and stored on our disk:
+#
+from nilearn import datasets
+print('Datasets shipped with nilearn are stored in: %r' % datasets.get_data_dirs())
+
+###############################################################################
+# Let us now retrieve a motor task contrast map
+# corresponding to a group one-sample t-test
+
+motor_images = datasets.fetch_neurovault_motor_task()
+stat_img = motor_images.images[0]
+# stat_img is just the name of the file that we downloaded
+print(stat_img)
+
+###############################################################################
+# Demo glass brain plotting
+# --------------------------
+from nilearn import plotting
+
+# Whole-brain sagittal cuts, with the map thresholded at 3
+plotting.plot_glass_brain(stat_img, threshold=3)
+
+
+###############################################################################
+# With a colorbar
+plotting.plot_glass_brain(stat_img, threshold=3, colorbar=True)
+
+
+###############################################################################
+# Black background, and only the (x, z) cuts
+plotting.plot_glass_brain(stat_img, title='plot_glass_brain',
+                          black_bg=True, display_mode='xz', threshold=3)
+
+
+###############################################################################
+# Plotting the sign of the activation by setting plot_abs to False
+plotting.plot_glass_brain(stat_img, threshold=0, colorbar=True,
+                          plot_abs=False)
+
+
+###############################################################################
+# The sign of the activation and a colorbar
+plotting.plot_glass_brain(stat_img, threshold=3,
+                          colorbar=True, plot_abs=False)
+
+
+###############################################################################
+# Different projections for the left and right hemispheres
+# ---------------------------------------------------------
+#
+# Hemispheric sagittal cuts
+plotting.plot_glass_brain(stat_img,
+                          title='plot_glass_brain with display_mode="lzr"',
+                          black_bg=True, display_mode='lzr', threshold=3)
+
+###############################################################################
+plotting.plot_glass_brain(stat_img, threshold=0, colorbar=True,
+                          title='plot_glass_brain with display_mode="lyrz"',
+                          plot_abs=False, display_mode='lyrz')
+
+###############################################################################
+# Demo glass brain plotting with contours and with fillings
+# ---------------------------------------------------------
+# To plot maps with contours, we assign the return value of the plotting
+# function to a variable, which gives access to the display features that
+# are inherited automatically.
+# In this case, we focus on using add_contours.
+# First, we store the plotting object in "display", with the first
+# argument set to None, since we want an empty glass brain on which to
+# plot the statistical maps with "add_contours"
display = plotting.plot_glass_brain(None)
+# Here, we project statistical maps
+display.add_contours(stat_img)
+# and a title
+display.title('"stat_img" on glass brain without threshold')
+
+###############################################################################
+# Plotting with `filled=True` fills the contours. Here, we are not
+# specifying levels
+display = plotting.plot_glass_brain(None)
+# Here, we project statistical maps with filled=True
+display.add_contours(stat_img, filled=True)
+# and a title
+display.title('Same map but with fillings in the contours')
+
+###############################################################################
+# Here, we input a specific level (cut-off) for the statistical map. In
+# other words, we threshold our statistical map.
+
+# Here, we set the threshold using the parameter `levels`, with its value
+# given in a list, and choose the color red.
+display = plotting.plot_glass_brain(None)
+display.add_contours(stat_img, levels=[3.], colors='r')
+display.title('"stat_img" on glass brain with threshold')
+
+###############################################################################
+# The same demonstration, but now including filled=True
+display = plotting.plot_glass_brain(None)
+display.add_contours(stat_img, filled=True, levels=[3.], colors='r')
+display.title('Same demonstration but using fillings inside contours')
+
+##############################################################################
+# For plotting with a black background, `black_bg` should be set in
+# `plot_glass_brain`
+
+# We can set a black background using black_bg=True
+display = plotting.plot_glass_brain(None, black_bg=True)
+display.add_contours(stat_img, levels=[3.], colors='g')
+display.title('"stat_img" on glass brain with black background')
+
+##############################################################################
+# Black background plotting with filled contours
+display = plotting.plot_glass_brain(None, black_bg=True)
+display.add_contours(stat_img, filled=True, levels=[3.], colors='g')
+display.title('Glass brain with black background and filled in contours')
+
+##############################################################################
+# Display contour projections in both hemispheres
+# -------------------------------------------------
+# The key argument to vary here is `display_mode` for hemispheric plotting
+
+# Now, display_mode is chosen as 'lr' for both hemispheric plots
+display = plotting.plot_glass_brain(None, display_mode='lr')
+display.add_contours(stat_img, levels=[3.], colors='r')
+display.title('"stat_img" on glass brain only "l" "r" hemispheres')
+
+##############################################################################
+# Filled contours in both hemispheres, just by adding filled=True
+display = plotting.plot_glass_brain(None, display_mode='lr')
+display.add_contours(stat_img, filled=True, levels=[3.], colors='r')
+display.title('Filled contours on glass brain only "l" "r" hemispheres')
+
+##############################################################################
+# Showing both positive and negative activations with `plot_abs` in
+# `plot_glass_brain`
+
+# By default, the parameter `plot_abs` is True; the sign of the
+# activations can be displayed by changing `plot_abs` to False
+display = plotting.plot_glass_brain(None, plot_abs=False, display_mode='lzry')
+display.add_contours(stat_img)
+display.title("Contours with both signs of activation without threshold")
+
+##############################################################################
+# Now, just adding filled=True to fill the contours for both positive and
+# negative activations
+display = plotting.plot_glass_brain(None, plot_abs=False, display_mode='lzry')
+display.add_contours(stat_img, filled=True)
+display.title("Filled contours with both signs of activation without threshold")
+
+
+##############################################################################
+# Displaying both signs (positive and negative) of activation with a
+# threshold, i.e. thresholding by adding the argument `levels` in
+# add_contours.
+
+import numpy as np
+display = plotting.plot_glass_brain(None, plot_abs=False, display_mode='lzry')
+
+# In add_contours, we give two values through the argument `levels`,
+# corresponding to the thresholds of the contours we want to draw: one is
+# positive and the other is negative. We give a list of `colors` as an
+# argument to associate a different color with each contour. Additionally,
+# we choose to plot the contours with thick lines; for linewidths, a
+# single value is enough, and it is used for both contours.
+display.add_contours(stat_img, levels=[-2.8, 3.], colors=['b', 'r'],
+                     linewidths=4.)
+display.title('Contours with sign of activations with threshold')
+
+##############################################################################
+# The same display demonstration as above, but just adding filled=True to
+# fill the inside of the contours.
+
+# Unlike in the previous plot, here we specify each sign one at a time: we
+# plot the negative values first, followed by the positive values.
+
+# First, we fetch our display object with the same parameters as above
+display = plotting.plot_glass_brain(None, plot_abs=False, display_mode='lzry')
+
+# Second, we plot the negative activations, with `levels` given as the
+# negative activation values in a list. The lower bound should be kept at
+# -infinity
+display.add_contours(stat_img, filled=True, levels=[-np.inf, -2.8],
+                     colors='b')
+# Next, within the same plotting object, we plot the positive activations
+display.add_contours(stat_img, filled=True, levels=[3.], colors='r')
+display.title('Now same plotting but with filled contours')
+
+# Finally, displaying them
+plotting.show()
diff --git a/examples/01_plotting/plot_demo_more_plotting.py b/examples/01_plotting/plot_demo_more_plotting.py
new file mode 100644
index 0000000000..2fdc4d132b
--- /dev/null
+++ b/examples/01_plotting/plot_demo_more_plotting.py
@@ -0,0 +1,228 @@
+"""
+More plotting tools from nilearn
+================================
+
+In this example, we demonstrate how to use plotting options from
+nilearn that are essential for visualizing brain image analysis results.
+
+We emphasize the use of parameters such as `display_mode` and `cut_coords`
+with the plotting function :func:`nilearn.plotting.plot_stat_map`. Also,
+we show how to use various features such as `add_edges`, `add_contours`
+and `add_markers`, essential for visualizing region-of-interest or mask
+images overlaid on a subject-specific anatomical/EPI image.
+The display features shown here are inherited from the
+:class:`nilearn.plotting.displays.OrthoSlicer` class.
+
+The parameter `display_mode` is used to draw brain slices along given
+specific directions, where a direction can be one of 'ortho',
+'x', 'y', 'z', 'xy', 'xz', 'yz', whereas the parameter `cut_coords`
+is used to specify a limited number of slices to visualize along the
+given slice direction. The parameter `cut_coords` can also be used
+to place the cuts at specific coordinates in MNI space, in accordance
+with the chosen slice direction. This helps us point to the specific
+locations of activation within the brain slices.
+
+See :ref:`plotting` for more details.
+"""
+
+###############################################################################
+# First, we retrieve data from nilearn-provided (general-purpose) datasets
+# -------------------------------------------------------------------------
+
+from nilearn import datasets
+
+# haxby dataset, for an anatomical image, EPI images and masks
+haxby_dataset = datasets.fetch_haxby()
+haxby_anat_filename = haxby_dataset.anat[0]
+haxby_mask_filename = haxby_dataset.mask_vt[0]
+haxby_func_filename = haxby_dataset.func[0]
+
+# one motor contrast map from NeuroVault
+motor_images = datasets.fetch_neurovault_motor_task()
+stat_img = motor_images.images[0]
+
+
+########################################
+# Now we show how to visualize the retrieved datasets using plotting
+# tools from nilearn.
+
+from nilearn import plotting
+
+########################################
+# Visualizing 'sagittal', 'coronal' and 'axial' views with given coordinates
+# ----------------------------------------------------------------------------
+# The first argument is a path to the filename of a contrast map. The
+# optional argument `display_mode` is given the string 'ortho' to
+# visualize the map along the three directions x, y, z, and the optional
+# `cut_coords` argument is here a list of integers giving the coordinate
+# of each slice in the order [x, y, z]. By default the `colorbar` argument
+# is set to True in plot_stat_map.
+plotting.plot_stat_map(stat_img, display_mode='ortho',
+                       cut_coords=[36, -27, 60],
+                       title="display_mode='ortho', cut_coords=[36, -27, 60]")
+
+########################################
+# Visualizing a single 'axial' view with number of cuts=5
+# --------------------------------------------------------
+# In this type of visualization, the `display_mode` argument is given the
+# string 'z' for the axial direction, and `cut_coords` the integer 5
+# (without a list), implying that at most 5 slices should be cut.
+# The coordinates at which to cut the slices are selected automatically
+plotting.plot_stat_map(stat_img, display_mode='z', cut_coords=5,
+                       title="display_mode='z', cut_coords=5")
+
+########################################
+# Visualizing a single 'sagittal' view with only two slices
+# -----------------------------------------------------------
+# In this type, `display_mode` should be given the string 'x' for the
+# sagittal view, and the coordinates should be given as integers in a list
+plotting.plot_stat_map(stat_img, display_mode='x',
+                       cut_coords=[-36, 36],
+                       title="display_mode='x', cut_coords=[-36, 36]")
+
+########################################
+# Visualizing a 'coronal' view with a single cut
+# ------------------------------------------------
+# For the coronal view, `display_mode` is given the string 'y' and
+# `cut_coords` the integer 1, not a list, for a single cut. The
+# coordinate is selected automatically
+plotting.plot_stat_map(stat_img, display_mode='y', cut_coords=1,
+                       title="display_mode='y', cut_coords=1")
+
+########################################
+# Visualizing without a colorbar on the right side
+# -------------------------------------------------
+# The argument `colorbar` should be given as False to show plots without
+# a colorbar on the right side.
+plotting.plot_stat_map(stat_img, display_mode='z',
+                       cut_coords=1, colorbar=False,
+                       title="display_mode='z', cut_coords=1, colorbar=False")
+
+########################################
+# Visualizing two views, 'sagittal' and 'axial', with given coordinates
+# -------------------------------------------------------------------------
+# The argument display_mode='xz', where 'x' is for the sagittal and 'z'
+# for the axial view. The `cut_coords` argument should match the number
+# of views; therefore two integers are given in a list to select the
+# slices to be displayed
+plotting.plot_stat_map(stat_img, display_mode='xz',
+                       cut_coords=[36, 60],
+                       title="display_mode='xz', cut_coords=[36, 60]")
+
+########################################
+# Changing the views to 'coronal' and 'sagittal' views with coordinates
+# -----------------------------------------------------------------------
+# display_mode='yx' gives coronal and sagittal views; the coordinates
+# are assigned in the order of the display_mode directions
+plotting.plot_stat_map(stat_img, display_mode='yx',
+                       cut_coords=[-27, 36],
+                       title="display_mode='yx', cut_coords=[-27, 36]")
+
+########################################
+# Now, the views are changed to 'coronal' and 'axial' views with coordinates
+# -----------------------------------------------------------------------------
+
+plotting.plot_stat_map(stat_img, display_mode='yz',
+                       cut_coords=[-27, 60],
+                       title="display_mode='yz', cut_coords=[-27, 60]")
+
+###############################################################################
+# Demonstrating various display features
+# ---------------------------------------
+# In the second part, we demonstrate the various add_* features from
+# nilearn, each of which is helpful for presenting brain imaging results
+# for further interpretation.
+
+# Import the image processing tool for basic processing of the functional
+# brain image
+from nilearn import image
+
+# Compute the voxel-wise mean functional image across the time dimension.
+# We now have a 3D functional image, assigned to mean_haxby_img
+mean_haxby_img = image.mean_img(haxby_func_filename)
+
+########################################
+# Showing how to use `add_edges`
+# ------------------------------
+# Now let us see how to use `add_edges`, a method useful for checking
+# coregistration by overlaying the anatomical image as edges (red) on top
+# of the mean functional image (background), both from the same subject.
+
+# First, we call the `plot_anat` plotting function, with a background
+# image as the first argument, in this case the mean fMRI image.
+
+display = plotting.plot_anat(mean_haxby_img, title="add_edges")
+
+# We can now use the add_edges method on the plotting object named
+# display. The first argument is the anatomical image; by default the
+# edges are displayed in red 'r' (green 'g' and blue 'b' are other
+# possible color choices).
+display.add_edges(haxby_anat_filename)
+
+########################################
+# How to use `add_contours`
+# -------------------------
+# Plotting the outline of the mask (red) on top of the mean EPI image with
+# `add_contours`. This method is useful for region-specific interpretation
+# of brain images
+
+# As seen before, we call the `plot_anat` function with a background image
+# as the first argument, in this case again the mean fMRI image, and the
+# argument `cut_coords` as a list for a manual cut, with the coordinates
+# pointing at masked brain regions
+display = plotting.plot_anat(mean_haxby_img, title="add_contours",
+                             cut_coords=[-34, -39, -9])
+# Now use `add_contours` on the display object, with the path to a mask
+# image from the Haxby dataset as the first argument, the argument
+# `levels` given as a list of values selecting the particular contour
+# level to display, and the argument `colors` specified as red 'r' to
+# draw the edges in red.
+# See the help of matplotlib.pyplot.contour for more options with this
+# method
+display.add_contours(haxby_mask_filename, levels=[0.5], colors='r')
+
+########################################
+# Plotting the outline of the mask (blue) with color fillings, using the
+# same method `add_contours`.
+
+display = plotting.plot_anat(mean_haxby_img,
+                             title="add_contours with filled=True",
+                             cut_coords=[-34, -39, -9])
+
+# By default, `add_contours` shows no color fillings. To see contours
+# with color fillings, use the argument filled=True. The contour color is
+# changed to blue 'b', and alpha=0.7 sets the transparency of the color
+# fillings.
+# See the help of matplotlib.pyplot.contourf for more options (applicable
+# when filled is True)
+display.add_contours(haxby_mask_filename, filled=True, alpha=0.7,
+                     levels=[0.5], colors='b')
+
+#########################################
+# Plotting seeds using `add_markers`
+# ----------------------------------
+# Plotting seed regions of interest as spheres using the new feature
+# `add_markers`, with the MNI coordinates of interest.
+
+display = plotting.plot_anat(mean_haxby_img, title="add_markers",
+                             cut_coords=[-34, -39, -9])
+
+# The coordinates of the seed regions are specified in the first
+# argument; the second argument, `marker_color`, gives the color of the
+# sphere, in this case yellow 'y', and the third argument, `marker_size`,
+# gives the size of the sphere
+coords = [(-34, -39, -9)]
+display.add_markers(coords, marker_color='y', marker_size=100)
+
+###############################################################################
+# Finally, saving the plots to file in two different ways
+
+# Contrast maps plotted with the function `plot_stat_map` can be saved
+# using the built-in parameter output_file, given as a string of the form
+# filename + extension. Valid extensions are .png, .pdf and .svg
+plotting.plot_stat_map(stat_img,
+                       title='Using plot_stat_map output_file',
+                       output_file='plot_stat_map.png')
+
+########################################
+# Another way of saving plots is using the 'savefig' option of the
+# display object
+display = plotting.plot_stat_map(stat_img,
+                                 title='Using display savefig')
+display.savefig('plot_stat_map_from_display.png')
+# In non-interactive settings, make sure you close your displays
+display.close()
+
+plotting.show()
diff --git a/examples/01_plotting/plot_demo_plotting.py b/examples/01_plotting/plot_demo_plotting.py
new file mode 100644
index 0000000000..d5871e550b
--- /dev/null
+++ b/examples/01_plotting/plot_demo_plotting.py
@@ -0,0 +1,110 @@
+"""
+Plotting tools in nilearn
+==========================
+
+Nilearn comes with a set of plotting functions for easy visualization of
+Nifti-like images, such as statistical maps mapped onto anatomical images
+or onto a glass brain representation, as well as anatomical,
+functional/EPI, and region-specific mask images.
+
+See :ref:`plotting` for more details.
+"""
+
+###############################################################################
+# Retrieve data from nilearn-provided (general-purpose) datasets
+# ---------------------------------------------------------------
+
+from nilearn import datasets
+
+# haxby dataset, for EPI images and masks
+haxby_dataset = datasets.fetch_haxby()
+
+# print basic information on the dataset
+print('First subject anatomical nifti image (3D) is at: %s' %
+      haxby_dataset.anat[0])
+print('First subject functional nifti image (4D) is at: %s' %
+      haxby_dataset.func[0])  # 4D data
+
+haxby_anat_filename = haxby_dataset.anat[0]
+haxby_mask_filename = haxby_dataset.mask_vt[0]
+haxby_func_filename = haxby_dataset.func[0]
+
+# one motor contrast map from NeuroVault
+motor_images = datasets.fetch_neurovault_motor_task()
+stat_img = motor_images.images[0]
+
+###############################################################################
+# Plotting statistical maps with the function `plot_stat_map`
+# ------------------------------------------------------------
+
+from nilearn import plotting
+
+# Visualizing the t-map image on the EPI template, with manual
+# positioning of the coordinates using cut_coords given as a list
+plotting.plot_stat_map(stat_img,
+                       threshold=3, title="plot_stat_map",
+                       cut_coords=[36, -27, 66])
+
+###############################################################################
+# Making interactive plots with the function `view_stat_map`
+# -----------------------------------------------------------
+# An alternative to :func:`nilearn.plotting.plot_stat_map` is to use
+# :func:`nilearn.plotting.view_stat_map`, which gives more interactive
+# visualizations in a web browser. See :ref:`interactive-stat-map-plotting`
+# for more details.
+
+view = plotting.view_stat_map(stat_img, threshold=3)
+
+# uncomment this to open the plot in a web browser:
+# view.open_in_browser()
+
+##############################################################################
+# In a Jupyter notebook, if ``view`` is the output of a cell, it will
+# be displayed below the cell.
+
+view
+
+###############################################################################
+# Plotting statistical maps in a glass brain with the function `plot_glass_brain`
+# ---------------------------------------------------------------------------------
+#
+# Now the t-map image is mapped onto the glass brain representation, where
+# the glass brain is always a fixed background template
+plotting.plot_glass_brain(stat_img, title='plot_glass_brain',
+                          threshold=3)
+
+###############################################################################
+# Plotting anatomical images with the function `plot_anat`
+# ---------------------------------------------------------
+#
+# Visualizing the anatomical image of the haxby dataset
+plotting.plot_anat(haxby_anat_filename, title="plot_anat")
+
+###############################################################################
+# Plotting ROIs (here the mask) with the function `plot_roi`
+# -----------------------------------------------------------
+#
+# Visualizing the ventral temporal region image from the haxby dataset,
+# overlaid on the subject-specific anatomical image, with the coordinates
+# positioned automatically on the region of interest (ROI)
+plotting.plot_roi(haxby_mask_filename, bg_img=haxby_anat_filename,
+                  title="plot_roi")
+
+###############################################################################
+# Plotting EPI images with the function `plot_epi`
+# -------------------------------------------------
+
+# Import the image processing tool
+from nilearn import image
+
+# Compute the voxel-wise mean of the functional images across time,
+# basically reducing the functional image from 4D to 3D
+mean_haxby_img = image.mean_img(haxby_func_filename)
+
+# Visualizing the mean image (3D)
+plotting.plot_epi(mean_haxby_img, title="plot_epi")
+
+###############################################################################
+# A call to plotting.show is needed to display the plots when running
+# in script mode (i.e. outside IPython)
+plotting.show()
diff --git a/examples/manipulating_visualizing/plot_dim_plotting.py b/examples/01_plotting/plot_dim_plotting.py
similarity index 58%
rename from examples/manipulating_visualizing/plot_dim_plotting.py
rename to examples/01_plotting/plot_dim_plotting.py
index 13439bcf58..04b7238f78 100644
--- a/examples/manipulating_visualizing/plot_dim_plotting.py
+++ b/examples/01_plotting/plot_dim_plotting.py
@@ -12,39 +12,48 @@
 display ROIs on top of a background image.
 """
 
+#########################################################################
 # Retrieve the data: the localizer dataset with contrast maps
+# -----------------------------------------------------------
 
 from nilearn import datasets
 
-localizer_dataset = datasets.fetch_localizer_contrasts(
-    ["left vs right button press"],
-    n_subjects=2,
-    get_anats=True,
-    get_tmaps=True)
-localizer_anat_filename = localizer_dataset.anats[1]
-localizer_tmap_filename = localizer_dataset.tmaps[1]
+localizer_dataset = datasets.fetch_localizer_button_task(get_anats=True)
+localizer_anat_filename = localizer_dataset.anats[0]
+localizer_tmap_filename = localizer_dataset.tmaps[0]
 
-# Plotting: vary the 'dim' of the background
-from nilearn import plotting
+########################################################################
+# Plotting with enhancement of the background image with dim=-.5
+# ---------------------------------------------------------------
+from nilearn import plotting
 plotting.plot_stat_map(localizer_tmap_filename,
                        bg_img=localizer_anat_filename,
                        cut_coords=(36, -27, 66),
                        threshold=3, title="dim=-.5",
                        dim=-.5)
 
+########################################################################
+# Plotting with no change of contrast in the background image with dim=0
+# ------------------------------------------------------------------------
 plotting.plot_stat_map(localizer_tmap_filename,
                        bg_img=localizer_anat_filename,
                        cut_coords=(36, -27, 66),
                        threshold=3, title="dim=0",
                        dim=0)
 
+########################################################################
+# Plotting with a decrease of contrast in the background image with dim=.5
+# --------------------------------------------------------------------------
 plotting.plot_stat_map(localizer_tmap_filename,
                        bg_img=localizer_anat_filename,
                        cut_coords=(36, -27, 66),
                        threshold=3, title="dim=.5",
                        dim=.5)
 
+########################################################################
+# Plotting with a further decrease of contrast with dim=1
+# ---------------------------------------------------------
 plotting.plot_stat_map(localizer_tmap_filename,
                        bg_img=localizer_anat_filename,
                        cut_coords=(36, -27, 66),
diff --git a/examples/manipulating_visualizing/plot_haxby_masks.py b/examples/01_plotting/plot_haxby_masks.py
similarity index 86%
rename from examples/manipulating_visualizing/plot_haxby_masks.py
rename to examples/01_plotting/plot_haxby_masks.py
index 8930d595c0..acb6bd505c 100644
--- a/examples/manipulating_visualizing/plot_haxby_masks.py
+++ b/examples/01_plotting/plot_haxby_masks.py
@@ -4,8 +4,6 @@
 Small script to plot the masks of the Haxby dataset.
""" -import numpy as np -from scipy import linalg import matplotlib.pyplot as plt from nilearn import datasets @@ -22,12 +20,7 @@ func_filename = haxby_dataset.func[0] mean_img = image.mean_img(func_filename) -z_slice = -24 -from nilearn.image.resampling import coord_transform -affine = mean_img.get_affine() -_, _, k_slice = coord_transform(0, 0, z_slice, - linalg.inv(affine)) -k_slice = np.round(k_slice) +z_slice = -14 fig = plt.figure(figsize=(4, 5.4), facecolor='k') diff --git a/examples/01_plotting/plot_multiscale_parcellations.py b/examples/01_plotting/plot_multiscale_parcellations.py new file mode 100644 index 0000000000..7e6c51545c --- /dev/null +++ b/examples/01_plotting/plot_multiscale_parcellations.py @@ -0,0 +1,46 @@ +""" +Visualizing multiscale functional brain parcellations +===================================================== + +This example shows how to download and fetch brain parcellations of +multiple networks using :func:`nilearn.datasets.fetch_atlas_basc_multiscale_2015` +and visualize them using plotting function :func:`nilearn.plotting.plot_roi`. + +We show here only three different networks of 'symmetric' version. For more +details about different versions and different networks, please refer to its +documentation. +""" + +############################################################################### +# Retrieving multiscale group brain parcellations +# ----------------------------------------------- + +# import datasets module and use `fetch_atlas_basc_multiscale_2015` function +from nilearn import datasets + +parcellations = datasets.fetch_atlas_basc_multiscale_2015(version='sym') + +# We show here networks of 64, 197, 444 +networks_64 = parcellations['scale064'] +networks_197 = parcellations['scale197'] +networks_444 = parcellations['scale444'] + +############################################################################### +# Visualizing brain parcellations +# ------------------------------- + +# import plotting module and use `plot_roi` function, since the maps are in 3D +from nilearn import plotting + +# The coordinates of all plots are selected automatically by itself +# We manually change the colormap of our choice +plotting.plot_roi(networks_64, cmap=plotting.cm.bwr, + title='64 regions of brain clusters') + +plotting.plot_roi(networks_197, cmap=plotting.cm.bwr, + title='197 regions of brain clusters') + +plotting.plot_roi(networks_444, cmap=plotting.cm.bwr_r, + title='444 regions of brain clusters') + +plotting.show() diff --git a/examples/01_plotting/plot_overlay.py b/examples/01_plotting/plot_overlay.py new file mode 100644 index 0000000000..2d9e4d4b5b --- /dev/null +++ b/examples/01_plotting/plot_overlay.py @@ -0,0 +1,72 @@ +""" +Visualizing a probablistic atlas: the default mode in the MSDL atlas +===================================================================== + +Visualizing a probablistic atlas requires visualizing the different +maps that compose it. + +Here we represent the nodes constituting the default mode network in the +`MSDL atlas +`_. 
+
+The tools that we need to leverage are:
+
+ * :func:`nilearn.image.index_img` to retrieve the various maps composing
+   the atlas
+
+ * Adding overlays on an existing brain display, to plot each of these
+   maps
+
+Alternatively, :func:`nilearn.plotting.plot_prob_atlas` allows plotting
+all the maps in one step, with less control over the plot (see below).
+
+"""
+############################################################################
+# Fetching the probabilistic atlas - MSDL atlas
+# ---------------------------------------------
+from nilearn import datasets
+
+atlas_data = datasets.fetch_atlas_msdl()
+atlas_filename = atlas_data.maps
+
+#############################################################################
+# Visualizing a probabilistic atlas with plot_stat_map and add_overlay
+# ---------------------------------------------------------------------
+from nilearn import plotting, image
+
+# First plot the map for the PCC: index 4 in the atlas
+display = plotting.plot_stat_map(image.index_img(atlas_filename, 4),
+                                 colorbar=False,
+                                 title="DMN nodes in MSDL atlas")
+
+# Now add as overlays the maps for the ACC and the left and right
+# parietal nodes
+display.add_overlay(image.index_img(atlas_filename, 5),
+                    cmap=plotting.cm.black_blue)
+display.add_overlay(image.index_img(atlas_filename, 6),
+                    cmap=plotting.cm.black_green)
+display.add_overlay(image.index_img(atlas_filename, 3),
+                    cmap=plotting.cm.black_pink)
+
+plotting.show()
+
+
+###############################################################################
+# Visualizing a probabilistic atlas with plot_prob_atlas
+# ======================================================
+#
+# Alternatively, we can create a new 4D image by selecting the 3rd, 4th,
+# 5th and 6th (zero-based) probabilistic maps from the atlas
+# via :func:`nilearn.image.index_img` and use
+# :func:`nilearn.plotting.plot_prob_atlas` (added in version 0.2)
+# to plot the selected nodes in one step.
+#
+# Unlike :func:`nilearn.plotting.plot_stat_map`, this works with 4D images
+
+dmn_nodes = image.index_img(atlas_filename, [3, 4, 5, 6])
+# Note that dmn_nodes is now a 4D image
+print(dmn_nodes.shape)
+####################################
+
+display = plotting.plot_prob_atlas(dmn_nodes,
+                                   cut_coords=(0, -55, 29),
+                                   title="DMN nodes in MSDL atlas")
+plotting.show()
diff --git a/examples/manipulating_visualizing/plot_prob_atlas.py b/examples/01_plotting/plot_prob_atlas.py
similarity index 76%
rename from examples/manipulating_visualizing/plot_prob_atlas.py
rename to examples/01_plotting/plot_prob_atlas.py
index e3c76c0a7c..085c552d27 100644
--- a/examples/manipulating_visualizing/plot_prob_atlas.py
+++ b/examples/01_plotting/plot_prob_atlas.py
@@ -13,6 +13,8 @@
 
 3. "continuous", maps are shown as just color overlays.
 
+A colorbar can optionally be added.
+
 The :func:`nilearn.plotting.plot_prob_atlas` function displays each map
 with each different color which are picked randomly from the colormap
 which is already defined.
@@ -22,7 +24,7 @@
# Load 4D probabilistic atlases
from nilearn import datasets
-# Harvard Oxford Atlas
harvard_oxford = datasets.fetch_atlas_harvard_oxford('cort-prob-2mm')
harvard_oxford_sub = datasets.fetch_atlas_harvard_oxford('sub-prob-2mm')
@@ -35,6 +37,12 @@
# ICBM tissue probability
icbm = datasets.fetch_icbm152_2009()
+# Allen RSN networks
+allen = datasets.fetch_atlas_allen_2011()
+
+# Pauli subcortical atlas
+subcortex = datasets.fetch_atlas_pauli_2017()
+
# Visualization
from nilearn import plotting
@@ -43,12 +51,18 @@
'MSDL': msdl.maps, 'Smith 2009 10 RSNs': smith.rsn10,
'Smith2009 20 RSNs': smith.rsn20, 'Smith2009 70 RSNs': smith.rsn70,
- 'Smith2009 10 Brainmap': smith.bm10,
'Smith2009 20 Brainmap': smith.bm20, 'Smith2009 70 Brainmap': smith.bm70,
- 'ICBM tissues': (icbm['wm'], icbm['gm'], icbm['csf'])}
+ 'ICBM tissues': (icbm['wm'], icbm['gm'], icbm['csf']),
+ 'Allen2011': allen.rsn28,
+ 'Pauli2017 Subcortical Atlas': subcortex.maps,
+ }
for name, atlas in sorted(atlas_types.items()):
plotting.plot_prob_atlas(atlas, title=name)
+# An optional colorbar can be set
+plotting.plot_prob_atlas(smith.bm10, title='Smith2009 10 Brainmap (with'
+ ' colorbar)',
+ colorbar=True)
plotting.show()
diff --git a/examples/01_plotting/plot_surf_atlas.py b/examples/01_plotting/plot_surf_atlas.py new file mode 100644 index 0000000000..df2ea43aaf --- /dev/null +++ b/examples/01_plotting/plot_surf_atlas.py @@ -0,0 +1,98 @@
+"""
+Loading and plotting of a cortical surface atlas
+=================================================
+
+The Destrieux parcellation (Destrieux et al, 2010) in fsaverage5 space as
+distributed with Freesurfer is used as the atlas.
+
+The :func:`nilearn.plotting.plot_surf_roi` function is used
+to plot the parcellation on the pial surface.
+
+See :ref:`plotting` for more details.
+
+References
+----------
+
+Destrieux et al, (2010). Automatic parcellation of human cortical gyri and
+sulci using standard anatomical nomenclature. NeuroImage, 53, 1.
+URL http://dx.doi.org/10.1016/j.neuroimage.2010.06.010.
+"""
+
+###############################################################################
+# Data fetcher
+# ------------
+
+# Retrieve the Destrieux parcellation in fsaverage5 space from nilearn
+from nilearn import datasets
+
+destrieux_atlas = datasets.fetch_atlas_surf_destrieux()
+
+# The parcellation is already loaded into memory
+parcellation = destrieux_atlas['map_left']
+
+# Retrieve fsaverage5 surface dataset for the plotting background.
It contains
+# the pial and inflated versions of the surface template, as well as a
+# sulcal depth map which is used for shading
+fsaverage = datasets.fetch_surf_fsaverage()
+
+# The fsaverage dataset contains file names pointing to the file locations
+print('Fsaverage5 pial surface of left hemisphere is at: %s' %
+ fsaverage['pial_left'])
+print('Fsaverage5 inflated surface of left hemisphere is at: %s' %
+ fsaverage['infl_left'])
+print('Fsaverage5 sulcal depth map of left hemisphere is at: %s' %
+ fsaverage['sulc_left'])
+
+###############################################################################
+# Visualization
+# -------------
+
+# Display Destrieux parcellation on fsaverage5 pial surface using nilearn
+from nilearn import plotting
+
+plotting.plot_surf_roi(fsaverage['pial_left'], roi_map=parcellation,
+ hemi='left', view='lateral',
+ bg_map=fsaverage['sulc_left'], bg_on_data=True,
+ darkness=.5)
+
+###############################################################################
+# Display Destrieux parcellation on inflated fsaverage5 surface
+plotting.plot_surf_roi(fsaverage['infl_left'], roi_map=parcellation,
+ hemi='left', view='lateral',
+ bg_map=fsaverage['sulc_left'], bg_on_data=True,
+ darkness=.5)
+
+###############################################################################
+# Display Destrieux parcellation with different views: posterior
+plotting.plot_surf_roi(fsaverage['infl_left'], roi_map=parcellation,
+ hemi='left', view='posterior',
+ bg_map=fsaverage['sulc_left'], bg_on_data=True,
+ darkness=.5)
+
+###############################################################################
+# Display Destrieux parcellation with different views: ventral
+plotting.plot_surf_roi(fsaverage['infl_left'], roi_map=parcellation,
+ hemi='left', view='ventral',
+ bg_map=fsaverage['sulc_left'], bg_on_data=True,
+ darkness=.5)
+plotting.show()
+
+
+##############################################################################
+# 3D visualization in a web browser
+# ---------------------------------
+# An alternative to :func:`nilearn.plotting.plot_surf_roi` is to use
+# :func:`nilearn.plotting.view_surf` for more interactive
+# visualizations in a web browser. See :ref:`interactive-surface-plotting` for
+# more details.
+
+view = plotting.view_surf(fsaverage.infl_left, parcellation,
+ cmap='gist_ncar', symmetric_cmap=False)
+# uncomment this to open the plot in a web browser:
+# view.open_in_browser()
+
+##############################################################################
+# In a Jupyter notebook, if ``view`` is the output of a cell, it will
+# be displayed below the cell.
+
+view
diff --git a/examples/01_plotting/plot_surf_stat_map.py b/examples/01_plotting/plot_surf_stat_map.py new file mode 100644 index 0000000000..edd5b809dd --- /dev/null +++ b/examples/01_plotting/plot_surf_stat_map.py @@ -0,0 +1,155 @@
+"""
+Seed-based connectivity on the surface
+=======================================
+
+The dataset used here is a subset of the enhanced NKI Rockland sample
+(http://fcon_1000.projects.nitrc.org/indi/enhanced/, Nooner et al, 2012).
+
+Resting state fMRI scans (TR=645ms) of 102 subjects were preprocessed
+(https://github.com/fliem/nki_nilearn) and projected onto the Freesurfer
+fsaverage5 template (Dale et al, 1999, Fischl et al, 1999). For this example
+we use the time series of a single subject's left hemisphere.
+
+The Destrieux parcellation (Destrieux et al, 2010) in fsaverage5 space as
+distributed with Freesurfer is used to select a seed region in the posterior
+cingulate cortex.
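The fsaverage file names printed above are what the loaders in nilearn.surface consume. A minimal sketch, not part of the example, of checking that a mesh and its per-vertex data agree (the printed shapes are those of fsaverage5):

from nilearn import datasets, surface

fsaverage = datasets.fetch_surf_fsaverage()
coords, faces = surface.load_surf_mesh(fsaverage['pial_left'])
sulc = surface.load_surf_data(fsaverage['sulc_left'])
print(coords.shape)  # (10242, 3): one 3D coordinate per mesh vertex
print(faces.shape)   # (20480, 3): triangles given as vertex indices
print(sulc.shape)    # (10242,): one background value per vertex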
+
+Functional connectivity of the seed region to all other cortical nodes in the
+same hemisphere is calculated using the Pearson product-moment correlation
+coefficient.
+
+The :func:`nilearn.plotting.plot_surf_stat_map` function is used
+to plot the resulting statistical map on the (inflated) pial surface.
+
+See also :ref:`for a similar example but using volumetric input data
+`.
+
+See :ref:`plotting` for more details on plotting tools.
+
+References
+----------
+
+Nooner et al, (2012). The NKI-Rockland Sample: A model for accelerating the
+pace of discovery science in psychiatry. Frontiers in neuroscience 6, 152.
+URL http://dx.doi.org/10.3389/fnins.2012.00152
+
+Dale et al, (1999). Cortical surface-based analysis. I. Segmentation and
+surface reconstruction. Neuroimage 9.
+URL http://dx.doi.org/10.1006/nimg.1998.0395
+
+Fischl et al, (1999). Cortical surface-based analysis. II: Inflation,
+flattening, and a surface-based coordinate system. Neuroimage 9.
+http://dx.doi.org/10.1006/nimg.1998.0396
+
+Destrieux et al, (2010). Automatic parcellation of human cortical gyri and
+sulci using standard anatomical nomenclature. NeuroImage, 53, 1.
+URL http://dx.doi.org/10.1016/j.neuroimage.2010.06.010.
+"""
+
+###############################################################################
+# Retrieving the data
+# -------------------
+
+# NKI resting state data from nilearn
+from nilearn import datasets
+
+nki_dataset = datasets.fetch_surf_nki_enhanced(n_subjects=1)
+
+# The nki dictionary contains file names for the data
+# of all downloaded subjects.
+print(('Resting state data of the first subject on the '
+ 'fsaverage5 surface left hemisphere is at: %s' %
+ nki_dataset['func_left'][0]))
+
+# Destrieux parcellation for left hemisphere in fsaverage5 space
+destrieux_atlas = datasets.fetch_atlas_surf_destrieux()
+parcellation = destrieux_atlas['map_left']
+labels = destrieux_atlas['labels']
+
+# Fsaverage5 surface template
+fsaverage = datasets.fetch_surf_fsaverage()
+
+# The fsaverage dataset contains file names pointing to
+# the file locations
+print('Fsaverage5 pial surface of left hemisphere is at: %s' %
+ fsaverage['pial_left'])
+print('Fsaverage5 inflated surface of left hemisphere is at: %s' %
+ fsaverage['infl_left'])
+print('Fsaverage5 sulcal depth map of left hemisphere is at: %s' %
+ fsaverage['sulc_left'])
+
+###############################################################################
+# Extracting the seed time series
+# --------------------------------
+
+# Load resting state time series from nilearn
+from nilearn import surface
+
+timeseries = surface.load_surf_data(nki_dataset['func_left'][0])
+
+# Extract seed region via label
+pcc_region = b'G_cingul-Post-dorsal'
+
+import numpy as np
+pcc_labels = np.where(parcellation == labels.index(pcc_region))[0]
+
+# Extract time series from seed region
+seed_timeseries = np.mean(timeseries[pcc_labels], axis=0)
+
+###############################################################################
+# Calculating seed-based functional connectivity
+# ----------------------------------------------
+
+# Calculate Pearson product-moment correlation coefficient between seed
+# time series and timeseries of all cortical nodes of the hemisphere
+from scipy import stats
+
+stat_map = np.zeros(timeseries.shape[0])
+for i in range(timeseries.shape[0]):
+ stat_map[i] = stats.pearsonr(seed_timeseries, timeseries[i])[0]
+
+# Re-mask previously masked nodes (medial wall)
+stat_map[np.where(np.mean(timeseries, axis=1) == 0)] = 0
+
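The per-node loop above is easy to read but pays Python overhead at every vertex; the same correlation map can be computed in one shot with NumPy. A minimal, equivalent sketch (the helper name is illustrative; the guard handles constant medial-wall time series that the loop would turn into NaNs):

import numpy as np

def seed_correlation(timeseries, seed_timeseries):
    # center every node time series and the seed time series
    node_ts = timeseries - timeseries.mean(axis=1, keepdims=True)
    seed_ts = seed_timeseries - seed_timeseries.mean()
    # Pearson r = covariance / (norm * norm), vectorized over nodes
    with np.errstate(invalid='ignore', divide='ignore'):
        r = node_ts.dot(seed_ts) / (np.linalg.norm(node_ts, axis=1)
                                    * np.linalg.norm(seed_ts))
    # constant (masked) nodes yield 0/0; map them to 0 as the loop's re-masking does
    return np.nan_to_num(r)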
+############################################################################### +# Display ROI on surface + +from nilearn import plotting + +plotting.plot_surf_roi(fsaverage['pial_left'], roi_map=pcc_labels, + hemi='left', view='medial', + bg_map=fsaverage['sulc_left'], bg_on_data=True, + title='PCC Seed') + +############################################################################### +# Display unthresholded stat map with dimmed background +plotting.plot_surf_stat_map(fsaverage['pial_left'], stat_map=stat_map, + hemi='left', view='medial', colorbar=True, + bg_map=fsaverage['sulc_left'], bg_on_data=True, + darkness=.5, title='Correlation map') + +############################################################################### +# Display unthresholded stat map without background map, transparency is +# automatically set to .5, but can also be controlled with the alpha parameter +plotting.plot_surf_stat_map(fsaverage['pial_left'], stat_map=stat_map, + hemi='left', view='medial', colorbar=True, + title='Plotting without background') + +############################################################################### +# Many different options are available for plotting, for example thresholding, +# or using custom colormaps +plotting.plot_surf_stat_map(fsaverage['pial_left'], stat_map=stat_map, + hemi='left', view='medial', colorbar=True, + bg_map=fsaverage['sulc_left'], bg_on_data=True, + cmap='Spectral', threshold=.5, + title='Threshold and colormap') + +############################################################################### +# The plots can be saved to file, in which case the display is closed after +# creating the figure +plotting.plot_surf_stat_map(fsaverage['infl_left'], stat_map=stat_map, + hemi='left', bg_map=fsaverage['sulc_left'], + bg_on_data=True, threshold=.6, colorbar=True, + output_file='plot_surf_stat_map.png') + +plotting.show() diff --git a/examples/01_plotting/plot_surface_projection_strategies.py b/examples/01_plotting/plot_surface_projection_strategies.py new file mode 100644 index 0000000000..af740a0a90 --- /dev/null +++ b/examples/01_plotting/plot_surface_projection_strategies.py @@ -0,0 +1,69 @@ +""" +Technical point: Illustration of the volume to surface sampling schemes +======================================================================= + +In nilearn, :func:`nilearn.surface.vol_to_surf` allows us to measure values of +a 3d volume at the nodes of a cortical mesh, transforming it into surface data. +This data can then be plotted with :func:`nilearn.plotting.plot_surf_stat_map` +for example. + +This script shows, on a toy example, where samples are drawn around each mesh +vertex. Image values are interpolated at each sample location, then these +samples are averaged to produce a value for the vertex. + +Two strategies are available to choose sample locations: they can be spread +along the normal to the mesh, or inside a ball around the vertex. Don't worry +too much about choosing one or the other: they take a similar amount of time +and give almost identical results for most images. 
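In user code, the choice between the two strategies is the ``kind`` argument of :func:`nilearn.surface.vol_to_surf`. A minimal sketch of the user-facing call (the MNI template serves purely as a stand-in volume):

from nilearn import datasets, surface

fsaverage = datasets.fetch_surf_fsaverage()
img = datasets.load_mni152_template()
for kind in ['line', 'ball']:
    # one interpolated, averaged value per vertex of the pial mesh
    texture = surface.vol_to_surf(img, fsaverage.pial_left, kind=kind)
    print(kind, texture.shape)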
+ +""" + +import numpy as np + +import matplotlib +from matplotlib import pyplot as plt +from mpl_toolkits.mplot3d import Axes3D + +from nilearn.surface import surface + + +###################################################################### +# Build a mesh (of a cylinder) +###################################################################### + +N_Z = 5 +N_T = 10 +u, v = np.mgrid[:N_T, :N_Z] +triangulation = matplotlib.tri.Triangulation(u.flatten(), v.flatten()) +angles = u.flatten() * 2 * np.pi / N_T +x, y = np.cos(angles), np.sin(angles) +z = v.flatten() * 2 / N_Z + +mesh = [np.asarray([x, y, z]).T, triangulation.triangles] + + +######################################################################### +# Get the locations from which vol_to_surf would draw its samples +######################################################################### + +line_sample_points = surface._line_sample_locations( + mesh, np.eye(4), segment_half_width=.2, n_points=6) + +ball_sample_points = surface._ball_sample_locations( + mesh, np.eye(4), ball_radius=.15, n_points=20) + + +###################################################################### +# Plot the mesh and the sample locations +###################################################################### + +for sample_points in [line_sample_points, ball_sample_points]: + fig = plt.figure() + ax = plt.subplot(projection='3d') + ax.set_aspect(1) + + ax.plot_trisurf(x, y, z, triangles=triangulation.triangles) + + ax.scatter(*sample_points.T, color='r') + +plt.show() diff --git a/examples/manipulating_visualizing/plot_visualization.py b/examples/01_plotting/plot_visualization.py similarity index 88% rename from examples/manipulating_visualizing/plot_visualization.py rename to examples/01_plotting/plot_visualization.py index 2076202b0c..a52744f313 100644 --- a/examples/manipulating_visualizing/plot_visualization.py +++ b/examples/01_plotting/plot_visualization.py @@ -7,9 +7,11 @@ ############################################################################## # Fetch data +# ---------- from nilearn import datasets -haxby_dataset = datasets.fetch_haxby(n_subjects=1) +# By default 2nd subject will be fetched +haxby_dataset = datasets.fetch_haxby() # print basic information on the dataset print('First anatomical nifti image (3D) located is at: %s' % @@ -19,6 +21,7 @@ ############################################################################## # Visualization +# ------------- from nilearn.image.image import mean_img # Compute the mean EPI: we do the mean along the axis 3, which is time @@ -30,7 +33,7 @@ ############################################################################## # Extracting a brain mask - +# ----------------------- # Simple computation of a mask from the fMRI data from nilearn.masking import compute_epi_mask mask_img = compute_epi_mask(func_filename) @@ -41,7 +44,7 @@ ############################################################################## # Applying the mask to extract the corresponding time series - +# ---------------------------------------------------------- from nilearn.masking import apply_mask masked_data = apply_mask(func_filename, mask_img) @@ -51,7 +54,7 @@ # And now plot a few of these import matplotlib.pyplot as plt plt.figure(figsize=(7, 5)) -plt.plot(masked_data[:2, :150].T) +plt.plot(masked_data[:150, :2]) plt.xlabel('Time [TRs]', fontsize=16) plt.ylabel('Intensity', fontsize=16) plt.xlim(0, 150) diff --git a/examples/01_plotting/plot_visualize_megatrawls_netmats.py 
b/examples/01_plotting/plot_visualize_megatrawls_netmats.py new file mode 100644 index 0000000000..0106d22139 --- /dev/null +++ b/examples/01_plotting/plot_visualize_megatrawls_netmats.py @@ -0,0 +1,34 @@
+"""
+Visualizing Megatrawls Network Matrices from Human Connectome Project
+=====================================================================
+
+This example shows how to fetch network matrices from the HCP beta-release
+of the Functional Connectivity Megatrawl project.
+
+See the :func:`nilearn.datasets.fetch_megatrawls_netmats` documentation for more details.
+"""
+################################################################################
+# Fetching the Megatrawls Network matrices
+# ----------------------------------------
+# Fetching the partial correlation matrices of dimensionality d=300 with
+# timeseries method 'eigen regression'
+from nilearn import datasets
+
+netmats = datasets.fetch_megatrawls_netmats(dimensionality=300,
+ timeseries='eigen_regression',
+ matrices='partial_correlation')
+# The partial correlation matrix, an array of size (300, 300), is stored
+# under the attribute 'correlation_matrices'
+partial_correlation = netmats.correlation_matrices
+
+################################################################################
+# Visualization
+# -------------
+# Import the nilearn plotting module and use its utilities for plotting
+# correlation matrices
+from nilearn import plotting
+
+title = "Partial correlation matrices\n for d=300"
+display = plotting.plot_matrix(partial_correlation, colorbar=True,
+ title=title)
+plotting.show()
diff --git a/examples/decoding/README.txt b/examples/02_decoding/README.txt similarity index 100% rename from examples/decoding/README.txt rename to examples/02_decoding/README.txt diff --git a/examples/decoding/plot_haxby_anova_svm.py b/examples/02_decoding/plot_haxby_anova_svm.py similarity index 53% rename from examples/decoding/plot_haxby_anova_svm.py rename to examples/02_decoding/plot_haxby_anova_svm.py index ad0640539b..737d8719e3 100644 --- a/examples/decoding/plot_haxby_anova_svm.py +++ b/examples/02_decoding/plot_haxby_anova_svm.py @@ -1,6 +1,6 @@
"""
-The Haxby dataset: face vs house in object recognition
-=======================================================
+Decoding with ANOVA + SVM: face vs house in the Haxby dataset
+===============================================================
This example does a simple but efficient decoding on the Haxby dataset:
using feature selection followed by an SVM.
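The core pattern of that example, univariate feature selection chained with a linear SVM inside a single estimator, can be sketched on synthetic data (the shapes and names below are illustrative only, not taken from the dataset):

import numpy as np
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

rng = np.random.RandomState(0)
X = rng.randn(20, 1000)       # 20 samples with 1000 "voxels" each
y = rng.randint(0, 2, 20)     # two conditions, e.g. face vs house

# the ANOVA F-test keeps the 5% most informative features; the SVM
# then classifies in that reduced space
anova_svc = Pipeline([('anova', SelectPercentile(f_classif, percentile=5)),
                      ('svc', SVC(kernel='linear'))])
print(anova_svc.fit(X, y).predict(X[:5]))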
@@ -9,9 +9,11 @@
#############################################################################
# Retrieve the files of the Haxby dataset
+# ----------------------------------------
from nilearn import datasets
-haxby_dataset = datasets.fetch_haxby_simple()
+# By default the 2nd subject will be fetched
+haxby_dataset = datasets.fetch_haxby()
# print basic information on the dataset
print('Mask nifti image (3D) is located at: %s' % haxby_dataset.mask)
@@ -20,36 +22,44 @@
#############################################################################
# Load the behavioral data
-import numpy as np
-y, session = np.loadtxt(haxby_dataset.session_target[0]).astype("int").T
-conditions = np.recfromtxt(haxby_dataset.conditions_target[0])['f0']
+# -------------------------
+import pandas as pd
-# Restrict to faces and houses
-condition_mask = np.logical_or(conditions == b'face', conditions == b'house')
-y = y[condition_mask]
+# Load target information as string
+behavioral = pd.read_csv(haxby_dataset.session_target[0], sep=" ")
+conditions = behavioral['labels']
+
+# Restrict the analysis to faces and places
+condition_mask = behavioral['labels'].isin(['face', 'house'])
conditions = conditions[condition_mask]
-# We have 2 conditions
-n_conditions = np.size(np.unique(y))
+# Confirm that we now have 2 conditions
+print(conditions.unique())
+
+# Record these as an array of sessions, with fields
+# for condition (face or house) and run
+session = behavioral[condition_mask].to_records(index=False)
+print(session.dtype.names)
#############################################################################
-# Prepare the fMRI data
+# Prepare the fMRI data: smooth and apply the mask
+# -------------------------------------------------
from nilearn.input_data import NiftiMasker
mask_filename = haxby_dataset.mask
+
# For decoding, standardizing is often very important
-nifti_masker = NiftiMasker(mask_img=mask_filename, sessions=session,
- smoothing_fwhm=4, standardize=True,
- memory="nilearn_cache", memory_level=1)
+# note that we are also smoothing the data
+masker = NiftiMasker(mask_img=mask_filename, smoothing_fwhm=4,
+ standardize=True, memory="nilearn_cache", memory_level=1)
func_filename = haxby_dataset.func[0]
-X = nifti_masker.fit_transform(func_filename)
+X = masker.fit_transform(func_filename)
# Apply our condition_mask
X = X[condition_mask]
-session = session[condition_mask]
#############################################################################
# Build the decoder
-
+# ------------------
# Define the prediction function to be used.
# Here we use a Support Vector Classification, with a linear kernel
from sklearn.svm import SVC
@@ -57,11 +67,13 @@
# Define the dimension reduction to be used.
# Here we use a classical univariate feature selection based on F-test,
-# namely Anova. We set the number of features to be selected to 500
-from sklearn.feature_selection import SelectKBest, f_classif
-feature_selection = SelectKBest(f_classif, k=500)
+# namely Anova. When doing full-brain analysis, it is better to use
+# SelectPercentile, keeping 5% of voxels
+# (because it is independent of the resolution of the data).
+from sklearn.feature_selection import SelectPercentile, f_classif
+feature_selection = SelectPercentile(f_classif, percentile=5)
-# We have our classifier (SVC), our feature selection (SelectKBest), and now,
+# We have our classifier (SVC), our feature selection (SelectPercentile), and now,
# we can plug them together in a *pipeline* that performs the two operations
# successively:
from sklearn.pipeline import Pipeline
@@ -69,56 +81,53 @@
#############################################################################
# Fit the decoder and predict
-
-anova_svc.fit(X, y)
+# ----------------------------
+anova_svc.fit(X, conditions)
y_pred = anova_svc.predict(X)
#############################################################################
-# Visualize the results
+# Obtain prediction scores via cross validation
+# -----------------------------------------------
+from sklearn.model_selection import LeaveOneGroupOut, cross_val_score
+
+# Define the cross-validation scheme used for validation.
+# Here we use a LeaveOneGroupOut cross-validation on the session group
+# which corresponds to a leave-one-session-out
+cv = LeaveOneGroupOut()
+# Compute the prediction accuracy for the different folds (i.e. session)
+cv_scores = cross_val_score(anova_svc, X, conditions, cv=cv, groups=session)
+
+# Return the corresponding mean prediction accuracy
+classification_accuracy = cv_scores.mean()
+
+# Print the results
+print("Classification accuracy: %.4f / Chance level: %f" %
+ (classification_accuracy, 1. / len(conditions.unique())))
+# Classification accuracy: 0.7037 / Chance level: 0.5000
+
+
+#############################################################################
+# Visualize the results
+# ----------------------
# Look at the SVC's discriminating weights
coef = svc.coef_
# reverse feature selection
coef = feature_selection.inverse_transform(coef)
# reverse masking
-weight_img = nifti_masker.inverse_transform(coef)
+weight_img = masker.inverse_transform(coef)
-# Create the figure
+# Use the mean image as a background to avoid relying on anatomical data
from nilearn import image
-from nilearn.plotting import plot_stat_map, show
-
-# Plot the mean image because we have no anatomic data
mean_img = image.mean_img(func_filename)
+# Create the figure
+from nilearn.plotting import plot_stat_map, show
plot_stat_map(weight_img, mean_img, title='SVM weights')
# Saving the results as a Nifti file may also be important
weight_img.to_filename('haxby_face_vs_house.nii')
-#############################################################################
-# Obtain prediction scores via cross validation
-
-from sklearn.cross_validation import LeaveOneLabelOut
-
-# Define the cross-validation scheme used for validation.
-# Here we use a LeaveOneLabelOut cross-validation on the session label
-# divided by 2, which corresponds to a leave-two-session-out
-cv = LeaveOneLabelOut(session // 2)
-
-# Compute the prediction accuracy for the different folds (i.e. session)
-cv_scores = []
-for train, test in cv:
- anova_svc.fit(X[train], y[train])
- y_pred = anova_svc.predict(X[test])
- cv_scores.append(np.sum(y_pred == y[test]) / float(np.size(y[test])))
-
-# Return the corresponding mean prediction accuracy
-classification_accuracy = np.mean(cv_scores)
-
-# Print the results
-print("Classification accuracy: %.4f / Chance level: %f" %
- (classification_accuracy, 1.
/ n_conditions))
-# Classification accuracy: 0.9861 / Chance level: 0.5000
show()
diff --git a/examples/decoding/plot_haxby_different_estimators.py b/examples/02_decoding/plot_haxby_different_estimators.py similarity index 74% rename from examples/decoding/plot_haxby_different_estimators.py rename to examples/02_decoding/plot_haxby_different_estimators.py index eb8fe2cd1a..319e7fb671 100644 --- a/examples/decoding/plot_haxby_different_estimators.py +++ b/examples/02_decoding/plot_haxby_different_estimators.py @@ -8,10 +8,12 @@
#############################################################################
# We start by loading the data and applying simple transformations to it
+# -----------------------------------------------------------------------
# Fetch data using nilearn dataset fetcher
from nilearn import datasets
-haxby_dataset = datasets.fetch_haxby(n_subjects=1)
+# By default the 2nd subject's data will be fetched
+haxby_dataset = datasets.fetch_haxby()
# print basic information on the dataset
print('First subject anatomical nifti image (3D) is located at: %s' %
@@ -21,16 +23,17 @@
# load labels
import numpy as np
-labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ")
+import pandas as pd
+labels = pd.read_csv(haxby_dataset.session_target[0], sep=" ")
stimuli = labels['labels']
# identify resting state labels in order to be able to remove them
-resting_state = stimuli == b'rest'
+task_mask = (stimuli != 'rest')
# find names of remaining active labels
-categories = np.unique(stimuli[np.logical_not(resting_state)])
+categories = stimuli[task_mask].unique()
# extract tags indicating to which acquisition run a tag belongs
-session_labels = labels["chunks"][np.logical_not(resting_state)]
+session_labels = labels['chunks'][task_mask]
# Load the fMRI data
from nilearn.input_data import NiftiMasker
@@ -40,25 +43,27 @@
masker = NiftiMasker(mask_img=mask_filename, standardize=True)
func_filename = haxby_dataset.func[0]
masked_timecourses = masker.fit_transform(
- func_filename)[np.logical_not(resting_state)]
+ func_filename)[task_mask]
#############################################################################
# Then we define the various classifiers that we use
-
+# ---------------------------------------------------
# A support vector classifier
from sklearn.svm import SVC
svm = SVC(C=1., kernel="linear")
# The logistic regression
-from sklearn.linear_model import LogisticRegression, RidgeClassifier, \
- RidgeClassifierCV
+from sklearn.linear_model import (LogisticRegression,
+ RidgeClassifier,
+ RidgeClassifierCV,
+ )
logistic = LogisticRegression(C=1., penalty="l1")
logistic_50 = LogisticRegression(C=50., penalty="l1")
logistic_l2 = LogisticRegression(C=1., penalty="l2")
# Cross-validated versions of these classifiers
-from sklearn.grid_search import GridSearchCV
+from sklearn.model_selection import GridSearchCV
# GridSearchCV is slow, but note that it takes an 'n_jobs' parameter that
# can significantly speed up the fitting process on computers with
# multiple cores
@@ -71,7 +76,8 @@
scoring='f1')
logistic_l2_cv = GridSearchCV(LogisticRegression(C=1., penalty="l2"),
param_grid={
- 'C': [.1, .5, 1., 5., 10., 50., 100.]},
+ }, scoring='f1') # The ridge classifier has a specific 'CV' object that can set it's @@ -88,16 +94,17 @@ 'log l2': logistic_l2, 'log l2 cv': logistic_l2_cv, 'ridge': ridge, - 'ridge cv': ridge_cv} - + 'ridge cv': ridge_cv + } ############################################################################# -# Here we compute prediction scores and run time for all these -# classifiers +# Here we compute prediction scores +# ---------------------------------- +# Run time for all these classifiers # Make a data splitting object for cross validation -from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score -cv = LeaveOneLabelOut(session_labels) +from sklearn.model_selection import LeaveOneGroupOut, cross_val_score +cv = LeaveOneGroupOut() import time @@ -108,21 +115,27 @@ print(70 * '_') for category in categories: - task_mask = np.logical_not(resting_state) - classification_target = (stimuli[task_mask] == category) + classification_target = stimuli[task_mask].isin([category]) t0 = time.time() classifiers_scores[classifier_name][category] = cross_val_score( classifier, masked_timecourses, classification_target, - cv=cv, scoring="f1") - - print("%10s: %14s -- scores: %1.2f +- %1.2f, time %.2fs" % ( - classifier_name, category, - classifiers_scores[classifier_name][category].mean(), - classifiers_scores[classifier_name][category].std(), - time.time() - t0)) - + cv=cv, + groups=session_labels, + scoring="f1", + ) + + print( + "%10s: %14s -- scores: %1.2f +- %1.2f, time %.2fs" % + ( + classifier_name, + category, + classifiers_scores[classifier_name][category].mean(), + classifiers_scores[classifier_name][category].std(), + time.time() - t0, + ), + ) ############################################################################### # Then we make a rudimentary diagram @@ -145,10 +158,10 @@ plt.xlabel('Visual stimuli category') plt.ylim(ymin=0) plt.legend(loc='lower center', ncol=3) -plt.title('Category-specific classification accuracy for different classifiers') +plt.title( + 'Category-specific classification accuracy for different classifiers') plt.tight_layout() - ############################################################################### # Finally, w plot the face vs house map for the different classifiers @@ -157,12 +170,12 @@ mean_epi_img = image.mean_img(func_filename) # Restrict the decoding to face vs house -condition_mask = np.logical_or(stimuli == b'face', stimuli == b'house') +condition_mask = stimuli.isin(['face', 'house']) masked_timecourses = masked_timecourses[ - condition_mask[np.logical_not(resting_state)]] -stimuli = stimuli[condition_mask] + condition_mask[task_mask]] +stimuli = (stimuli[condition_mask] == 'face') # Transform the stimuli to binary values -stimuli = (stimuli == b'face').astype(np.int) +stimuli.astype(np.int) from nilearn.plotting import plot_stat_map, show @@ -179,7 +192,7 @@ weight_map = weight_img.get_data() threshold = np.max(np.abs(weight_map)) * 1e-3 plot_stat_map(weight_img, bg_img=mean_epi_img, - display_mode='z', cut_coords=[-17], + display_mode='z', cut_coords=[-15], threshold=threshold, title='%s: face vs house' % classifier_name) diff --git a/examples/decoding/plot_haxby_full_analysis.py b/examples/02_decoding/plot_haxby_full_analysis.py similarity index 81% rename from examples/decoding/plot_haxby_full_analysis.py rename to examples/02_decoding/plot_haxby_full_analysis.py index d72a1084e0..e00e7f5004 100644 --- a/examples/decoding/plot_haxby_full_analysis.py +++ b/examples/02_decoding/plot_haxby_full_analysis.py @@ -15,10 +15,11 @@ 
##########################################################################
# First we load and prepare the data
-
+# -----------------------------------
# Fetch data using nilearn dataset fetcher
from nilearn import datasets
-haxby_dataset = datasets.fetch_haxby(n_subjects=1)
+# By default we fetch the 2nd subject's data for analysis
+haxby_dataset = datasets.fetch_haxby()
func_filename = haxby_dataset.func[0]
# Print basic information on the dataset
@@ -31,23 +32,22 @@
from nilearn.input_data import NiftiMasker
# load labels
-import numpy as np
-labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ")
+import pandas as pd
+labels = pd.read_csv(haxby_dataset.session_target[0], sep=" ")
stimuli = labels['labels']
-
# identify resting state labels in order to be able to remove them
-resting_state = stimuli == b"rest"
+task_mask = (stimuli != 'rest')
# find names of remaining active labels
-categories = np.unique(stimuli[np.logical_not(resting_state)])
+categories = stimuli[task_mask].unique()
# extract tags indicating to which acquisition run a tag belongs
-session_labels = labels["chunks"][np.logical_not(resting_state)]
+session_labels = labels["chunks"][task_mask]
##########################################################################
# Then we use scikit-learn for decoding on the different masks
-
+# -------------------------------------------------------------
# The classifier: a support vector classifier
from sklearn.svm import SVC
classifier = SVC(C=1., kernel="linear")
@@ -57,8 +57,8 @@
dummy_classifier = DummyClassifier()
# Make a data splitting object for cross validation
-from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score
-cv = LeaveOneLabelOut(session_labels)
+from sklearn.model_selection import LeaveOneGroupOut, cross_val_score
+cv = LeaveOneGroupOut()
mask_names = ['mask_vt', 'mask_face', 'mask_house']
@@ -71,26 +71,31 @@
mask_filename = haxby_dataset[mask_name][0]
masker = NiftiMasker(mask_img=mask_filename, standardize=True)
masked_timecourses = masker.fit_transform(
- func_filename)[np.logical_not(resting_state)]
+ func_filename)[task_mask]
mask_scores[mask_name] = {}
mask_chance_scores[mask_name] = {}
for category in categories:
print("Processing %s %s" % (mask_name, category))
- task_mask = np.logical_not(resting_state)
classification_target = (stimuli[task_mask] == category)
mask_scores[mask_name][category] = cross_val_score(
classifier,
masked_timecourses,
classification_target,
- cv=cv, scoring="f1")
+ cv=cv,
+ groups=session_labels,
+ scoring="roc_auc",
+ )
mask_chance_scores[mask_name][category] = cross_val_score(
dummy_classifier,
masked_timecourses,
classification_target,
- cv=cv, scoring="f1")
+ cv=cv,
+ groups=session_labels,
+ scoring="roc_auc",
+ )
print("Scores: %1.2f +- %1.2f" % (
mask_scores[mask_name][category].mean(),
@@ -99,6 +104,8 @@
##########################################################################
# We make a simple bar plot to summarize the results
+# ---------------------------------------------------
+import numpy as np
import matplotlib.pyplot as plt
plt.figure()
@@ -118,9 +125,10 @@
tick_position = tick_position + .2
-plt.ylabel('Classification accurancy (f1 score)')
+plt.ylabel('Classification accuracy (AUC score)')
plt.xlabel('Visual stimuli category')
-plt.legend(loc='best')
+plt.ylim(0.3, 1)
+plt.legend(loc='lower right')
plt.title('Category-specific classification accuracy for different masks')
plt.tight_layout()
diff --git a/examples/decoding/plot_haxby_grid_search.py
b/examples/02_decoding/plot_haxby_grid_search.py similarity index 89% rename from examples/decoding/plot_haxby_grid_search.py rename to examples/02_decoding/plot_haxby_grid_search.py index 10c1ee0d3d..8a9f5a09c4 100644 --- a/examples/decoding/plot_haxby_grid_search.py +++ b/examples/02_decoding/plot_haxby_grid_search.py @@ -35,22 +35,25 @@
###########################################################################
# Load the Haxby dataset
+# -----------------------
from nilearn import datasets
import numpy as np
-haxby_dataset = datasets.fetch_haxby_simple()
+import pandas as pd
+# By default the 2nd subject's data will be fetched; we run our analysis on it
+haxby_dataset = datasets.fetch_haxby()
# print basic information on the dataset
print('Mask nifti image (3D) is located at: %s' % haxby_dataset.mask)
print('Functional nifti image (4D) are located at: %s' % haxby_dataset.func[0])
# Load the behavioral data
-y, session = np.loadtxt(haxby_dataset.session_target[0]).astype('int').T
-conditions = np.recfromtxt(haxby_dataset.conditions_target[0])['f0']
+labels = pd.read_csv(haxby_dataset.session_target[0], sep=" ")
+y = labels['labels']
+session = labels['chunks']
# Keep only data corresponding to shoes or bottles
-condition_mask = np.logical_or(conditions == b'shoe', conditions == b'bottle')
+condition_mask = y.isin(['shoe', 'bottle'])
y = y[condition_mask]
-conditions = conditions[condition_mask]
###########################################################################
# Prepare the data with the NiftiMasker
@@ -69,7 +72,7 @@
###########################################################################
# Build the decoder that we will use
-
+# -----------------------------------
# Define the prediction function to be used.
# Here we use a Support Vector Classification, with a linear kernel
from sklearn.svm import SVC
@@ -90,12 +93,11 @@
###########################################################################
# Compute prediction scores using cross-validation
-
+# -------------------------------------------------
anova_svc.fit(X, y)
y_pred = anova_svc.predict(X)
-from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score
-cv = LeaveOneLabelOut(session[session < 10])
+from sklearn.model_selection import cross_val_score
k_range = [10, 15, 30, 50, 150, 300, 500, 1000, 1500, 3000, 5000]
cv_scores = []
@@ -114,7 +116,8 @@
###########################################################################
# Nested cross-validation
-from sklearn.grid_search import GridSearchCV
+# -------------------------
+from sklearn.model_selection import GridSearchCV
# We are going to tune the parameter 'k' of the step called 'anova' in
# the pipeline. Thus we need to address it as 'anova__k'.
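That ``<step_name>__<parameter>`` naming convention can be sketched in isolation on toy data (all names and shapes below are illustrative):

import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

rng = np.random.RandomState(0)
X, y = rng.randn(20, 500), rng.randint(0, 2, 20)

pipeline = Pipeline([('anova', SelectKBest(f_classif)),
                     ('svc', SVC(kernel='linear'))])
# 'anova__k' reaches through the pipeline to the 'k' of the 'anova' step
grid = GridSearchCV(pipeline, param_grid={'anova__k': [10, 50, 100]}, cv=3)
grid.fit(X, y)
print(grid.best_params_)  # e.g. {'anova__k': 10}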
@@ -127,7 +130,7 @@
###########################################################################
# Plot the prediction scores using matplotlib
-
+# ---------------------------------------------
from matplotlib import pyplot as plt
plt.figure(figsize=(6, 4))
plt.plot(cv_scores, label='Cross validation scores')
diff --git a/examples/decoding/plot_haxby_multiclass.py b/examples/02_decoding/plot_haxby_multiclass.py similarity index 77% rename from examples/decoding/plot_haxby_multiclass.py rename to examples/02_decoding/plot_haxby_multiclass.py index 2a03080898..17558d8338 100644 --- a/examples/decoding/plot_haxby_multiclass.py +++ b/examples/02_decoding/plot_haxby_multiclass.py @@ -9,9 +9,12 @@
##############################################################################
# Load the Haxby dataset
+# ----------------------------
from nilearn import datasets
import numpy as np
-haxby_dataset = datasets.fetch_haxby_simple()
+import pandas as pd
+# By default the 2nd subject from the Haxby dataset will be fetched.
+haxby_dataset = datasets.fetch_haxby()
# Print basic information on the dataset
print('Mask nifti images are located at: %s' % haxby_dataset.mask)
@@ -21,21 +24,22 @@
mask_filename = haxby_dataset.mask
# Load the behavioral data that we will predict
-y, session = np.loadtxt(haxby_dataset.session_target[0]).astype('int').T
-conditions = np.recfromtxt(haxby_dataset.conditions_target[0])['f0']
+labels = pd.read_csv(haxby_dataset.session_target[0], sep=" ")
+y = labels['labels']
+session = labels['chunks']
# Remove the rest condition; it is not very interesting
-non_rest = conditions != b'rest'
-conditions = conditions[non_rest]
+non_rest = (y != 'rest')
y = y[non_rest]
# Get the labels of the numerical conditions represented by the vector y
-unique_conditions, order = np.unique(conditions, return_index=True)
+unique_conditions, order = np.unique(y, return_index=True)
# Sort the conditions by the order of appearance
unique_conditions = unique_conditions[np.argsort(order)]
##############################################################################
# Prepare the fMRI data
+# ----------------------
from nilearn.input_data import NiftiMasker
# For decoding, standardizing is often very important
nifti_masker = NiftiMasker(mask_img=mask_filename, standardize=True,
@@ -49,7 +53,7 @@
##############################################################################
# Build the decoders, using scikit-learn
-#
+# ----------------------------------------
# Here we use a Support Vector Classification, with a linear kernel,
# and a simple feature selection step
@@ -70,7 +74,8 @@
##############################################################################
# Now we compute cross-validation scores
-from sklearn.cross_validation import cross_val_score
+# ----------------------------------------
+from sklearn.model_selection import cross_val_score
cv_scores_ovo = cross_val_score(svc_ovo, X, y, cv=5, verbose=1)
@@ -81,7 +86,7 @@
##############################################################################
# Plot barplots of the prediction scores
-
+# ----------------------------------------
from matplotlib import pyplot as plt
plt.figure(figsize=(4, 3))
plt.boxplot([cv_scores_ova, cv_scores_ovo])
@@ -90,25 +95,24 @@
##############################################################################
# Plot a confusion matrix
-#
+# ------------------------
# We fit on the first 10 sessions and plot a confusion matrix on the
# last 2 sessions
from sklearn.metrics import confusion_matrix
+from nilearn.plotting
import plot_matrix svc_ovo.fit(X[session < 10], y[session < 10]) y_pred_ovo = svc_ovo.predict(X[session >= 10]) -plt.matshow(confusion_matrix(y_pred_ovo, y[session >= 10])) -plt.title('Confusion matrix: One vs One') -plt.xticks(np.arange(len(unique_conditions)), unique_conditions) -plt.yticks(np.arange(len(unique_conditions)), unique_conditions) +plot_matrix(confusion_matrix(y_pred_ovo, y[session >= 10]), + labels=unique_conditions, + title='Confusion matrix: One vs One', cmap='hot_r') svc_ova.fit(X[session < 10], y[session < 10]) y_pred_ova = svc_ova.predict(X[session >= 10]) -plt.matshow(confusion_matrix(y_pred_ova, y[session >= 10])) -plt.title('Confusion matrix: One vs All') -plt.xticks(np.arange(len(unique_conditions)), unique_conditions) -plt.yticks(np.arange(len(unique_conditions)), unique_conditions) +plot_matrix(confusion_matrix(y_pred_ova, y[session >= 10]), + labels=unique_conditions, + title='Confusion matrix: One vs All', cmap='hot_r') plt.show() diff --git a/examples/decoding/plot_haxby_searchlight.py b/examples/02_decoding/plot_haxby_searchlight.py similarity index 74% rename from examples/decoding/plot_haxby_searchlight.py rename to examples/02_decoding/plot_haxby_searchlight.py index bd4723cb49..29bdd4e23e 100644 --- a/examples/decoding/plot_haxby_searchlight.py +++ b/examples/02_decoding/plot_haxby_searchlight.py @@ -11,44 +11,46 @@ ######################################################################### # Load Haxby dataset -import numpy as np -import nibabel +# ------------------- +import pandas as pd from nilearn import datasets -from nilearn.image import new_img_like +from nilearn.image import new_img_like, load_img -haxby_dataset = datasets.fetch_haxby_simple() +# We fetch 2nd subject from haxby datasets (which is default) +haxby_dataset = datasets.fetch_haxby() # print basic information on the dataset print('Anatomical nifti image (3D) is located at: %s' % haxby_dataset.mask) print('Functional nifti image (4D) is located at: %s' % haxby_dataset.func[0]) fmri_filename = haxby_dataset.func[0] -fmri_img = nibabel.load(fmri_filename) -y, session = np.loadtxt(haxby_dataset.session_target[0]).astype('int').T -conditions = np.recfromtxt(haxby_dataset.conditions_target[0])['f0'] +labels = pd.read_csv(haxby_dataset.session_target[0], sep=" ") +y = labels['labels'] +session = labels['chunks'] ######################################################################### # Restrict to faces and houses +# ------------------------------ from nilearn.image import index_img -condition_mask = np.logical_or(conditions == b'face', conditions == b'house') +condition_mask = y.isin(['face', 'house']) -fmri_img = index_img(fmri_img, condition_mask) +fmri_img = index_img(fmri_filename, condition_mask) y, session = y[condition_mask], session[condition_mask] -conditions = conditions[condition_mask] ######################################################################### # Prepare masks -# +# -------------- # - mask_img is the original mask # - process_mask_img is a subset of mask_img, it contains the voxels that # should be processed (we only keep the slice z = 26 and the back of the # brain to speed up computation) +import numpy as np -mask_img = nibabel.load(haxby_dataset.mask) +mask_img = load_img(haxby_dataset.mask) # .astype() makes a copy. 
process_mask = mask_img.get_data().astype(np.int)
-picked_slice = 27
+picked_slice = 29
process_mask[..., (picked_slice + 1):] = 0
process_mask[..., :picked_slice] = 0
process_mask[:, 30:] = 0
@@ -56,6 +58,7 @@
#########################################################################
# Searchlight computation
+# -------------------------
# Make processing parallel
# /!\ As each thread will print its progress, n_jobs > 1 could mess up the
@@ -66,8 +69,8 @@
# Here we use a KFold cross-validation on the session, which corresponds to
# splitting the samples into 4 folds and making 4 runs, using each fold as a
# test set once and the others as training sets
-from sklearn.cross_validation import KFold
-cv = KFold(y.size, n_folds=4)
+from sklearn.model_selection import KFold
+cv = KFold(n_splits=4)
import nilearn.decoding
# The radius is that of the Searchlight sphere that will scan the volume
@@ -80,6 +83,7 @@
#########################################################################
# F-scores computation
+# ----------------------
from nilearn.input_data import NiftiMasker
# For decoding, standardizing is often very important
@@ -96,20 +100,26 @@
#########################################################################
# Visualization
-
+# --------------
# Use the fmri mean image as a surrogate of anatomical data
from nilearn import image
mean_fmri = image.mean_img(fmri_img)
-from nilearn.plotting import plot_stat_map, show
-plot_stat_map(new_img_like(mean_fmri, searchlight.scores_), mean_fmri,
- title="Searchlight", display_mode="z", cut_coords=[-16],
- colorbar=False)
+from nilearn.plotting import plot_stat_map, plot_img, show
+searchlight_img = new_img_like(mean_fmri, searchlight.scores_)
+
+# Because the scores are not zero-centered test statistics, we cannot use
+# plot_stat_map
+plot_img(searchlight_img, bg_img=mean_fmri,
+ title="Searchlight", display_mode="z", cut_coords=[-9],
+ vmin=.42, cmap='hot', threshold=.2, black_bg=True)
# F_score results
p_ma = np.ma.array(p_unmasked, mask=np.logical_not(process_mask))
-plot_stat_map(new_img_like(mean_fmri, p_ma), mean_fmri,
+f_score_img = new_img_like(mean_fmri, p_ma)
+plot_stat_map(f_score_img, mean_fmri,
title="F-scores", display_mode="z",
- cut_coords=[-16], colorbar=False)
+ cut_coords=[-9],
+ colorbar=False)
show()
diff --git a/examples/decoding/plot_haxby_space_net.py b/examples/02_decoding/plot_haxby_space_net.py similarity index 73% rename from examples/decoding/plot_haxby_space_net.py rename to examples/02_decoding/plot_haxby_space_net.py index 40f3c4ea4d..8b8905b9a5 100644 --- a/examples/decoding/plot_haxby_space_net.py +++ b/examples/02_decoding/plot_haxby_space_net.py @@ -11,21 +11,21 @@
##############################################################################
# Load the Haxby dataset
+# ------------------------
from nilearn.datasets import fetch_haxby
data_files = fetch_haxby()
-# Load Target labels
-import numpy as np
-labels = np.recfromcsv(data_files.session_target[0], delimiter=" ")
-
+# Load behavioral data
+import pandas as pd
+behavioral = pd.read_csv(data_files.session_target[0], sep=" ")
# Restrict to face and house conditions
-target = labels['labels']
-condition_mask = np.logical_or(target == "face", target == "house")
+conditions = behavioral['labels']
+condition_mask = conditions.isin(['face', 'house'])
# Split data into train and test samples, using the chunks
-condition_mask_train = np.logical_and(condition_mask, labels['chunks'] <= 6)
-condition_mask_test = np.logical_and(condition_mask, labels['chunks'] > 6)
+condition_mask_train = (condition_mask) & (behavioral['chunks'] <= 6) +condition_mask_test = (condition_mask) & (behavioral['chunks'] > 6) # Apply this sample mask to X (fMRI data) and y (behavioral labels) # Because the data is in one single large 4D image, we need to use @@ -34,8 +34,8 @@ func_filenames = data_files.func[0] X_train = index_img(func_filenames, condition_mask_train) X_test = index_img(func_filenames, condition_mask_test) -y_train = target[condition_mask_train] -y_test = target[condition_mask_test] +y_train = conditions[condition_mask_train] +y_test = conditions[condition_mask_test] # Compute the mean epi to be used for the background of the plotting from nilearn.image import mean_img @@ -43,6 +43,7 @@ ############################################################################## # Fit SpaceNet with a Graph-Net penalty +# -------------------------------------- from nilearn.decoding import SpaceNetClassifier # Fit model on train data and predict on test data @@ -52,12 +53,14 @@ accuracy = (y_pred == y_test).mean() * 100. print("Graph-net classification accuracy : %g%%" % accuracy) -# Visualization +############################################################################# +# Visualization of Graph-net weights +# ------------------------------------ from nilearn.plotting import plot_stat_map, show coef_img = decoder.coef_img_ plot_stat_map(coef_img, background_img, title="graph-net: accuracy %g%%" % accuracy, - cut_coords=(-34, -16), display_mode="yz") + cut_coords=(-52, -5), display_mode="yz") # Save the coefficients to a nifti file coef_img.to_filename('haxby_graph-net_weights.nii') @@ -65,17 +68,20 @@ ############################################################################## # Now Fit SpaceNet with a TV-l1 penalty +# -------------------------------------- decoder = SpaceNetClassifier(memory="nilearn_cache", penalty='tv-l1') decoder.fit(X_train, y_train) y_pred = decoder.predict(X_test) accuracy = (y_pred == y_test).mean() * 100. print("TV-l1 classification accuracy : %g%%" % accuracy) -# Visualization +############################################################################# +# Visualization of TV-L1 weights +# ------------------------------- coef_img = decoder.coef_img_ plot_stat_map(coef_img, background_img, title="tv-l1: accuracy %g%%" % accuracy, - cut_coords=(-34, -16), display_mode="yz") + cut_coords=(-52, -5), display_mode="yz") # Save the coefficients to a nifti file coef_img.to_filename('haxby_tv-l1_weights.nii') @@ -86,4 +92,3 @@ # We can see that the TV-l1 penalty is 3 times slower to converge and # gives the same prediction accuracy. 
However, it yields much # cleaner coefficient maps - diff --git a/examples/decoding/plot_haxby_stimuli.py b/examples/02_decoding/plot_haxby_stimuli.py similarity index 93% rename from examples/decoding/plot_haxby_stimuli.py rename to examples/02_decoding/plot_haxby_stimuli.py index 4b0863ba38..6967b354f2 100644 --- a/examples/decoding/plot_haxby_stimuli.py +++ b/examples/02_decoding/plot_haxby_stimuli.py @@ -12,7 +12,7 @@ from nilearn import datasets -haxby_dataset = datasets.fetch_haxby(n_subjects=0, fetch_stimuli=True) +haxby_dataset = datasets.fetch_haxby(subjects=[], fetch_stimuli=True) stimulus_information = haxby_dataset.stimuli for stim_type in sorted(stimulus_information.keys()): diff --git a/examples/decoding/plot_mixed_gambles_space_net.py b/examples/02_decoding/plot_mixed_gambles_space_net.py similarity index 92% rename from examples/decoding/plot_mixed_gambles_space_net.py rename to examples/02_decoding/plot_mixed_gambles_space_net.py index 264827b3f7..627e45aa4f 100644 --- a/examples/decoding/plot_mixed_gambles_space_net.py +++ b/examples/02_decoding/plot_mixed_gambles_space_net.py @@ -12,6 +12,7 @@ ########################################################################## # Load the data from the Jimura mixed-gamble experiment +# ------------------------------------------------------ from nilearn.datasets import fetch_mixed_gambles data = fetch_mixed_gambles(n_subjects=16) @@ -22,6 +23,7 @@ ########################################################################## # Fit TV-L1 +# ---------- # Here we're using the regressor object given that the task is to predict a # continuous variable, the gain of the gamble. from nilearn.decoding import SpaceNetRegressor @@ -32,6 +34,7 @@ decoder.fit(zmap_filenames, behavioral_target) # Visualize TV-L1 weights +# ------------------------ from nilearn.plotting import plot_stat_map, show plot_stat_map(decoder.coef_img_, title="tv-l1", display_mode="yz", cut_coords=[20, -2]) @@ -39,12 +42,14 @@ ########################################################################## # Fit Graph-Net +# -------------- decoder = SpaceNetRegressor(mask=mask_filename, penalty="graph-net", eps=1e-1, # prefer large alphas memory="nilearn_cache") decoder.fit(zmap_filenames, behavioral_target) # Visualize Graph-Net weights +# ---------------------------- plot_stat_map(decoder.coef_img_, title="graph-net", display_mode="yz", cut_coords=[20, -2]) diff --git a/examples/02_decoding/plot_miyawaki_encoding.py b/examples/02_decoding/plot_miyawaki_encoding.py new file mode 100644 index 0000000000..8520076823 --- /dev/null +++ b/examples/02_decoding/plot_miyawaki_encoding.py @@ -0,0 +1,259 @@ +""" +============================================================ +Encoding models for visual stimuli from Miyawaki et al. 2008 +============================================================ + +This example partly reproduces the encoding model presented in + `Visual image reconstruction from human brain activity + using a combination of multiscale local image decoders + `_, + Miyawaki, Y., Uchida, H., Yamashita, O., Sato, M. A., + Morito, Y., Tanabe, H. C., ... & Kamitani, Y. (2008). + Neuron, 60(5), 915-929. + +Encoding models try to predict neuronal activity using information from +presented stimuli, like an image or sound. Where decoding goes from +brain data to real-world stimulus, encoding goes the other direction. 
+
+We demonstrate how to build such an **encoding model** in nilearn, predicting
+**fMRI data** from **visual stimuli**, using the dataset from
+`Miyawaki et al., 2008
+`_.
+
+Participants were shown images, which consisted of random 10x10 binary
+(either black or white) pixels, and the corresponding fMRI activity was
+recorded. We will try to predict the activity in each voxel
+from the binary pixel-values of the presented images. Then we extract the
+receptive fields for a set of voxels to see which pixel location a voxel
+is most sensitive to.
+
+See also :doc:`plot_miyawaki_reconstruction` for a decoding
+approach for the same dataset.
+"""
+
+##############################################################################
+# Loading the data
+# ----------------
+# Now we can load the data set:
+
+from nilearn.datasets import fetch_miyawaki2008
+
+dataset = fetch_miyawaki2008()
+
+##############################################################################
+# We only use the training data of this study,
+# where random binary images were shown.
+
+# training data starts after the first 12 files
+fmri_random_runs_filenames = dataset.func[12:]
+stimuli_random_runs_filenames = dataset.label[12:]
+
+##############################################################################
+# We can use :func:`nilearn.input_data.MultiNiftiMasker` to load the fMRI
+# data, clean and mask it.
+
+import numpy as np
+from nilearn.input_data import MultiNiftiMasker
+
+masker = MultiNiftiMasker(mask_img=dataset.mask, detrend=True,
+ standardize=True)
+masker.fit()
+fmri_data = masker.transform(fmri_random_runs_filenames)
+
+# shape of the binary (i.e. black and white values) image in pixels
+stimulus_shape = (10, 10)
+
+# We load the visual stimuli from csv files
+stimuli = []
+for stimulus_run in stimuli_random_runs_filenames:
+ stimuli.append(np.reshape(np.loadtxt(stimulus_run,
+ dtype=np.int, delimiter=','),
+ (-1,) + stimulus_shape, order='F'))
+
+##############################################################################
+# Let's take a look at some of these binary images:
+
+import matplotlib.pyplot as plt
+plt.figure(figsize=(8, 4))
+plt.subplot(1, 2, 1)
+plt.imshow(stimuli[0][124], interpolation='nearest', cmap='gray')
+plt.axis('off')
+plt.title('Run {}, Stimulus {}'.format(1, 125))
+plt.subplot(1, 2, 2)
+plt.imshow(stimuli[2][101], interpolation='nearest', cmap='gray')
+plt.axis('off')
+plt.title('Run {}, Stimulus {}'.format(3, 102))
+plt.subplots_adjust(wspace=0.5)
+
+##############################################################################
+# We now stack the fmri and stimulus data and remove an offset at the
+# beginning/end.
+
+fmri_data = np.vstack([fmri_run[2:] for fmri_run in fmri_data])
+stimuli = np.vstack([stimuli_run[:-2] for stimuli_run in stimuli]).astype(float)
+
+##############################################################################
+# fmri_data is a matrix of *samples* x *voxels*
+
+print(fmri_data.shape)
+
+##############################################################################
+# We flatten the last two dimensions of stimuli
+# so it is a matrix of *samples* x *pixels*.
+
+# Flatten the stimuli
+stimuli = np.reshape(stimuli, (-1, stimulus_shape[0] * stimulus_shape[1]))
+
+print(stimuli.shape)
+
+##############################################################################
+# Building the encoding models
+# ----------------------------
+# We can now proceed to build a simple **voxel-wise encoding model** using
+# `Ridge regression `_.
+# For each voxel we fit an independent regression model, +# using the pixel-values of the visual stimuli to predict the neuronal +# activity in this voxel. + +from sklearn.linear_model import Ridge +from sklearn.model_selection import KFold + +############################################################################## +# Using 10-fold cross-validation, we partition the data into 10 'folds'. +# We hold out each fold of the data for testing, then fit a ridge regression +# to the remaining 9/10 of the data, using stimuli as predictors +# and fmri_data as targets, and create predictions for the held-out 10th. +from sklearn.metrics import r2_score + +estimator = Ridge(alpha=100.) +cv = KFold(n_splits=10) + +scores = [] +for train, test in cv.split(X=stimuli): + # we train the Ridge estimator on the training set + # and predict the fMRI activity for the test set + predictions = Ridge(alpha=100.).fit( + stimuli.reshape(-1, 100)[train], fmri_data[train]).predict( + stimuli.reshape(-1, 100)[test]) + # we compute how much variance our encoding model explains in each voxel + scores.append(r2_score(fmri_data[test], predictions, + multioutput='raw_values')) + +############################################################################## +# Mapping the encoding scores on the brain +# ---------------------------------------- +# To plot the scores onto the brain, we create a Nifti1Image containing +# the scores and then threshold it: + +from nilearn.image import threshold_img +cut_score = np.mean(scores, axis=0) +cut_score[cut_score < 0] = 0 + +# bring the scores into the shape of the background brain +score_map_img = masker.inverse_transform(cut_score) + +thresholded_score_map_img = threshold_img(score_map_img, threshold=1e-6) + +############################################################################## +# Plotting the statistical map on a background brain, we mark four voxels +# which we will inspect more closely later on. +from nilearn.plotting import plot_stat_map +from nilearn.image import coord_transform + +def index_to_xy_coord(x, y, z=10): + '''Transforms data index to coordinates of the background + offset''' + coords = coord_transform(x, y, z, + affine=thresholded_score_map_img.affine) + return np.array(coords)[np.newaxis, :] + np.array([0, 1, 0]) + + +xy_indices_of_special_voxels = [(30, 10), (32, 10), (31, 9), (31, 10)] + +display = plot_stat_map(thresholded_score_map_img, bg_img=dataset.background, + cut_coords=[-8], display_mode='z', aspect=1.25, + title='Explained variance per voxel') + +# creating a marker for each voxel and adding it to the statistical map + +for i, (x, y) in enumerate(xy_indices_of_special_voxels): + display.add_markers(index_to_xy_coord(x, y), marker_color='none', + edgecolor=['b', 'r', 'magenta', 'g'][i], + marker_size=140, marker='s', + facecolor='none', lw=4.5) + + +# re-set figure size after construction so colorbar gets rescaled too +fig = plt.gcf() +fig.set_size_inches(12, 12) + + +############################################################################## +# Estimating receptive fields +# --------------------------- +# Now we take a closer look at the receptive fields of the four marked voxels. +# A voxel's `receptive field `_ +# is the region of a stimulus (like an image) where the presence of an object, +# like a white instead of a black pixel, results in a change in activity +# in the voxel. 
In our case the receptive field is just the vector of 100 +# regression coefficients (one for each pixel) reshaped into the 10x10 +# form of the original images. Some voxels are receptive to only very few +# pixels, so we use `Lasso regression +# `_ to estimate a sparse +# set of regression coefficients. + +from sklearn.linear_model import LassoLarsCV + +# automatically estimate the sparsity by cross-validation +lasso = LassoLarsCV(max_iter=10) + +# Mark the same pixel in each receptive field +marked_pixel = (4, 2) + +from matplotlib import gridspec +from matplotlib.patches import Rectangle + +fig = plt.figure(figsize=(12, 8)) +fig.suptitle('Receptive fields of the marked voxels', fontsize=25) + +# GridSpec allows us to do subplots with more control of the spacing +gs1 = gridspec.GridSpec(2, 3) + +# we fit the Lasso for each of the three voxels of the upper row +for i, index in enumerate([1780, 1951, 2131]): + ax = plt.subplot(gs1[0, i]) + # we reshape the coefficients into the form of the original images + rf = lasso.fit(stimuli, fmri_data[:, index]).coef_.reshape((10, 10)) + # add a black background + ax.imshow(np.zeros_like(rf), vmin=0., vmax=1., cmap='gray') + ax_im = ax.imshow(np.ma.masked_less(rf, 0.1), interpolation="nearest", + cmap=['Blues', 'Greens', 'Reds'][i], vmin=0., vmax=0.75) + # add the marked pixel + ax.add_patch(Rectangle( + (marked_pixel[1] - .5, marked_pixel[0] - .5), 1, 1, + facecolor='none', edgecolor='r', lw=4)) + plt.axis('off') + plt.colorbar(ax_im, ax=ax) + +# and then for the voxel at the bottom + +gs1.update(left=0., right=1., wspace=0.1) +ax = plt.subplot(gs1[1, 1]) +# we reshape the coefficients into the form of the original images +rf = lasso.fit(stimuli, fmri_data[:, 1935]).coef_.reshape((10, 10)) +ax.imshow(np.zeros_like(rf), vmin=0., vmax=1., cmap='gray') +ax_im = ax.imshow(np.ma.masked_less(rf, 0.1), interpolation="nearest", + cmap='RdPu', vmin=0., vmax=0.75) + +# add the marked pixel +ax.add_patch(Rectangle( + (marked_pixel[1] - .5, marked_pixel[0] - .5), 1, 1, + facecolor='none', edgecolor='r', lw=4)) +plt.axis('off') +plt.colorbar(ax_im, ax=ax) + +############################################################################## +# The receptive fields of the four voxels are not only close to each other, +# the relative location of the pixel each voxel is most sensitive to +# roughly maps to the relative location of the voxels to each other. +# We can see a relationship between some voxel's receptive field and +# its location in the brain. diff --git a/examples/decoding/plot_miyawaki_reconstruction.py b/examples/02_decoding/plot_miyawaki_reconstruction.py similarity index 96% rename from examples/decoding/plot_miyawaki_reconstruction.py rename to examples/02_decoding/plot_miyawaki_reconstruction.py index 6cfae9cec7..221023d5fd 100644 --- a/examples/decoding/plot_miyawaki_reconstruction.py +++ b/examples/02_decoding/plot_miyawaki_reconstruction.py @@ -15,6 +15,10 @@ The code is a bit elaborate as the example uses, as the original article, a multiscale prediction on the images seen by the subject. + +See also +:ref:`sphx_glr_auto_examples_02_decoding_plot_miyawaki_encoding.py` for a +encoding approach for the same dataset. 
""" # Some basic imports @@ -23,6 +27,7 @@ ############################################################################ # First we load the Miyawaki dataset +# ----------------------------------- from nilearn import datasets sys.stderr.write("Fetching dataset...") t0 = time.time() @@ -43,6 +48,7 @@ ############################################################################ # Then we prepare and mask the data +# ---------------------------------- import numpy as np from nilearn.input_data import MultiNiftiMasker @@ -129,7 +135,7 @@ def flatten(list_of_2d_array): ############################################################################ # We define our prediction function - +# ----------------------------------- sys.stderr.write("Training classifiers... \r") t0 = time.time() @@ -154,6 +160,7 @@ def flatten(list_of_2d_array): ############################################################################ # Here we run the prediction: the decoding itself +# ------------------------------------------------ sys.stderr.write("Calculating scores and outputs...") t0 = time.time() @@ -224,6 +231,7 @@ def split_multi_scale(y, y_shape): ############################################################################ # Let us quantify our prediction error +# ------------------------------------- from sklearn.metrics import (accuracy_score, precision_score, recall_score, f1_score) diff --git a/examples/decoding/plot_oasis_vbm.py b/examples/02_decoding/plot_oasis_vbm.py similarity index 86% rename from examples/decoding/plot_oasis_vbm.py rename to examples/02_decoding/plot_oasis_vbm.py index 1061353117..b8810422d8 100644 --- a/examples/decoding/plot_oasis_vbm.py +++ b/examples/02_decoding/plot_oasis_vbm.py @@ -40,14 +40,15 @@ # Virgile Fritsch, , Apr 2014 # Gael Varoquaux, Apr 2014 import numpy as np -from scipy import linalg import matplotlib.pyplot as plt from nilearn import datasets from nilearn.input_data import NiftiMasker n_subjects = 100 # more subjects requires more memory -### Load Oasis dataset ######################################################## +############################################################################ +# Load Oasis dataset +# ------------------- oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=n_subjects) gray_matter_map_filenames = oasis_dataset.gray_matter_maps age = oasis_dataset.ext_vars['age'].astype(float) @@ -58,7 +59,9 @@ print('First white-matter anatomy image (3D) is located at: %s' % oasis_dataset.white_matter_maps[0]) # 3D data -### Preprocess data ########################################################### +############################################################################# +# Preprocess data +# ---------------- nifti_masker = NiftiMasker( standardize=False, smoothing_fwhm=2, @@ -67,7 +70,9 @@ n_samples, n_features = gm_maps_masked.shape print("%d samples, %d features" % (n_subjects, n_features)) -### Prediction with SVR ####################################################### +############################################################################ +# Prediction pipeline with ANOVA and SVR +# --------------------------------------- print("ANOVA + SVR") # Define the prediction function to be used. 
# Here we use a Support Vector Classification, with a linear kernel @@ -98,7 +103,9 @@ anova_svr.fit(gm_maps_masked, age) age_pred = anova_svr.predict(gm_maps_masked) +############################################################################# # Visualization +# -------------- # Look at the SVR's discriminating weights coef = svr.coef_ # reverse feature selection @@ -112,22 +119,17 @@ from nilearn.plotting import plot_stat_map, show bg_filename = gray_matter_map_filenames[0] z_slice = 0 -from nilearn.image.resampling import coord_transform -affine = weight_img.get_affine() -_, _, k_slice = coord_transform(0, 0, z_slice, - linalg.inv(affine)) -k_slice = np.round(k_slice) + fig = plt.figure(figsize=(5.5, 7.5), facecolor='k') -weight_slice_data = weight_img.get_data()[..., k_slice, 0] -vmax = max(-np.min(weight_slice_data), np.max(weight_slice_data)) * 0.5 +# Hard setting vmax to highlight weights more display = plot_stat_map(weight_img, bg_img=bg_filename, display_mode='z', cut_coords=[z_slice], - figure=fig, vmax=vmax) + figure=fig, vmax=1) display.title('SVM weights', y=1.2) # Measure accuracy with cross validation -from sklearn.cross_validation import cross_val_score +from sklearn.model_selection import cross_val_score cv_scores = cross_val_score(anova_svr, gm_maps_masked, age) # Return the corresponding mean prediction accuracy @@ -163,9 +165,7 @@ '\n(Non-parametric + max-type correction)') display.title(title, y=1.2) -signed_neg_log_pvals_slice_data = \ - signed_neg_log_pvals_unmasked.get_data()[..., k_slice, 0] -n_detections = (np.abs(signed_neg_log_pvals_slice_data) > threshold).sum() +n_detections = (signed_neg_log_pvals_unmasked.get_data() > threshold).sum() print('\n%d detections' % n_detections) show() diff --git a/examples/decoding/plot_oasis_vbm_space_net.py b/examples/02_decoding/plot_oasis_vbm_space_net.py similarity index 95% rename from examples/decoding/plot_oasis_vbm_space_net.py rename to examples/02_decoding/plot_oasis_vbm_space_net.py index 99b2a96b77..03ddb4e504 100644 --- a/examples/decoding/plot_oasis_vbm_space_net.py +++ b/examples/02_decoding/plot_oasis_vbm_space_net.py @@ -14,6 +14,7 @@ ########################################################################### # Load the Oasis VBM dataset +# --------------------------- import numpy as np from nilearn import datasets n_subjects = 200 # increase this number if you have more RAM on your box @@ -25,7 +26,7 @@ # Split data into training set and test set from sklearn.utils import check_random_state -from sklearn.cross_validation import train_test_split +from sklearn.model_selection import train_test_split rng = check_random_state(42) gm_imgs_train, gm_imgs_test, age_train, age_test = train_test_split( gm_imgs, age, train_size=.6, random_state=rng) @@ -38,6 +39,7 @@ ########################################################################### # Fit the SpaceNet and predict with it +# ------------------------------------- from nilearn.decoding import SpaceNetRegressor # To save time (because these are anat images with many voxels), we include @@ -68,6 +70,7 @@ ########################################################################### # Visualize the quality of predictions +# ------------------------------------- import matplotlib.pyplot as plt plt.figure() plt.suptitle("graph-net: Mean Absolute Error %.2f years" % mse) diff --git a/examples/decoding/plot_simulated_data.py b/examples/02_decoding/plot_simulated_data.py similarity index 53% rename from examples/decoding/plot_simulated_data.py rename to 
examples/02_decoding/plot_simulated_data.py index f02ff17cab..0fa97a02d0 100644 --- a/examples/decoding/plot_simulated_data.py +++ b/examples/02_decoding/plot_simulated_data.py @@ -6,6 +6,26 @@ This example simulates data according to a very simple sketch of brain imaging data and applies machine learning techniques to predict output values. + +We use a very simple generating function to simulate data, as in `Michel +et al. 2012 `_ , a linear +model with a random design matrix **X**: + +.. math:: + + \mathbf{y} = \mathbf{X} \mathbf{w} + \mathbf{e} + +* **w**: the weights of the linear model correspond to the predictive + brain regions. Here, in the simulations, they form a 3D image with 5, four + of which in opposite corners and one in the middle, as plotted below. + +* **X**: the design matrix corresponds to the observed fMRI data. Here + we simulate random normal variables and smooth them as in Gaussian + fields. + +* **e** is random normal noise. + + """ # Licence : BSD @@ -20,7 +40,7 @@ from sklearn import linear_model, svm from sklearn.utils import check_random_state -from sklearn.cross_validation import KFold +from sklearn.model_selection import KFold from sklearn.feature_selection import f_regression import nibabel @@ -29,8 +49,9 @@ import nilearn.masking -############################################################################### -# Function to generate data +############################################################################## +# A function to generate data +############################################################################## def create_simulation_data(snr=0, n_samples=2 * 100, size=12, random_state=1): generator = check_random_state(random_state) roi_size = 2 # size / 3 @@ -42,8 +63,8 @@ def create_simulation_data(snr=0, n_samples=2 * 100, size=12, random_state=1): w[0:roi_size, -roi_size:, -roi_size:] = -0.6 w[-roi_size:, 0:roi_size:, -roi_size:] = 0.5 w[(size - roi_size) // 2:(size + roi_size) // 2, - (size - roi_size) // 2:(size + roi_size) // 2, - (size - roi_size) // 2:(size + roi_size) // 2] = 0.5 + (size - roi_size) // 2:(size + roi_size) // 2, + (size - roi_size) // 2:(size + roi_size) // 2] = 0.5 w = w.ravel() # Generate smooth background noise XX = generator.randn(n_samples, size, size, size) @@ -70,9 +91,12 @@ def create_simulation_data(snr=0, n_samples=2 * 100, size=12, random_state=1): y_test = y[n_samples // 2:] y = y[:n_samples // 2] - return X_train, X_test, y, y_test, snr, noise, w, size + return X_train, X_test, y, y_test, snr, w, size +############################################################################## +# A simple function to plot slices +############################################################################## def plot_slices(data, title=None): plt.figure(figsize=(5.5, 2.2)) vmax = np.abs(data).max() @@ -89,7 +113,8 @@ def plot_slices(data, title=None): ############################################################################### # Create data -X_train, X_test, y_train, y_test, snr, _, coefs, size = \ +############################################################################### +X_train, X_test, y_train, y_test, snr, coefs, size = \ create_simulation_data(snr=-10, n_samples=100, size=12) # Create masks for SearchLight. 
process_mask is the voxels where SearchLight
@@ -107,44 +132,76 @@ def plot_slices(data, title=None):
 plot_slices(coefs, title="Ground truth")
 
 ###############################################################################
-# Compute the results and estimated coef maps for different estimators
-classifiers = [
+# Run different estimators
+###############################################################################
+#
+# We can now run different estimators and look at their prediction score,
+# as well as the feature maps that they recover. Namely, we will use
+#
+# * A support vector regression (`SVM
+#   `_)
+#
+# * An `elastic-net
+#   `_
+#
+# * A *Bayesian* ridge estimator, i.e. a ridge estimator that sets its
+#   parameter according to a metaprior
+#
+# * A ridge estimator that sets its parameter by cross-validation
+#
+# Note that the `RidgeCV` and the `ElasticNetCV` have names ending in `CV`,
+# which stands for `cross-validation`: in the list of possible `alpha`
+# values that they are given, they choose the best by cross-validation.
+
+estimators = [
     ('bayesian_ridge', linear_model.BayesianRidge(normalize=True)),
     ('enet_cv', linear_model.ElasticNetCV(alphas=[5, 1, 0.5, 0.1],
                                           l1_ratio=0.05)),
     ('ridge_cv', linear_model.RidgeCV(alphas=[100, 10, 1, 0.1], cv=5)),
     ('svr', svm.SVR(kernel='linear', C=0.001)),
-    ('searchlight', decoding.SearchLight(
-        mask_img, process_mask_img=process_mask_img,
-        radius=2.7, scoring='r2', estimator=svm.SVR(kernel="linear"),
-        cv=KFold(y_train.size, n_folds=4),
-        verbose=1, n_jobs=1))
+    ('searchlight', decoding.SearchLight(mask_img,
+                                         process_mask_img=process_mask_img,
+                                         radius=2.7,
+                                         scoring='r2',
+                                         estimator=svm.SVR(kernel="linear"),
+                                         cv=KFold(n_splits=4),
+                                         verbose=1,
+                                         n_jobs=1,
+                                         )
+     )
 ]
 
+###############################################################################
 # Run the estimators
-for name, classifier in classifiers:
+#
+# As the estimators expose a fairly consistent API, we can fit them all in
+# a for loop: they all have a `fit` method for fitting the data, a `score`
+# method to retrieve the prediction score, and, because they are all linear
+# models, a `coef_` attribute that stores the estimated coefficients **w**.
+
+for name, estimator in estimators:
     t1 = time()
     if name != "searchlight":
-        classifier.fit(X_train, y_train)
+        estimator.fit(X_train, y_train)
     else:
         X = nilearn.masking.unmask(X_train, mask_img)
-        classifier.fit(X, y_train)
+        estimator.fit(X, y_train)
         del X
     elapsed_time = time() - t1
 
     if name != 'searchlight':
-        coefs = classifier.coef_
+        coefs = estimator.coef_
         coefs = np.reshape(coefs, [size, size, size])
-        score = classifier.score(X_test, y_test)
+        score = estimator.score(X_test, y_test)
         title = '%s: prediction score %.3f, training time: %.2fs' % (
-            classifier.__class__.__name__, score,
-            elapsed_time)
+            estimator.__class__.__name__, score,
+            elapsed_time)
 
     else:  # Searchlight
-        coefs = classifier.scores_
+        coefs = estimator.scores_
         title = '%s: training time: %.2fs' % (
-            classifier.__class__.__name__,
-            elapsed_time)
+            estimator.__class__.__name__,
+            elapsed_time)
 
     # We use the plot_slices function provided in the example to
     # plot the results
@@ -160,3 +217,17 @@ def plot_slices(data, title=None):
 plot_slices(p_values, title="f_regress")
 
 plt.show()
+
+###############################################################################
+# An exercise to go further
+###############################################################################
+#
+# As an exercise, you can use recursive feature elimination (RFE) with
+# the SVM
+#
+# 
Read the object's documentation to find out how to use RFE.
+#
+# **Performance tip**: increase the `step` parameter, or it will be very
+# slow.
+
+from sklearn.feature_selection import RFE
diff --git a/examples/connectivity/README.txt b/examples/03_connectivity/README.txt
similarity index 100%
rename from examples/connectivity/README.txt
rename to examples/03_connectivity/README.txt
diff --git a/examples/03_connectivity/plot_adhd_spheres.py b/examples/03_connectivity/plot_adhd_spheres.py
new file mode 100644
index 0000000000..29882c5fc3
--- /dev/null
+++ b/examples/03_connectivity/plot_adhd_spheres.py
@@ -0,0 +1,112 @@
+"""
+Extracting brain signal from spheres
+====================================
+
+This example extracts brain signals from spheres described by the coordinates
+of their center in MNI space and a given radius in millimeters. In particular,
+it extracts signals from Default Mode Network regions and computes a
+connectome from them.
+
+"""
+
+##########################################################################
+# Retrieve the dataset
+# ---------------------
+from nilearn import datasets
+adhd_dataset = datasets.fetch_adhd(n_subjects=1)
+
+# print basic information on the dataset
+print('First subject functional nifti image (4D) is at: %s' %
+      adhd_dataset.func[0])  # 4D data
+
+
+##########################################################################
+# Coordinates of Default Mode Network
+# ------------------------------------
+dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)]
+labels = [
+    'Posterior Cingulate Cortex',
+    'Left Temporoparietal junction',
+    'Right Temporoparietal junction',
+    'Medial prefrontal cortex',
+    ]
+
+
+##########################################################################
+# Extract signals from spheres around the DMN seeds
+# ---------------------------------------------------
+from nilearn import input_data
+
+masker = input_data.NiftiSpheresMasker(
+    dmn_coords, radius=8,
+    detrend=True, standardize=True,
+    low_pass=0.1, high_pass=0.01, t_r=2.5,
+    memory='nilearn_cache', memory_level=1, verbose=2)
+
+func_filename = adhd_dataset.func[0]
+confound_filename = adhd_dataset.confounds[0]
+
+time_series = masker.fit_transform(func_filename,
+                                   confounds=[confound_filename])
+
+##########################################################################
+# Display time series
+# --------------------
+import matplotlib.pyplot as plt
+for time_serie, label in zip(time_series.T, labels):
+    plt.plot(time_serie, label=label)
+
+plt.title('Default Mode Network Time Series')
+plt.xlabel('Scan number')
+plt.ylabel('Normalized signal')
+plt.legend()
+plt.tight_layout()
+
+
+##########################################################################
+# Compute partial correlation matrix
+# -----------------------------------
+# Using the object :class:`nilearn.connectome.ConnectivityMeasure`: its
+# default covariance estimator is Ledoit-Wolf, which allows obtaining
+# accurate partial correlations.
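+
+# As a cross-check (a sketch of ours, not what the Nilearn object does
+# verbatim), partial correlations can be derived by hand from the inverse
+# of a Ledoit-Wolf covariance estimate of the extracted signals:
+import numpy as np
+from sklearn.covariance import LedoitWolf
+precision = np.linalg.inv(LedoitWolf().fit(time_series).covariance_)
+scale = np.sqrt(np.diag(precision))
+partial_corr_by_hand = -precision / np.outer(scale, scale)
+np.fill_diagonal(partial_corr_by_hand, 1.)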
+
+from nilearn.connectome import ConnectivityMeasure
+connectivity_measure = ConnectivityMeasure(kind='partial correlation')
+partial_correlation_matrix = connectivity_measure.fit_transform(
+    [time_series])[0]
+
+##########################################################################
+# Display connectome
+# -------------------
+from nilearn import plotting
+
+plotting.plot_connectome(partial_correlation_matrix, dmn_coords,
+                         title="Default Mode Network Connectivity")
+
+##########################################################################
+# Display connectome with hemispheric projections.
+# Notice (0, -52, 18) is included in both hemispheres since x == 0.
+plotting.plot_connectome(partial_correlation_matrix, dmn_coords,
+                         title="Connectivity projected on hemispheres",
+                         display_mode='lyrz')
+
+plotting.show()
+
+##############################################################################
+# 3D visualization in a web browser
+# ---------------------------------
+# An alternative to :func:`nilearn.plotting.plot_connectome` is to use
+# :func:`nilearn.plotting.view_connectome`, which gives more interactive
+# visualizations in a web browser. See :ref:`interactive-connectome-plotting`
+# for more details.
+
+
+view = plotting.view_connectome(partial_correlation_matrix, dmn_coords)
+
+# uncomment this to open the plot in a web browser:
+# view.open_in_browser()
+
+##############################################################################
+# In a Jupyter notebook, if ``view`` is the output of a cell, it will
+# be displayed below the cell
+
+view
diff --git a/examples/03_connectivity/plot_atlas_comparison.py b/examples/03_connectivity/plot_atlas_comparison.py
new file mode 100644
index 0000000000..462c28c48d
--- /dev/null
+++ b/examples/03_connectivity/plot_atlas_comparison.py
@@ -0,0 +1,115 @@
+"""
+Comparing connectomes on different reference atlases
+====================================================
+
+This example shows how to turn a parcellation into a connectome for
+visualization. This requires choosing centers for each parcel
+or network, via :func:`nilearn.plotting.find_parcellation_cut_coords` for
+a parcellation based on labels and
+:func:`nilearn.plotting.find_probabilistic_atlas_cut_coords` for
+a parcellation based on probabilistic values.
+
+In the intermediate steps, we make use of
+:class:`nilearn.input_data.NiftiLabelsMasker` and
+:class:`nilearn.input_data.NiftiMapsMasker` to extract time series from nifti
+objects using different parcellation atlases.
+The time series of all subjects of the ADHD Dataset are concatenated and
+given directly to :class:`nilearn.connectome.ConnectivityMeasure` for
+computing parcel-wise correlation matrices for each atlas across all subjects.
+
+The mean correlation matrix is displayed on the glass brain at the extracted
+coordinates.
+
+# author: Amadeus Kanaan
+
+"""
+
+####################################################################
+# Load atlases
+# -------------
+from nilearn import datasets
+
+yeo = datasets.fetch_atlas_yeo_2011()
+print('Yeo atlas nifti image (3D) with 17 parcels and liberal mask is located '
+      'at: %s' % yeo['thick_17'])
+
+#########################################################################
+# Load functional data
+# --------------------
+data = datasets.fetch_adhd(n_subjects=10)
+
+print('Functional nifti images (4D, e.g., one subject) are located at: %r'
+      % data['func'][0])
+print('Confound csv files (of same subject) are located at: %r'
+      % data['confounds'][0])
+
+##########################################################################
+# Extract coordinates on Yeo atlas - parcellations
+# ------------------------------------------------
+from nilearn.input_data import NiftiLabelsMasker
+from nilearn.connectome import ConnectivityMeasure
+
+# ConnectivityMeasure from Nilearn uses simple 'correlation' to compute
+# connectivity matrices for all subjects in a list
+connectome_measure = ConnectivityMeasure(kind='correlation')
+
+# useful for plotting connectivity interactions on glass brain
+from nilearn import plotting
+
+# create masker to extract functional data within atlas parcels
+masker = NiftiLabelsMasker(labels_img=yeo['thick_17'], standardize=True,
+                           memory='nilearn_cache')
+
+# extract time series from all subjects and concatenate them
+time_series = []
+for func, confounds in zip(data.func, data.confounds):
+    time_series.append(masker.fit_transform(func, confounds=confounds))
+
+# calculate correlation matrices across subjects and display
+correlation_matrices = connectome_measure.fit_transform(time_series)
+
+# Mean correlation matrix across 10 subjects can be grabbed like this,
+# using connectome measure object
+mean_correlation_matrix = connectome_measure.mean_
+
+# grab center coordinates for atlas labels
+coordinates = plotting.find_parcellation_cut_coords(labels_img=yeo['thick_17'])
+
+# plot connectome with 80% edge strength in the connectivity
+plotting.plot_connectome(mean_correlation_matrix, coordinates,
+                         edge_threshold="80%",
+                         title='Yeo Atlas 17 thick (func)')
+
+##########################################################################
+# Load probabilistic atlases - extracting coordinates on brain maps
+# -----------------------------------------------------------------
+
+msdl = datasets.fetch_atlas_msdl()
+
+##########################################################################
+# Iterate over fetched atlases to extract coordinates - probabilistic
+# -------------------------------------------------------------------
+from nilearn.input_data import NiftiMapsMasker
+
+# create masker to extract functional data within atlas parcels
+masker = NiftiMapsMasker(maps_img=msdl['maps'], standardize=True,
+                         memory='nilearn_cache')
+
+# extract time series from all subjects and concatenate them
+time_series = []
+for func, confounds in zip(data.func, data.confounds):
+    time_series.append(masker.fit_transform(func, confounds=confounds))
+
+# calculate correlation matrices across subjects and display
+correlation_matrices = connectome_measure.fit_transform(time_series)
+
+# Mean correlation matrix across 10 subjects can be grabbed like this,
+# using connectome measure object
+mean_correlation_matrix = connectome_measure.mean_
+
+# grab center coordinates for probabilistic atlas
+coordinates = 
plotting.find_probabilistic_atlas_cut_coords(maps_img=msdl['maps']) + +# plot connectome with 80% edge strength in the connectivity +plotting.plot_connectome(mean_correlation_matrix, coordinates, + edge_threshold="80%", title='MSDL (probabilistic)') +plotting.show() diff --git a/examples/connectivity/plot_canica_resting_state.py b/examples/03_connectivity/plot_canica_resting_state.py similarity index 69% rename from examples/connectivity/plot_canica_resting_state.py rename to examples/03_connectivity/plot_canica_resting_state.py index 3f151ae908..8a22b60155 100644 --- a/examples/connectivity/plot_canica_resting_state.py +++ b/examples/03_connectivity/plot_canica_resting_state.py @@ -3,7 +3,7 @@ ===================================================== An example applying CanICA to resting-state data. This example applies it -to 40 subjects of the ADHD200 datasets. Then it plots a map with all the +to 30 subjects of the ADHD200 datasets. Then it plots a map with all the components together and an axial cut for each of the components separately. CanICA is an ICA method for group-level analysis of fMRI data. Compared @@ -19,10 +19,20 @@ Pre-prints for both papers are available on hal (http://hal.archives-ouvertes.fr) + +.. note:: + + The use of the attribute `components_img_` from decomposition + estimators is implemented from version 0.4.1. + For older versions, unmask the deprecated attribute `components_` + to get the components image using attribute `masker_` embedded in + estimator. + See the :ref:`section Inverse transform: unmasking data `. """ #################################################################### # First we load the ADHD200 data +# ------------------------------- from nilearn import datasets adhd_dataset = datasets.fetch_adhd(n_subjects=30) @@ -30,20 +40,24 @@ # print basic information on the dataset print('First functional nifti image (4D) is at: %s' % - adhd_dataset.func[0]) # 4D data + func_filenames[0]) # 4D data #################################################################### # Here we apply CanICA on the data -from nilearn.decomposition.canica import CanICA +# --------------------------------- +from nilearn.decomposition import CanICA canica = CanICA(n_components=20, smoothing_fwhm=6., - memory="nilearn_cache", memory_level=5, + memory="nilearn_cache", memory_level=2, threshold=3., verbose=10, random_state=0) canica.fit(func_filenames) -# Retrieve the independent components in brain space -components_img = canica.masker_.inverse_transform(canica.components_) +# Retrieve the independent components in brain space. Directly +# accesible through attribute `components_img_`. Note that this +# attribute is implemented from version 0.4.1. For older versions, +# see note section above for details. 
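+# For those older versions, the equivalent (now deprecated) spelling
+# would be roughly:
+#
+#     components_img = canica.masker_.inverse_transform(canica.components_)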
+
+components_img = canica.components_img_
 # components_img is a Nifti Image object, and can be saved to a file with
 # the following line:
 components_img.to_filename('canica_resting_state.nii.gz')
@@ -51,6 +65,7 @@
 
 ####################################################################
 # To visualize we plot the outline of all components on one figure
+# -----------------------------------------------------------------
 from nilearn.plotting import plot_prob_atlas
 
 # Plot all ICA components together
@@ -59,6 +74,7 @@
 
 ####################################################################
 # Finally, we plot the map for each ICA component separately
+# -----------------------------------------------------------
 from nilearn.image import iter_img
 from nilearn.plotting import plot_stat_map, show
diff --git a/examples/connectivity/plot_compare_resting_state_decomposition.py b/examples/03_connectivity/plot_compare_resting_state_decomposition.py
similarity index 68%
rename from examples/connectivity/plot_compare_resting_state_decomposition.py
rename to examples/03_connectivity/plot_compare_resting_state_decomposition.py
index 41534a5da0..d3f8c93bfe 100644
--- a/examples/connectivity/plot_compare_resting_state_decomposition.py
+++ b/examples/03_connectivity/plot_compare_resting_state_decomposition.py
@@ -9,16 +9,21 @@
 spatial maps. It extracts maps that are naturally sparse and usually cleaner
 than ICA
 
-   * Gael Varoquaux et al.
-   Multi-subject dictionary learning to segment an atlas of brain spontaneous
-   activity
-   Information Processing in Medical Imaging, 2011, pp. 562-573, Lecture Notes
-   in Computer Science
+    * Arthur Mensch et al. `Compressed online dictionary learning for fast resting-state fMRI decomposition
+      `_,
+      ISBI 2016, Lecture Notes in Computer Science
 
-Available on https://hal.inria.fr/inria-00588898/en/
+.. note::
+
+    The use of the attribute `components_img_` from decomposition
+    estimators is implemented from version 0.4.1.
+    For older versions, unmask the deprecated attribute `components_` to
+    get the components image using attribute `masker_` embedded in estimator.
+    See the :ref:`section Inverse transform: unmasking data `.
 """
 
###############################################################################
 # Load ADHD rest dataset
+# -----------------------
 from nilearn import datasets
 
 adhd_dataset = datasets.fetch_adhd(n_subjects=30)
@@ -28,29 +33,39 @@
 print('First functional nifti image (4D) is at: %s' %
       adhd_dataset.func[0])  # 4D data
 
-#################################s#############################################
+###############################################################################
 # Create two decomposition estimators
+# ------------------------------------
 from nilearn.decomposition import DictLearning, CanICA
 
 n_components = 40
 
###############################################################################
 # Dictionary learning
+# --------------------
+#
+# We use "template" as the strategy to compute the mask, as this leads
+# to slightly faster and more reproducible results.
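+# (Concretely, as a note of ours rather than the original comment: the
+# 'template' strategy derives the mask from nilearn's MNI152 template
+# rather than from the EPI data themselves.)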
However, the images
+# need to be in MNI template space
 dict_learning = DictLearning(n_components=n_components,
                              memory="nilearn_cache", memory_level=2,
                              verbose=1,
                              random_state=0,
-                             n_epochs=1)
+                             n_epochs=1,
+                             mask_strategy='template')
 
 ###############################################################################
 # CanICA
+# ------
 canica = CanICA(n_components=n_components,
                 memory="nilearn_cache", memory_level=2,
                 threshold=3.,
                 n_init=1,
-                verbose=1)
+                verbose=1,
+                mask_strategy='template')
 
 ###############################################################################
 # Fit both estimators
+# --------------------
 estimators = [dict_learning, canica]
 names = {dict_learning: 'DictionaryLearning', canica: 'CanICA'}
 components_imgs = []
@@ -59,22 +74,23 @@
     print('[Example] Learning maps using %s model' % names[estimator])
     estimator.fit(func_filenames)
     print('[Example] Saving results')
-    # Decomposition estimator embeds their own masker
-    masker = estimator.masker_
-    # Drop output maps to a Nifti file
-    components_img = masker.inverse_transform(estimator.components_)
+    # Grab the extracted components, unmasked back to a Nifti image.
+    # Note: for versions older than 0.4.1, components_img_
+    # is not implemented. See the note section above for details.
+    components_img = estimator.components_img_
     components_img.to_filename('%s_resting_state.nii.gz' %
                                names[estimator])
     components_imgs.append(components_img)
 
###############################################################################
 # Visualize the results
+# ----------------------
 from nilearn.plotting import (plot_prob_atlas, find_xyz_cut_coords, show,
                               plot_stat_map)
 from nilearn.image import index_img
 
 # Selecting specific maps to display: maps were manually chosen to be similar
-indices = {dict_learning: 1, canica: 31}
+indices = {dict_learning: 25, canica: 33}
 # We select relevant cut coordinates for displaying
 cut_component = index_img(components_imgs[0], indices[dict_learning])
 cut_coords = find_xyz_cut_coords(cut_component)
diff --git a/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py b/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
new file mode 100644
index 0000000000..fc77374edd
--- /dev/null
+++ b/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
@@ -0,0 +1,168 @@
+"""
+Regions extraction using Dictionary Learning and functional connectomes
+=======================================================================
+
+This example shows how to use :class:`nilearn.regions.RegionExtractor`
+to extract spatially constrained brain regions from whole brain maps decomposed
+using dictionary learning, and use them to build a functional connectome.
+
+We use 20 resting-state ADHD functional datasets from :func:`nilearn.datasets.fetch_adhd`
+and :class:`nilearn.decomposition.DictLearning` to obtain a set of brain atlas maps.
+
+The same steps can also be applied to region extraction from ICA maps. In that
+case, the idea would be to replace dictionary learning with canonical
+ICA decomposition using :class:`nilearn.decomposition.CanICA`
+
+Please see the related documentation of :class:`nilearn.regions.RegionExtractor`
+for more details.
+
+.. note::
+
+    The use of the attribute `components_img_` from dictionary learning
+    estimator is implemented from version 0.4.1. For older versions,
+    unmask the deprecated attribute `components_` to get the components
+    image using attribute `masker_` embedded in estimator.
+    See the :ref:`section Inverse transform: unmasking data `.
+""" + +################################################################################ +# Fetch ADHD resting state functional datasets +# --------------------------------------------- +# +# We use nilearn's datasets downloading utilities +from nilearn import datasets + +adhd_dataset = datasets.fetch_adhd(n_subjects=20) +func_filenames = adhd_dataset.func +confounds = adhd_dataset.confounds + +################################################################################ +# Extract resting-state networks with DictionaryLearning +# ------------------------------------------------------- + +# Import dictionary learning algorithm from decomposition module and call the +# object and fit the model to the functional datasets +from nilearn.decomposition import DictLearning + +# Initialize DictLearning object +dict_learn = DictLearning(n_components=5, smoothing_fwhm=6., + memory="nilearn_cache", memory_level=2, + random_state=0) +# Fit to the data +dict_learn.fit(func_filenames) +# Resting state networks/maps in attribute `components_img_` +# Note that this attribute is implemented from version 0.4.1. +# For older versions, see the note section above for details. +components_img = dict_learn.components_img_ + +# Visualization of resting state networks +# Show networks using plotting utilities +from nilearn import plotting + +plotting.plot_prob_atlas(components_img, view_type='filled_contours', + title='Dictionary Learning maps') + +################################################################################ +# Extract regions from networks +# ------------------------------ + +# Import Region Extractor algorithm from regions module +# threshold=0.5 indicates that we keep nominal of amount nonzero voxels across all +# maps, less the threshold means that more intense non-voxels will be survived. +from nilearn.regions import RegionExtractor + +extractor = RegionExtractor(components_img, threshold=0.5, + thresholding_strategy='ratio_n_voxels', + extractor='local_regions', + standardize=True, min_region_size=1350) +# Just call fit() to process for regions extraction +extractor.fit() +# Extracted regions are stored in regions_img_ +regions_extracted_img = extractor.regions_img_ +# Each region index is stored in index_ +regions_index = extractor.index_ +# Total number of regions extracted +n_regions_extracted = regions_extracted_img.shape[-1] + +# Visualization of region extraction results +title = ('%d regions are extracted from %d components.' + '\nEach separate color of region indicates extracted region' + % (n_regions_extracted, 5)) +plotting.plot_prob_atlas(regions_extracted_img, view_type='filled_contours', + title=title) + +################################################################################ +# Compute correlation coefficients +# --------------------------------- + +# First we need to do subjects timeseries signals extraction and then estimating +# correlation matrices on those signals. +# To extract timeseries signals, we call transform() from RegionExtractor object +# onto each subject functional data stored in func_filenames. 
+
+# To estimate correlation matrices we import connectome utilities from nilearn
+from nilearn.connectome import ConnectivityMeasure
+
+correlations = []
+# Initializing ConnectivityMeasure object with kind='correlation'
+connectome_measure = ConnectivityMeasure(kind='correlation')
+for filename, confound in zip(func_filenames, confounds):
+    # call transform from RegionExtractor object to extract timeseries signals
+    timeseries_each_subject = extractor.transform(filename, confounds=confound)
+    # call fit_transform from ConnectivityMeasure object
+    correlation = connectome_measure.fit_transform([timeseries_each_subject])
+    # saving each subject correlation to correlations
+    correlations.append(correlation)
+
+# Mean of all correlations
+import numpy as np
+mean_correlations = np.mean(correlations, axis=0).reshape(n_regions_extracted,
+                                                          n_regions_extracted)
+
+###############################################################################
+# Plot resulting connectomes
+# ----------------------------
+
+title = 'Correlation between %d regions' % n_regions_extracted
+
+# First plot the matrix
+display = plotting.plot_matrix(mean_correlations, vmax=1, vmin=-1,
+                               colorbar=True, title=title)
+
+# Then find the center of the regions and plot a connectome
+regions_img = regions_extracted_img
+coords_connectome = plotting.find_probabilistic_atlas_cut_coords(regions_img)
+
+plotting.plot_connectome(mean_correlations, coords_connectome,
+                         edge_threshold='90%', title=title)
+
+################################################################################
+# Plot regions extracted for only one specific network
+# ----------------------------------------------------
+
+# First, we plot a network of index=4 without region extraction (left plot)
+from nilearn import image
+
+img = image.index_img(components_img, 4)
+coords = plotting.find_xyz_cut_coords(img)
+display = plotting.plot_stat_map(img, cut_coords=coords, colorbar=False,
+                                 title='Showing one specific network')
+
+################################################################################
+# Now, we plot (right side) the same network after region extraction to show
+# that connected regions are nicely separated.
+# Each extracted brain region is identified by a separate color.
+
+# For this, we take the indices of all the extracted regions related to the
+# original network of index 4.
+regions_indices_of_map3 = np.where(np.array(regions_index) == 4)
+
+display = plotting.plot_anat(cut_coords=coords,
+                             title='Regions from this network')
+
+# Add as an overlay all the regions of index 4
+colors = 'rgbcmyk'
+for each_index_of_map3, color in zip(regions_indices_of_map3[0], colors):
+    display.add_overlay(image.index_img(regions_extracted_img, each_index_of_map3),
+                        cmap=plotting.cm.alpha_cmap(color))
+
+plotting.show()
diff --git a/examples/03_connectivity/plot_group_level_connectivity.py b/examples/03_connectivity/plot_group_level_connectivity.py
new file mode 100644
index 0000000000..7e6510f8c0
--- /dev/null
+++ b/examples/03_connectivity/plot_group_level_connectivity.py
@@ -0,0 +1,223 @@
+"""
+Functional connectivity matrices for group analysis of connectomes
+==================================================================
+
+This example compares different kinds of functional connectivity between
+regions of interest: correlation, partial correlation, as well as a kind
+called **tangent**.
The resulting connectivity coefficients are used to
+discriminate ADHD patients from healthy controls, and the **tangent kind**
+**outperforms** the standard connectivity kinds.
+"""
+# Matrix plotting from Nilearn: nilearn.plotting.plot_matrix
+import numpy as np
+import matplotlib.pylab as plt
+
+
+def plot_matrices(matrices, matrix_kind):
+    n_matrices = len(matrices)
+    fig = plt.figure(figsize=(n_matrices * 4, 4))
+    for n_subject, matrix in enumerate(matrices):
+        plt.subplot(1, n_matrices, n_subject + 1)
+        matrix = matrix.copy()  # avoid side effects
+        # Set diagonal to zero, for better visualization
+        np.fill_diagonal(matrix, 0)
+        vmax = np.max(np.abs(matrix))
+        title = '{0}, subject {1}'.format(matrix_kind, n_subject)
+        plotting.plot_matrix(matrix, vmin=-vmax, vmax=vmax, cmap='RdBu_r',
+                             title=title, figure=fig, colorbar=False)
+
+
+###############################################################################
+# Load ADHD dataset and MSDL atlas
+# --------------------------------
+# We study only 20 subjects from the ADHD dataset, to save computation time.
+from nilearn import datasets
+
+adhd_data = datasets.fetch_adhd(n_subjects=20)
+
+###############################################################################
+# We use probabilistic regions of interest (ROIs) from the MSDL atlas.
+msdl_data = datasets.fetch_atlas_msdl()
+msdl_coords = msdl_data.region_coords
+n_regions = len(msdl_coords)
+print('MSDL has {0} ROIs, part of the following networks:\n{1}.'.format(
+    n_regions, msdl_data.networks))
+
+###############################################################################
+# Region signals extraction
+# -------------------------
+# To extract region time series, we instantiate a
+# :class:`nilearn.input_data.NiftiMapsMasker` object and pass the atlas
+# file name to it, as well as the filtering band-width and detrending option.
+from nilearn import input_data
+
+masker = input_data.NiftiMapsMasker(
+    msdl_data.maps, resampling_target="data", t_r=2.5, detrend=True,
+    low_pass=.1, high_pass=.01, memory='nilearn_cache', memory_level=1)
+
+###############################################################################
+# Then we compute region signals and extract useful phenotypic information.
+adhd_subjects = []
+pooled_subjects = []
+site_names = []
+adhd_labels = []  # 1 if ADHD, 0 if control
+for func_file, confound_file, phenotypic in zip(
+        adhd_data.func, adhd_data.confounds, adhd_data.phenotypic):
+    time_series = masker.fit_transform(func_file, confounds=confound_file)
+    pooled_subjects.append(time_series)
+    is_adhd = phenotypic['adhd']
+    if is_adhd:
+        adhd_subjects.append(time_series)
+
+    site_names.append(phenotypic['site'])
+    adhd_labels.append(is_adhd)
+
+print('Data has {0} ADHD subjects.'.format(len(adhd_subjects)))
+
+###############################################################################
+# ROI-to-ROI correlations of ADHD patients
+# ----------------------------------------
+# The simplest and most commonly used kind of connectivity is correlation. It
+# models the full (marginal) connectivity between pairwise ROIs. We can
+# estimate it using :class:`nilearn.connectome.ConnectivityMeasure`.
+from nilearn.connectome import ConnectivityMeasure
+
+correlation_measure = ConnectivityMeasure(kind='correlation')
+
+###############################################################################
+# From the list of ROI time series for ADHD subjects, the
+# `correlation_measure` computes individual correlation matrices.
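+
+# For intuition (an aside of ours; ConnectivityMeasure uses a Ledoit-Wolf
+# covariance estimator by default, so its values differ slightly), a plain
+# empirical correlation matrix for the first ADHD subject would be:
+empirical_corr = np.corrcoef(adhd_subjects[0].T)
+print('Empirical correlation matrix shape: {0}'.format(empirical_corr.shape))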
+
+correlation_matrices = correlation_measure.fit_transform(adhd_subjects)
+
+# All individual coefficients are stacked in a unique 2D matrix.
+print('Correlations of ADHD patients are stacked in an array of shape {0}'
+      .format(correlation_matrices.shape))
+
+###############################################################################
+# as well as the average correlation across all fitted subjects.
+mean_correlation_matrix = correlation_measure.mean_
+print('Mean correlation has shape {0}.'.format(mean_correlation_matrix.shape))
+
+###############################################################################
+# We display the connectomes of the first 4 ADHD subjects and the mean
+# correlation matrix over all ADHD patients.
+from nilearn import plotting
+
+plot_matrices(correlation_matrices[:4], 'correlation')
+plotting.plot_connectome(mean_correlation_matrix, msdl_coords,
+                         title='mean correlation over 13 ADHD subjects')
+
+###############################################################################
+# Look at the block structure, reflecting functional networks.
+
+###############################################################################
+# Examine partial correlations
+# ----------------------------
+# We can also study **direct connections**, revealed by partial correlation
+# coefficients. We just change the `ConnectivityMeasure` kind
+partial_correlation_measure = ConnectivityMeasure(kind='partial correlation')
+
+###############################################################################
+# and repeat the previous operation.
+partial_correlation_matrices = partial_correlation_measure.fit_transform(
+    adhd_subjects)
+
+###############################################################################
+# Most direct connections are weaker than full connections, resulting
+# in a sparse mean connectome graph.
+plot_matrices(partial_correlation_matrices[:4], 'partial')
+plotting.plot_connectome(
+    partial_correlation_measure.mean_, msdl_coords,
+    title='mean partial correlation over 13 ADHD subjects')
+
+###############################################################################
+# Extract subject variability around a robust group connectivity
+# -----------------------------------------------------------------
+# We can use **both** correlations and partial correlations to capture
+# reproducible connectivity patterns at the group-level and build a **robust**
+# **group connectivity matrix**. This is done by the **tangent** kind.
+tangent_measure = ConnectivityMeasure(kind='tangent')
+
+###############################################################################
+# We fit our ADHD group and get the group connectivity matrix stored
+# in `tangent_measure.mean_`, and individual deviation matrices of each subject
+# from it.
+tangent_matrices = tangent_measure.fit_transform(adhd_subjects)
+
+###############################################################################
+# `tangent_matrices` model individual connectivities as
+# **perturbations** of the group connectivity matrix `tangent_measure.mean_`.
+# Keep in mind that these subjects-to-group variability matrices do not
+# directly reflect individual brain connections. For instance, negative
+# coefficients cannot be interpreted as anticorrelated regions.
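+
+# Conceptually (a sketch of the underlying math on our part, not Nilearn's
+# exact code), each tangent matrix is a matrix logarithm of the subject
+# covariance whitened by the group mean; with `subject_cov` a hypothetical
+# subject covariance matrix:
+#
+#     from scipy import linalg
+#     whitener = linalg.inv(linalg.sqrtm(tangent_measure.mean_))
+#     tangent = linalg.logm(whitener.dot(subject_cov).dot(whitener))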
+plot_matrices(tangent_matrices[:4], 'tangent variability') +plotting.plot_connectome( + tangent_measure.mean_, msdl_coords, + title='mean tangent connectivity over 13 ADHD subjects') + +############################################################################### +# The mean connectome graph is not as sparse as partial correlations graph, +# yet it is less dense than correlations graph. + +############################################################################### +# What kind of connectivity is most powerful for classification? +# -------------------------------------------------------------- +# *ConnectivityMeasure* can output the estimated subjects coefficients +# as a 1D arrays through the parameter *vectorize*. +connectivity_biomarkers = {} +kinds = ['correlation', 'partial correlation', 'tangent'] +for kind in kinds: + conn_measure = ConnectivityMeasure(kind=kind, vectorize=True) + connectivity_biomarkers[kind] = conn_measure.fit_transform(pooled_subjects) + +# For each kind, all individual coefficients are stacked in a unique 2D matrix. +print('{0} correlation biomarkers for each subject.'.format( + connectivity_biomarkers['correlation'].shape[1])) + +############################################################################### +# Note that we use the **pooled groups**. This is crucial for **tangent** kind, +# to get the displacements from a **unique** `mean_` of all subjects. + +############################################################################### +# We stratify the dataset into homogeneous classes according to phenotypic +# and scan site. We then split the subjects into 3 folds with the same +# proportion of each class as in the whole cohort +from sklearn.model_selection import StratifiedKFold + +classes = ['{0}{1}'.format(site_name, adhd_label) + for site_name, adhd_label in zip(site_names, adhd_labels)] +cv = StratifiedKFold(n_splits=3) +############################################################################### +# and use the connectivity coefficients to classify ADHD patients vs controls. + +# Note that in cv.split(X, y), +# providing y is sufficient to generate the splits and +# hence np.zeros(n_samples) may be used as a placeholder for X +# instead of actual training data. +from sklearn.svm import LinearSVC +from sklearn.model_selection import cross_val_score + +mean_scores = [] +for kind in kinds: + svc = LinearSVC(random_state=0) + cv_scores = cross_val_score(svc, + connectivity_biomarkers[kind], + y=adhd_labels, + cv=cv, + groups=adhd_labels, + scoring='accuracy', + ) + mean_scores.append(cv_scores.mean()) + +############################################################################### +# Finally, we can display the classification scores. +plt.figure(figsize=(6, 4)) +positions = np.arange(len(kinds)) * .1 + .1 +plt.barh(positions, mean_scores, align='center', height=.05) +yticks = [kind.replace(' ', '\n') for kind in kinds] +plt.yticks(positions, yticks) +plt.xlabel('Classification accuracy') +plt.grid(True) +plt.tight_layout() + +plt.show() diff --git a/examples/connectivity/plot_inverse_covariance_connectome.py b/examples/03_connectivity/plot_inverse_covariance_connectome.py similarity index 60% rename from examples/connectivity/plot_inverse_covariance_connectome.py rename to examples/03_connectivity/plot_inverse_covariance_connectome.py index 2f3b36fcd5..b20eef1f01 100644 --- a/examples/connectivity/plot_inverse_covariance_connectome.py +++ b/examples/03_connectivity/plot_inverse_covariance_connectome.py @@ -6,7 +6,7 @@ covariance. 
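+
+(A gloss we add: the graphical lasso used below estimates the precision
+matrix K, the inverse covariance, by maximizing the penalized
+log-likelihood
+
+.. math::
+
+    \log \det K - \mathrm{tr}(S K) - \alpha \|K\|_1
+
+where S is the empirical covariance; the l1 term sets many off-diagonal
+terms, i.e. direct connections, exactly to zero.)
+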
We use the `MSDL atlas -`_ +`_ of functional regions in rest, and the :class:`nilearn.input_data.NiftiMapsMasker` to extract time series. @@ -23,18 +23,15 @@ ############################################################################## # Retrieve the atlas and the data +# -------------------------------- from nilearn import datasets atlas = datasets.fetch_atlas_msdl() +# Loading atlas image stored in 'maps' atlas_filename = atlas['maps'] +# Loading atlas data stored in 'labels' +labels = atlas['labels'] -# Load the labels -import numpy as np -csv_filename = atlas['labels'] - -# The recfromcsv function can load a csv file -labels = np.recfromcsv(csv_filename) -names = labels['name'] - +# Loading the functional datasets data = datasets.fetch_adhd(n_subjects=1) # print basic information on the dataset @@ -43,6 +40,7 @@ ############################################################################## # Extract time series +# -------------------- from nilearn.input_data import NiftiMapsMasker masker = NiftiMapsMasker(maps_img=atlas_filename, standardize=True, memory='nilearn_cache', verbose=5) @@ -52,6 +50,7 @@ ############################################################################## # Compute the sparse inverse covariance +# -------------------------------------- from sklearn.covariance import GraphLassoCV estimator = GraphLassoCV() @@ -59,42 +58,56 @@ ############################################################################## # Display the connectome matrix -from matplotlib import pyplot as plt - +# ------------------------------ +from nilearn import plotting # Display the covariance -plt.figure(figsize=(10, 10)) # The covariance can be found at estimator.covariance_ -plt.imshow(estimator.covariance_, interpolation="nearest", - vmax=1, vmin=-1, cmap=plt.cm.RdBu_r) -# And display the labels -x_ticks = plt.xticks(range(len(names)), names, rotation=90) -y_ticks = plt.yticks(range(len(names)), names) -plt.title('Covariance') +plotting.plot_matrix(estimator.covariance_, labels=labels, + figure=(9, 7), vmax=1, vmin=-1, + title='Covariance') ############################################################################## # And now display the corresponding graph -from nilearn import plotting -coords = labels[['x', 'y', 'z']].tolist() +# ---------------------------------------- +coords = atlas.region_coords plotting.plot_connectome(estimator.covariance_, coords, title='Covariance') ############################################################################## -# Display the sparse inverse covariance (we negate it to get partial -# correlations) -plt.figure(figsize=(10, 10)) -plt.imshow(-estimator.precision_, interpolation="nearest", - vmax=1, vmin=-1, cmap=plt.cm.RdBu_r) -# And display the labels -x_ticks = plt.xticks(range(len(names)), names, rotation=90) -y_ticks = plt.yticks(range(len(names)), names) -plt.title('Sparse inverse covariance') +# Display the sparse inverse covariance +# -------------------------------------- +# we negate it to get partial correlations +plotting.plot_matrix(-estimator.precision_, labels=labels, + figure=(9, 7), vmax=1, vmin=-1, + title='Sparse inverse covariance') ############################################################################## # And now display the corresponding graph +# ---------------------------------------- plotting.plot_connectome(-estimator.precision_, coords, title='Sparse inverse covariance') plotting.show() + +############################################################################## +# 3D visualization in a web browser +# 
--------------------------------- +# An alternative to :func:`nilearn.plotting.plot_connectome` is to use +# :func:`nilearn.plotting.view_connectome` that gives more interactive +# visualizations in a web browser. See :ref:`interactive-connectome-plotting` +# for more details. + + +view = plotting.view_connectome(-estimator.precision_, coords) + +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + +############################################################################## +# In a Jupyter notebook, if ``view`` is the output of a cell, it will +# be displayed below the cell + +view diff --git a/examples/connectivity/plot_multi_subject_connectome.py b/examples/03_connectivity/plot_multi_subject_connectome.py similarity index 72% rename from examples/connectivity/plot_multi_subject_connectome.py rename to examples/03_connectivity/plot_multi_subject_connectome.py index db95d3c21a..3da6875919 100644 --- a/examples/connectivity/plot_multi_subject_connectome.py +++ b/examples/03_connectivity/plot_multi_subject_connectome.py @@ -2,20 +2,18 @@ Group Sparse inverse covariance for multi-subject connectome ============================================================= -This example shows how to estimate a connectome on a groupe of subjects +This example shows how to estimate a connectome on a group of subjects using the group sparse inverse covariance estimate. """ -import matplotlib.pyplot as plt import numpy as np from nilearn import plotting - n_subjects = 4 # subjects to consider for group-sparse covariance (max: 40) -def plot_matrices(cov, prec, title): +def plot_matrices(cov, prec, title, labels): """Plot covariance and precision matrices, for a given processing. """ prec = prec.copy() # avoid side effects @@ -26,23 +24,18 @@ def plot_matrices(cov, prec, title): span = max(abs(prec.min()), abs(prec.max())) # Display covariance matrix - plt.figure() - plt.imshow(cov, interpolation="nearest", - vmin=-1, vmax=1, cmap=plotting.cm.bwr) - plt.colorbar() - plt.title("%s / covariance" % title) - + plotting.plot_matrix(cov, cmap=plotting.cm.bwr, + vmin=-1, vmax=1, title="%s / covariance" % title, + labels=labels) # Display precision matrix - plt.figure() - plt.imshow(prec, interpolation="nearest", - vmin=-span, vmax=span, - cmap=plotting.cm.bwr) - plt.colorbar() - plt.title("%s / precision" % title) + plotting.plot_matrix(prec, cmap=plotting.cm.bwr, + vmin=-span, vmax=span, title="%s / precision" % title, + labels=labels) ############################################################################## # Fetching datasets +# ------------------ from nilearn import datasets msdl_atlas_dataset = datasets.fetch_atlas_msdl() adhd_dataset = datasets.fetch_adhd(n_subjects=n_subjects) @@ -54,6 +47,7 @@ def plot_matrices(cov, prec, title): ############################################################################## # Extracting region signals +# -------------------------- from nilearn import image from nilearn import input_data @@ -85,6 +79,7 @@ def plot_matrices(cov, prec, title): ############################################################################## # Computing group-sparse precision matrices +# ------------------------------------------ from nilearn.connectome import GroupSparseCovarianceCV gsc = GroupSparseCovarianceCV(verbose=2) gsc.fit(subject_time_series) @@ -96,23 +91,29 @@ def plot_matrices(cov, prec, title): ############################################################################## # Displaying results -atlas_imgs = image.iter_img(msdl_atlas_dataset.maps) 
-atlas_region_coords = [plotting.find_xyz_cut_coords(img) for img in atlas_imgs] +# ------------------- +atlas_img = msdl_atlas_dataset.maps +atlas_region_coords = plotting.find_probabilistic_atlas_cut_coords(atlas_img) +labels = msdl_atlas_dataset.labels -title = "GraphLasso" -plotting.plot_connectome(-gl.precision_, atlas_region_coords, - edge_threshold='90%', - title="Sparse inverse covariance (GraphLasso)") plotting.plot_connectome(gl.covariance_, atlas_region_coords, edge_threshold='90%', - title="Covariance") -plot_matrices(gl.covariance_, gl.precision_, title) + title="Covariance", + display_mode="lzr") +plotting.plot_connectome(-gl.precision_, atlas_region_coords, + edge_threshold='90%', + title="Sparse inverse covariance (GraphLasso)", + display_mode="lzr", + edge_vmax=.5, edge_vmin=-.5) +plot_matrices(gl.covariance_, gl.precision_, "GraphLasso", labels) title = "GroupSparseCovariance" plotting.plot_connectome(-gsc.precisions_[..., 0], atlas_region_coords, edge_threshold='90%', - title=title) + title=title, + display_mode="lzr", + edge_vmax=.5, edge_vmin=-.5) plot_matrices(gsc.covariances_[..., 0], - gsc.precisions_[..., 0], title) + gsc.precisions_[..., 0], title, labels) plotting.show() diff --git a/examples/connectivity/plot_probabilistic_atlas_extraction.py b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py similarity index 59% rename from examples/connectivity/plot_probabilistic_atlas_extraction.py rename to examples/03_connectivity/plot_probabilistic_atlas_extraction.py index 14627e0ff2..c455a266f9 100644 --- a/examples/connectivity/plot_probabilistic_atlas_extraction.py +++ b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py @@ -6,7 +6,7 @@ atlas, to construct a functional connectome. We use the `MSDL atlas -`_ +`_ of functional regions in rest. 
The key to extract signals is to use the @@ -21,18 +21,15 @@ """ ############################################################################ # Retrieve the atlas and the data +# -------------------------------- from nilearn import datasets atlas = datasets.fetch_atlas_msdl() +# Loading atlas image stored in 'maps' atlas_filename = atlas['maps'] +# Loading atlas data stored in 'labels' +labels = atlas['labels'] -# Load the labels -import numpy as np -csv_filename = atlas['labels'] - -# The recfromcsv function can load a csv file -labels = np.recfromcsv(csv_filename) -names = labels['name'] - +# Load the functional datasets data = datasets.fetch_adhd(n_subjects=1) print('First subject resting-state nifti image (4D) is located at: %s' % @@ -40,6 +37,7 @@ ############################################################################ # Extract the time series +# ------------------------ from nilearn.input_data import NiftiMapsMasker masker = NiftiMapsMasker(maps_img=atlas_filename, standardize=True, memory='nilearn_cache', verbose=5) @@ -54,28 +52,47 @@ ############################################################################ # Build and display a correlation matrix -correlation_matrix = np.corrcoef(time_series.T) +# --------------------------------------- +from nilearn.connectome import ConnectivityMeasure +correlation_measure = ConnectivityMeasure(kind='correlation') +correlation_matrix = correlation_measure.fit_transform([time_series])[0] # Display the correlation matrix -from matplotlib import pyplot as plt -plt.figure(figsize=(10, 10)) +import numpy as np +from nilearn import plotting # Mask out the major diagonal np.fill_diagonal(correlation_matrix, 0) -plt.imshow(correlation_matrix, interpolation="nearest", cmap="RdBu_r", - vmax=0.8, vmin=-0.8) -plt.colorbar() -# And display the labels -x_ticks = plt.xticks(range(len(names)), names, rotation=90) -y_ticks = plt.yticks(range(len(names)), names) - +plotting.plot_matrix(correlation_matrix, labels=labels, colorbar=True, + vmax=0.8, vmin=-0.8) ############################################################################ # And now display the corresponding graph +# ---------------------------------------- from nilearn import plotting -coords = labels[['x', 'y', 'z']].tolist() +coords = atlas.region_coords # We threshold to keep only the 20% of edges with the highest value # because the graph is very dense plotting.plot_connectome(correlation_matrix, coords, - edge_threshold="80%") + edge_threshold="80%", colorbar=True) plotting.show() + +############################################################################## +# 3D visualization in a web browser +# --------------------------------- +# An alternative to :func:`nilearn.plotting.plot_connectome` is to use +# :func:`nilearn.plotting.view_connectome` that gives more interactive +# visualizations in a web browser. See :ref:`interactive-connectome-plotting` +# for more details. 
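+# Note that the ``view`` object created below can also be saved as a +# standalone HTML page, e.g. with ``view.save_as_html('connectome.html')`` +# (using the save_as_html method that nilearn's interactive views expose), +# which is convenient for sharing the interactive plot.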
+ + +view = plotting.view_connectome(correlation_matrix, coords, threshold='80%') + +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + +############################################################################## +# In a Jupyter notebook, if ``view`` is the output of a cell, it will +# be displayed below the cell. + +view diff --git a/examples/03_connectivity/plot_rest_parcellations.py b/examples/03_connectivity/plot_rest_parcellations.py new file mode 100644 index 0000000000..a11e0a030b --- /dev/null +++ b/examples/03_connectivity/plot_rest_parcellations.py @@ -0,0 +1,193 @@ +""" +Clustering methods to learn a brain parcellation from rest fMRI +==================================================================== + +We use spatially-constrained Ward clustering and KMeans to create a set +of parcels. + +In a high-dimensional regime, these methods can be useful to create a +'compressed' representation of the data, replacing the data in the fMRI +images by mean signals on the parcellation, which can subsequently be used +for statistical analysis or machine learning. + +Also, these methods can be used to learn functional connectomes +and subsequently for classification tasks. + +References +---------- + +An empirical comparison of the clustering methods can be found in this +paper + + * Bertrand Thirion, Gael Varoquaux, Elvis Dohmatob, Jean-Baptiste Poline. + `Which fMRI clustering gives good brain parcellations ? + `_ Frontiers in Neuroscience, + 2014. + +Such a parcellation may be useful in a supervised learning setting; see for +instance + + * Vincent Michel, Alexandre Gramfort, Gael Varoquaux, Evelyn Eger, + Christine Keribin, Bertrand Thirion. `A supervised clustering approach + for fMRI-based inference of brain states. + `_. + Pattern Recognition, Elsevier, 2011. + +The big picture discussion corresponding to this example can be found +in the documentation section :ref:`parcellating_brain`. +""" + +################################################################## +# Download a rest dataset and turn it into a data matrix +# ----------------------------------------------------- +# +# We download one subject of the ADHD dataset from the Internet. + +from nilearn import datasets +dataset = datasets.fetch_adhd(n_subjects=1) + +# print basic information on the dataset +print('First subject functional nifti image (4D) is at: %s' % + dataset.func[0]) # 4D data + + +######################################################################### +# Brain parcellations with Ward Clustering +# ---------------------------------------- +# +# Transforming a list of images into a data matrix and building brain +# parcellations can all be done at once using the `Parcellations` object. + +from nilearn.regions import Parcellations + +# Computing Ward clustering for the first time will be slow; we measure the +# run time with the time module. +import time +start = time.time() + +# Agglomerative Clustering: ward + +# We set the parameters of this object: parameters related to masking, +# caching, the number of clusters and the specific parcellation method. +ward = Parcellations(method='ward', n_parcels=1000, + standardize=False, smoothing_fwhm=2., + memory='nilearn_cache', memory_level=1, + verbose=1) +# Call fit on functional dataset: single subject (fewer samples). +ward.fit(dataset.func) +print("Ward agglomeration 1000 clusters: %.2fs" % (time.time() - start)) + +# We now compute Ward clustering with 2000 clusters and compare the run +# time with the 1000-cluster case,
to see the benefit of caching on this second run. + +# We initialize the class again with n_parcels=2000 this time. +start = time.time() +ward = Parcellations(method='ward', n_parcels=2000, + standardize=False, smoothing_fwhm=2., + memory='nilearn_cache', memory_level=1, + verbose=1) +ward.fit(dataset.func) +print("Ward agglomeration 2000 clusters: %.2fs" % (time.time() - start)) + +########################################################################### +# Visualize: Brain parcellations (Ward) +# ------------------------------------- +# +# First, we display the parcellations of the brain image stored in the +# `labels_img_` attribute +ward_labels_img = ward.labels_img_ + +# ward_labels_img is a Nifti1Image object; it can be saved to a file +# with the following code: +ward_labels_img.to_filename('ward_parcellation.nii.gz') + +from nilearn import plotting +from nilearn.image import mean_img, index_img + +first_plot = plotting.plot_roi(ward_labels_img, title="Ward parcellation", + display_mode='xz') + +# Grab the cut coordinates from this plot, to use as common cut coordinates +# for all plots +cut_coords = first_plot.cut_coords +########################################################################### +# Compressed representation of Ward clustering +# -------------------------------------------- +# +# Second, we illustrate the effect that the clustering has on the signal. +# We show the original data, and the approximation provided by the +# clustering by averaging the signal on each parcel. + +# Grab the number of voxels from the mask image attribute (mask_img_). +import numpy as np +original_voxels = np.sum(ward.mask_img_.get_data()) + +# Compute the mean over time of the functional image, to use the mean +# image for compressed representation comparisons +mean_func_img = mean_img(dataset.func[0]) + +# Compute common vmin and vmax +vmin = np.min(mean_func_img.get_data()) +vmax = np.max(mean_func_img.get_data()) + +plotting.plot_epi(mean_func_img, cut_coords=cut_coords, + title='Original (%i voxels)' % original_voxels, + vmax=vmax, vmin=vmin, display_mode='xz') + +# A reduced dataset can be created by taking the parcel-level average: +# Parcellations objects, whatever the method, expose a `transform` call that +# modifies the input features; here it reduces their dimension. Note that we +# `fit` before calling `transform`, so that the average signals are computed +# on the parcellation learned by the fit call. +fmri_reduced = ward.transform(dataset.func) + +# Display the corresponding data, compressed using the 2000-parcel +# parcellation. +fmri_compressed = ward.inverse_transform(fmri_reduced) + +plotting.plot_epi(index_img(fmri_compressed, 0), + cut_coords=cut_coords, + title='Ward compressed representation (2000 parcels)', + vmin=vmin, vmax=vmax, display_mode='xz') +# As you can see below, this approximation is quite accurate, although there +# are only 2000 parcels, instead of the original 60000 voxels + +######################################################################### +# Brain parcellations with KMeans Clustering +# ------------------------------------------ +# +# We use the same approach as for the Ward parcellation. However, with a +# small number of clusters, we most likely want to use standardization: +# with standardization and smoothing, the clusters form spatially coherent +# regions. + +# The class/functions can be reused here, as they are already imported above.
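+# As a quick sanity check (a minimal sketch), we can count the parcels that +# Ward actually produced, using the `labels_img_` attribute introduced above. +unique_labels = np.unique(ward.labels_img_.get_data()) +# label 0 is the background, outside the mask +print('Ward produced %d parcels' % (unique_labels.size - 1))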
+ +# This object uses method='kmeans' for KMeans clustering, with 10 mm smoothing +# and standardization ON. +# Remember to reset the timer, so the 50-cluster run is timed on its own. +start = time.time() +kmeans = Parcellations(method='kmeans', n_parcels=50, + standardize=True, smoothing_fwhm=10., + memory='nilearn_cache', memory_level=1, + verbose=1) +# Call fit on functional dataset: single subject (fewer samples) +kmeans.fit(dataset.func) +print("KMeans 50 clusters: %.2fs" % (time.time() - start)) +########################################################################### +# Visualize: Brain parcellations (KMeans) +# --------------------------------------- +# +# Grab the parcellation of the brain image stored in the `labels_img_` +# attribute +kmeans_labels_img = kmeans.labels_img_ + +plotting.plot_roi(kmeans_labels_img, mean_func_img, + title="KMeans parcellation", + display_mode='xz') + +# kmeans_labels_img is a Nifti1Image object; it can be saved to a file with +# the following code: +kmeans_labels_img.to_filename('kmeans_parcellation.nii.gz') + +################################################################## +# Finally show them + +plotting.show() diff --git a/examples/03_connectivity/plot_seed_to_voxel_correlation.py b/examples/03_connectivity/plot_seed_to_voxel_correlation.py new file mode 100644 index 0000000000..be263bcd37 --- /dev/null +++ b/examples/03_connectivity/plot_seed_to_voxel_correlation.py @@ -0,0 +1,180 @@ +""" +Producing single subject maps of seed-to-voxel correlation +========================================================== + +This example shows how to produce seed-to-voxel correlation maps for a single +subject based on resting-state fMRI scans. These maps depict the temporal +correlation of a **seed region** with the **rest of the brain**. + +This example is an advanced one that requires manipulating the data with numpy. +Note the difference between images, which lie in brain space, and the +numpy arrays, which correspond to the data inside the mask. + +See also :ref:`for a similar example using cortical surface input data +`. +""" + +# author: Franz Liem + + +########################################################################## +# Getting the data +# ---------------- + +# We will work with the first subject of the adhd data set. +# adhd_dataset.func is a list of filenames. We select the 1st (0-based) +# subject by indexing with [0]. +from nilearn import datasets + +adhd_dataset = datasets.fetch_adhd(n_subjects=1) +func_filename = adhd_dataset.func[0] +confound_filename = adhd_dataset.confounds[0] + +########################################################################## +# Note that func_filename and confound_filename are strings pointing to +# files on your hard drive. +print(func_filename) +print(confound_filename) + + +########################################################################## +# Time series extraction +# ---------------------- +# +# We are going to extract signals from the functional time series in two +# steps. First we will extract the mean signal within the **seed region of +# interest**. Second, we will extract the **brain-wide voxel-wise time series**. +# +# We will be working with one seed sphere in the Posterior Cingulate Cortex, +# considered part of the Default Mode Network. +pcc_coords = [(0, -52, 18)] + +########################################################################## +# We use :class:`nilearn.input_data.NiftiSpheresMasker` to extract the +# **time series from the functional imaging within the sphere**. The +# sphere is centered at pcc_coords and will have the radius we pass to the +# NiftiSpheresMasker function (here 8 mm).
+ +# +# The extraction will also detrend, standardize, and bandpass filter the data. +# This will create a NiftiSpheresMasker object. +from nilearn import input_data + +seed_masker = input_data.NiftiSpheresMasker( + pcc_coords, radius=8, + detrend=True, standardize=True, + low_pass=0.1, high_pass=0.01, t_r=2., + memory='nilearn_cache', memory_level=1, verbose=0) + +########################################################################## +# Then we extract the mean time series within the seed region while +# regressing out the confounds that +# can be found in the dataset's csv file +seed_time_series = seed_masker.fit_transform(func_filename, + confounds=[confound_filename]) + +########################################################################## +# Next, we can proceed similarly for the **brain-wide voxel-wise time +# series**, using :class:`nilearn.input_data.NiftiMasker` with the same +# arguments as the seed_masker, plus smoothing with a 6 mm kernel +brain_masker = input_data.NiftiMasker( + smoothing_fwhm=6, + detrend=True, standardize=True, + low_pass=0.1, high_pass=0.01, t_r=2., + memory='nilearn_cache', memory_level=1, verbose=0) + +########################################################################## +# Then we extract the brain-wide voxel-wise time series while regressing +# out the confounds as before +brain_time_series = brain_masker.fit_transform(func_filename, + confounds=[confound_filename]) + + +########################################################################## +# We can now inspect the extracted time series. Note that the **seed time +# series** is an array with shape (n_volumes, 1), while the +# **brain time series** is an array with shape (n_volumes, n_voxels). + +print("seed time series shape: (%s, %s)" % seed_time_series.shape) +print("brain time series shape: (%s, %s)" % brain_time_series.shape) + +########################################################################## +# We can plot the **seed time series**. + +import matplotlib.pyplot as plt + +plt.plot(seed_time_series) +plt.title('Seed time series (Posterior cingulate cortex)') +plt.xlabel('Scan number') +plt.ylabel('Normalized signal') +plt.tight_layout() + +########################################################################## +# For illustration, we can also select 5 voxels from the **brain-wide +# data** and plot their time series. + +plt.plot(brain_time_series[:, [10, 45, 100, 5000, 10000]]) +plt.title('Time series from 5 random voxels') +plt.xlabel('Scan number') +plt.ylabel('Normalized signal') +plt.tight_layout() + + + +########################################################################## +# Performing the seed-based correlation analysis +# ---------------------------------------------- +# +# Now that we have two arrays (**sphere signal**: (n_volumes, 1), +# **brain-wide voxel-wise signal** (n_volumes, n_voxels)), we can correlate +# the **seed signal** with the **signal of each voxel**. The dot product of +# the two arrays will give us this correlation. Note that the signals have +# been variance-standardized during extraction; to turn the dot product into +# a correlation, we further have to divide the result by the length of the +# time series.
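+# Concretely, for z-scored signals x and y of length T, the correlation is +# corr(x, y) = (1 / T) * sum_t x_t * y_t, which is exactly the scaled dot +# product computed below.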
+import numpy as np + +seed_based_correlations = np.dot(brain_time_series.T, seed_time_series) / \ + seed_time_series.shape[0] + +################################################ +# The resulting array contains the correlation between the signal in the +# **seed region** of interest and **each voxel's signal**, and is of shape +# (n_voxels, 1). The correlation values can potentially range between -1 +# and 1. +print("seed-based correlation shape: (%s, %s)" % seed_based_correlations.shape) +print("seed-based correlation: min = %.3f; max = %.3f" % ( + seed_based_correlations.min(), seed_based_correlations.max())) + + +########################################################################## +# Fisher-z transformation and save nifti +# -------------------------------------- +# Now we can Fisher-z transform the data, to make its distribution closer +# to normal. The transformed array can now have values more extreme than +/- 1. +seed_based_correlations_fisher_z = np.arctanh(seed_based_correlations) +print("seed-based correlation Fisher-z transformed: min = %.3f; max = %.3f" % ( + seed_based_correlations_fisher_z.min(), + seed_based_correlations_fisher_z.max())) + +# Finally, we can transform the Fisher-z values back to a Nifti image +# object, that we can save (consistently with the 'sbc_z' file name). +seed_based_correlation_img = brain_masker.inverse_transform( + seed_based_correlations_fisher_z.T) +seed_based_correlation_img.to_filename('sbc_z.nii.gz') + + +########################################################################## +# Plotting the seed-based correlation map +# --------------------------------------- +# We can also plot this image and perform thresholding to only show values +# more extreme than +/- 0.3. Furthermore, we can display the location of the +# seed with a sphere and set the cross to the center of the seed region of +# interest. +from nilearn import plotting + +display = plotting.plot_stat_map(seed_based_correlation_img, threshold=0.3, + cut_coords=pcc_coords[0]) +display.add_markers(marker_coords=pcc_coords, marker_color='g', + marker_size=300) +# At last, we save the plot as pdf. +display.savefig('sbc_z.pdf') diff --git a/examples/connectivity/plot_signal_extraction.py b/examples/03_connectivity/plot_signal_extraction.py similarity index 66% rename from examples/connectivity/plot_signal_extraction.py rename to examples/03_connectivity/plot_signal_extraction.py index bb24edb6ad..b72ffc98ed 100644 --- a/examples/connectivity/plot_signal_extraction.py +++ b/examples/03_connectivity/plot_signal_extraction.py @@ -19,14 +19,19 @@ This is just a code example, see the :ref:`corresponding section in the documentation ` for more. + +.. note:: + This example needs SciPy >= 1.0.0 for the reordering of the matrix.
""" ############################################################################## # Retrieve the atlas and the data +# -------------------------------- from nilearn import datasets dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm') -atlas_filename, labels = dataset.maps, dataset.labels +atlas_filename = dataset.maps +labels = dataset.labels print('Atlas ROIs are located in nifti image (4D) at: %s' % atlas_filename) # 4D data @@ -36,8 +41,9 @@ fmri_filenames = data.func[0] ############################################################################## -# Extract signals on a parcellation defined by labels using the -# NiftiLabelsMasker +# Extract signals on a parcellation defined by labels +# ----------------------------------------------------- +# Using the NiftiLabelsMasker from nilearn.input_data import NiftiLabelsMasker masker = NiftiLabelsMasker(labels_img=atlas_filename, standardize=True, memory='nilearn_cache', verbose=5) @@ -50,44 +56,36 @@ ############################################################################## # Compute and display a correlation matrix -import numpy as np -correlation_matrix = np.corrcoef(time_series.T) +# ----------------------------------------- +from nilearn.connectome import ConnectivityMeasure +correlation_measure = ConnectivityMeasure(kind='correlation') +correlation_matrix = correlation_measure.fit_transform([time_series])[0] # Plot the correlation matrix -from matplotlib import pyplot as plt -plt.figure(figsize=(10, 10)) +import numpy as np +from nilearn import plotting +# Make a large figure # Mask the main diagonal for visualization: np.fill_diagonal(correlation_matrix, 0) - -plt.imshow(correlation_matrix, interpolation="nearest", cmap="RdBu_r", - vmax=0.8, vmin=-0.8) - -# Add labels and adjust margins -x_ticks = plt.xticks(range(len(labels) - 1), labels[1:], rotation=90) -y_ticks = plt.yticks(range(len(labels) - 1), labels[1:]) -plt.gca().yaxis.tick_right() -plt.subplots_adjust(left=.01, bottom=.3, top=.99, right=.62) - +# The labels we have start with the background (0), hence we skip the +# first label +# matrices are ordered for block-like representation +plotting.plot_matrix(correlation_matrix, figure=(10, 8), labels=labels[1:], + vmax=0.8, vmin=-0.8, reorder=True) ############################################################################### # Same thing without confounds, to stress the importance of confounds +# -------------------------------------------------------------------- time_series = masker.fit_transform(fmri_filenames) # Note how we did not specify confounds above. This is bad! 
-correlation_matrix = np.corrcoef(time_series.T) +correlation_matrix = correlation_measure.fit_transform([time_series])[0] # Mask the main diagonal for visualization: np.fill_diagonal(correlation_matrix, 0) -plt.figure(figsize=(10, 10)) -plt.imshow(correlation_matrix, interpolation="nearest", cmap="RdBu_r", - vmax=0.8, vmin=-0.8) - -x_ticks = plt.xticks(range(len(labels) - 1), labels[1:], rotation=90) -y_ticks = plt.yticks(range(len(labels) - 1), labels[1:]) -plt.gca().yaxis.tick_right() -plt.subplots_adjust(left=.01, bottom=.3, top=.99, right=.62) -plt.suptitle('No confounds', size=27) +plotting.plot_matrix(correlation_matrix, figure=(10, 8), labels=labels[1:], + vmax=0.8, vmin=-0.8, title='No confounds', reorder=True) -plt.show() +plotting.show() diff --git a/examples/connectivity/plot_simulated_connectome.py b/examples/03_connectivity/plot_simulated_connectome.py similarity index 63% rename from examples/connectivity/plot_simulated_connectome.py rename to examples/03_connectivity/plot_simulated_connectome.py index be67306d4d..1f192c3aab 100644 --- a/examples/connectivity/plot_simulated_connectome.py +++ b/examples/03_connectivity/plot_simulated_connectome.py @@ -10,12 +10,6 @@ import matplotlib.pyplot as plt -def plot_matrix(m, ylabel=""): - abs_max = abs(m).max() - plt.imshow(m, cmap=plt.cm.RdBu_r, interpolation="nearest", - vmin=-abs_max, vmax=abs_max) - - # Generate synthetic data from nilearn._utils.testing import generate_group_sparse_gaussian_graphs @@ -25,11 +19,15 @@ def plot_matrix(m, ylabel=""): n_subjects=n_subjects, n_features=10, min_n_samples=30, max_n_samples=50, density=0.1) +from nilearn import plotting fig = plt.figure(figsize=(10, 7)) plt.subplots_adjust(hspace=0.4) for n in range(n_displayed): - plt.subplot(n_displayed, 4, 4 * n + 1) - plot_matrix(precisions[n]) + ax = plt.subplot(n_displayed, 4, 4 * n + 1) + max_precision = precisions[n].max() + plotting.plot_matrix(precisions[n], vmin=-max_precision, + vmax=max_precision, axes=ax, colorbar=False) + if n == 0: plt.title("ground truth") plt.ylabel("subject %d" % n) @@ -41,8 +39,10 @@ def plot_matrix(m, ylabel=""): gsc.fit(subjects) for n in range(n_displayed): - plt.subplot(n_displayed, 4, 4 * n + 2) - plot_matrix(gsc.precisions_[..., n]) + ax = plt.subplot(n_displayed, 4, 4 * n + 2) + max_precision = gsc.precisions_[..., n].max() + plotting.plot_matrix(gsc.precisions_[..., n], axes=ax, vmin=-max_precision, + vmax=max_precision, colorbar=False) if n == 0: plt.title("group-sparse\n$\\alpha=%.2f$" % gsc.alpha_) @@ -54,8 +54,10 @@ def plot_matrix(m, ylabel=""): for n, subject in enumerate(subjects[:n_displayed]): gl.fit(subject) - plt.subplot(n_displayed, 4, 4 * n + 3) - plot_matrix(gl.precision_) + ax = plt.subplot(n_displayed, 4, 4 * n + 3) + max_precision = gl.precision_.max() + plotting.plot_matrix(gl.precision_, axes=ax, vmin=-max_precision, + vmax=max_precision, colorbar=False) if n == 0: plt.title("graph lasso") plt.ylabel("$\\alpha=%.2f$" % gl.alpha_) @@ -65,8 +67,10 @@ def plot_matrix(m, ylabel=""): import numpy as np gl.fit(np.concatenate(subjects)) -plt.subplot(n_displayed, 4, 4) -plot_matrix(gl.precision_) +ax = plt.subplot(n_displayed, 4, 4) +max_precision = gl.precision_.max() +plotting.plot_matrix(gl.precision_, axes=ax, vmin=-max_precision, + vmax=max_precision, colorbar=False) plt.title("graph lasso, all subjects\n$\\alpha=%.2f$" % gl.alpha_) plt.show() diff --git a/examples/03_connectivity/plot_sphere_based_connectome.py b/examples/03_connectivity/plot_sphere_based_connectome.py new file mode 100644 
index 0000000000..d52a006513 --- /dev/null +++ b/examples/03_connectivity/plot_sphere_based_connectome.py @@ -0,0 +1,148 @@ +""" +Extract signals on spheres from an atlas and plot a connectome +============================================================== + +This example shows how to extract signals from spherical regions +centered on coordinates from the Power-264 atlas [1] and the Dosenbach-160 +atlas [2]. We estimate the connectome using **sparse inverse covariance**, +to recover the functional brain **network structure**. + +**References** + +[1] Power, Jonathan D., et al. "Functional network organization of the +human brain." Neuron 72.4 (2011): 665-678. + +[2] Dosenbach N.U., Nardos B., et al. "Prediction of individual brain maturity +using fMRI.", 2010, Science 329, 1358-1361. + +""" + +############################################################################### +# Load fMRI data and Power atlas +# ------------------------------ +# +# We are going to use a single subject from the ADHD dataset. +from nilearn import datasets + +adhd = datasets.fetch_adhd(n_subjects=1) + +############################################################################### +# We store the paths to its functional image and the confounds file. +fmri_filename = adhd.func[0] +confounds_filename = adhd.confounds[0] +print('Functional image is {0},\nconfounds are {1}.'.format(fmri_filename, + confounds_filename)) + +############################################################################### +# We fetch the coordinates of the Power atlas. +power = datasets.fetch_coords_power_2011() +print('Power atlas comes with {0}.'.format(power.keys())) + +############################################################################### +# Compute within spheres averaged time-series +# ------------------------------------------- +# +# We can compute the mean signal within **spheres** of a fixed radius around +# a sequence of (x, y, z) coordinates with the object +# :class:`nilearn.input_data.NiftiSpheresMasker`. +# So we collect the region coordinates in a numpy array +import numpy as np + +coords = np.vstack((power.rois['x'], power.rois['y'], power.rois['z'])).T + +print('Stacked power coordinates in array of shape {0}.'.format(coords.shape)) + +############################################################################### +# and define the spheres masker, with a radius small enough to avoid overlap +# between regions. +from nilearn import input_data + +spheres_masker = input_data.NiftiSpheresMasker( + seeds=coords, smoothing_fwhm=4, radius=5., + detrend=True, standardize=True, low_pass=0.1, high_pass=0.01, t_r=2.5) + +############################################################################### +# Voxel-wise time-series within each sphere are averaged. The resulting signal +# is then prepared by the masker object: detrended, cleaned from confounds, +# band-pass filtered and **standardized to unit variance**. +timeseries = spheres_masker.fit_transform(fmri_filename, + confounds=confounds_filename) + +############################################################################### +# Estimate correlations +# --------------------- +# +# Everything starts with the estimation of the signals' **covariance** matrix. +# Here the number of ROIs exceeds the number of samples, +print('time series has {0} samples'.format(timeseries.shape[0])) + +############################################################################### +# a situation in which the graphical lasso **sparse inverse covariance** +# estimator captures the covariance **structure** well.
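+# A zero entry (i, j) in the inverse covariance (or precision) matrix means +# that regions i and j are conditionally independent given all the other +# regions; this is why imposing sparsity on it recovers a network structure.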
+from sklearn.covariance import GraphLassoCV + +covariance_estimator = GraphLassoCV(verbose=1) + +############################################################################### +# We simply fit our region signals to the `GraphLassoCV` object +covariance_estimator.fit(timeseries) + +############################################################################### +# and get the ROI-to-ROI covariance matrix. +matrix = covariance_estimator.covariance_ +print('Covariance matrix has shape {0}.'.format(matrix.shape)) + +############################################################################### +# Plot matrix and graph +# --------------------- +# +# We use nilearn.plotting.plot_matrix to visualize our correlation matrix +# and display the graph of connections with `nilearn.plotting.plot_connectome`. +from nilearn import plotting + +plotting.plot_matrix(matrix, vmin=-1., vmax=1., colorbar=True, + title='Power correlation matrix') + +# Tweak edge_threshold to keep only the strongest connections. +plotting.plot_connectome(matrix, coords, title='Power correlation graph', + edge_threshold='99.8%', node_size=20, colorbar=True) + +############################################################################### +# Note the 1. on the matrix diagonal: these are the signal variances, set to +# 1. by the `spheres_masker`. Hence the covariance of the signals is a +# correlation matrix. + +############################################################################### +# Connectome extracted from Dosenbach's atlas +# ------------------------------------------- +# +# We repeat the same steps for Dosenbach's atlas. +dosenbach = datasets.fetch_coords_dosenbach_2010() + +coords = np.vstack(( + dosenbach.rois['x'], + dosenbach.rois['y'], + dosenbach.rois['z'], +)).T + +spheres_masker = input_data.NiftiSpheresMasker( + seeds=coords, smoothing_fwhm=4, radius=4.5, + detrend=True, standardize=True, low_pass=0.1, high_pass=0.01, t_r=2.5) + +timeseries = spheres_masker.fit_transform(fmri_filename, + confounds=confounds_filename) + +covariance_estimator = GraphLassoCV() +covariance_estimator.fit(timeseries) +matrix = covariance_estimator.covariance_ + +plotting.plot_matrix(matrix, vmin=-1., vmax=1., colorbar=True, + title='Dosenbach correlation matrix') + +plotting.plot_connectome(matrix, coords, title='Dosenbach correlation graph', + edge_threshold="99.7%", node_size=20, colorbar=True) + +############################################################################### +# We can easily identify Dosenbach's networks from the matrix blocks. +print('Dosenbach networks names are {0}'.format(np.unique(dosenbach.networks))) + +plotting.show() diff --git a/examples/04_manipulating_images/README.txt b/examples/04_manipulating_images/README.txt new file mode 100644 index 0000000000..3e9090f5fa --- /dev/null +++ b/examples/04_manipulating_images/README.txt @@ -0,0 +1,4 @@ +Manipulating brain image volumes +-------------------------------- + +See :ref:`data_manipulation` for more details.
diff --git a/examples/manipulating_visualizing/plot_affine_transformation.py b/examples/04_manipulating_images/plot_affine_transformation.py similarity index 94% rename from examples/manipulating_visualizing/plot_affine_transformation.py rename to examples/04_manipulating_images/plot_affine_transformation.py index d3cd88d2e0..7f46273506 100644 --- a/examples/manipulating_visualizing/plot_affine_transformation.py +++ b/examples/04_manipulating_images/plot_affine_transformation.py @@ -11,10 +11,10 @@ matrix yields (x, y, z, 1), a 4-vector containing the millimeter position of the voxel. -The resampling procedure in `resample_img` can attribute a new affine matrix -and a new shape to your Nifti image while keeping its representation in -millimeter space exactly the same (up to sampling error and possible -clipping). +The resampling procedure in :func:`nilearn.image.resample_img` can attribute +a new affine matrix and a new shape to your Nifti image while keeping its +representation in millimeter space exactly the same (up to sampling error and +possible clipping). This example shows a 2D image in voxel space, and the position of the data in millimeter space, as encoded by the affine matrix. The image is the resampled @@ -90,7 +90,7 @@ img_4d_affine = resample_img(img, target_affine=target_affine_4x4) target_affine_mm_space_offset_changed = np.eye(4) target_affine_mm_space_offset_changed[:3, 3] = \ - img_3d_affine.get_affine()[:3, 3] + img_3d_affine.affine[:3, 3] img_3d_affine_in_mm_space = resample_img( img_3d_affine, diff --git a/examples/04_manipulating_images/plot_compare_mean_image.py b/examples/04_manipulating_images/plot_compare_mean_image.py new file mode 100644 index 0000000000..ecdc87d824 --- /dev/null +++ b/examples/04_manipulating_images/plot_compare_mean_image.py @@ -0,0 +1,34 @@ +""" +Comparing the means of 2 images +=============================== + +The goal of this example is to illustrate the use of the function +:func:`nilearn.image.math_img` with several images as input. +We compare the means of 2 resting state 4D images. The means of the images +could also have been computed with the nilearn :func:`nilearn.image.mean_img` +function. +""" + + +############################################################################### +# Fetching resting-state functional MRI data from 2 subjects. +from nilearn import datasets +dataset = datasets.fetch_adhd(n_subjects=2) + + +############################################################################### +# Print basic information on the ADHD subjects' resting-state datasets. +print('Subject 1 resting state dataset at: %s' % dataset.func[0]) +print('Subject 2 resting state dataset at: %s' % dataset.func[1]) + + +############################################################################### +# Comparing the means of the 2 resting state datasets.
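+# The same result could also be obtained in two steps with +# :func:`nilearn.image.mean_img` (a sketch of the alternative mentioned in +# the docstring above): +# mean1 = image.mean_img(dataset.func[0]) +# mean2 = image.mean_img(dataset.func[1]) +# result_img = image.math_img("img1 - img2", img1=mean1, img2=mean2)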
+from nilearn import plotting, image + +result_img = image.math_img("np.mean(img1, axis=-1) - np.mean(img2, axis=-1)", + img1=dataset.func[0], + img2=dataset.func[1]) + +plotting.plot_stat_map(result_img, + title="Comparing means of 2 resting state 4D images.") +plotting.show() diff --git a/examples/04_manipulating_images/plot_extract_regions_labels_image.py b/examples/04_manipulating_images/plot_extract_regions_labels_image.py new file mode 100644 index 0000000000..ce92c14b57 --- /dev/null +++ b/examples/04_manipulating_images/plot_extract_regions_labels_image.py @@ -0,0 +1,115 @@ +""" +Breaking an atlas of labels into separate regions +================================================= + +This example shows how to use +:class:`nilearn.regions.connected_label_regions` +to assign each spatially-separated region of the atlas a unique label. + +Indeed, often in a given atlas of labels, the same label (number) may +be used in different connected regions, for instance a region in each +hemisphere. If we want to operate on regions and not networks (for +instance in signal extraction), it is useful to assign a different +label to each region. We end up with a new atlas that has more labels, +but each one points to a single region. + +We use the Yeo atlas as an example for labeling regions, +:func:`nilearn.datasets.fetch_atlas_yeo_2011` + +""" + +############################################################################## +# The original Yeo atlas +# ----------------------- + +# First we fetch the Yeo atlas +from nilearn import datasets + +atlas_yeo_2011 = datasets.fetch_atlas_yeo_2011() +atlas_yeo = atlas_yeo_2011.thick_7 + +# Let's now plot it +from nilearn import plotting + +plotting.plot_roi(atlas_yeo, title='Original Yeo atlas', + cut_coords=(8, -4, 9), colorbar=True, cmap='Paired') + +############################################################################## +# The original Yeo atlas has 7 labels, as indicated in the colorbar. +# The colorbar also shows the correspondence between the colors and the labels. +# +# Note that these 7 labels actually correspond to networks that comprise +# several regions. We are going to split them up. + +############################################################################## +# Relabeling the atlas into separated regions +# --------------------------------------------- +# +# Now we use connected_label_regions to break apart the networks +# of the Yeo atlas into separate regions +from nilearn.regions import connected_label_regions +region_labels = connected_label_regions(atlas_yeo) + +############################################################################## +# Plotting the new regions +plotting.plot_roi(region_labels, title='Relabeled Yeo atlas', + cut_coords=(8, -4, 9), colorbar=True, cmap='Paired') + +############################################################################## +# Note that the same cluster may have a different color in the original and +# the relabeled atlas, so you cannot directly compare colors. +# +# However, you can see that the regions in the left and right hemispheres +# now have different colors. Some regions are difficult to tell apart +# visually, as their colors are too close on the colormap (e.g. in the +# blue range: regions labeled around 3). +# +# Also, we can see that there are many more labels: the colorbar goes up +# to 49. The 7 networks of the Yeo atlas are now broken up into 49 +# ROIs. +# +# You can save the new atlas to a nifti file using the to_filename method.
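+# Note that region_labels is a standard nibabel Nifti1Image object, so +# to_filename (used below) is the usual nibabel saving method.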
+region_labels.to_filename('relabeled_yeo_atlas.nii.gz') + +# The images are saved to the current folder. It is possible to specify the +# folder for saving the results, e.g.: +# import os +# region_labels.to_filename(os.path.join(folder_path, +# 'relabeled_yeo_atlas.nii.gz')) + + +############################################################################## +# Different connectivity modes +# ----------------------------- +# +# Using the parameter connect_diag=False, we additionally separate regions +# that are connected only along the diagonal. + +region_labels_not_diag = connected_label_regions(atlas_yeo, + connect_diag=False) + +plotting.plot_roi(region_labels_not_diag, + title='Relabeling and connect_diag=False', + cut_coords=(8, -4, 9), colorbar=True, cmap='Paired') + + +############################################################################## +# A consequence of using connect_diag=False is that we can get a lot of +# small regions, around 110 judging from the colorbar. +# +# Hence we suggest using connect_diag=True + +############################################################################## +# Parameter min_size +# ------------------- +# +# In the above, we get around 110 regions, but many of these are very +# small. We can remove them with the min_size parameter, keeping only the +# regions larger than 100 mm^3. +region_labels_min_size = connected_label_regions(atlas_yeo, min_size=100, + connect_diag=False) + +plotting.plot_roi(region_labels_min_size, title='Relabeling and min_size', + cut_coords=(8, -4, 9), colorbar=True, cmap='Paired') + +plotting.show() diff --git a/examples/manipulating_visualizing/plot_extract_rois_smith_atlas.py b/examples/04_manipulating_images/plot_extract_rois_smith_atlas.py similarity index 87% rename from examples/manipulating_visualizing/plot_extract_rois_smith_atlas.py rename to examples/04_manipulating_images/plot_extract_rois_smith_atlas.py index d8914a4bf8..0653d2ccc4 100644 --- a/examples/manipulating_visualizing/plot_extract_rois_smith_atlas.py +++ b/examples/04_manipulating_images/plot_extract_rois_smith_atlas.py @@ -44,14 +44,14 @@ import numpy as np DMN_network = index_img(atlas_networks, 3) -plotting.plot_roi(DMN_network, display_mode='z', cut_coords=1, - title='Network 3') +plotting.plot_stat_map(DMN_network, display_mode='z', cut_coords=1, + title='Network 3', colorbar=False) regions_indices_network3 = np.where(np.array(extraction.index_) == 3) for index in regions_indices_network3[0]: cur_img = index_img(extraction.regions_img_, index) coords = find_xyz_cut_coords(cur_img) - plotting.plot_roi(cur_img, display_mode='z', cut_coords=coords[2:3], - title="Blob of network3") + plotting.plot_stat_map(cur_img, display_mode='z', cut_coords=coords[2:3], + title="Blob of network3", colorbar=False) plotting.show() diff --git a/examples/manipulating_visualizing/plot_extract_rois_statistical_maps.py b/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py similarity index 85% rename from examples/manipulating_visualizing/plot_extract_rois_statistical_maps.py rename to examples/04_manipulating_images/plot_extract_rois_statistical_maps.py index 3b57f30469..72773ff068 100644 --- a/examples/manipulating_visualizing/plot_extract_rois_statistical_maps.py +++ b/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py @@ -5,7 +5,8 @@ This example shows how to extract regions or separate the regions from a statistical map.
-We use localizer t-statistic maps from :func:`nilearn.datasets.fetch_localizer_contrasts` +We use localizer t-statistic maps from +:func:`nilearn.datasets.fetch_neurovault_auditory_computation_task` as an input image. The idea is to threshold an image to get foreground objects using a @@ -18,10 +19,8 @@ # utilities from nilearn import datasets -n_subjects = 3 -localizer_path = datasets.fetch_localizer_contrasts( - ['calculation (auditory cue)'], n_subjects=n_subjects, get_tmaps=True) -tmap_filename = localizer_path.tmaps[0] +localizer = datasets.fetch_neurovault_auditory_computation_task() +tmap_filename = localizer.images[0] ################################################################################ # Threshold the t-statistic image by importing threshold function @@ -34,7 +33,7 @@ # Type 2: threshold strategy used will be based on image intensity # Here, threshold value should be within the limits i.e. less than max value. -threshold_value_img = threshold_img(tmap_filename, threshold=4.) +threshold_value_img = threshold_img(tmap_filename, threshold=3.0) ################################################################################ # Visualization @@ -63,12 +62,12 @@ # Visualizing region extraction results title = ("ROIs using percentile thresholding. " "\n Each ROI in same color is an extracted region") -plotting.plot_prob_atlas(regions_percentile_img, anat_img=tmap_filename, +plotting.plot_prob_atlas(regions_percentile_img, bg_img=tmap_filename, view_type='contours', display_mode='z', cut_coords=5, title=title) title = ("ROIs using image intensity thresholding. " "\n Each ROI in same color is an extracted region") -plotting.plot_prob_atlas(regions_value_img, anat_img=tmap_filename, +plotting.plot_prob_atlas(regions_value_img, bg_img=tmap_filename, view_type='contours', display_mode='z', cut_coords=5, title=title) plotting.show() diff --git a/examples/04_manipulating_images/plot_mask_computation.py b/examples/04_manipulating_images/plot_mask_computation.py new file mode 100644 index 0000000000..b7ec34b609 --- /dev/null +++ b/examples/04_manipulating_images/plot_mask_computation.py @@ -0,0 +1,146 @@ +""" +Understanding NiftiMasker and mask computation +================================================== + +In this example, the Nifti masker is used to automatically compute a mask. + +* The default strategy is based on the background. + +* Another option is to use a template. + +* For raw EPI, as in resting-state time series, we need to use the + 'epi' strategy of the NiftiMasker. + +In addition, we show here how to tweak the different parameters of the +underlying routine that extract masks from EPI +:func:`nilearn.masking.compute_epi_mask`. + +""" + + +from nilearn.input_data import NiftiMasker +import nilearn.image as image +from nilearn.plotting import plot_roi, plot_epi, show + +############################################################################### +# Computing a mask from the background +############################################################################### +# +# The default strategy to compute a mask, eg in NiftiMasker is to try to +# detect the background. 
+# +# With data that has already been masked, this will work well, as it lies +# on a homogeneous background + +# Load Miyawaki dataset +from nilearn import datasets +miyawaki_dataset = datasets.fetch_miyawaki2008() + +# print basic information on the dataset +print('First functional nifti image (4D) is located at: %s' % + miyawaki_dataset.func[0]) # 4D data + +miyawaki_filename = miyawaki_dataset.func[0] +miyawaki_mean_img = image.mean_img(miyawaki_filename) +plot_epi(miyawaki_mean_img, title='Mean EPI image') +############################################################################### +# A NiftiMasker with the default strategy +masker = NiftiMasker() +masker.fit(miyawaki_filename) + +# Plot the generated mask +plot_roi(masker.mask_img_, miyawaki_mean_img, + title="Mask from already masked data") + + +############################################################################### +# Computing a mask from raw EPI data +############################################################################### +# +# From raw EPI data, there is no uniform background, and a different +# strategy is necessary + +# Load ADHD resting-state dataset +dataset = datasets.fetch_adhd(n_subjects=1) +epi_filename = dataset.func[0] + +# Restrict to 100 frames to speed up computation +from nilearn.image import index_img +epi_img = index_img(epi_filename, slice(0, 100)) + +# To display the background +mean_img = image.mean_img(epi_img) +plot_epi(mean_img, title='Mean EPI image') + +############################################################################### +# Simple mask extraction from EPI images +# We need to specify an 'epi' mask_strategy, as this is raw EPI data +masker = NiftiMasker(mask_strategy='epi') +masker.fit(epi_img) +plot_roi(masker.mask_img_, mean_img, title='EPI automatic mask') + +############################################################################### +# Generate mask with strong opening +# +# We can fine-tune the outline of the mask by increasing the number of +# opening steps (`opening=10`) using the `mask_args` argument of the +# NiftiMasker. This effectively performs erosion and dilation +# operations on the outer voxel layers of the mask, which can for example +# remove remaining +# skull parts in the image. +masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=10)) +masker.fit(epi_img) +plot_roi(masker.mask_img_, mean_img, title='EPI Mask with strong opening') + +############################################################################### +# Generate mask with a high lower cutoff +# +# The NiftiMasker calls the nilearn.masking.compute_epi_mask function to +# compute the mask from the EPI. It has two important parameters: +# lower_cutoff and upper_cutoff. These set the grey-value bounds in which +# the masking algorithm will search for its threshold (0 being the +# minimum of the image and 1 the maximum). We will here increase the +# lower cutoff to enforce selection of those voxels that appear as bright +# in the EPI image. + +masker = NiftiMasker(mask_strategy='epi', + mask_args=dict(upper_cutoff=.9, lower_cutoff=.8, + opening=False)) +masker.fit(epi_img) +plot_roi(masker.mask_img_, mean_img, + title='EPI Mask: high lower_cutoff') + +############################################################################### +# Computing the mask from the MNI template +############################################################################### +# +# A mask can also be computed from the MNI gray matter template. 
In this +# case, it is resampled to the target image + +masker = NiftiMasker(mask_strategy='template') +masker.fit(epi_img) +plot_roi(masker.mask_img_, mean_img, + title='Mask from template') + + +############################################################################### +# After mask computation: extracting time series +############################################################################### +# +# Extract time series + +# trended vs detrended +trended = NiftiMasker(mask_strategy='epi') +detrended = NiftiMasker(mask_strategy='epi', detrend=True) +trended_data = trended.fit_transform(epi_img) +detrended_data = detrended.fit_transform(epi_img) + +# The timeseries are numpy arrays, so we can manipulate them with numpy +import numpy as np + +print("Trended: mean %.2f, std %.2f" % + (np.mean(trended_data), np.std(trended_data))) +print("Detrended: mean %.2f, std %.2f" % + (np.mean(detrended_data), np.std(detrended_data))) + +show() diff --git a/examples/04_manipulating_images/plot_negate_image.py b/examples/04_manipulating_images/plot_negate_image.py new file mode 100644 index 0000000000..b7166c8652 --- /dev/null +++ b/examples/04_manipulating_images/plot_negate_image.py @@ -0,0 +1,29 @@ +""" +Negating an image with math_img +=============================== + +The goal of this example is to illustrate the use of the function +:func:`nilearn.image.math_img` on T-maps. +We compute a negative image by multiplying its voxel values with -1. +""" + +from nilearn import datasets, plotting, image + +############################################################################### +# Retrieve the data: the localizer dataset with contrast maps. +motor_images = datasets.fetch_neurovault_motor_task() +stat_img = motor_images.images[0] + +############################################################################### +# Multiply voxel values by -1. +negative_stat_img = image.math_img("-img", img=stat_img) + +plotting.plot_stat_map(stat_img, + cut_coords=(36, -27, 66), + threshold=3, title="t-map", vmax=9 +) +plotting.plot_stat_map(negative_stat_img, + cut_coords=(36, -27, 66), + threshold=3, title="Negative t-map", vmax=9 +) +plotting.show() diff --git a/examples/manipulating_visualizing/plot_nifti_simple.py b/examples/04_manipulating_images/plot_nifti_simple.py similarity index 89% rename from examples/manipulating_visualizing/plot_nifti_simple.py rename to examples/04_manipulating_images/plot_nifti_simple.py index b5ae2749c5..50367001c9 100644 --- a/examples/manipulating_visualizing/plot_nifti_simple.py +++ b/examples/04_manipulating_images/plot_nifti_simple.py @@ -10,11 +10,10 @@ # Retrieve the NYU test-retest dataset from nilearn import datasets -nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1) -func_filename = nyu_dataset.func[0] +dataset = datasets.fetch_adhd(n_subjects=1) +func_filename = dataset.func[0] # print basic information on the dataset -print('First anatomical nifti image (3D) is at: %s' % nyu_dataset.anat_anon[0]) print('First functional nifti image (4D) is at: %s' % func_filename) ########################################################################### @@ -23,8 +22,9 @@ # As this is raw resting-state EPI, the background is noisy and we cannot # rely on the 'background' masking strategy. 
We need to use the 'epi' one -nifti_masker = NiftiMasker(standardize=False, mask_strategy='epi', - memory="nilearn_cache", memory_level=2) +nifti_masker = NiftiMasker(standardize=True, mask_strategy='epi', + memory="nilearn_cache", memory_level=2, + smoothing_fwhm=8) nifti_masker.fit(func_filename) mask_img = nifti_masker.mask_img_ @@ -48,7 +48,7 @@ ########################################################################### # Run an algorithm from sklearn.decomposition import FastICA -n_components = 20 +n_components = 10 ica = FastICA(n_components=n_components, random_state=42) components_masked = ica.fit_transform(fmri_masked.T).T diff --git a/examples/04_manipulating_images/plot_resample_to_template.py b/examples/04_manipulating_images/plot_resample_to_template.py new file mode 100644 index 0000000000..0f4866a1f2 --- /dev/null +++ b/examples/04_manipulating_images/plot_resample_to_template.py @@ -0,0 +1,69 @@ +""" +Resample an image to a template +=============================== + +The goal of this example is to illustrate the use of the function +:func:`nilearn.image.resample_to_img` to resample an image to a template. +We use the MNI152 template as the reference for resampling a t-map image. +Function :func:`nilearn.image.resample_img` could also be used to achieve this. +""" + +############################################################################### +# First we load the required datasets using the nilearn datasets module. +from nilearn.datasets import fetch_neurovault_motor_task +from nilearn.datasets import load_mni152_template + +template = load_mni152_template() + +motor_images = fetch_neurovault_motor_task() +stat_img = motor_images.images[0] + +############################################################################### +# Now, the localizer t-map image can be resampled to the MNI template image. +from nilearn.image import resample_to_img + +resampled_stat_img = resample_to_img(stat_img, template) + +############################################################################### +# Let's check the shape and affine have been correctly updated. + +# First load the original t-map in memory: +from nilearn.image import load_img +tmap_img = load_img(stat_img) + +original_shape = tmap_img.shape +original_affine = tmap_img.affine + +resampled_shape = resampled_stat_img.shape +resampled_affine = resampled_stat_img.affine + +template_img = load_img(template) +template_shape = template_img.shape +template_affine = template_img.affine +print("""Shape comparison: +- Original t-map image shape : {0} +- Resampled t-map image shape: {1} +- Template image shape : {2} +""".format(original_shape, resampled_shape, template_shape)) + +print("""Affine comparison: +- Original t-map image affine :\n {0} +- Resampled t-map image affine:\n {1} +- Template image affine :\n {2} +""".format(original_affine, resampled_affine, template_affine)) + +############################################################################### +# Finally, result images are displayed using nilearn plotting module. 
+from nilearn import plotting
+
+plotting.plot_stat_map(stat_img,
+                       bg_img=template,
+                       cut_coords=(36, -27, 66),
+                       threshold=3,
+                       title="t-map in original resolution")
+plotting.plot_stat_map(resampled_stat_img,
+                       bg_img=template,
+                       cut_coords=(36, -27, 66),
+                       threshold=3,
+                       title="Resampled t-map")
+plotting.show()
diff --git a/examples/04_manipulating_images/plot_roi_extraction.py b/examples/04_manipulating_images/plot_roi_extraction.py
new file mode 100644
index 0000000000..c5b0b730f6
--- /dev/null
+++ b/examples/04_manipulating_images/plot_roi_extraction.py
@@ -0,0 +1,341 @@
+"""
+Computing a Region of Interest (ROI) mask manually
+===================================================
+
+This example shows the manual steps needed to create and refine a spatial
+ROI mask. Such masks support "data folding", i.e., extracting and then
+analyzing brain data from a subset of voxels rather than from whole-brain
+images. This can also help alleviate the curse of dimensionality (i.e., the
+statistical problems that arise with high-dimensional input variables).
+
+We demonstrate how to compute an ROI mask using a **t-test**, and how simple
+image operations, applied before and after the ROI computation, improve the
+quality of the resulting mask.
+
+These chains of operations are easy to set up with the Nilearn and SciPy
+Python libraries. We walk through the steps in order, from the operations
+applied before mask computation to those applied after it; the main point is
+that the results can be visualized and checked at each step.
+
+See also :doc:`plot_extract_rois_smith_atlas` for automatic ROI extraction
+of brain connected networks given in a 4D image.
+"""
+
+##############################################################################
+# Coordinates of the slice of interest in each direction; we will use them
+# for visualization.
+
+# cut in x-direction
+sagittal = -25
+# cut in y-direction
+coronal = -37
+# cut in z-direction
+axial = -6
+
+# the coordinates to display should be prepared as a list
+cut_coords = [sagittal, coronal, axial]
+
+##############################################################################
+# Loading the data
+# ----------------
+# We rely on the Haxby dataset and its experiments to demonstrate the
+# complete list of operations. Fetching the dataset is easy: Nilearn ships a
+# function named `fetch_haxby` for it. The data are automatically stored in a
+# "nilearn_data" folder in your home directory, and we then work with the
+# paths of the downloaded Nifti images.
+
+# Load the datasets module from nilearn
+from nilearn import datasets
+
+# First, fetch the data of a single subject from the Haxby dataset: the
+# anatomical image, the EPI images and the mask images
+haxby_dataset = datasets.fetch_haxby()
+
+# print basic information on the dataset
+print('First subject anatomical nifti image (3D) is located at: %s' %
+      haxby_dataset.anat[0])
+print('First subject functional nifti image (4D) is located at: %s' %
+      haxby_dataset.func[0])
+print('Labels of haxby dataset (text file) are located at: %s' %
+      haxby_dataset.session_target[0])
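+
+# The returned object is a scikit-learn Bunch, a dictionary-like container.
+# As an optional sanity check (a minimal sketch; the exact keys depend on
+# your nilearn version), we can list everything the fetcher returned:
+print(sorted(haxby_dataset.keys()))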
+
+# Second, load the labels stored in a text file into an array using pandas
+import pandas as pd
+
+session_target = pd.read_csv(haxby_dataset.session_target[0], sep=" ")
+# These labels will be useful below when computing the Student's t-test
+haxby_labels = session_target['labels']
+
+##############################################################################
+# Now that we have the data (or rather, the paths to its files), we apply a
+# simple pre-processing step, image smoothing, to the functional images, and
+# then build a statistical test on the smoothed images.
+
+##############################################################################
+# Build a statistical test to find voxels of interest
+# ---------------------------------------------------
+# **Smoothing**: Functional MRI data have a low signal-to-noise ratio.
+# When using methods that are not robust to noise, it is useful to apply a
+# spatial filtering kernel on the data. Such smoothing is usually applied
+# with a Gaussian function of 4mm to 12mm full-width at half-maximum (this
+# is where the FWHM comes from). The function :func:`nilearn.image.smooth_img`
+# accounts for potential anisotropy in the image affine (i.e., non-identical
+# voxel sizes along the three dimensions). As with most nilearn functions,
+# smooth_img can also take file names as input parameters.
+
+# Smooth the data using the image processing module from nilearn
+from nilearn import image
+
+# Functional data
+fmri_filename = haxby_dataset.func[0]
+# smoothing: the first argument is the functional data filename, the second
+# the smoothing value in mm. The output is a Nifti image.
+fmri_img = image.smooth_img(fmri_filename, fwhm=6)
+
+# Visualize the mean of the smoothed EPI images using the plotting function
+# `plot_epi`
+from nilearn.plotting import plot_epi
+
+# First, compute the voxel-wise mean of the smoothed EPI images (first
+# argument) using the image processing module `image`
+mean_img = image.mean_img(fmri_img)
+# Second, visualize the mean image with the manually chosen coordinates
+plot_epi(mean_img, title='Smoothed mean EPI', cut_coords=cut_coords)
+
+##############################################################################
+# Given the smoothed functional data stored in the variable 'fmri_img', we
+# next select the voxels that distinguish the face and house experimental
+# conditions, using a simple Student's t-test. The section below briefly
+# motivates why feature selection matters in the high-dimensional fMRI
+# setting.
+
+##############################################################################
+# Functional MRI data can be considered "high dimensional" given the
+# p-versus-n ratio (e.g., p=~20,000-200,000 voxels for n=1000 samples or
+# less). In this setting, machine-learning algorithms can perform poorly
+# due to the so-called curse of dimensionality. However, simple means from
+# the realm of classical statistics can help reduce the number of voxels.
+
+fmri_data = fmri_img.get_data()
+# the number of voxels is x*y*z; samples are along the 4th dimension
+print(fmri_data.shape)
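+
+# To make the p-versus-n imbalance concrete, we can count the voxels and the
+# time points directly (a minimal sketch; the exact numbers depend on the
+# fetched data):
+n_x, n_y, n_z, n_scans = fmri_data.shape
+print('%d voxels, %d time points' % (n_x * n_y * n_z, n_scans))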
+
+##############################################################################
+# **Selecting features using a t-test**: The Student's t-test
+# (:func:`scipy.stats.ttest_ind`) is an established method to determine
+# whether two distributions have different means. It can be used to compare
+# voxel time-series from two different experimental conditions (e.g., when
+# houses or faces are shown to individuals during brain scanning). If the
+# time-series distributions are similar in the two conditions, the voxel is
+# not very useful for discriminating between them.
+
+import numpy as np
+from scipy import stats
+
+# The test returns p-values: under the null hypothesis that the two
+# time-series come from distributions with the same mean, the probability
+# of observing a difference at least as large as the one measured. The
+# lower the p-value, the more discriminative the voxel is in distinguishing
+# the two conditions (faces and houses).
+_, p_values = stats.ttest_ind(fmri_data[..., haxby_labels == 'face'],
+                              fmri_data[..., haxby_labels == 'house'],
+                              axis=-1)
+
+# Use a log scale for p-values
+log_p_values = -np.log10(p_values)
+# set NaN values to zero
+log_p_values[np.isnan(log_p_values)] = 0.
+log_p_values[log_p_values > 10.] = 10.
+
+# Visualize the statistical p-values using the plotting function
+# `plot_stat_map`
+from nilearn.plotting import plot_stat_map
+
+# Before visualizing, we transform the computed p-values into a Nifti-like
+# image using the function `new_img_like` from nilearn.
+from nilearn.image import new_img_like
+
+# The first argument is a reference image; the second is the p-values data
+# to convert into a new output image. The new image has the same header
+# information as the reference image.
+log_p_values_img = new_img_like(fmri_img, log_p_values)
+
+# Now, visualize the log p-values image over the functional mean image as
+# background, with manually chosen coordinates and a colorbar on the right
+# side of the plot (colorbar=True by default)
+plot_stat_map(log_p_values_img, mean_img,
+              title="p-values", cut_coords=cut_coords)
+
+#############################################################################
+# **Selecting features using f_classif**: Feature selection is also
+# available in the scikit-learn Python package, notably through the
+# `sklearn.feature_selection.f_classif` function.
+
+##############################################################################
+# Build a mask from this statistical map (improving the quality of the mask)
+# --------------------------------------------------------------------------
+# **Thresholding** - To better isolate the voxels of interest, where the
+# lowest p-values correspond to the most intense voxels, we apply a
+# threshold directly to the data array.
+
+# Note that we work on the log p-values; we force values below 5 to 0 by
+# thresholding.
+log_p_values[log_p_values < 5] = 0
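+
+# An analogous operation directly on the image object could use
+# :func:`nilearn.image.threshold_img` (a minimal sketch, not used below;
+# it thresholds on absolute values, which is equivalent here since log
+# p-values are non-negative):
+from nilearn.image import threshold_img
+thresholded_p_values_img = threshold_img(log_p_values_img, threshold=5)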
+
+# Visualize the reduced set of voxels of interest using the statistical
+# image plotting function. As shown above, we first transform the array
+# into a Nifti image.
+log_p_values_img = new_img_like(fmri_img, log_p_values)
+
+# Now, visualize the thresholded log p-values image without a colorbar and
+# without the Left - 'L', Right - 'R' annotation
+plot_stat_map(log_p_values_img, mean_img,
+              title='Thresholded p-values', annotate=False,
+              colorbar=False, cut_coords=cut_coords)
+
+##############################################################################
+# We can post-process the result with simple operations such as mask
+# intersection and dilation to regularize the mask definition. The idea
+# behind these operations is to obtain more compact, less scattered blobs.
+
+##############################################################################
+# **Binarization** and **intersection** with the Ventral Temporal (VT) mask -
+# We now want to restrict our investigation to the VT area. The corresponding
+# spatial mask is provided in haxby_dataset.mask_vt. We compute the
+# intersection of this provided mask with our self-computed mask.
+
+# self-computed mask
+bin_p_values = (log_p_values != 0)
+# VT mask
+mask_vt_filename = haxby_dataset.mask_vt[0]
+
+# The first step is to load the VT mask and, at the same time, convert its
+# data to boolean type
+from nilearn.image import load_img
+
+vt = load_img(mask_vt_filename).get_data().astype(bool)
+
+# We can then use a logical "and" operation - numpy.logical_and - to keep
+# only the voxels that were selected in both masks. In neuroimaging jargon,
+# this is called an "AND conjunction". We use the already imported numpy as np
+bin_p_values_and_vt = np.logical_and(bin_p_values, vt)
+
+# Visualize the mask intersection with the plotting function `plot_roi`,
+# a function made for displaying specific target voxels.
+from nilearn.plotting import plot_roi, show
+
+# First, we create a new image from the binarized, intersected mask (second
+# argument) and use it in the visualization. The boolean values must be
+# converted to the int data type at the same time; otherwise an error is
+# raised
+bin_p_values_and_vt_img = new_img_like(fmri_img,
+                                       bin_p_values_and_vt.astype(np.int))
+# Visualize, with the mean of the functional images as background
+plot_roi(bin_p_values_and_vt_img, mean_img, cut_coords=cut_coords,
+         title='Intersection with ventral temporal mask')
+
+##############################################################################
+# **Dilation** - Thresholded functional brain images often contain voxels
+# scattered across the brain. To consolidate such images into more compact
+# shapes, we use a morphological dilation. This is a common step that helps
+# avoid dropping voxels located on the edge of an ROI; in other words, it
+# can fill "holes" in the masked voxel representation.
+
+# We use the ndimage module from the scipy Python library for mask dilation
+from scipy import ndimage
+
+# The input here is the binarized and intersected mask from the previous
+# section
+dil_bin_p_values_and_vt = ndimage.binary_dilation(bin_p_values_and_vt)
+
+# Now, we visualize the dilated mask using `plot_roi`, again converting the
+# array into a Nifti image. As in every new_img_like call, the reference
+# image stays the same; only the data passed as second argument varies
+dil_bin_p_values_and_vt_img = new_img_like(
+    fmri_img,
+    dil_bin_p_values_and_vt.astype(np.int))
+# Visualize without the 'L', 'R' annotation, with the same coordinates
+plot_roi(dil_bin_p_values_and_vt_img, mean_img,
+         title='Dilated mask', cut_coords=cut_coords,
+         annotate=False)
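+
+# A related morphological operation is closing, which fills small holes
+# without growing the outline as much as dilation does (a minimal sketch
+# for comparison; it is not used in the rest of this example):
+closed_bin_p_values_and_vt = ndimage.binary_closing(bin_p_values_and_vt)
+plot_roi(new_img_like(fmri_img, closed_bin_p_values_and_vt.astype(np.int)),
+         mean_img, title='Closed mask', cut_coords=cut_coords)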
+
+#############################################################################
+# Finally, we split the connected ROI that spans the two hemispheres into
+# two separate regions (ROIs), using the function `scipy.ndimage.label`
+# from the scipy Python library.
+
+##############################################################################
+# **Identification of connected components** - The function
+# :func:`scipy.ndimage.label` from the scipy Python library identifies
+# groups of immediately neighboring voxels in our mask and assigns a
+# separate integer label to each group.
+labels, n_labels = ndimage.label(dil_bin_p_values_and_vt)
+# take as the first ROI the component carrying the integer label 5
+first_roi_data = (labels == 5).astype(np.int)
+# similarly, take as the second ROI the component with label 3
+second_roi_data = (labels == 3).astype(np.int)
+# Visualize the connected components
+# First, we create a Nifti image from the first ROI data array
+first_roi_img = new_img_like(fmri_img, first_roi_data)
+# Then, visualize that image (first argument) over the mean of the
+# functional images as background (second argument); cut_coords is left at
+# its default, so the coordinates are selected automatically on the ROI data
+plot_roi(first_roi_img, mean_img, title='Connected components: first ROI')
+# we do the same for the second ROI data
+second_roi_img = new_img_like(fmri_img, second_roi_data)
+# Visualize the second ROI image, again with automatically selected
+# coordinates
+plot_roi(second_roi_img, mean_img, title='Connected components: second ROI')
+
+
+##############################################################################
+# Use the new ROIs to extract data maps in both ROIs
+
+# We extract data from the ROIs using nilearn's NiftiLabelsMasker
+from nilearn.input_data import NiftiLabelsMasker
+
+# Before data extraction, we convert the labels array to a Nifti-like image.
+# All inputs to NiftiLabelsMasker must be Nifti-like images or filenames of
+# Nifti images. We use the same reference image as in the previous sections
+labels_img = new_img_like(fmri_img, labels)
+# First, initialize the masker with parameters suited for data extraction:
+# the labels image as input, resampling_target=None since affine, shape and
+# size are the same for all the data used here, and the time-series signal
+# processing parameters standardize and detrend set to False
+masker = NiftiLabelsMasker(labels_img, resampling_target=None,
+                           standardize=False, detrend=False)
+# After initializing the masker object, we call fit() to prepare the
+# labels_img data according to the given parameters
+masker.fit()
+# Prepare for data extraction: set the number of conditions, the size, etc.
+# from the haxby dataset
+condition_names = haxby_labels.unique()
+n_cond_img = fmri_data[..., haxby_labels == 'house'].shape[-1]
+n_conds = len(condition_names)
+
+X1, X2 = np.zeros((n_cond_img, n_conds)), np.zeros((n_cond_img, n_conds))
+# Gather the data for each condition, then apply the masker's transform()
+# to each set of maps.
The transformer extracts data in condition +# maps where the target regions are specified by labels images +for i, cond in enumerate(condition_names): + cond_maps = new_img_like( + fmri_img, fmri_data[..., haxby_labels == cond][..., :n_cond_img]) + mask_data = masker.transform(cond_maps) + X1[:, i], X2[:, i] = mask_data[:, 0], mask_data[:, 1] +condition_names[np.where(condition_names == 'scrambledpix')] = 'scrambled' + +############################################################################## +# save the ROI 'atlas' to a Nifti file +new_img_like(fmri_img, labels).to_filename('mask_atlas.nii.gz') + +############################################################################## +# Plot the average in the different condition names +import matplotlib.pyplot as plt + +plt.figure(figsize=(15, 7)) +for i in np.arange(2): + plt.subplot(1, 2, i + 1) + plt.boxplot(X1 if i == 0 else X2) + plt.xticks(np.arange(len(condition_names)) + 1, condition_names, + rotation=25) + plt.title('Boxplots of data in ROI%i per condition' % (i + 1)) + +show() diff --git a/examples/manipulating_visualizing/plot_smooth_mean_image.py b/examples/04_manipulating_images/plot_smooth_mean_image.py similarity index 100% rename from examples/manipulating_visualizing/plot_smooth_mean_image.py rename to examples/04_manipulating_images/plot_smooth_mean_image.py diff --git a/examples/05_advanced/README.txt b/examples/05_advanced/README.txt new file mode 100644 index 0000000000..4d7571adc7 --- /dev/null +++ b/examples/05_advanced/README.txt @@ -0,0 +1,2 @@ +Advanced statistical analysis of brain images +--------------------------------------------- diff --git a/examples/manipulating_visualizing/plot_haxby_mass_univariate.py b/examples/05_advanced/plot_haxby_mass_univariate.py similarity index 80% rename from examples/manipulating_visualizing/plot_haxby_mass_univariate.py rename to examples/05_advanced/plot_haxby_mass_univariate.py index 0deee4d027..047648ed4e 100644 --- a/examples/manipulating_visualizing/plot_haxby_mass_univariate.py +++ b/examples/05_advanced/plot_haxby_mass_univariate.py @@ -32,7 +32,7 @@ ############################################################################## # Load Haxby dataset from nilearn import datasets -haxby_dataset = datasets.fetch_haxby_simple() +haxby_dataset = datasets.fetch_haxby(subjects=[2]) # print basic information on the dataset print('Mask nifti image (3D) is located at: %s' % haxby_dataset.mask) @@ -43,6 +43,7 @@ mask_filename = haxby_dataset.mask from nilearn.input_data import NiftiMasker nifti_masker = NiftiMasker( + smoothing_fwhm=8, mask_img=mask_filename, memory='nilearn_cache', memory_level=1) # cache options func_filename = haxby_dataset.func[0] @@ -51,10 +52,15 @@ ############################################################################## # Restrict to faces and houses import numpy as np -conditions_encoded, sessions = np.loadtxt( - haxby_dataset.session_target[0]).astype("int").T -conditions = np.recfromtxt(haxby_dataset.conditions_target[0])['f0'] -condition_mask = np.logical_or(conditions == b'face', conditions == b'house') +import pandas as pd +labels = pd.read_csv(haxby_dataset.session_target[0], sep=" ") +conditions = labels['labels'] +categories = conditions.unique() +conditions_encoded = np.zeros_like(conditions) +for c, category in enumerate(categories): + conditions_encoded[conditions == category] = c +sessions = labels['chunks'] +condition_mask = conditions.isin(['face', 'house']) conditions_encoded = conditions_encoded[condition_mask] fmri_masked = 
fmri_masked[condition_mask] @@ -69,9 +75,9 @@ for s in range(n_sessions): session_mask = sessions[condition_mask] == s session_house_mask = np.logical_and(session_mask, - conditions[condition_mask] == b'house') + conditions[condition_mask] == 'house') session_face_mask = np.logical_and(session_mask, - conditions[condition_mask] == b'face') + conditions[condition_mask] == 'face') grouped_fmri_masked[2 * s] = fmri_masked[session_house_mask].mean(0) grouped_fmri_masked[2 * s + 1] = fmri_masked[session_face_mask].mean(0) grouped_conditions_encoded[2 * s] = conditions_encoded[ @@ -98,7 +104,7 @@ # scikit-learn F-scores for comparison # # F-test does not allow to observe the effect sign (pure two-sided test) -from nilearn._utils.fixes import f_regression +from sklearn.feature_selection import f_regression _, pvals_bonferroni = f_regression( grouped_fmri_masked, grouped_conditions_encoded) # f_regression implicitly adds intercept @@ -118,32 +124,19 @@ from nilearn import image mean_fmri_img = image.mean_img(func_filename) -# Various plotting parameters -z_slice = -17 # plotted slice -from nilearn.image.resampling import coord_transform -affine = signed_neg_log_pvals_unmasked.get_affine() -from scipy import linalg -_, _, k_slice = coord_transform(0, 0, z_slice, - linalg.inv(affine)) -k_slice = np.round(k_slice) - threshold = -np.log10(0.1) # 10% corrected vmax = min(signed_neg_log_pvals.max(), neg_log_pvals_bonferroni.max()) # Plot thresholded p-values map corresponding to F-scores -fig = plt.figure(figsize=(4, 5.5), facecolor='k') - display = plot_stat_map(neg_log_pvals_bonferroni_unmasked, mean_fmri_img, threshold=threshold, cmap=plt.cm.RdBu_r, - display_mode='z', cut_coords=[z_slice], - figure=fig, vmax=vmax) + display_mode='z', cut_coords=[-1, ], + vmax=vmax) neg_log_pvals_bonferroni_data = neg_log_pvals_bonferroni_unmasked.get_data() -neg_log_pvals_bonferroni_slice_data = \ - neg_log_pvals_bonferroni_data[..., k_slice] -n_detections = (neg_log_pvals_bonferroni_slice_data > threshold).sum() +n_detections = (neg_log_pvals_bonferroni_data > threshold).sum() title = ('Negative $\log_{10}$ p-values' '\n(Parametric two-sided F-test' '\n+ Bonferroni correction)' @@ -152,17 +145,12 @@ display.title(title, y=1.1) # Plot permutation p-values map -fig = plt.figure(figsize=(4, 5.5), facecolor='k') - display = plot_stat_map(signed_neg_log_pvals_unmasked, mean_fmri_img, threshold=threshold, cmap=plt.cm.RdBu_r, - display_mode='z', cut_coords=[z_slice], - figure=fig, vmax=vmax) + display_mode='z', cut_coords=[-1, ], + vmax=vmax) -signed_neg_log_pvals_data = signed_neg_log_pvals_unmasked.get_data() -signed_neg_log_pvals_slice_data = \ - signed_neg_log_pvals_data[..., k_slice, 0] -n_detections = (np.abs(signed_neg_log_pvals_slice_data) > threshold).sum() +n_detections = (np.abs(signed_neg_log_pvals) > threshold).sum() title = ('Negative $\log_{10}$ p-values' '\n(Non-parametric two-sided test' '\n+ max-type correction)' diff --git a/examples/05_advanced/plot_ica_neurovault.py b/examples/05_advanced/plot_ica_neurovault.py new file mode 100644 index 0000000000..6a50605ab6 --- /dev/null +++ b/examples/05_advanced/plot_ica_neurovault.py @@ -0,0 +1,144 @@ +""" +NeuroVault cross-study ICA maps. +================================ + +This example shows how to download statistical maps from +NeuroVault, label them with NeuroSynth terms, +and compute ICA components across all the maps. + +See :func:`nilearn.datasets.fetch_neurovault` +documentation for more details. 
+ +""" +# Author: Ben Cipollini +# License: BSD +# Ported from code authored by Chris Filo Gorgolewski, Gael Varoquaux +# https://github.com/NeuroVault/neurovault_analysis +import warnings + +import numpy as np +from scipy import stats +from sklearn.decomposition import FastICA + +from nilearn.datasets import fetch_neurovault +from nilearn.image import smooth_img + +from nilearn.datasets import load_mni152_brain_mask +from nilearn.input_data import NiftiMasker + +from nilearn import plotting + + +###################################################################### +# Get image and term data +# ----------------------- + +# Download images +# Here by default we only download 80 images to save time, +# but for better results I recommend using at least 200. +print("Fetching Neurovault images; " + "if you haven't downloaded any Neurovault data before " + "this will take several minutes.") +nv_data = fetch_neurovault(max_images=80, fetch_neurosynth_words=True) + +images = nv_data['images'] +term_weights = nv_data['word_frequencies'] +vocabulary = nv_data['vocabulary'] + +# Clean and report term scores +term_weights[term_weights < 0] = 0 +total_scores = np.mean(term_weights, axis=0) + +print("\nTop 10 neurosynth terms from downloaded images:\n") + +for term_idx in np.argsort(total_scores)[-10:][::-1]: + print(vocabulary[term_idx]) + + +###################################################################### +# Reshape and mask images +# ----------------------- + +print("\nReshaping and masking images.\n") + +with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) + warnings.simplefilter('ignore', DeprecationWarning) + + mask_img = load_mni152_brain_mask() + masker = NiftiMasker( + mask_img=mask_img, memory='nilearn_cache', memory_level=1) + masker = masker.fit() + + # Images may fail to be transformed, and are of different shapes, + # so we need to transform one-by-one and keep track of failures. + X = [] + is_usable = np.ones((len(images),), dtype=bool) + + for index, image_path in enumerate(images): + # load image and remove nan and inf values. + # applying smooth_img to an image with fwhm=None simply cleans up + # non-finite values but otherwise doesn't modify the image. + image = smooth_img(image_path, fwhm=None) + try: + X.append(masker.transform(image)) + except Exception as e: + meta = nv_data['images_meta'][index] + print("Failed to mask/reshape image: id: {0}; " + "name: '{1}'; collection: {2}; error: {3}".format( + meta.get('id'), meta.get('name'), + meta.get('collection_id'), e)) + is_usable[index] = False + +# Now reshape list into 2D matrix, and remove failed images from terms +X = np.vstack(X) +term_weights = term_weights[is_usable, :] + + +###################################################################### +# Run ICA and map components to terms +# ----------------------------------- + +print("Running ICA; may take time...") +# We use a very small number of components as we have downloaded only 80 +# images. 
For better results, increase the number of images downloaded +# and the number of components +n_components = 16 +fast_ica = FastICA(n_components=n_components, random_state=0) +ica_maps = fast_ica.fit_transform(X.T).T + +term_weights_for_components = np.dot(fast_ica.components_, term_weights) +print('Done, plotting results.') + + +###################################################################### +# Generate figures +# ---------------- + +with warnings.catch_warnings(): + warnings.simplefilter('ignore', DeprecationWarning) + + for index, (ic_map, ic_terms) in enumerate( + zip(ica_maps, term_weights_for_components)): + if -ic_map.min() > ic_map.max(): + # Flip the map's sign for prettiness + ic_map = - ic_map + ic_terms = - ic_terms + + ic_threshold = stats.scoreatpercentile(np.abs(ic_map), 90) + ic_img = masker.inverse_transform(ic_map) + important_terms = vocabulary[np.argsort(ic_terms)[-3:]] + title = 'IC%i %s' % (index, ', '.join(important_terms[::-1])) + + plotting.plot_stat_map( + ic_img, threshold=ic_threshold, colorbar=False, + title=title) + + +###################################################################### +# As we can see, some of the components capture cognitive or neurological +# maps, while other capture noise in the database. More data, better +# filtering, and better cognitive labels would give better maps + +# Done. +plotting.show() diff --git a/examples/connectivity/plot_ica_resting_state.py b/examples/05_advanced/plot_ica_resting_state.py similarity index 71% rename from examples/connectivity/plot_ica_resting_state.py rename to examples/05_advanced/plot_ica_resting_state.py index 69a994df73..ef54643e87 100644 --- a/examples/connectivity/plot_ica_resting_state.py +++ b/examples/05_advanced/plot_ica_resting_state.py @@ -9,7 +9,7 @@ This example is a toy. To apply ICA to resting-state data, it is advised to look at the example -:ref:`sphx_glr_auto_examples_connectivity_plot_canica_resting_state.py`. +:ref:`sphx_glr_auto_examples_03_connectivity_plot_canica_resting_state.py`. The example here applies the scikit-learn ICA to resting-state data. Note that following the code in the example, any unsupervised @@ -20,28 +20,29 @@ """ -### Load nyu_rest dataset ##################################################### +##################################################################### +# Load ADHD dataset from nilearn import datasets # Here we use only 3 subjects to get faster-running code. 
For better # results, simply increase this number # XXX: must get the code to run for more than 1 subject -nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1) -func_filename = nyu_dataset.func[0] +dataset = datasets.fetch_adhd(n_subjects=1) +func_filename = dataset.func[0] # print basic information on the dataset -print('First subject anatomical nifti image (3D) is at: %s' % - nyu_dataset.anat_anon[0]) print('First subject functional nifti image (4D) is at: %s' % - nyu_dataset.func[0]) # 4D data + dataset.func[0]) # 4D data -### Preprocess ################################################################ + +##################################################################### +# Preprocess from nilearn.input_data import NiftiMasker # This is resting-state data: the background has not been removed yet, # thus we need to use mask_strategy='epi' to compute the mask from the # EPI images masker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1, - mask_strategy='epi', standardize=False) + mask_strategy='epi', standardize=True) data_masked = masker.fit_transform(func_filename) # Concatenate all the subjects @@ -49,10 +50,11 @@ fmri_data = data_masked -### Apply ICA ################################################################# +##################################################################### +# Apply ICA from sklearn.decomposition import FastICA -n_components = 20 +n_components = 10 ica = FastICA(n_components=n_components, random_state=42) components_masked = ica.fit_transform(data_masked.T).T @@ -60,13 +62,16 @@ components_masked -= components_masked.mean(axis=0) components_masked /= components_masked.std(axis=0) # Threshold -components_masked[components_masked < .8] = 0 +import numpy as np +components_masked[np.abs(components_masked) < .8] = 0 # Now invert the masking operation, going back to a full 3D # representation component_img = masker.inverse_transform(components_masked) -### Visualize the results ##################################################### +##################################################################### +# Visualize the results + # Show some interesting components from nilearn import image from nilearn.plotting import plot_stat_map, show @@ -74,8 +79,8 @@ # Use the mean as a background mean_img = image.mean_img(func_filename) -plot_stat_map(image.index_img(component_img, 5), mean_img) +plot_stat_map(image.index_img(component_img, 0), mean_img) -plot_stat_map(image.index_img(component_img, 12), mean_img) +plot_stat_map(image.index_img(component_img, 1), mean_img) show() diff --git a/examples/manipulating_visualizing/plot_localizer_mass_univariate_methods.py b/examples/05_advanced/plot_localizer_mass_univariate_methods.py similarity index 73% rename from examples/manipulating_visualizing/plot_localizer_mass_univariate_methods.py rename to examples/05_advanced/plot_localizer_mass_univariate_methods.py index 51631af342..9c8f4d5e2a 100644 --- a/examples/manipulating_visualizing/plot_localizer_mass_univariate_methods.py +++ b/examples/05_advanced/plot_localizer_mass_univariate_methods.py @@ -18,13 +18,13 @@ """ # Author: Virgile Fritsch, , May. 
2014 import numpy as np -from scipy import linalg import matplotlib.pyplot as plt from nilearn import datasets from nilearn.input_data import NiftiMasker from nilearn.mass_univariate import permuted_ols -### Load Localizer contrast ################################################### +############################################################################## +# Load Localizer contrast n_samples = 94 localizer_dataset = datasets.fetch_localizer_contrasts( ['left button press (auditory cue)'], n_subjects=n_samples) @@ -42,14 +42,18 @@ tested_var = tested_var[mask_quality_check].astype(float).reshape((-1, 1)) print("Actual number of subjects after quality check: %d" % n_samples) -### Mask data ################################################################# + +############################################################################## +# Mask data nifti_masker = NiftiMasker( smoothing_fwhm=5, memory='nilearn_cache', memory_level=1) # cache options fmri_masked = nifti_masker.fit_transform(contrast_map_filenames) -### Anova (parametric F-scores) ############################################### -from nilearn._utils.fixes import f_regression + +############################################################################## +# Anova (parametric F-scores) +from sklearn.feature_selection import f_regression _, pvals_anova = f_regression(fmri_masked, tested_var, center=True) pvals_anova *= fmri_masked.shape[1] pvals_anova[np.isnan(pvals_anova)] = 1 @@ -58,7 +62,9 @@ neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform( neg_log_pvals_anova) -### Perform massively univariate analysis with permuted OLS ################### + +############################################################################## +# Perform massively univariate analysis with permuted OLS neg_log_pvals_permuted_ols, _, _ = permuted_ols( tested_var, fmri_masked, model_intercept=True, @@ -67,16 +73,13 @@ neg_log_pvals_permuted_ols_unmasked = nifti_masker.inverse_transform( np.ravel(neg_log_pvals_permuted_ols)) -### Visualization ############################################################# + +############################################################################## +# Visualization from nilearn.plotting import plot_stat_map, show # Various plotting parameters z_slice = 12 # plotted slice -from nilearn.image.resampling import coord_transform -affine = neg_log_pvals_anova_unmasked.get_affine() -_, _, k_slice = coord_transform(0, 0, z_slice, - linalg.inv(affine)) -k_slice = np.round(k_slice) threshold = - np.log10(0.1) # 10% corrected vmax = min(np.amax(neg_log_pvals_permuted_ols), @@ -86,14 +89,11 @@ fig = plt.figure(figsize=(5, 7), facecolor='k') display = plot_stat_map(neg_log_pvals_anova_unmasked, - threshold=threshold, cmap=plt.cm.autumn, + threshold=threshold, display_mode='z', cut_coords=[z_slice], figure=fig, vmax=vmax, black_bg=True) -neg_log_pvals_anova_data = neg_log_pvals_anova_unmasked.get_data() -neg_log_pvals_anova_slice_data = \ - neg_log_pvals_anova_data[..., k_slice] -n_detections = (neg_log_pvals_anova_slice_data > threshold).sum() +n_detections = (neg_log_pvals_anova_unmasked.get_data() > threshold).sum() title = ('Negative $\log_{10}$ p-values' '\n(Parametric + Bonferroni correction)' '\n%d detections') % n_detections @@ -104,15 +104,12 @@ fig = plt.figure(figsize=(5, 7), facecolor='k') display = plot_stat_map(neg_log_pvals_permuted_ols_unmasked, - threshold=threshold, cmap=plt.cm.autumn, + threshold=threshold, display_mode='z', cut_coords=[z_slice], figure=fig, vmax=vmax, black_bg=True) 
-neg_log_pvals_permuted_ols_data = \ - neg_log_pvals_permuted_ols_unmasked.get_data() -neg_log_pvals_permuted_ols_slice_data = \ - neg_log_pvals_permuted_ols_data[..., k_slice] -n_detections = (neg_log_pvals_permuted_ols_slice_data > threshold).sum() +n_detections = (neg_log_pvals_permuted_ols_unmasked.get_data() + > threshold).sum() title = ('Negative $\log_{10}$ p-values' '\n(Non-parametric + max-type correction)' '\n%d detections') % n_detections diff --git a/examples/plot_localizer_simple_analysis.py b/examples/05_advanced/plot_localizer_simple_analysis.py similarity index 76% rename from examples/plot_localizer_simple_analysis.py rename to examples/05_advanced/plot_localizer_simple_analysis.py index d7f3b45054..12b1483440 100644 --- a/examples/plot_localizer_simple_analysis.py +++ b/examples/05_advanced/plot_localizer_simple_analysis.py @@ -15,26 +15,31 @@ """ # Author: Virgile Fritsch, , May. 2014 import numpy as np -from scipy import linalg import matplotlib.pyplot as plt from nilearn import datasets from nilearn.input_data import NiftiMasker -### Load Localizer contrast ################################################### + +############################################################################ +# Load Localizer contrast n_samples = 20 localizer_dataset = datasets.fetch_localizer_calculation_task( n_subjects=n_samples) tested_var = np.ones((n_samples, 1)) -### Mask data ################################################################# + +############################################################################ +# Mask data nifti_masker = NiftiMasker( smoothing_fwhm=5, memory='nilearn_cache', memory_level=1) # cache options cmap_filenames = localizer_dataset.cmaps fmri_masked = nifti_masker.fit_transform(cmap_filenames) -### Anova (parametric F-scores) ############################################### -from nilearn._utils.fixes import f_regression + +############################################################################ +# Anova (parametric F-scores) +from sklearn.feature_selection import f_regression _, pvals_anova = f_regression(fmri_masked, tested_var, center=False) # do not remove intercept pvals_anova *= fmri_masked.shape[1] @@ -44,17 +49,13 @@ neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform( neg_log_pvals_anova) -### Visualization ############################################################# +############################################################################ +# Visualization from nilearn.plotting import plot_stat_map, show # Various plotting parameters z_slice = 45 # plotted slice -from nilearn.image.resampling import coord_transform -affine = neg_log_pvals_anova_unmasked.get_affine() -_, _, k_slice = coord_transform(0, 0, z_slice, - linalg.inv(affine)) -k_slice = np.round(k_slice) threshold = - np.log10(0.1) # 10% corrected # Plot Anova p-values @@ -69,7 +70,7 @@ title = ('Negative $\log_{10}$ p-values' '\n(Parametric + Bonferroni correction)' - '\n%d detections' % (~masked_pvals.mask[..., k_slice]).sum()) + '\n%d detections' % (~masked_pvals.mask).sum()) display.title(title, y=1.1, alpha=0.8) diff --git a/examples/05_advanced/plot_neurovault_meta_analysis.py b/examples/05_advanced/plot_neurovault_meta_analysis.py new file mode 100644 index 0000000000..399ad77f3a --- /dev/null +++ b/examples/05_advanced/plot_neurovault_meta_analysis.py @@ -0,0 +1,109 @@ +""" +NeuroVault meta-analysis of stop-go paradigm studies. 
+=====================================================
+
+This example shows how to download statistical maps from
+NeuroVault.
+
+See the :func:`nilearn.datasets.fetch_neurovault_ids`
+documentation for more details.
+
+"""
+# Author: Ben Cipollini
+# License: BSD
+import scipy.stats
+
+from nilearn.datasets import fetch_neurovault_ids
+from nilearn import plotting
+from nilearn.image import new_img_like, load_img, math_img
+
+
+######################################################################
+# Fetch images for "successful stop minus go"-like protocols.
+# -----------------------------------------------------------
+
+# These are the images we are interested in; to save time,
+# we specify their ids explicitly.
+stop_go_image_ids = (151, 3041, 3042, 2676, 2675, 2818, 2834)
+
+# These ids were determined by querying neurovault like this:
+
+# from nilearn.datasets import fetch_neurovault, neurovault
+
+# nv_data = fetch_neurovault(
+#     max_images=7,
+#     cognitive_paradigm_cogatlas=neurovault.Contains('stop signal'),
+#     contrast_definition=neurovault.Contains('succ', 'stop', 'go'),
+#     map_type='T map')
+
+# print([meta['id'] for meta in nv_data['images_meta']])
+
+
+nv_data = fetch_neurovault_ids(image_ids=stop_go_image_ids)
+
+images_meta = nv_data['images_meta']
+collections = nv_data['collections_meta']
+
+######################################################################
+# Visualize the data
+# ------------------
+
+print('\nplotting glass brain for collected images\n')
+
+for im in images_meta:
+    plotting.plot_glass_brain(
+        im['absolute_path'],
+        title='image {0}: {1}'.format(im['id'], im['contrast_definition']))
+
+######################################################################
+# Compute statistics
+# ------------------
+
+
+def t_to_z(t_scores, deg_of_freedom):
+    # one-sided survival function of the t distribution, mapped back
+    # to z values through the normal inverse survival function
+    p_values = scipy.stats.t.sf(t_scores, df=deg_of_freedom)
+    z_values = scipy.stats.norm.isf(p_values)
+    return z_values
+
+
+# Compute z values
+mean_maps = []
+z_imgs = []
+current_collection = None
+
+print("\nComputing maps...")
+
+
+# convert t to z for all images
+for this_meta in images_meta:
+    if this_meta['collection_id'] != current_collection:
+        print("\n\nCollection {0}:".format(this_meta['collection_id']))
+        current_collection = this_meta['collection_id']
+
+    # Load and validate the downloaded image.
+    t_img = load_img(this_meta['absolute_path'])
+    deg_of_freedom = this_meta['number_of_subjects'] - 2
+    print("    Image {0}: degrees of freedom: {1}".format(
+        this_meta['id'], deg_of_freedom))
+
+    # Convert the data and create a new image.
+    z_img = new_img_like(
+        t_img, t_to_z(t_img.get_data(), deg_of_freedom=deg_of_freedom))
+
+    z_imgs.append(z_img)
+
+
+######################################################################
+# Plot the combined z maps
+# ------------------------
+
+cut_coords = [-15, -8, 6, 30, 46, 62]
+# Stouffer's method: the sum of k independent z maps divided by
+# sqrt(k) is again a z map
+meta_analysis_img = math_img(
+    'np.sum(z_imgs, axis=3) / np.sqrt(z_imgs.shape[3])',
+    z_imgs=z_imgs)
+
+plotting.plot_stat_map(meta_analysis_img, display_mode='z', threshold=6,
+                       cut_coords=cut_coords, vmax=12)
+
+
+plotting.show()
diff --git a/examples/README.txt b/examples/README.txt
index 1a8a5155fb..7b4fd97c3c 100644
--- a/examples/README.txt
+++ b/examples/README.txt
@@ -1,3 +1,6 @@
+Nilearn usage examples
+======================
+
 .. warning::
 
     If you want to run the examples, make sure you execute them in a directory
@@ -5,17 +8,12 @@ directory. If you install nilearn manually, make sure you have followed
 :ref:`the instructions `.
 
-.. 
note:: - - A few examples may not run with scikit-learn versions older than - 0.14.1. - .. contents:: **Contents** :local: :depth: 1 -General examples ----------------- +Tutorial examples +------------------ -General-purpose and introductory examples for nilearn. +Introductory examples that teach how to use nilearn. diff --git a/examples/connectivity/plot_adhd_spheres.py b/examples/connectivity/plot_adhd_spheres.py deleted file mode 100644 index 334d73baa5..0000000000 --- a/examples/connectivity/plot_adhd_spheres.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -Extracting brain signal from spheres -==================================== - -This example extract brain signals from spheres described by the coordinates -of their center in MNI space and a given radius in millimeters. In particular, -this example extracts signals from Default Mode Network regions and compute a -connectome from them. - -""" - -########################################################################## -# Retrieve the dataset -from nilearn import datasets -adhd_dataset = datasets.fetch_adhd(n_subjects=1) - -# print basic information on the dataset -print('First subject functional nifti image (4D) is at: %s' % - adhd_dataset.func[0]) # 4D data - - -########################################################################## -# Coordinates of Default Mode Network -dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (0, 50, -5)] -labels = [ - 'Posterior Cingulate Cortex', - 'Left Temporoparietal junction', - 'Right Temporoparietal junction', - 'Medial prefrontal cortex' -] - - -########################################################################## -# Extracts signal from sphere around DMN seeds -from nilearn import input_data - -masker = input_data.NiftiSpheresMasker( - dmn_coords, radius=8, - detrend=True, standardize=True, - low_pass=0.1, high_pass=0.01, t_r=2.5, - memory='nilearn_cache', memory_level=1, verbose=2) - -func_filename = adhd_dataset.func[0] -confound_filename = adhd_dataset.confounds[0] - -time_series = masker.fit_transform(func_filename, - confounds=[confound_filename]) - -########################################################################## -# Display time series -import matplotlib.pyplot as plt -for time_serie, label in zip(time_series.T, labels): - plt.plot(time_serie, label=label) - -plt.title('Default Mode Network Time Series') -plt.xlabel('Scan number') -plt.ylabel('Normalized signal') -plt.legend() -plt.tight_layout() - - -########################################################################## -# Compute precision matrices -from sklearn.covariance import LedoitWolf -cve = LedoitWolf() -cve.fit(time_series) - - -########################################################################## -# Display connectome -from nilearn import plotting - -plotting.plot_connectome(cve.precision_, dmn_coords, - title="Default Mode Network Connectivity") -plotting.show() diff --git a/examples/connectivity/plot_connectivity_measures.py b/examples/connectivity/plot_connectivity_measures.py deleted file mode 100644 index cd7710d615..0000000000 --- a/examples/connectivity/plot_connectivity_measures.py +++ /dev/null @@ -1,99 +0,0 @@ -""" -Comparing different functional connectivity measures -==================================================== - -This example compares different measures of functional connectivity between -regions of interest : correlation, partial correlation, as well as a measure -called tangent. 
The resulting connectivity coefficients are used to -classify ADHD vs control subjects and the tangent measure outperforms the -standard measures. - -""" - -# Fetch dataset -import nilearn.datasets -atlas = nilearn.datasets.fetch_atlas_msdl() -dataset = nilearn.datasets.fetch_adhd(n_subjects=30) - - -###################################################################### -# Extract regions time series signals -import nilearn.input_data -masker = nilearn.input_data.NiftiMapsMasker( - atlas.maps, resampling_target="maps", detrend=True, - low_pass=None, high_pass=None, t_r=2.5, standardize=False, - memory='nilearn_cache', memory_level=1) -subjects = [] -sites = [] -adhds = [] -for func_file, phenotypic in zip(dataset.func, dataset.phenotypic): - # keep only 3 sites, to save computation time - if phenotypic['site'] in [b'"NYU"', b'"OHSU"', b'"NeuroImage"']: - time_series = masker.fit_transform(func_file) - subjects.append(time_series) - sites.append(phenotypic['site']) - adhds.append(phenotypic['adhd']) # ADHD/control label - - -###################################################################### -# Estimate connectivity -import nilearn.connectome -kinds = ['tangent', 'partial correlation', 'correlation'] -individual_connectivity_matrices = {} -mean_connectivity_matrix = {} -for kind in kinds: - conn_measure = nilearn.connectome.ConnectivityMeasure(kind=kind) - individual_connectivity_matrices[kind] = conn_measure.fit_transform( - subjects) - # Compute the mean connectivity - if kind == 'tangent': - mean_connectivity_matrix[kind] = conn_measure.mean_ - else: - mean_connectivity_matrix[kind] = \ - individual_connectivity_matrices[kind].mean(axis=0) - - -###################################################################### -# Plot the mean connectome -import numpy as np -import nilearn.plotting -labels = np.recfromcsv(atlas.labels) -region_coords = labels[['x', 'y', 'z']].tolist() -for kind in kinds: - nilearn.plotting.plot_connectome(mean_connectivity_matrix[kind], - region_coords, edge_threshold='98%', - title=kind) - - -###################################################################### -# Use the connectivity coefficients to classify ADHD vs controls -from sklearn.svm import LinearSVC -from sklearn.cross_validation import StratifiedKFold, cross_val_score -classes = ['{0}{1}'.format(site, adhd) for site, adhd in zip(sites, adhds)] -print('Classification accuracy:') -mean_scores = [] -cv = StratifiedKFold(classes, n_folds=3) -for kind in kinds: - svc = LinearSVC() - # Transform the connectivity matrices to 1D arrays - coonectivity_coefs = nilearn.connectome.sym_to_vec( - individual_connectivity_matrices[kind]) - cv_scores = cross_val_score(svc, coonectivity_coefs, - adhds, cv=cv, scoring='accuracy') - print('%20s score: %1.2f +- %1.2f' % (kind, cv_scores.mean(), - cv_scores.std())) - mean_scores.append(cv_scores.mean()) - - -###################################################################### -# Display the classification scores -import matplotlib.pyplot as plt -plt.figure(figsize=(6, 4)) -positions = np.arange(len(kinds)) * .1 + .1 -plt.barh(positions, mean_scores, align='center', height=.05) -yticks = [kind.replace(' ', '\n') for kind in kinds] -plt.yticks(positions, yticks) -plt.xlabel('Classification accuracy') -plt.grid(True) -plt.tight_layout() -plt.show() diff --git a/examples/connectivity/plot_extract_regions_canica_maps.py b/examples/connectivity/plot_extract_regions_canica_maps.py deleted file mode 100644 index 023131c453..0000000000 --- 
a/examples/connectivity/plot_extract_regions_canica_maps.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -Regions extraction using Canonical ICA maps and functional connectomes -====================================================================== - -This example shows how to use :class:`nilearn.regions.RegionExtractor` -to extract connected brain regions from whole brain ICA maps and -use them to estimate a connectome. - -We used 20 resting state ADHD functional datasets from :func:`nilearn.datasets.fetch_adhd` -and :class:`nilearn.decomposition.CanICA` for whole brain ICA maps. - -Please see the related documentation of :class:`nilearn.regions.RegionExtractor` -for more details. -""" - -################################################################################ -# Fetching ADHD resting state functional datasets by loading from datasets -# utilities -from nilearn import datasets - -adhd_dataset = datasets.fetch_adhd(n_subjects=20) -func_filenames = adhd_dataset.func -confounds = adhd_dataset.confounds - -################################################################################ -# Canonical ICA decomposition of functional datasets by importing CanICA from -# decomposition module -from nilearn.decomposition import CanICA - -# Initialize canica parameters -canica = CanICA(n_components=5, smoothing_fwhm=6., - memory="nilearn_cache", memory_level=2, - random_state=0) -# Fit to the data -canica.fit(func_filenames) -# ICA maps -components_img = canica.masker_.inverse_transform(canica.components_) - -# Visualization -# Show ICA maps by using plotting utilities -from nilearn import plotting - -plotting.plot_prob_atlas(components_img, view_type='filled_contours', - title='ICA components') - -################################################################################ -# Extracting regions from ICA maps and then timeseries signals from those -# regions, both can be done by importing Region Extractor from regions module. -# threshold=0.5 indicates that we keep nominal of amount nonzero voxels across all -# maps, less the threshold means that more intense non-voxels will be survived. -from nilearn.regions import RegionExtractor - -extractor = RegionExtractor(components_img, threshold=0.5, - thresholding_strategy='ratio_n_voxels', - extractor='local_regions', - standardize=True, min_region_size=1350) -# Just call fit() to process for regions extraction -extractor.fit() -# Extracted regions are stored in regions_img_ -regions_extracted_img = extractor.regions_img_ -# Each region index is stored in index_ -regions_index = extractor.index_ -# Total number of regions extracted -n_regions_extracted = regions_extracted_img.shape[-1] - -# Visualization -# Show region extraction results -title = ('%d regions are extracted from %d ICA components.' - '\nEach separate color of region indicates extracted region' - % (n_regions_extracted, 5)) -plotting.plot_prob_atlas(regions_extracted_img, view_type='filled_contours', - title=title) - -################################################################################ -# Computing correlation coefficients -# First we need to do subjects timeseries signals extraction and then estimating -# correlation matrices on those signals. -# To extract timeseries signals, we call transform() from RegionExtractor object -# onto each subject functional data stored in func_filenames. 
-# To estimate correlation matrices we import connectome utilities from nilearn -from nilearn.connectome import ConnectivityMeasure - -correlations = [] -# Initializing ConnectivityMeasure object with kind='correlation' -connectome_measure = ConnectivityMeasure(kind='correlation') -for filename, confound in zip(func_filenames, confounds): - # call transform from RegionExtractor object to extract timeseries signals - timeseries_each_subject = extractor.transform(filename, confounds=confound) - # call fit_transform from ConnectivityMeasure object - correlation = connectome_measure.fit_transform([timeseries_each_subject]) - # saving each subject correlation to correlations - correlations.append(correlation) - -# Mean of all correlations -import numpy as np - -mean_correlations = np.mean(correlations, axis=0).reshape(n_regions_extracted, - n_regions_extracted) - -# Visualization -# Showing mean correlation results -# Import image utilities in utilising to operate on 4th dimension -import matplotlib.pyplot as plt -from nilearn import image - -regions_imgs = image.iter_img(regions_extracted_img) -coords_connectome = [plotting.find_xyz_cut_coords(img) for img in regions_imgs] -title = 'Correlation interactions between %d regions' % n_regions_extracted -plt.figure() -plt.imshow(mean_correlations, interpolation="nearest", - vmax=1, vmin=-1, cmap=plt.cm.bwr) -plt.colorbar() -plt.title(title) -plotting.plot_connectome(mean_correlations, coords_connectome, - edge_threshold='90%', title=title) - -################################################################################ -# Showing Default Mode Network (DMN) regions before and after region extraction -# by manually identifying the index of DMN in ICA decomposed components -from nilearn._utils.compat import izip - -# First we plot DMN without region extraction, interested in only index=[3] -img = image.index_img(components_img, 3) -coords = plotting.find_xyz_cut_coords(img) -display = plotting.plot_stat_map(img, cut_coords=((0, -52, 29)), - colorbar=False, title='ICA map: DMN mode') - -# Now, we plot DMN after region extraction to show that connected regions are -# nicely separated. Each brain extracted region is indicated with separate color - -# For this, we take the indices of the all regions extracted related to original -# ICA map 3. -regions_indices_of_map3 = np.where(np.array(regions_index) == 3) - -display = plotting.plot_anat(cut_coords=((0, -52, 29)), title='Extracted regions in DMN mode') - -# Now add as an overlay by looping over all the regions for right -# temporoparietal function, posterior cingulate cortex, medial prefrontal -# cortex, left temporoparietal junction -color_list = [[0., 1., 0.29, 1.], [0., 1., 0.54, 1.], - [0., 1., 0.78, 1.], [0., 0.96, 1., 1.], - [0., 0.73, 1., 1.], [0., 0.47, 1., 1.], - [0., 0.22, 1., 1.], [0.01, 0., 1., 1.], - [0.26, 0., 1., 1.]] -for each_index_of_map3, color in izip(regions_indices_of_map3[0], color_list): - display.add_overlay(image.index_img(regions_extracted_img, each_index_of_map3), - cmap=plotting.cm.alpha_cmap(color)) - -plotting.show() diff --git a/examples/connectivity/plot_rest_clustering.py b/examples/connectivity/plot_rest_clustering.py deleted file mode 100644 index 675787874d..0000000000 --- a/examples/connectivity/plot_rest_clustering.py +++ /dev/null @@ -1,107 +0,0 @@ -""" -Ward clustering to learn a brain parcellation from rest fMRI -==================================================================== - -We use spatially-constrained Ward-clustering to create a set of -parcels. 
These parcels are particularly interesting for creating a -'compressed' representation of the data, replacing the data in the fMRI -images by mean on the parcellation. - -This parcellation may be useful in a supervised learning, see for -instance: `A supervised clustering approach for fMRI-based inference of -brain states `_, Michel et al, -Pattern Recognition 2011. - -""" - -### Load nyu_rest dataset ##################################################### - -import numpy as np -from nilearn import datasets -from nilearn import input_data -from nilearn.plotting import plot_roi, plot_epi, show -nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1) - -# print basic information on the dataset -print('First subject anatomical nifti image (3D) is at: %s' % - nyu_dataset.anat_anon[0]) -print('First subject functional nifti image (4D) is at: %s' % - nyu_dataset.func[0]) # 4D data - -# This is resting-state data: the background has not been removed yet, -# thus we need to use mask_strategy='epi' to compute the mask from the -# EPI images -nifti_masker = input_data.NiftiMasker(memory='nilearn_cache', - mask_strategy='epi', memory_level=1, - standardize=False) -func_filename = nyu_dataset.func[0] -fmri_masked = nifti_masker.fit_transform(func_filename) -mask = nifti_masker.mask_img_.get_data().astype(np.bool) - -### Ward ###################################################################### - -# Compute connectivity matrix: which voxel is connected to which -from sklearn.feature_extraction import image -shape = mask.shape -connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1], - n_z=shape[2], mask=mask) - -# Computing the ward for the first time, this is long... -from sklearn.cluster import FeatureAgglomeration -# If you have scikit-learn older than 0.14, you need to import -# WardAgglomeration instead of FeatureAgglomeration -import time -start = time.time() -ward = FeatureAgglomeration(n_clusters=1000, connectivity=connectivity, - linkage='ward', memory='nilearn_cache') -ward.fit(fmri_masked) -print("Ward agglomeration 1000 clusters: %.2fs" % (time.time() - start)) - -# Compute the ward with more clusters, should be faster as we are using -# the caching mechanism -start = time.time() -ward = FeatureAgglomeration(n_clusters=2000, connectivity=connectivity, - linkage='ward', memory='nilearn_cache') -ward.fit(fmri_masked) -print("Ward agglomeration 2000 clusters: %.2fs" % (time.time() - start)) - -### Show result ############################################################### - -# Unmask data -# Avoid 0 label -labels = ward.labels_ + 1 -labels_img = nifti_masker.inverse_transform(labels) - -from nilearn.image import mean_img -mean_func_img = mean_img(func_filename) - -# common cut coordinates for all plots - -first_plot = plot_roi(labels_img, mean_func_img, title="Ward parcellation", - display_mode='xz') -# labels_img is a Nifti1Image object, it can be saved to file with the -# following code: -labels_img.to_filename('parcellation.nii') - - -# Display the original data -plot_epi(nifti_masker.inverse_transform(fmri_masked[0]), - cut_coords=first_plot.cut_coords, - title='Original (%i voxels)' % fmri_masked.shape[1], - display_mode='xz') - -# A reduced data can be create by taking the parcel-level average: -# Note that, as many objects in the scikit-learn, the ward object exposes -# a transform method that modifies input features. 
Here it reduces their -# dimension -fmri_reduced = ward.transform(fmri_masked) - -# Display the corresponding data compressed using the parcellation -fmri_compressed = ward.inverse_transform(fmri_reduced) -compressed_img = nifti_masker.inverse_transform(fmri_compressed[0]) - -plot_epi(compressed_img, cut_coords=first_plot.cut_coords, - title='Compressed representation (2000 parcels)', - display_mode='xz') - -show() diff --git a/examples/manipulating_visualizing/README.txt b/examples/manipulating_visualizing/README.txt deleted file mode 100644 index bea909c2b4..0000000000 --- a/examples/manipulating_visualizing/README.txt +++ /dev/null @@ -1,4 +0,0 @@ -Manipulating images and visualization -------------------------------------- - -See :ref:`plotting` and :ref:`data_manipulation` for more details. diff --git a/examples/manipulating_visualizing/plot_demo_glass_brain.py b/examples/manipulating_visualizing/plot_demo_glass_brain.py deleted file mode 100644 index 1784935073..0000000000 --- a/examples/manipulating_visualizing/plot_demo_glass_brain.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -Glass brain plotting in nilearn -=============================== - -See :ref:`plotting` for more plotting functionalities. -""" - - -############################################################################### -# Retrieve the data -from nilearn import datasets - -localizer_dataset = datasets.fetch_localizer_contrasts( - ["left vs right button press"], - n_subjects=2, - get_tmaps=True) -localizer_tmap_filename = localizer_dataset.tmaps[1] - -############################################################################### -# demo glass brain plotting -from nilearn import plotting - -plotting.plot_glass_brain(localizer_tmap_filename, threshold=3) - -plotting.plot_glass_brain( - localizer_tmap_filename, title='plot_glass_brain', - black_bg=True, display_mode='xz', threshold=3) - -plotting.show() diff --git a/examples/manipulating_visualizing/plot_demo_glass_brain_extensive.py b/examples/manipulating_visualizing/plot_demo_glass_brain_extensive.py deleted file mode 100644 index 51881ccb80..0000000000 --- a/examples/manipulating_visualizing/plot_demo_glass_brain_extensive.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -Glass brain plotting in nilearn (all options) -============================================= - -This example goes through different options of the :func:`nilearn.plotting.plot_glass_brain` function -(including plotting negative values). -See :ref:`plotting` for more plotting functionalities. 
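The display_mode strings used throughout these plotting examples are one-letter codes: 'x' is sagittal, 'y' coronal and 'z' axial, and the letters can be combined. A minimal sketch, reusing the localizer map fetched in the example above:

from nilearn import plotting

# 'xz' renders a sagittal and an axial glass-brain projection side by side
plotting.plot_glass_brain(localizer_tmap_filename, display_mode='xz',
                          threshold=3)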
-""" - - -############################################################################### -# Retrieve the data -from nilearn import datasets - -localizer_dataset = datasets.fetch_localizer_contrasts( - ["left vs right button press"], - n_subjects=2, - get_tmaps=True) -localizer_tmap_filename = localizer_dataset.tmaps[1] - -############################################################################### -# demo glass brain plotting -from nilearn import plotting - -plotting.plot_glass_brain(localizer_tmap_filename, threshold=3) - -plotting.plot_glass_brain(localizer_tmap_filename, threshold=3, colorbar=True) - -plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain', - black_bg=True, display_mode='xz', threshold=3) - -plotting.plot_glass_brain(localizer_tmap_filename, threshold=0, colorbar=True, - plot_abs=False) - -plotting.plot_glass_brain(localizer_tmap_filename, threshold=3, - colorbar=True, plot_abs=False) - -plotting.show() diff --git a/examples/manipulating_visualizing/plot_demo_more_plotting.py b/examples/manipulating_visualizing/plot_demo_more_plotting.py deleted file mode 100644 index 84f005d2d5..0000000000 --- a/examples/manipulating_visualizing/plot_demo_more_plotting.py +++ /dev/null @@ -1,94 +0,0 @@ -""" -More nilearn plotting -===================== - -See :ref:`plotting` for more details. -""" - -# The imports from nilearn plotting and image processing -from nilearn import plotting, image - -############################################################################### -# Retrieve the data: haxby dataset to have EPI images and masks, and -# localizer dataset to have contrast maps - -from nilearn import datasets -haxby_dataset = datasets.fetch_haxby(n_subjects=1) -haxby_anat_filename = haxby_dataset.anat[0] -haxby_mask_filename = haxby_dataset.mask_vt[0] -haxby_func_filename = haxby_dataset.func[0] - -localizer_dataset = datasets.fetch_localizer_contrasts( - ["left vs right button press"], - n_subjects=2, - get_anats=True) -localizer_anat_filename = localizer_dataset.anats[1] -localizer_cmap_filename = localizer_dataset.cmaps[1] - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='ortho', - cut_coords=(36, -27, 60), - title="display_mode='ortho', cut_coords=(36, -27, 60)") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='z', cut_coords=5, - title="display_mode='z', cut_coords=5") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='x', - cut_coords=(-36, 36), - title="display_mode='x', cut_coords=(-36, 36)") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='y', cut_coords=1, - title="display_mode='x', cut_coords=(-36, 36)") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='z', - cut_coords=1, colorbar=False, - title="display_mode='z', cut_coords=1, colorbar=False") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='xz', - cut_coords=(36, 60), - title="display_mode='xz', cut_coords=(36, 60)") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='yx', - cut_coords=(-27, 36), - title="display_mode='yx', cut_coords=(-27, 36)") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='yz', - cut_coords=(-27, 60), - 
title="display_mode='yz', cut_coords=(-27, 60)") - -############################################################################### -# demo display objects with add_* methods -mean_haxby_img = image.mean_img(haxby_func_filename) - -# Plot T1 outline on top of the mean EPI (useful for checking coregistration) -display = plotting.plot_anat(mean_haxby_img, title="add_edges") -display.add_edges(haxby_anat_filename) - -######################################## -# Plotting outline of the mask on top of the EPI -display = plotting.plot_anat(mean_haxby_img, title="add_contours", - cut_coords=(28, -34, -22)) -display.add_contours(haxby_mask_filename, levels=[0.5], colors='r') - -############################################################################### -# demo saving plots to file - -plotting.plot_stat_map(localizer_cmap_filename, - title='Using plot_stat_map output_file', - output_file='plot_stat_map.png') - -######################################## -display = plotting.plot_stat_map(localizer_cmap_filename, - title='Using display savefig') -display.savefig('plot_stat_map_from_display.png') -# In non-interactive settings make sure you close your displays -display.close() - -plotting.show() diff --git a/examples/manipulating_visualizing/plot_demo_plotting.py b/examples/manipulating_visualizing/plot_demo_plotting.py deleted file mode 100644 index c92d168c3d..0000000000 --- a/examples/manipulating_visualizing/plot_demo_plotting.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -Plotting in nilearn -========================== - -Nilearn comes with a set of plotting function for Nifti-like images, -see :ref:`plotting` for more details. -""" - -# Import plotting and image processing tools -from nilearn import plotting, image - -############################################################################### -# Retrieve the data: haxby dataset to have EPI images and masks, and -# localizer dataset to have contrast maps - -from nilearn import datasets -haxby_dataset = datasets.fetch_haxby(n_subjects=1) - -# print basic information on the dataset -print('First subject anatomical nifti image (3D) is at: %s' % - haxby_dataset.anat[0]) -print('First subject functional nifti image (4D) is at: %s' % - haxby_dataset.func[0]) # 4D data - -haxby_anat_filename = haxby_dataset.anat[0] -haxby_mask_filename = haxby_dataset.mask_vt[0] -haxby_func_filename = haxby_dataset.func[0] - -localizer_dataset = datasets.fetch_localizer_contrasts( - ["left vs right button press"], - n_subjects=2, - get_anats=True, - get_tmaps=True) -localizer_anat_filename = localizer_dataset.anats[1] -localizer_tmap_filename = localizer_dataset.tmaps[1] - -############################################################################### -# Plotting statistical maps -plotting.plot_stat_map(localizer_tmap_filename, bg_img=localizer_anat_filename, - threshold=3, title="plot_stat_map", - cut_coords=(36, -27, 66)) - -############################################################################### -# Plotting glass brain -plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain', - threshold=3) - -############################################################################### -# Plotting anatomical maps -plotting.plot_anat(haxby_anat_filename, title="plot_anat") - -############################################################################### -# Plotting ROIs (here the mask) -plotting.plot_roi(haxby_mask_filename, bg_img=haxby_anat_filename, - title="plot_roi") - -############################################################################### 
-# Plotting EPI haxby -mean_haxby_img = image.mean_img(haxby_func_filename) -plotting.plot_epi(mean_haxby_img, title="plot_epi") - -plotting.show() diff --git a/examples/manipulating_visualizing/plot_mask_computation.py b/examples/manipulating_visualizing/plot_mask_computation.py deleted file mode 100644 index afd2f019e7..0000000000 --- a/examples/manipulating_visualizing/plot_mask_computation.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -Understanding NiftiMasker and mask computation -================================================== - -In this example, the Nifti masker is used to automatically compute a mask. - -For data that has already been masked, the default strategy works out of -the box. - -However, for raw EPI, as in resting-state time series, we need to use the -'epi' strategy of the NiftiMasker. - -In addition, we show here how to tweak the different parameters of the -underlying mask extraction routine -:func:`nilearn.masking.compute_epi_mask`. - -""" - -import numpy as np - -import nibabel -from nilearn import datasets - - -############################################################################### -# From already masked data -from nilearn.input_data import NiftiMasker -import nilearn.image as image -from nilearn.plotting import plot_roi, show - -# Load Miyawaki dataset -miyawaki_dataset = datasets.fetch_miyawaki2008() - -# print basic information on the dataset -print('First functional nifti image (4D) is located at: %s' % - miyawaki_dataset.func[0]) # 4D data - -miyawaki_filename = miyawaki_dataset.func[0] -miyawaki_mean_img = image.mean_img(miyawaki_filename) - -# This time, we can use the NiftiMasker without changing the default mask -# strategy, as the data has already been masked, and thus lies on a -# homogeneous background - -masker = NiftiMasker() -masker.fit(miyawaki_filename) - -plot_roi(masker.mask_img_, miyawaki_mean_img, - title="Mask from already masked data") - - -############################################################################### -# From raw EPI data - -# Load NYU resting-state dataset -nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1) -nyu_filename = nyu_dataset.func[0] -nyu_img = nibabel.load(nyu_filename) - -# Restrict nyu to 100 frames to speed up computation -from nilearn.image import index_img -nyu_img = index_img(nyu_img, slice(0, 100)) - -# To display the background -nyu_mean_img = image.mean_img(nyu_img) - - -# Simple mask extraction from EPI images -# We need to specify an 'epi' mask_strategy, as this is raw EPI data -masker = NiftiMasker(mask_strategy='epi') -masker.fit(nyu_img) -plot_roi(masker.mask_img_, nyu_mean_img, title='EPI automatic mask') - -# Generate mask with strong opening -masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=10)) -masker.fit(nyu_img) -plot_roi(masker.mask_img_, nyu_mean_img, title='EPI Mask with strong opening') - -# Generate mask with a high lower cutoff -masker = NiftiMasker(mask_strategy='epi', - mask_args=dict(upper_cutoff=.9, lower_cutoff=.8, - opening=False)) -masker.fit(nyu_img) -plot_roi(masker.mask_img_, nyu_mean_img, - title='EPI Mask: high lower_cutoff') - -############################################################################### -# Extract time series - -# trended vs detrended -trended = NiftiMasker(mask_strategy='epi') -detrended = NiftiMasker(mask_strategy='epi', detrend=True) -trended_data = trended.fit_transform(nyu_img) -detrended_data = detrended.fit_transform(nyu_img) - -print("Trended: mean %.2f, std %.2f" % - (np.mean(trended_data), np.std(trended_data))) 
-print("Detrended: mean %.2f, std %.2f" % - (np.mean(detrended_data), np.std(detrended_data))) - -show() diff --git a/examples/manipulating_visualizing/plot_overlay.py b/examples/manipulating_visualizing/plot_overlay.py deleted file mode 100644 index 2701d91391..0000000000 --- a/examples/manipulating_visualizing/plot_overlay.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Visualizing a probablistic atlas: the default mode in the MSDL atlas -===================================================================== - -Visualizing a probablistic atlas requires visualizing the different -maps that compose it. - -Here we represent the nodes constituting the default mode network in the -`MSDL atlas -`_. - -The tools that we need to leverage are: - - * :func:`nilearn.image.index_img` to retrieve the various maps composing - the atlas - - * Adding overlays on an existing brain display, to plot each of these - maps - -""" - -from nilearn import datasets, plotting, image - -atlas_data = datasets.fetch_atlas_msdl() -atlas_filename = atlas_data.maps - -# First plot the map for the PCC: index 4 in the atlas -display = plotting.plot_stat_map(image.index_img(atlas_filename, 4), - colorbar=False, - title="DMN nodes in MSDL atlas") - -# Now add as an overlay the maps for the ACC and the left and right -# parietal nodes -display.add_overlay(image.index_img(atlas_filename, 5), - cmap=plotting.cm.black_blue) -display.add_overlay(image.index_img(atlas_filename, 6), - cmap=plotting.cm.black_green) -display.add_overlay(image.index_img(atlas_filename, 3), - cmap=plotting.cm.black_pink) - -plotting.show() diff --git a/examples/manipulating_visualizing/plot_roi_extraction.py b/examples/manipulating_visualizing/plot_roi_extraction.py deleted file mode 100644 index d0068bbff8..0000000000 --- a/examples/manipulating_visualizing/plot_roi_extraction.py +++ /dev/null @@ -1,151 +0,0 @@ -""" -Computing an ROI mask -======================= - -Example showing how a T-test can be performed to compute an ROI -mask, and how simple operations can improve the quality of the mask -obtained. 
-""" - -############################################################################## -# Coordinates of the slice we will be displaying - -coronal = -24 -sagittal = -33 -axial = -17 -cut_coords = (coronal, sagittal, axial) - -############################################################################## -# Load the data - -# Fetch the data files from Internet -from nilearn import datasets -from nilearn.image import new_img_like - - -haxby_dataset = datasets.fetch_haxby(n_subjects=1) - -# print basic information on the dataset -print('First subject anatomical nifti image (3D) located is at: %s' % - haxby_dataset.anat[0]) -print('First subject functional nifti image (4D) is located at: %s' % - haxby_dataset.func[0]) - -# Second, load the labels -import numpy as np - -session_target = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ") -haxby_labels = session_target['labels'] - -import matplotlib.pyplot as plt -from nilearn.input_data import NiftiLabelsMasker - -############################################################################## -# Build a statistical test to find voxels of interest - -# Smooth the data -from nilearn import image -fmri_filename = haxby_dataset.func[0] -fmri_img = image.smooth_img(fmri_filename, fwhm=6) - -# Plot the mean image -from nilearn.plotting import plot_epi -mean_img = image.mean_img(fmri_img) -plot_epi(mean_img, title='Smoothed mean EPI', cut_coords=cut_coords) - -############################################################################## -# Run a T-test for face and houses -from scipy import stats -fmri_data = fmri_img.get_data() -_, p_values = stats.ttest_ind(fmri_data[..., haxby_labels == b'face'], - fmri_data[..., haxby_labels == b'house'], - axis=-1) - -# Use a log scale for p-values -log_p_values = -np.log10(p_values) -log_p_values[np.isnan(log_p_values)] = 0. -log_p_values[log_p_values > 10.] = 10. 
-from nilearn.plotting import plot_stat_map -plot_stat_map(new_img_like(fmri_img, log_p_values), - mean_img, title="p-values", cut_coords=cut_coords) - -############################################################################## -# Build a mask from this statistical map - -# Thresholding -log_p_values[log_p_values < 5] = 0 -plot_stat_map(new_img_like(fmri_img, log_p_values), - mean_img, title='Thresholded p-values', annotate=False, - colorbar=False, cut_coords=cut_coords) - -############################################################################## -# Binarization and intersection with VT mask -# (intersection corresponds to an "AND conjunction") -bin_p_values = (log_p_values != 0) -mask_vt_filename = haxby_dataset.mask_vt[0] -import nibabel -vt = nibabel.load(mask_vt_filename).get_data().astype(bool) -bin_p_values_and_vt = np.logical_and(bin_p_values, vt) - -from nilearn.plotting import plot_roi, show -plot_roi(new_img_like(fmri_img, bin_p_values_and_vt.astype(np.int)), - mean_img, title='Intersection with ventral temporal mask', - cut_coords=cut_coords) - -############################################################################## -# Dilation -from scipy import ndimage -dil_bin_p_values_and_vt = ndimage.binary_dilation(bin_p_values_and_vt) -plot_roi(new_img_like(fmri_img, dil_bin_p_values_and_vt.astype(np.int)), - mean_img, title='Dilated mask', cut_coords=cut_coords, - annotate=False) - - -############################################################################## -# Identification of connected components -labels, n_labels = ndimage.label(dil_bin_p_values_and_vt) -first_roi_data = (labels == 1).astype(np.int) -second_roi_data = (labels == 2).astype(np.int) -plot_roi(new_img_like(fmri_img, first_roi_data), - mean_img, title='Connected components: first ROI') - -plot_roi(new_img_like(fmri_img, second_roi_data), - mean_img, title='Connected components: second ROI') - - -############################################################################## -# Use the new ROIs to extract data maps in both ROIs -masker = NiftiLabelsMasker( - labels_img=new_img_like(fmri_img, labels), - resampling_target=None, - standardize=False, - detrend=False) -masker.fit() -condition_names = list(set(haxby_labels)) -n_cond_img = fmri_data[..., haxby_labels == b'house'].shape[-1] -n_conds = len(condition_names) - -X1, X2 = np.zeros((n_cond_img, n_conds)), np.zeros((n_cond_img, n_conds)) -for i, cond in enumerate(condition_names): - cond_maps = new_img_like( - fmri_img, fmri_data[..., haxby_labels == cond][..., :n_cond_img]) - mask_data = masker.transform(cond_maps) - X1[:, i], X2[:, i] = mask_data[:, 0], mask_data[:, 1] -condition_names[condition_names.index(b'scrambledpix')] = b'scrambled' - - -############################################################################## -# Plot the average in the different condition names -plt.figure(figsize=(15, 7)) -for i in np.arange(2): - plt.subplot(1, 2, i + 1) - plt.boxplot(X1 if i == 0 else X2) - plt.xticks(np.arange(len(condition_names)) + 1, condition_names, - rotation=25) - plt.title('Boxplots of data in ROI%i per condition' % (i + 1)) - -show() - -# save the ROI 'atlas' to a single output Nifti -nibabel.save(new_img_like(fmri_img, labels), - 'mask_atlas.nii') diff --git a/examples/plot_3d_and_4d_niimg.py b/examples/plot_3d_and_4d_niimg.py new file mode 100644 index 0000000000..9b6a724c79 --- /dev/null +++ b/examples/plot_3d_and_4d_niimg.py @@ -0,0 +1,107 @@ +""" +3D and 4D niimgs: handling and visualizing +========================================== + 
+Here we discover how to work with 3D and 4D niimgs. +""" + +############################################################################### +# Downloading tutorial datasets from Internet +# -------------------------------------------- +# +# Nilearn comes with functions that download public data from the Internet +# +# Let's first check where the data is downloaded on our disk: +from nilearn import datasets +print('Datasets are stored in: %r' % datasets.get_data_dirs()) + +############################################################################### +# Let's now retrieve a motor contrast from a localizer experiment +tmap_filenames = datasets.fetch_localizer_button_task()['tmaps'] +print(tmap_filenames) + +############################################################################### +# tmap_filenames is a list of filenames. We need to take the first one +tmap_filename = tmap_filenames[0] + + +############################################################################### +# Visualizing a 3D file +# ---------------------- +# +# The file contains a 3D volume; we can easily visualize it as a +# statistical map: +from nilearn import plotting +plotting.plot_stat_map(tmap_filename) + +############################################################################### +# Visualizing works better with a threshold +plotting.plot_stat_map(tmap_filename, threshold=3) + + +############################################################################### +# Visualizing one volume in a 4D file +# ----------------------------------- +# +# We can download resting-state networks from the Smith 2009 study on +# correspondence between rest and task +rsn = datasets.fetch_atlas_smith_2009()['rsn10'] +print(rsn) + +############################################################################### +# It is a 4D nifti file. We load it into memory to print its +# shape. +from nilearn import image +print(image.load_img(rsn).shape) + +############################################################################### +# We can retrieve the first volume (note that Python indexing starts at 0): +first_rsn = image.index_img(rsn, 0) +print(first_rsn.shape) + +############################################################################### +# first_rsn is a 3D image. +# +# We can then plot it +plotting.plot_stat_map(first_rsn) + + +############################################################################### +# Looping on all volumes in a 4D file +# ----------------------------------- +# +# If we want to plot all the volumes in this 4D file, we can use iter_img +# to loop over them. +# +# Then we give a few arguments to plot_stat_map in order to have a more +# compact display. +for img in image.iter_img(rsn): + # img is now an in-memory 3D img + plotting.plot_stat_map(img, threshold=3, display_mode="z", cut_coords=1, + colorbar=False) + + +############################################################################### +# plotting.show is useful to force the display of figures when running +# outside IPython +plotting.show() + +######################################################################### +# | +# +# ______ +# +# To recap, neuroimaging images (niimgs as we call them) come in +# different flavors: +# +# * 3D images, containing only one brain volume +# * 4D images, containing multiple brain volumes. +# +# More details about the input formats in nilearn for 3D and 4D images are +# given in the documentation section: :ref:`loading_data`.
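The recap below mentions concat_imgs as the inverse operation of index_img and iter_img; here is a minimal round trip on the rsn image loaded above (a sketch):

from nilearn import image

volumes = [image.index_img(rsn, i) for i in range(3)]   # 4D -> list of 3D
back_to_4d = image.concat_imgs(volumes)                 # list of 3D -> 4D
print(back_to_4d.shape)                                 # last dimension is 3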
+# +# Functions accept either 3D or 4D images, and we need to use on the one +# hand :func:`nilearn.image.index_img` or :func:`nilearn.image.iter_img` +# to break down 4D images into 3D images, and on the other hand +# :func:`nilearn.image.concat_imgs` to group a list of 3D images into a 4D +# image. diff --git a/examples/plot_decoding_tutorial.py b/examples/plot_decoding_tutorial.py new file mode 100644 index 0000000000..997b37d7d2 --- /dev/null +++ b/examples/plot_decoding_tutorial.py @@ -0,0 +1,289 @@ +""" +An introductory tutorial to fMRI decoding +========================================== + +Here is a simple tutorial on decoding with nilearn. It reproduces the +Haxby 2001 study on a face vs cat discrimination task in a mask of the +ventral stream. + +This tutorial is meant as an introduction to the various steps of a +decoding analysis. + +It is not a minimalistic example, as it strives to be didactic. It is not +meant to be copied to analyze new data: many of the steps are unnecessary. + +.. contents:: **Contents** + :local: + :depth: 1 + + +""" + +########################################################################### +# Retrieve and load the fMRI data from the Haxby study +# ----------------------------------------------------- +# +# First download the data +# ....................... +# +# The :func:`nilearn.datasets.fetch_haxby` function will download the +# Haxby dataset if not present on the disk, in the nilearn data directory. +# It can take a while to download about 310 MB of data from the Internet. +from nilearn import datasets +# By default the 2nd subject will be fetched +haxby_dataset = datasets.fetch_haxby() +# 'func' is a list of filenames: one for each subject +fmri_filename = haxby_dataset.func[0] + +# print basic information on the dataset +print('First subject functional nifti images (4D) are at: %s' % + fmri_filename) # 4D data + +########################################################################### +# Convert the fMRI volumes to a data matrix +# .......................................... +# +# We will use the :class:`nilearn.input_data.NiftiMasker` to extract the +# fMRI data on a mask and convert it to a 2D data matrix. +# +# The mask is a mask of the ventral temporal stream coming from the +# Haxby study: +mask_filename = haxby_dataset.mask_vt[0] + +# Let's visualize it, using the subject's anatomical image as a +# background +from nilearn import plotting +plotting.plot_roi(mask_filename, bg_img=haxby_dataset.anat[0], + cmap='Paired') + +########################################################################### +# Now we use the NiftiMasker. +# +# We first create a masker, giving it the options that we care +# about. Here we standardize the data, as it is often important +# for decoding +from nilearn.input_data import NiftiMasker +masker = NiftiMasker(mask_img=mask_filename, standardize=True) + +# We give the masker a filename and retrieve a 2D array ready +# for machine learning with scikit-learn +fmri_masked = masker.fit_transform(fmri_filename) + +########################################################################### +# The variable "fmri_masked" is a numpy array: +print(fmri_masked) + +########################################################################### +# Its shape corresponds to the number of time-points times the number of +# voxels in the mask +print(fmri_masked.shape) + +########################################################################### +# Load the behavioral labels +# ..........................
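Before loading the labels, one aside on the masker: the 2D matrix produced by fit_transform can be mapped back into brain space with inverse_transform, each column going back to its voxel inside the mask. A quick sketch:

# (n_time_points, n_voxels) array -> 4D Nifti image in brain space
img_back = masker.inverse_transform(fmri_masked)
print(img_back.shape)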
+# +# The behavioral labels are stored in a CSV file, separated by spaces. +# +# We use pandas to load them into an array. +import pandas as pd +# Load behavioral information +behavioral = pd.read_csv(haxby_dataset.session_target[0], sep=" ") +print(behavioral) + +########################################################################### +# Retrieve the experimental conditions, which we are going to use as +# prediction targets in the decoding +conditions = behavioral['labels'] +print(conditions) + +########################################################################### +# Restrict the analysis to cats and faces +# ........................................ +# +# As we can see from the targets above, the experiment contains many +# conditions, not all of which interest us for decoding. +# +# To keep only data corresponding to faces or cats, we create a +# mask of the samples belonging to these conditions. +condition_mask = conditions.isin(['face', 'cat']) + +# We apply this mask in the sample direction to restrict the +# classification to the face vs cat discrimination +fmri_masked = fmri_masked[condition_mask] + +########################################################################### +# We now have fewer samples +print(fmri_masked.shape) + +########################################################################### +# We apply the same mask to the targets +conditions = conditions[condition_mask] +print(conditions.shape) + + +########################################################################### +# Decoding with an SVM +# ---------------------- +# +# We will now use the `scikit-learn `_ +# machine-learning toolbox on the fmri_masked data. +# +# As a decoder, we use a Support Vector Classification, with a linear +# kernel. +# +# We first create it: +from sklearn.svm import SVC +svc = SVC(kernel='linear') +print(svc) + +########################################################################### +# The svc object is an object that can be fit (or trained) on data with +# labels, and then predict labels on data without. +# +# We first fit it on the data +svc.fit(fmri_masked, conditions) + +########################################################################### +# We can then predict the labels from the data +prediction = svc.predict(fmri_masked) +print(prediction) + +########################################################################### +# Let's measure the prediction accuracy: +print((prediction == conditions).sum() / float(len(conditions))) + +########################################################################### +# This prediction accuracy is meaningless. Why? + +########################################################################### +# Measuring prediction scores using cross-validation +# --------------------------------------------------- +# +# The proper way to measure error rates or prediction accuracy is via +# cross-validation: leaving out some data and testing on it. +# +# Manually leaving out data +# .......................... +# +# Let's leave out the last 30 data points during training, and test the +# prediction on these last 30 points: +svc.fit(fmri_masked[:-30], conditions[:-30]) + +prediction = svc.predict(fmri_masked[-30:]) +print((prediction == conditions[-30:]).sum() / float(len(conditions[-30:]))) + + +########################################################################### +# Implementing a KFold loop +# .........................
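As an aside, the hand-rolled score above can be computed equivalently with scikit-learn's metrics (a sketch):

from sklearn.metrics import accuracy_score

# Same value as the manual (prediction == ...).sum() / len(...) above
print(accuracy_score(conditions[-30:], prediction))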
+# +# We can split the data into train and test sets repeatedly with a `KFold` +# strategy: +from sklearn.model_selection import KFold + +cv = KFold(n_splits=5) + +# The "cv" object's split method can now accept data and create a +# generator which can yield the splits. +for train, test in cv.split(X=fmri_masked): + conditions_masked = conditions.values[train] + svc.fit(fmri_masked[train], conditions_masked) + prediction = svc.predict(fmri_masked[test]) + print((prediction == conditions.values[test]).sum() + / float(len(conditions.values[test]))) + +########################################################################### +# Cross-validation with scikit-learn +# ................................... +# +# Scikit-learn has tools to perform cross-validation more easily: +from sklearn.model_selection import cross_val_score +cv_score = cross_val_score(svc, fmri_masked, conditions) +print(cv_score) + +########################################################################### +# Note that we can speed things up by using all the CPUs of our computer +# with the n_jobs parameter. + +########################################################################### +# The best way to do cross-validation is to respect the structure of +# the experiment, for instance by leaving out full sessions of +# acquisition. +# +# The number of the session is stored in the CSV file giving the +# behavioral data. We have to apply our session mask, to select only cats +# and faces. +session_label = behavioral['chunks'][condition_mask] + +# By default, cross_val_score uses a 3-fold KFold. We can control this by +# passing the "cv" object, here a 5-fold: +cv_score = cross_val_score(svc, fmri_masked, conditions, cv=cv) +print(cv_score) + +# To leave a session out, pass it to the groups parameter of cross_val_score. +from sklearn.model_selection import LeaveOneGroupOut +cv = LeaveOneGroupOut() +cv_score = cross_val_score(svc, + fmri_masked, + conditions, + cv=cv, + groups=session_label, + ) +print(cv_score) + + +########################################################################### +# Inspecting the model weights +# ----------------------------- +# +# Finally, it may be useful to inspect and display the model weights. +# +# Turning the weights into a nifti image +# ....................................... +# +# We retrieve the SVC discriminating weights +coef_ = svc.coef_ +print(coef_) + +########################################################################### +# It's a numpy array +print(coef_.shape) + +########################################################################### +# We need to turn it back into a Nifti image, in essence, "inverting" +# what the NiftiMasker has done. +# +# For this, we can call inverse_transform on the NiftiMasker: +coef_img = masker.inverse_transform(coef_) +print(coef_img) + +########################################################################### +# coef_img is now a Nifti1Image. +# +# We can save the coefficients as a nii.gz file: +coef_img.to_filename('haxby_svc_weights.nii.gz') + +########################################################################### +# Plotting the SVM weights +# .........................
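A note on shapes before plotting: for a two-class linear SVC, coef_ is a (1, n_voxels) array with one weight per voxel of the mask, which is exactly the layout inverse_transform expects. A quick sanity check:

# One row of weights for the binary problem, one column per voxel
assert coef_.shape == (1, fmri_masked.shape[1])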
+# +# We can plot the weights, using the subject's anatomical as a background +from nilearn.plotting import plot_stat_map, show + +plot_stat_map(coef_img, bg_img=haxby_dataset.anat[0], + title="SVM weights", display_mode="yx") + +show() + +########################################################################### +# Further reading +# ---------------- +# +# * The :ref:`section of the documentation on decoding ` +# +# * :ref:`sphx_glr_auto_examples_02_decoding_plot_haxby_anova_svm.py` +# For decoding without a precomputed mask +# +# * :ref:`space_net` +# +# ______________ diff --git a/examples/plot_haxby_simple.py b/examples/plot_haxby_simple.py deleted file mode 100644 index ee46d560ef..0000000000 --- a/examples/plot_haxby_simple.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -Simple example of decoding: the Haxby data -============================================== - -Here is a simple example of decoding, reproducing the Haxby 2001 -study on a face vs cat discrimination task in a mask of the ventral -stream. -""" - -########################################################################### -# Retrieve and load the Haxby dataset - -from nilearn import datasets -haxby_dataset = datasets.fetch_haxby() - -# print basic information on the dataset -print('First subject anatomical nifti image (3D) is at: %s' % - haxby_dataset.anat[0]) -print('First subject functional nifti images (4D) are at: %s' % - haxby_dataset.func[0]) # 4D data - -# Load the behavioral labels -import numpy as np -# Load target information as string and give a numerical identifier to each -labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ") - -# scikit-learn >= 0.14 supports text labels. You can replace this line by: -# target = labels['labels'] -_, target = np.unique(labels['labels'], return_inverse=True) - -# Keep only data corresponding to faces or cats -condition_mask = np.logical_or(labels['labels'] == b'face', - labels['labels'] == b'cat') -target = target[condition_mask] - - -########################################################################### -# Prepare the data: apply the mask - -from nilearn.input_data import NiftiMasker -mask_filename = haxby_dataset.mask_vt[0] -# For decoding, standardizing is often very important -nifti_masker = NiftiMasker(mask_img=mask_filename, standardize=True) - -func_filename = haxby_dataset.func[0] -# We give the nifti_masker a filename and retrieve a 2D array ready -# for machine learning with scikit-learn -fmri_masked = nifti_masker.fit_transform(func_filename) - -# Restrict the classification to the face vs cat discrimination -fmri_masked = fmri_masked[condition_mask] - -########################################################################### -# The decoding - -# Here we use a Support Vector Classification, with a linear kernel -from sklearn.svm import SVC -svc = SVC(kernel='linear') - -# And we run it -svc.fit(fmri_masked, target) -prediction = svc.predict(fmri_masked) - -########################################################################### -# Compute prediction scores using cross-validation - -from sklearn.cross_validation import KFold - -cv = KFold(n=len(fmri_masked), n_folds=5) -cv_scores = [] - -for train, test in cv: - svc.fit(fmri_masked[train], target[train]) - prediction = svc.predict(fmri_masked[test]) - cv_scores.append(np.sum(prediction == target[test]) - / float(np.size(target[test]))) - -print(cv_scores) - -########################################################################### -# Retrieve the discriminating weights and save them - -# 
Retrieve the SVC discriminating weights -coef_ = svc.coef_ - -# Reverse masking thanks to the Nifti Masker -coef_img = nifti_masker.inverse_transform(coef_) - -# Save the coefficients as a Nifti image -coef_img.to_filename('haxby_svc_weights.nii') - -########################################################################### -# Visualize the discriminating weights over the mean EPI -from nilearn.image import mean_img -from nilearn.plotting import plot_roi, plot_stat_map, show - -mean_epi = mean_img(func_filename) -plot_stat_map(coef_img, mean_epi, title="SVM weights", display_mode="yx") - -########################################################################### -# Plot also the mask that was computed by the NiftiMasker -plot_roi(nifti_masker.mask_img_, mean_epi, title="Mask", display_mode="yx") - -show() diff --git a/examples/plot_nilearn_101.py b/examples/plot_nilearn_101.py index e423fe28c8..5d39263a29 100644 --- a/examples/plot_nilearn_101.py +++ b/examples/plot_nilearn_101.py @@ -1,55 +1,79 @@ """ -Basic nilearn example -===================== +Basic nilearn example: manipulating and looking at data +======================================================= A simple example showing how to load an existing Nifti file and use basic nilearn functionalities. """ -# Import the os module, for file manipulation -import os +# Let us use a Nifti file that is shipped with nilearn +from nilearn.datasets import MNI152_FILE_PATH + +# Note that the variable MNI152_FILE_PATH is just a path to a Nifti file +print('Path to MNI152 template: %r' % MNI152_FILE_PATH) ######################################################################### -# Let us use a Nifti file that is shipped with nilearn -from nilearn.datasets import data -anat_filename = os.path.join(os.path.dirname(data.__file__), - 'avg152T1_brain.nii.gz') -print('anat_filename: %s' % anat_filename) +# A first step: looking at our data +# ---------------------------------- +# +# Let's quickly plot this file: +from nilearn import plotting +plotting.plot_img(MNI152_FILE_PATH) ######################################################################### -# Using simple image nilearn functions -from nilearn import image -# functions containing 'img' can take either a filename or an image as input -smooth_anat_img = image.smooth_img(anat_filename, 3) +# This is not a very pretty plot. We just used the simplest possible +# code. There is a whole :ref:`section of the documentation ` +# on making prettier code. +# +# **Exercise**: Try plotting one of your own files. In the above, +# MNI152_FILE_PATH is nothing more than a string with a path pointing to +# a nifti image. You can replace it with a string pointing to a file on +# your disk. Note that it should be a 3D volume, and not a 4D volume. -# While we are giving a file name as input, the object that is returned -# is a 'nibabel' object. It has data, and an affine -anat_data = smooth_anat_img.get_data() -print('anat_data has shape: %s' % str(anat_data.shape)) -anat_affine = smooth_anat_img.get_affine() -print('anat_affineaffine:\n%s' % anat_affine) +######################################################################### +# Simple image manipulation: smoothing +# ------------------------------------- +# +# Let's use an image-smoothing function from nilearn: +# :func:`nilearn.image.smooth_img` +# +# Functions containing 'img' can take either a filename or an image as input. 
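Plotting functions follow the same convention, so the two calls sketched below should be interchangeable (MNI152_FILE_PATH is the template path used in this example):

from nilearn import image, plotting
from nilearn.datasets import MNI152_FILE_PATH

plotting.plot_img(MNI152_FILE_PATH)                   # from a filename
plotting.plot_img(image.load_img(MNI152_FILE_PATH))   # from an in-memory image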
+# +# Here we give as inputs the image filename and the smoothing value in mm +from nilearn import image +smooth_anat_img = image.smooth_img(MNI152_FILE_PATH, fwhm=3) -# While we are giving a file name as input, the object that is returned -# is a 'nibabel' object. It has data, and an affine -anat_data = smooth_anat_img.get_data() -print('anat_data has shape: %s' % str(anat_data.shape)) -anat_affine = smooth_anat_img.get_affine() -print('anat_affineaffine:\n%s' % anat_affine) +# While we are giving a file name as input, the function returns +# an in-memory object: +print(smooth_anat_img) ######################################################################### -# Visualization -from nilearn import plotting -cut_coords = (0, 0, 0) - -# Like all functions in nilearn, plotting can be given filenames -plotting.plot_anat(anat_filename, cut_coords=cut_coords, - title='Anatomy image') +# This is an in-memory object. We can pass it to a nilearn function, for +# instance to look at it: +plotting.plot_img(smooth_anat_img) -# Or nibabel objects -plotting.plot_anat(smooth_anat_img, - cut_coords=cut_coords, - title='Smoothed anatomy image') +######################################################################### +# We could also pass it to the smoothing function +more_smooth_anat_img = image.smooth_img(smooth_anat_img, fwhm=3) +plotting.plot_img(more_smooth_anat_img) ######################################################################### -# Saving image to file -smooth_anat_img.to_filename('smooth_anat_img.nii.gz') +# Saving results to a file +# ------------------------- +# +# We can save any in-memory object as follows: +more_smooth_anat_img.to_filename('more_smooth_anat_img.nii.gz') ######################################################################### -# Finally, showing plots when used inside a terminal +# Finally, calling plotting.show() is necessary to display the figure +# when running as a script outside IPython plotting.show() + +######################################################################### +# | +# +# ______ +# +# To recap, all the nilearn tools can take data as filenames or in-memory +# objects, and return brain volumes as in-memory objects. These can be +# passed on to other nilearn tools, or saved to disk. diff --git a/nilearn/__init__.py b/nilearn/__init__.py index a861e8511d..a830bf17e7 100644 --- a/nilearn/__init__.py +++ b/nilearn/__init__.py @@ -1,6 +1,6 @@ """ Machine Learning module for NeuroImaging in python -================================================== +-------------------------------------------------- Documentation is available in the docstrings and online at http://nilearn.github.io. @@ -28,28 +28,38 @@ estimated with OLS and permutation test plotting --- Plotting code for nilearn region --- Set of functions for extracting region-defined - signals + signals, clustering methods, connected regions extraction signal --- Set of preprocessing functions for time series """ import gzip - -# list all submodules available in nilearn -__all__ = ['datasets', 'decoding', 'decomposition', 'connectome', - 'image', 'input_data', 'masking', 'mass_univariate', 'plotting', - 'region', 'signal'] +from distutils.version import LooseVersion from .version import _check_module_dependencies, __version__ _check_module_dependencies() +# Temporary workaround to address formatting issues in doc tests +# with NumPy 1.14. NumPy 1.14 made str/repr formatting of numpy +# arrays more consistent. Hence we set the print options to mimic the +# pre-1.14 behavior.
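The version gate that follows relies on LooseVersion comparing release numbers component-wise rather than lexicographically; a toy sketch of why a plain string comparison would not do:

from distutils.version import LooseVersion

print('1.9.2' < '1.14')                              # False: character order
print(LooseVersion('1.9.2') < LooseVersion('1.14'))  # True: version order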
+import numpy as np +if LooseVersion(np.__version__) >= LooseVersion("1.14"): + # See issue #1600 in nilearn for reason to add try and except + try: + from ._utils.testing import is_nose_running + if is_nose_running(): + np.set_printoptions(legacy='1.13') + except ImportError: + pass + # Monkey-patch gzip to have faster reads on large gzip files if hasattr(gzip.GzipFile, 'max_read_chunk'): gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb # Boolean controlling the default globbing technique when using check_niimg -# Default value it True, set it to False to completely deactivate use of glob -# module +# and the os.path.expanduser usage in CacheMixin. +# Default value is True, set it to False to completely deactivate this +# behavior. EXPAND_PATH_WILDCARDS = True # Boolean controlling whether the joblib caches should be @@ -58,3 +68,8 @@ # structures # This is used in nilearn._utils.cache_mixin CHECK_CACHE_VERSION = True + +# list all submodules available in nilearn and version +__all__ = ['datasets', 'decoding', 'decomposition', 'connectome', + 'image', 'input_data', 'masking', 'mass_univariate', 'plotting', + 'region', 'signal', 'surface', 'parcellations', '__version__'] diff --git a/nilearn/_utils/__init__.py b/nilearn/_utils/__init__.py index 5616b0f7cd..59aae3afc4 100644 --- a/nilearn/_utils/__init__.py +++ b/nilearn/_utils/__init__.py @@ -1,4 +1,3 @@ - from .niimg_conversions import (check_niimg, check_niimg_3d, concat_niimgs, check_niimg_4d) @@ -9,3 +8,7 @@ from .cache_mixin import CacheMixin from .logger import _compose_err_msg + +__all__ = ['check_niimg', 'check_niimg_3d', 'concat_niimgs', 'check_niimg_4d', + '_repr_niimgs', 'copy_img', 'load_niimg', + 'as_ndarray', 'CacheMixin', '_compose_err_msg'] diff --git a/nilearn/_utils/cache_mixin.py b/nilearn/_utils/cache_mixin.py index f75b47c7f9..de5b04e296 100644 --- a/nilearn/_utils/cache_mixin.py +++ b/nilearn/_utils/cache_mixin.py @@ -11,6 +11,8 @@ from distutils.version import LooseVersion import nibabel +import sklearn + from sklearn.externals.joblib import Memory MEMORY_CLASSES = (Memory, ) @@ -28,6 +30,61 @@ __CACHE_CHECKED = dict() +def _check_memory(memory, verbose=0): + """Function to ensure an instance of a joblib.Memory object. + + Parameters + ---------- + memory: None or instance of joblib.Memory or str + Used to cache the masking process. + If a str is given, it is the path to the caching directory. + + verbose : int, optional (default 0) + Verbosity level. + + Returns + ------- + instance of joblib.Memory. + """ + if memory is None: + memory = Memory(cachedir=None, verbose=verbose) + if isinstance(memory, _basestring): + cache_dir = memory + if nilearn.EXPAND_PATH_WILDCARDS: + cache_dir = os.path.expanduser(cache_dir) + + # Perform some verifications on given path. + split_cache_dir = os.path.split(cache_dir) + if (len(split_cache_dir) > 1 and + (not os.path.exists(split_cache_dir[0]) and + split_cache_dir[0] != '')): + if (not nilearn.EXPAND_PATH_WILDCARDS and + cache_dir.startswith("~")): + # Maybe the user wants to enable expanded user paths. + error_msg = ("Given cache path parent directory doesn't " + "exist, you gave '{0}'. Enabling " + "nilearn.EXPAND_PATH_WILDCARDS could solve " + "this issue.".format(split_cache_dir[0])) + elif memory.startswith("~"): + # Path built on top of expanded user path doesn't exist. + error_msg = ("Given cache path parent directory doesn't " + "exist, you gave '{0}' which was expanded " + "as '{1}' but doesn't exist either. Set "
"nilearn.EXPAND_PATH_WILDCARDS to False to deactivate " + "the auto expansion of the user path (~)." + .format(split_cache_dir[0], + os.path.dirname(memory))) + else: + # The given cache base path doesn't exist. + error_msg = ("Given cache path parent directory doesn't " + "exist, you gave '{0}'." + .format(split_cache_dir[0])) + raise ValueError(error_msg) + + memory = Memory(cachedir=cache_dir, verbose=verbose) + return memory + + def _safe_cache(memory, func, **kwargs): """ A wrapper for mem.cache that flushes the cache if the version number of nibabel has changed. @@ -90,8 +147,18 @@ def _safe_cache(memory, func, **kwargs): return memory.cache(func, **kwargs) +class _ShelvedFunc(object): + """Workaround for Python 2, for which pickle fails on instance methods""" + def __init__(self, func): + self.func = func + self.func_name = func.__name__ + '_shelved' + + def __call__(self, *args, **kwargs): + return self.func.call_and_shelve(*args, **kwargs) + + def cache(func, memory, func_memory_level=None, memory_level=None, - **kwargs): + shelve=False, **kwargs): """ Return a joblib.Memory object. The memory_level determines the level above which the wrapped @@ -117,16 +184,20 @@ def cache(func, memory, func_memory_level=None, memory_level=None, be cached or not (if user_memory_level is equal of greater than func_memory_level the function is cached) + shelve: bool + Whether to return a joblib MemorizedResult, callable by a .get() + method, instead of the return value of func + kwargs: keyword arguments The keyword arguments passed to memory.cache Returns ------- - mem: joblib.MemorizedFunc - object that wraps the function func. This object may be - a no-op, if the requested level is lower than the value given - to _cache()). For consistency, a joblib.Memory object is always - returned. + mem: joblib.MemorizedFunc, wrapped in _ShelvedFunc if shelving + Object that wraps the function func to cache its further calls. + This object may be a no-op, if the requested level is lower + than the value given to _cache()). + For consistency, a callable object is always returned. """ verbose = kwargs.get('verbose', 0) @@ -157,7 +228,10 @@ def cache(func, memory, func_memory_level=None, memory_level=None, stacklevel=2) else: memory = Memory(cachedir=None, verbose=verbose) - return _safe_cache(memory, func, **kwargs) + cached_func = _safe_cache(memory, func, **kwargs) + if shelve: + cached_func = _ShelvedFunc(cached_func) + return cached_func class CacheMixin(object): @@ -171,7 +245,7 @@ class CacheMixin(object): cache level (self._memory_level) is greater than the value given as a parameter to self._cache(). See _cache() documentation for details. """ - def _cache(self, func, func_memory_level=1, **kwargs): + def _cache(self, func, func_memory_level=1, shelve=False, **kwargs): """Return a joblib.Memory object. The memory_level determines the level above which the wrapped @@ -189,16 +263,18 @@ def _cache(self, func, func_memory_level=1, **kwargs): The memory_level from which caching must be enabled for the wrapped function. + shelve: bool + Whether to return a joblib MemorizedResult, callable by a .get() + method, instead of the return value of func + Returns ------- - mem: joblib.Memory - object that wraps the function func. This object may be - a no-op, if the requested level is lower than the value given - to _cache()). For consistency, a joblib.Memory object is always - returned. - + mem: joblib.MemorizedFunc, wrapped in _ShelvedFunc if shelving + Object that wraps the function func to cache its further calls.
+ This object may be a no-op, if the requested level is lower + than the value given to _cache()). + For consistency, a callable object is always returned. """ - verbose = getattr(self, 'verbose', 0) # Creates attributes if they don't exist @@ -207,18 +283,16 @@ def _cache(self, func, func_memory_level=1, **kwargs): self.memory_level = 0 if not hasattr(self, "memory"): self.memory = Memory(cachedir=None, verbose=verbose) - if isinstance(self.memory, _basestring): - self.memory = Memory(cachedir=self.memory, verbose=verbose) + self.memory = _check_memory(self.memory, verbose=verbose) # If cache level is 0 but a memory object has been provided, set # memory_level to 1 with a warning. - if self.memory_level == 0: - if (isinstance(self.memory, _basestring) - or self.memory.cachedir is not None): - warnings.warn("memory_level is currently set to 0 but " - "a Memory object has been provided. " - "Setting memory_level to 1.") - self.memory_level = 1 + if self.memory_level == 0 and self.memory.cachedir is not None: + warnings.warn("memory_level is currently set to 0 but " + "a Memory object has been provided. " + "Setting memory_level to 1.") + self.memory_level = 1 return cache(func, self.memory, func_memory_level=func_memory_level, - memory_level=self.memory_level, **kwargs) + memory_level=self.memory_level, shelve=shelve, + **kwargs) diff --git a/nilearn/_utils/class_inspect.py b/nilearn/_utils/class_inspect.py index cf444b79aa..83132b5ebd 100644 --- a/nilearn/_utils/class_inspect.py +++ b/nilearn/_utils/class_inspect.py @@ -16,7 +16,7 @@ class 'cls' and returns the value for these parameters in object class), it is useful to forward parameters from one instance to another. Parameters - ========== + ---------- cls: class The class that gives us the list of parameters we are interested in @@ -28,7 +28,7 @@ class 'cls' and returns the value for these parameters in object Names of the parameters that are not returned. Returns - ======= + ------- params: dict The dict of parameters """ @@ -57,7 +57,7 @@ def enclosing_scope_name(ensure_estimator=True, stack_level=2): for debug print purpose. Parameters - ========== + ---------- ensure_estimator: boolean, default: True If true, find the enclosing object deriving from 'BaseEstimator' stack_level: integer, default 2 diff --git a/nilearn/_utils/compat.py b/nilearn/_utils/compat.py index 69f5baa1fa..d21be064d3 100644 --- a/nilearn/_utils/compat.py +++ b/nilearn/_utils/compat.py @@ -4,6 +4,10 @@ import sys import hashlib +from distutils.version import LooseVersion + +import nibabel + if sys.version_info[0] == 3: import pickle @@ -58,3 +62,4 @@ def md5_hash(string): m = hashlib.md5() m.update(string) return m.hexdigest() + diff --git a/nilearn/_utils/exceptions.py b/nilearn/_utils/exceptions.py index f9bb6fd3af..ab47a7b1f0 100644 --- a/nilearn/_utils/exceptions.py +++ b/nilearn/_utils/exceptions.py @@ -1,3 +1,10 @@ +try: + from numpy import VisibleDeprecationWarning +except ImportError: + class VisibleDeprecationWarning(UserWarning): + pass + + AuthorizedException = ( BufferError, ArithmeticError, @@ -49,18 +56,20 @@ def increment_stack_counter(self): @property def message(self): - message = ( - "Data must be a %iD Niimg-like object but you provided a " - "%s%iD image%s. " - "See http://nilearn.github.io/manipulating_visualizing/" - "manipulating_images.html#niimg." 
% ( - self.required_dimension + self.stack_counter, - "list of " * self.stack_counter, - self.file_dimension, - "s" * (self.stack_counter != 0) + return ("Input data has incompatible dimensionality: " + "Expected dimension is {0}D and you provided a " + "{1}{2}D image{3}{4}. " + "See http://nilearn.github.io/manipulating_images/" + "input_output.html." + .format(self.required_dimension + self.stack_counter, + "list of " * self.stack_counter, + self.file_dimension, + "s" * (self.stack_counter != 0), + (" (%iD)" % + (self.file_dimension + self.stack_counter)) * + (self.stack_counter > 0) + ) ) - ) - return message def __str__(self): return self.message diff --git a/nilearn/_utils/extmath.py b/nilearn/_utils/extmath.py index 72584740d3..6cbc8c6daf 100644 --- a/nilearn/_utils/extmath.py +++ b/nilearn/_utils/extmath.py @@ -6,30 +6,26 @@ import numpy as np -try: - # partition is available only in numpy >= 1.8.0 - from numpy import partition -except ImportError: - partition = None +from numpy import partition def fast_abs_percentile(data, percentile=80): """ A fast version of the percentile of the absolute value. Parameters - ========== + ---------- data: ndarray, possibly masked array The input data percentile: number between 0 and 100 The percentile that we are asking for Returns - ======= + ------- value: number The score at percentile Notes - ===== + ----- This is a faster, and less accurate version of scipy.stats.scoreatpercentile(np.abs(data), percentile) @@ -55,7 +51,7 @@ def is_spd(M, decimal=15, verbose=1): The check is performed by checking that all eigenvalues are positive. Parameters - ========== + ---------- M: numpy.ndarray symmetric positive definite matrix. @@ -63,7 +59,7 @@ def is_spd(M, decimal=15, verbose=1): verbosity level (0 means no message) Returns - ======= + ------- answer: boolean True if matrix is symmetric positive definite, False otherwise. """ diff --git a/nilearn/_utils/fixes/__init__.py b/nilearn/_utils/fixes/__init__.py deleted file mode 100644 index e76bdd404b..0000000000 --- a/nilearn/_utils/fixes/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -from distutils.version import LooseVersion -import sklearn - -if (LooseVersion(sklearn.__version__) < LooseVersion('0.15') or - sklearn.__version__ == '0.15-git'): - from .sklearn_f_regression_nosparse import ( - f_regression_nosparse as f_regression) -else: - from sklearn.feature_selection import f_regression - -# atleast2d_or_csr -try: - from sklearn.utils import atleast2d_or_csr -except ImportError: - # Changed in 0.15 - from sklearn.utils import check_array as atleast2d_or_csr - -# roc_auc_score -try: - from sklearn.metrics import roc_auc_score -except ImportError: - from sklearn.metrics import auc as roc_auc_score diff --git a/nilearn/_utils/fixes/matplotlib_backports.py b/nilearn/_utils/fixes/matplotlib_backports.py deleted file mode 100644 index 6eacaa956d..0000000000 --- a/nilearn/_utils/fixes/matplotlib_backports.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Backports for matplotlib compatibility across versions""" - - -def cbar_outline_get_xy(cbar_outline): - """In the matplotlib versions >= 1.4.0, ColorbarBase.outline is a - Polygon(Patch) object instead of a Line2D(Line) object. This entails - different getters and setters. - - Change specifically after commit 48f594c2e2b05839ea394040b06196f39d9fbfba, - entitled - "changed colorbar outline from a Line2D object to a Polygon object" - from August 28th, 2013. 
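# For concreteness, the reworded DimensionError message above renders as
# follows when a 4D image is required but a single 3D image is given
# (required_dimension=4, file_dimension=3, stack_counter=0):
#
#   "Input data has incompatible dimensionality: Expected dimension is 4D
#    and you provided a 3D image. See
#    http://nilearn.github.io/manipulating_images/input_output.html."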
- - This function unifies getters and setters of ColorbarBase outline xy - coordinates.""" - - if hasattr(cbar_outline, "get_xy"): - # loose version >= 1.4.x - return cbar_outline.get_xy() - else: - return cbar_outline.get_xydata() - - -def cbar_outline_set_xy(cbar_outline, xy): - """Setter for ColorbarBase.outline xy coordinates. - See cbar_outline_get_xy for more information. - """ - - if hasattr(cbar_outline, "set_xy"): - # loose version >= 1.4.x - return cbar_outline.set_xy(xy) - else: - cbar_outline.set_xdata(xy[:, 0]) - cbar_outline.set_ydata(xy[:, 1]) diff --git a/nilearn/_utils/fixes/sklearn_f_regression_nosparse.py b/nilearn/_utils/fixes/sklearn_f_regression_nosparse.py deleted file mode 100644 index 7a9b07dff1..0000000000 --- a/nilearn/_utils/fixes/sklearn_f_regression_nosparse.py +++ /dev/null @@ -1,59 +0,0 @@ -import numpy as np -from scipy import stats -from sklearn.utils import check_arrays -from sklearn.utils.extmath import norm - - -# f_regression with correct degrees of freedom when center=False -# available is sklearn version >= 0.15 -# This version does not support sparse matrices and is used to have tests -# passing for versions of sklearn < 0.12. -def f_regression_nosparse(X, y, center=True): - """Univariate linear regression tests - - Quick linear model for testing the effect of a single regressor, - sequentially for many regressors. - - This is done in 3 steps: - 1. the regressor of interest and the data are orthogonalized - with respect to constant regressors - 2. the cross correlation between data and regressors is computed - 3. it is converted to an F score then to a p-value - - Parameters - ---------- - X : {array-like, sparse matrix} shape = (n_samples, n_features) - The set of regressors that will tested sequentially. - - y : array of shape(n_samples). - The data matrix - - center : True, bool, - If true, X and y will be centered. - - Returns - ------- - F : array, shape=(n_features,) - F values of features. - - pval : array, shape=(n_features,) - p-values of F-scores. - """ - X, y = check_arrays(X, y, dtype=np.float) - y = y.ravel() - if center: - y = y - np.mean(y) - X = X.copy('F') # faster in fortran - X -= X.mean(axis=0) - - # compute the correlation - corr = np.dot(y, X) - # XXX could use corr /= row_norms(X.T) here, but the test doesn't pass - corr /= np.asarray(np.sqrt((X ** 2).sum(axis=0))).ravel() - corr /= norm(y) - - # convert to p-value - degrees_of_freedom = y.size - (2 if center else 1) - F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom - pv = stats.f.sf(F, 1, degrees_of_freedom) - return F, pv diff --git a/nilearn/_utils/logger.py b/nilearn/_utils/logger.py index 6526f124ae..48c8b9a032 100644 --- a/nilearn/_utils/logger.py +++ b/nilearn/_utils/logger.py @@ -40,7 +40,7 @@ def log(msg, verbose=1, object_classes=(BaseEstimator, ), user. Most of the time this parameter can be left unchanged. Notes - ===== + ----- This function does tricky things to ensure that the proper object is referenced in the message. If it is called e.g. inside a function that is called by a method of an object inheriting from any class in @@ -82,19 +82,19 @@ def _compose_err_msg(msg, **kwargs): """Append key-value pairs to msg, for display. Parameters - ========== + ---------- msg: string arbitrary message kwargs: dict arbitrary dictionary Returns - ======= + ------- updated_msg: string msg, with "key: value" appended. Only string values are appended. 
Example - ======= + ------- >>> _compose_err_msg('Error message with arguments...', arg_num=123, \ arg_str='filename.nii', arg_bool=True) 'Error message with arguments...\\narg_str: filename.nii' diff --git a/nilearn/_utils/ndimage.py b/nilearn/_utils/ndimage.py index 077cac6ca6..237b04417e 100644 --- a/nilearn/_utils/ndimage.py +++ b/nilearn/_utils/ndimage.py @@ -6,25 +6,48 @@ import numpy as np from scipy import ndimage - - +from .._utils.compat import _basestring ############################################################################### # Operating on connected components ############################################################################### + def largest_connected_component(volume): """Return the largest connected component of a 3D array. Parameters ----------- - volume: numpy.array + volume: numpy.ndarray 3D boolean array indicating a volume. Returns -------- - volume: numpy.array + volume: numpy.ndarray 3D boolean array with only one connected component. + + See Also + -------- + nilearn.image.largest_connected_component_img : To simply operate the + same manipulation directly on Nifti images. + + Notes + ----- + + **Handling big-endian in given numpy.ndarray** + This function changes the existing byte-ordering information to new byte + order, if the given volume has non-native data type. This operation + is done inplace to avoid big-endian issues with scipy ndimage module. + """ + if hasattr(volume, "get_data") \ + or isinstance(volume, _basestring): + raise ValueError('Please enter a valid numpy array. For images use\ + largest_connected_component_img') + # Get the new byteorder to handle issues like "Big-endian buffer not + # supported on little-endian compiler" with scipy ndimage label. + if not volume.dtype.isnative: + volume.dtype = volume.dtype.newbyteorder('N') + # We use asarray to be able to work with masked arrays. volume = np.asarray(volume) labels, label_nb = ndimage.label(volume) @@ -66,7 +89,8 @@ def _peak_local_max(image, min_distance=10, threshold_abs=0, threshold_rel=0.1, min_distance : int Minimum number of pixels separating peaks in a region of `2 * min_distance + 1` (i.e. peaks are separated by at least - `min_distance`). To find the maximum number of peaks, use `min_distance=1`. + `min_distance`). To find the maximum number of peaks, use + `min_distance=1`. threshold_abs : float Minimum intensity of peaks. threshold_rel : float @@ -78,7 +102,8 @@ def _peak_local_max(image, min_distance=10, threshold_abs=0, threshold_rel=0.1, Returns ------- output : ndarray or ndarray of bools - Boolean array shaped like `image`, with peaks represented by True values. + Boolean array shaped like `image`, with peaks represented by True + values. Notes ----- @@ -89,7 +114,8 @@ def _peak_local_max(image, min_distance=10, threshold_abs=0, threshold_rel=0.1, coordinates of peaks where dilated image = original. This code is mostly adapted from scikit image 0.11.3 release. 
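# A small sketch of the largest_connected_component behaviour documented
# above: plain boolean arrays are accepted, while images and filenames now
# raise ValueError (array shapes below are illustrative).
import numpy as np
from nilearn._utils.ndimage import largest_connected_component

volume = np.zeros((6, 6, 6), dtype=bool)
volume[:2, :2, :2] = True   # 8-voxel component
volume[3:, 3:, 3:] = True   # 27-voxel component
largest = largest_connected_component(volume)  # keeps only the 27-voxel blob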
- Location of file in scikit image: peak_local_max function in skimage.feature.peak + Location of file in scikit image: peak_local_max function in + skimage.feature.peak """ out = np.zeros_like(image, dtype=np.bool) @@ -111,7 +137,8 @@ def _peak_local_max(image, min_distance=10, threshold_abs=0, threshold_rel=0.1, coordinates = np.argwhere(image > peak_threshold) if coordinates.shape[0] > num_peaks: - intensities = image.flat[np.ravel_multi_index(coordinates.transpose(), image.shape)] + intensities = image.flat[np.ravel_multi_index(coordinates.transpose(), + image.shape)] idx_maxsort = np.argsort(intensities)[::-1] coordinates = coordinates[idx_maxsort][:num_peaks] diff --git a/nilearn/_utils/niimg.py b/nilearn/_utils/niimg.py index 8fdd64c2a9..e27046d38e 100644 --- a/nilearn/_utils/niimg.py +++ b/nilearn/_utils/niimg.py @@ -14,9 +14,23 @@ from .compat import _basestring -def _safe_get_data(img): +def _safe_get_data(img, ensure_finite=False): """ Get the data in the image without having a side effect on the Nifti1Image object + + Parameters + ---------- + img: Nifti image/object + Image to get data. + + ensure_finite: bool + If True, non-finite values such as (NaNs and infs) found in the + image will be replaced by zeros. + + Returns + ------- + data: numpy array + get_data() return from Nifti image. """ if hasattr(img, '_data_cache') and img._data_cache is None: # By loading directly dataobj, we prevent caching if the data is @@ -25,18 +39,14 @@ def _safe_get_data(img): # typically the line below can double memory usage # that's why we invoke a forced call to the garbage collector gc.collect() - return img.get_data() + data = img.get_data() + if ensure_finite: + non_finite_mask = np.logical_not(np.isfinite(data)) + if non_finite_mask.sum() > 0: # any non_finite_mask values? + data[non_finite_mask] = 0 -def _get_data_dtype(img): - """Returns the dtype of an image. - If the image is non standard (no get_data_dtype member), this function - relies on the data itself. - """ - try: - return img.get_data_dtype() - except AttributeError: - return img.get_data().dtype + return data def _get_target_dtype(dtype, target_dtype): @@ -80,7 +90,7 @@ def load_niimg(niimg, dtype=None): ----------- niimg: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Image to load. dtype: {dtype, "auto"} @@ -103,11 +113,11 @@ def load_niimg(niimg, dtype=None): " not compatible with nibabel format:\n" + short_repr(niimg)) - dtype = _get_target_dtype(_get_data_dtype(niimg), dtype) + dtype = _get_target_dtype(niimg.get_data().dtype, dtype) if dtype is not None: niimg = new_img_like(niimg, niimg.get_data().astype(dtype), - niimg.get_affine()) + niimg.affine) return niimg @@ -115,12 +125,12 @@ def copy_img(img): """Copy an image to a nibabel.Nifti1Image. Parameters - ========== + ---------- img: image nibabel SpatialImage object to copy. 
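# A minimal sketch of the new ensure_finite flag of _safe_get_data described
# above: non-finite values are replaced by zeros in the returned array.
import numpy as np
import nibabel
from nilearn._utils.niimg import _safe_get_data

data = np.ones((2, 2, 2))
data[0, 0, 0] = np.nan
img = nibabel.Nifti1Image(data, np.eye(4))
clean = _safe_get_data(img, ensure_finite=True)  # clean[0, 0, 0] == 0.0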
Returns - ======= + ------- img_copy: image copy of input (data, affine and header) """ @@ -128,7 +138,7 @@ def copy_img(img): if not isinstance(img, nibabel.spatialimages.SpatialImage): raise ValueError("Input value is not an image") - return new_img_like(img, _safe_get_data(img).copy(), img.get_affine().copy(), + return new_img_like(img, _safe_get_data(img).copy(), img.affine.copy(), copy_header=True) @@ -144,12 +154,12 @@ def _repr_niimgs(niimgs): filename = niimgs.get_filename() if filename is not None: return "%s('%s')" % (niimgs.__class__.__name__, - filename) + filename) else: return "%s(\nshape=%s,\naffine=%s\n)" % \ (niimgs.__class__.__name__, repr(niimgs.shape), - repr(niimgs.get_affine())) + repr(niimgs.affine)) except: pass return repr(niimgs) diff --git a/nilearn/_utils/niimg_conversions.py b/nilearn/_utils/niimg_conversions.py index 24c0e3848d..5c59191871 100644 --- a/nilearn/_utils/niimg_conversions.py +++ b/nilearn/_utils/niimg_conversions.py @@ -18,13 +18,14 @@ from .exceptions import DimensionError + def _check_fov(img, affine, shape): """ Return True if img's field of view correspond to given shape and affine, False elsewhere. """ img = check_niimg(img) return (img.shape[:3] == shape and - np.allclose(img.get_affine(), affine)) + np.allclose(img.affine, affine)) def _check_same_fov(*args, **kwargs): @@ -48,8 +49,6 @@ def _check_same_fov(*args, **kwargs): raise_error: boolean, optional If True, an error will be raised in case of error. """ - from ..image import new_img_like # avoid circular imports - raise_error = kwargs.pop('raise_error', False) for i, arg in enumerate(args): kwargs['img_#%i' % i] = arg @@ -58,7 +57,7 @@ def _check_same_fov(*args, **kwargs): kwargs.items(), 2): if not a_img.shape[:3] == b_img.shape[:3]: errors.append((a_name, b_name, 'shape')) - if not np.allclose(a_img.get_affine(), b_img.get_affine()): + if not np.allclose(a_img.affine, b_img.affine): errors.append((a_name, b_name, 'affine')) if len(errors) > 0 and raise_error: raise ValueError('Following field of view errors were detected:\n' + @@ -72,10 +71,21 @@ def _index_img(img, index): """Helper function for check_niimg_4d.""" return new_img_like( - img, img.get_data()[:, :, :, index], img.get_affine(), + img, img.get_data()[:, :, :, index], img.affine, copy_header=True) +def _resolve_globbing(path): + if isinstance(path, _basestring): + path_list = sorted(glob.glob(os.path.expanduser(path))) + # Raise an error in case the niimgs list is empty. + if len(path_list) == 0: + raise ValueError("No files matching path: %s" % path) + path = path_list + + return path + + def _iter_check_niimg(niimgs, ensure_ndim=None, atleast_4d=False, target_fov=None, dtype=None, memory=Memory(cachedir=None), @@ -85,7 +95,7 @@ def _iter_check_niimg(niimgs, ensure_ndim=None, atleast_4d=False, Parameters ---------- - niimgs: list of niimg + niimgs: list of niimg or glob pattern Image to iterate over ensure_ndim: integer, optional @@ -108,12 +118,7 @@ def _iter_check_niimg(niimgs, ensure_ndim=None, atleast_4d=False, check_niimg, check_niimg_3d, check_niimg_4d """ # If niimgs is a string, use glob to expand it to the matching filenames. - if isinstance(niimgs, _basestring): - niimgs_list = glob.glob(os.path.expanduser(niimgs)) - # Raise an error in case the niimgs list is empty. 
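# With _resolve_globbing above, concat_niimgs (and the iteration helpers)
# accept a glob pattern directly; the path below is hypothetical.
from nilearn._utils.niimg_conversions import concat_niimgs

img_4d = concat_niimgs('~/data/run1_vol_*.nii')
# Matches are sorted before concatenation; an empty match raises ValueError.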
- if len(niimgs_list) == 0: - raise ValueError("No files matching path: %s" % niimgs) - niimgs = niimgs_list + niimgs = _resolve_globbing(niimgs) ref_fov = None resample_to_first_img = False @@ -129,7 +134,7 @@ def _iter_check_niimg(niimgs, ensure_ndim=None, atleast_4d=False, if i == 0: ndim_minus_one = len(niimg.shape) if ref_fov is None: - ref_fov = (niimg.get_affine(), niimg.shape[:3]) + ref_fov = (niimg.affine, niimg.shape[:3]) resample_to_first_img = True if not _check_fov(niimg, ref_fov[0], ref_fov[1]): @@ -150,7 +155,7 @@ def _iter_check_niimg(niimgs, ensure_ndim=None, atleast_4d=False, "reference FOV.\n" "Reference affine:\n%r\nImage affine:\n%r\n" "Reference shape:\n%r\nImage shape:\n%r\n" - % (i, ref_fov[0], niimg.get_affine(), ref_fov[1], + % (i, ref_fov[0], niimg.affine, ref_fov[1], niimg.shape)) yield niimg except DimensionError as exc: @@ -179,12 +184,12 @@ def check_niimg(niimg, ensure_ndim=None, atleast_4d=False, dtype=None, Parameters ---------- niimg: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. The '~' symbol is expanded to the user home folder. - If it is an object, check if get_data() - and get_affine() methods are present, raise TypeError otherwise. + If it is an object, check if the get_data() method + and affine attribute are present, raise TypeError otherwise. ensure_ndim: integer {3, 4}, optional Indicate the dimensionality of the expected niimg. An @@ -213,7 +218,7 @@ def check_niimg(niimg, ensure_ndim=None, atleast_4d=False, dtype=None, ------- result: 3D/4D Niimg-like object Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed - that the returned object has get_data() and get_affine() methods. + that the returned object has get_data() method and affine attribute. Notes ----- @@ -268,12 +273,12 @@ def check_niimg(niimg, ensure_ndim=None, atleast_4d=False, dtype=None, if ensure_ndim == 3 and len(niimg.shape) == 4 and niimg.shape[3] == 1: # "squeeze" the image. data = _safe_get_data(niimg) - affine = niimg.get_affine() + affine = niimg.affine niimg = new_img_like(niimg, data[:, :, :, 0], affine) if atleast_4d and len(niimg.shape) == 3: data = niimg.get_data().view() data.shape = data.shape + (1, ) - niimg = new_img_like(niimg, data, niimg.get_affine()) + niimg = new_img_like(niimg, data, niimg.affine) if ensure_ndim is not None and len(niimg.shape) != ensure_ndim: raise DimensionError(len(niimg.shape), ensure_ndim) @@ -289,10 +294,10 @@ def check_niimg_3d(niimg, dtype=None): Parameters ---------- niimg: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html If niimg is a string, consider it as a path to Nifti image and - call nibabel.load on it. If it is an object, check if get_data() - and get_affine() methods are present, raise TypeError otherwise. + call nibabel.load on it. If it is an object, check if the get_data() + method and affine attribute are present, raise TypeError otherwise. dtype: {dtype, "auto"} Data type toward which the data should be converted. If "auto", the @@ -303,7 +308,7 @@ def check_niimg_3d(niimg, dtype=None): ------- result: 3D Niimg-like object Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed - that the returned object has get_data() and get_affine() methods. 
+ that the returned object has get_data() method and affine attribute. Notes ----- @@ -323,12 +328,12 @@ def check_niimg_4d(niimg, return_iterator=False, dtype=None): Parameters ---------- niimg: 4D Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html If niimgs is an iterable, checks if data is really 4D. Then, considering that it is a list of niimg and load them one by one. If niimg is a string, consider it as a path to Nifti image and - call nibabel.load on it. If it is an object, check if get_data - and get_affine methods are present, raise an Exception otherwise. + call nibabel.load on it. If it is an object, check if the get_data() + method and affine attribute are present, raise an Exception otherwise. dtype: {dtype, "auto"} Data type toward which the data should be converted. If "auto", the @@ -367,8 +372,8 @@ def concat_niimgs(niimgs, dtype=np.float32, ensure_ndim=None, Parameters ---------- - niimgs: iterable of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + niimgs: iterable of Niimg-like objects or glob pattern + See http://nilearn.github.io/manipulating_images/input_output.html Niimgs to concatenate. dtype: numpy dtype, optional @@ -397,6 +402,11 @@ def concat_niimgs(niimgs, dtype=np.float32, ensure_ndim=None, ------- concatenated: nibabel.Nifti1Image A single image. + + See Also + -------- + nilearn.image.index_img + """ from ..image import new_img_like # avoid circular imports @@ -407,6 +417,9 @@ def concat_niimgs(niimgs, dtype=np.float32, ensure_ndim=None, if ensure_ndim is not None: ndim = ensure_ndim - 1 + # If niimgs is a string, use glob to expand it to the matching filenames. + niimgs = _resolve_globbing(niimgs) + # First niimg is extracted to get information and for new_img_like first_niimg = None @@ -441,6 +454,8 @@ def concat_niimgs(niimgs, dtype=np.float32, ensure_ndim=None, lengths.append(niimg.shape[-1] if ndim == 4 else 1) target_shape = first_niimg.shape[:3] + if dtype == None: + dtype = first_niimg.get_data().dtype data = np.ndarray(target_shape + (sum(lengths), ), order="F", dtype=dtype) cur_4d_index = 0 @@ -458,4 +473,4 @@ def concat_niimgs(niimgs, dtype=np.float32, ensure_ndim=None, data[..., cur_4d_index:cur_4d_index + size] = niimg.get_data() cur_4d_index += size - return new_img_like(first_niimg, data, first_niimg.get_affine()) + return new_img_like(first_niimg, data, first_niimg.affine, copy_header=True) diff --git a/nilearn/_utils/numpy_conversions.py b/nilearn/_utils/numpy_conversions.py index 434a9edd9d..53331a82ee 100644 --- a/nilearn/_utils/numpy_conversions.py +++ b/nilearn/_utils/numpy_conversions.py @@ -56,7 +56,7 @@ def as_ndarray(arr, copy=False, dtype=None, order='K'): array is recovered. Parameters - ========== + ---------- arr: array-like input array. Any value accepted by numpy.asarray is valid. @@ -73,7 +73,7 @@ def as_ndarray(arr, copy=False, dtype=None, order='K'): default is "K". See ndarray.copy() for more information. Returns - ======= + ------- ret: numpy.ndarray Numpy array containing the same data as arr, always of class numpy.ndarray, and with no link to any underlying file. @@ -152,12 +152,14 @@ def csv_to_array(csv_path, delimiters=' \t,;', **kwargs): if not isinstance(csv_path, _basestring): raise TypeError('CSV must be a file path. Got a CSV of type: %s' % type(csv_path)) - # First, we try genfromtxt which works in most cases. 
- array = np.genfromtxt(csv_path, **kwargs) - if array.ndim <= 1 and np.all(np.isnan(array)): - # If the delimiter is not known genfromtxt generates an array full of - # nan. In that case, we try to guess the delimiter + try: + # First, we try genfromtxt which works in most cases. + array = np.genfromtxt(csv_path, loose=False, **kwargs) + except ValueError: + # There was an error during the conversion to numpy array, probably + # because the delimiter is wrong. + # In that case, we try to guess the delimiter. try: with open(csv_path, 'r') as csv_file: dialect = csv.Sniffer().sniff(csv_file.readline(), delimiters) diff --git a/nilearn/_utils/param_validation.py b/nilearn/_utils/param_validation.py index 48709567ce..30db3ed060 100644 --- a/nilearn/_utils/param_validation.py +++ b/nilearn/_utils/param_validation.py @@ -1,15 +1,23 @@ """ Utilities to check for valid parameters """ - -import numbers +import numpy as np import warnings +import numbers + +from sklearn.feature_selection import (SelectPercentile, f_regression, + f_classif) from .compat import _basestring +# Volume of a standard (MNI152) brain mask in mm^3 +MNI152_BRAIN_VOLUME = 1827243. + + def check_threshold(threshold, data, percentile_func, name='threshold'): - """ Checks if the given threshold is in correct format and within the limit. + """ Checks if the given threshold is in correct format and within the + limit. If necessary, this function also returns score of the data calculated based upon the given specific percentile function. @@ -20,8 +28,8 @@ def check_threshold(threshold, data, percentile_func, name='threshold'): threshold: float or str If threshold is a float value, it should be within the range of the maximum intensity value of the data. - If threshold is a percentage expressed in a string it must finish with a - percent sign like "99.7%". + If threshold is a percentage expressed in a string it must finish with + a percent sign like "99.7%". data: ndarray an array of the input masked data. percentile_func: function {scoreatpercentile, fastabspercentile} @@ -56,10 +64,130 @@ def check_threshold(threshold, data, percentile_func, name='threshold'): # value of the image data value_check = abs(data).max() if abs(threshold) > value_check: - warnings.warn("The given float value must not exceed %d. " - "But, you have given threshold=%s " % (value_check, - threshold)) + warnings.warn("The given float value must not exceed {0}. " + "But, you have given threshold={1} ".format(value_check, + threshold)) else: raise TypeError('%s should be either a number ' 'or a string finishing with a percent sign' % (name, )) return threshold + + +def _get_mask_volume(mask_img): + """Computes the volume of a brain mask in mm^3 + + Parameters + ---------- + mask_img : nibabel image object + Input image whose voxel dimensions are to be computed. + + Returns + ------- + vol : float + The computed volume. + """ + affine = mask_img.affine + prod_vox_dims = 1. * np.abs(np.linalg.det(affine[:3, :3])) + return prod_vox_dims * mask_img.get_data().astype(np.bool).sum() + + +def _adjust_screening_percentile(screening_percentile, mask_img, + verbose=0): + """Adjusts the screening percentile according to the MNI152 template. + + Parameters + ---------- + screening_percentile : float in the interval [0, 100] + Percentile value for ANOVA univariate feature selection. A value of + 100 means 'keep all features'. 
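# A worked example of _get_mask_volume above: with a 3 mm isotropic affine,
# |det(affine[:3, :3])| = 27 mm^3 per voxel, so a 1000-voxel mask has a
# volume of 27000 mm^3.
import numpy as np
import nibabel
from nilearn._utils.param_validation import _get_mask_volume

mask_img = nibabel.Nifti1Image(np.ones((10, 10, 10)), np.diag([3, 3, 3, 1]))
volume = _get_mask_volume(mask_img)  # -> 27000.0
# The correction below then rescales a screening percentile of, say, 20 to
# min(20 * 1827243. / 27000., 100.) = 100., i.e. keep all features.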
This percentile is expressed + w.r.t the volume of a standard (MNI152) brain, and so is corrected + at runtime by premultiplying it with the ratio of the volume of the + mask of the data and volume of a standard brain. + + mask_img : nibabel image object + Input image whose voxel dimensions are to be computed. + + verbose : int, optional (default 0) + Verbosity level. + + Returns + ------- + screening_percentile: float in the interval [0, 100] + Percentile value for ANOVA univariate feature selection. + """ + original_screening_percentile = screening_percentile + # correct screening_percentile according to the volume of the data mask + mask_volume = _get_mask_volume(mask_img) + if mask_volume > 1.1 * MNI152_BRAIN_VOLUME: + warnings.warn( + "Brain mask is bigger than the volume of a standard " + "human brain. This object is probably not tuned to " + "be used on such data.", stacklevel=2) + elif mask_volume < .005 * MNI152_BRAIN_VOLUME: + warnings.warn( + "Brain mask is smaller than .5% of the volume " + "human brain. This object is probably not tuned to" + "be used on such data.", stacklevel=2) + + if screening_percentile < 100.: + screening_percentile = screening_percentile * ( + MNI152_BRAIN_VOLUME / mask_volume) + screening_percentile = min(screening_percentile, 100.) + # if screening_percentile is 100, we don't do anything + + if verbose > 1: + print("Mask volume = %gmm^3 = %gcm^3" % ( + mask_volume, mask_volume / 1.e3)) + print("Standard brain volume = %gmm^3 = %gcm^3" % ( + MNI152_BRAIN_VOLUME, MNI152_BRAIN_VOLUME / 1.e3)) + print("Original screening-percentile: %g" % ( + original_screening_percentile)) + print("Volume-corrected screening-percentile: %g" % ( + screening_percentile)) + return screening_percentile + + +def check_feature_screening(screening_percentile, mask_img, + is_classification, verbose=0): + """Check feature screening method. Turns floats between 1 and 100 into + SelectPercentile objects. + + Parameters + ---------- + screening_percentile : float in the interval [0, 100] + Percentile value for ANOVA univariate feature selection. A value of + 100 means 'keep all features'. This percentile is expressed + w.r.t the volume of a standard (MNI152) brain, and so is corrected + at runtime by premultiplying it with the ratio of the volume of the + mask of the data and volume of a standard brain. + + mask_img : nibabel image object + Input image whose voxel dimensions are to be computed. + + is_classification : bool + If is_classification is True, it indicates that a classification task + is performed. Otherwise, a regression task is performed. + + verbose : int, optional (default 0) + Verbosity level. + + Returns + ------- + selector : SelectPercentile instance + Used to perform the ANOVA univariate feature selection. + """ + + f_test = f_classif if is_classification else f_regression + + if screening_percentile == 100 or screening_percentile is None: + return None + elif not (0. 
<= screening_percentile <= 100.): + raise ValueError( + ("screening_percentile should be in the interval" + " [0, 100], got %g" % screening_percentile)) + else: + # correct screening_percentile according to the volume of the data mask + screening_percentile_ = _adjust_screening_percentile( + screening_percentile, mask_img, verbose=verbose) + + return SelectPercentile(f_test, int(screening_percentile_)) diff --git a/nilearn/_utils/segmentation.py b/nilearn/_utils/segmentation.py index b298d86004..93882d1a7d 100644 --- a/nilearn/_utils/segmentation.py +++ b/nilearn/_utils/segmentation.py @@ -268,7 +268,7 @@ def _random_walker(data, labels, beta=130, tol=1.e-3, copy=True, spacing=None): if np.any(np.diff(label_values) != 1): mask = labels >= 0 labels[mask] = np.searchsorted(np.unique(labels[mask]), - labels[mask])[0].astype(labels.dtype) + labels[mask]).astype(labels.dtype) labels = labels.astype(np.int32) # If the array has pruned zones, be sure that no isolated pixels diff --git a/nilearn/_utils/testing.py b/nilearn/_utils/testing.py index 54c860797d..e6a4f5e14f 100644 --- a/nilearn/_utils/testing.py +++ b/nilearn/_utils/testing.py @@ -1,6 +1,5 @@ -"""Utilities for testing nilearn. -""" -# Author: Alexandre Abrahame, Philippe Gervais +"""Utilities for testing nilearn.""" +# Author: Alexandre Abraham, Philippe Gervais # License: simplified BSD import contextlib import functools @@ -10,10 +9,12 @@ import sys import tempfile import warnings +import gc import numpy as np import scipy.signal from sklearn.utils import check_random_state +from sklearn.utils.testing import assert_warns import scipy.linalg import nibabel @@ -27,37 +28,70 @@ from nose.tools import assert_raises_regex except ImportError: # For Py 2.7 - try: - from nose.tools import assert_raises_regexp as assert_raises_regex - except ImportError: - # for Py 2.6 - def assert_raises_regex(expected_exception, expected_regexp, - callable_obj=None, *args, **kwargs): - """Helper function to check for message patterns in exceptions""" - - not_raised = False - try: - callable_obj(*args, **kwargs) - not_raised = True - except Exception as e: - error_message = str(e) - if not re.compile(expected_regexp).search(error_message): - raise AssertionError("Error message should match pattern " - "%r. %r does not." 
% - (expected_regexp, error_message)) - if not_raised: - raise AssertionError("Should have raised %r" % - expected_exception(expected_regexp)) + from nose.tools import assert_raises_regexp as assert_raises_regex + +# we use memory_profiler library for memory consumption checks try: - from sklearn.utils.testing import assert_warns + from memory_profiler import memory_usage + + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + return func + + def memory_used(func, *args, **kwargs): + """Compute memory usage when executing func.""" + def func_3_times(*args, **kwargs): + for _ in range(3): + func(*args, **kwargs) + + gc.collect() + mem_use = memory_usage((func_3_times, args, kwargs), interval=0.001) + return max(mem_use) - min(mem_use) + except ImportError: - # sklearn.utils.testing.assert_warns new in scikit-learn 0.14 - def assert_warns(warning_class, func, *args, **kw): - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", warning_class) - output = func(*args, **kw) - return output + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + def dummy_func(): + import nose + raise nose.SkipTest('Test requires memory_profiler.') + return dummy_func + + memory_usage = memory_used = None + + +def assert_memory_less_than(memory_limit, tolerance, + callable_obj, *args, **kwargs): + """Check memory consumption of a callable stays below a given limit. + + Parameters + ---------- + memory_limit : int + The expected memory limit in MiB. + tolerance: float + As memory_profiler results have some variability, this adds some + tolerance around memory_limit. Accepted values are in range [0.0, 1.0]. + callable_obj: callable + The function to be called to check memory consumption. + + """ + mem_used = memory_used(callable_obj, *args, **kwargs) + + if mem_used > memory_limit * (1 + tolerance): + raise ValueError("Memory consumption measured ({0:.2f} MiB) is " + "greater than required memory limit ({1} MiB) within " + "accepted tolerance ({2:.2f}%)." + "".format(mem_used, memory_limit, tolerance * 100)) + + # We are confident in memory_profiler measures above 100MiB. + # We raise an error if the measure is below the limit of 50MiB to avoid + # false positive. + if mem_used < 50: + raise ValueError("Memory profiler measured an untrustable memory " + "consumption ({0:.2f} MiB). The expected memory " + "limit was {1:.2f} MiB. Try to bench with larger " + "objects (at least 100MiB in memory).". + format(mem_used, memory_limit)) class MockRequest(object): @@ -84,7 +118,7 @@ def write_tmp_imgs(*imgs, **kwargs): the block. Parameters - ========== + ---------- imgs: Nifti1Image Several Nifti images. Every format understood by nibabel.save is accepted. @@ -100,7 +134,7 @@ def write_tmp_imgs(*imgs, **kwargs): matching glob is returned. Returns - ======= + ------- filenames: string or list of filename(s) where input images have been written. If a single image has been given as input, a single string is returned. Otherwise, a @@ -130,6 +164,7 @@ def write_tmp_imgs(*imgs, **kwargs): dir=None) filenames.append(filename) img.to_filename(filename) + del img if use_wildcards: yield prefix + "*" + suffix @@ -239,14 +274,14 @@ def generate_regions_ts(n_features, n_regions, """Generate some regions as timeseries. Parameters - ========== + ---------- overlap: int Number of overlapping voxels between two regions (more or less) window: str Name of a window in scipy.signal. e.g. "hamming". 
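# A hedged sketch of assert_memory_less_than defined above: the callable runs
# three times under memory_profiler and its measured memory span is compared
# to the limit (sizes are illustrative; memory_profiler must be installed).
import numpy as np
from nilearn._utils.testing import assert_memory_less_than

def allocate():
    np.ones((4000, 4000))  # ~122 MiB of float64

assert_memory_less_than(200, 0.1, allocate)
# Raises if usage exceeds 220 MiB, or if the measure falls under the 50 MiB
# trust threshold mentioned above.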
Returns - ======= + ------- regions: numpy.ndarray regions, nepresented as signals. shape (n_features, n_regions) @@ -284,7 +319,7 @@ def generate_maps(shape, n_regions, overlap=0, border=1, window="boxcar", rand_gen=None, affine=np.eye(4)): """Generate a 4D volume containing several maps. Parameters - ========== + ---------- n_regions: int number of regions to generate @@ -298,7 +333,7 @@ def generate_maps(shape, n_regions, overlap=0, border=1, number of background voxels on each side of the 3D volumes. Returns - ======= + ------- maps: nibabel.Nifti1Image 4D array, containing maps. """ @@ -316,7 +351,7 @@ def generate_labeled_regions(shape, n_regions, rand_gen=None, labels=None, """Generate a 3D volume with labeled regions. Parameters - ========== + ---------- shape: tuple shape of returned array @@ -334,7 +369,7 @@ def generate_labeled_regions(shape, n_regions, rand_gen=None, labels=None, affine of returned image Returns - ======= + ------- regions: nibabel.Nifti1Image data has shape "shape", containing region labels. """ @@ -382,7 +417,7 @@ def generate_fake_fmri(shape=(10, 11, 12), length=17, kind="noise", to 'rest' or 'baseline' condition. Parameters - ========== + ---------- shape: tuple, optional Shape of 3D volume @@ -409,7 +444,7 @@ def generate_fake_fmri(shape=(10, 11, 12), length=17, kind="noise", 'classification' or 'regression'. Returns - ======= + ------- fmri: nibabel.Nifti1Image fake fmri signal. shape: shape + (length,) @@ -531,7 +566,7 @@ def generate_group_sparse_gaussian_graphs( """Generate signals drawn from a sparse Gaussian graphical model. Parameters - ========== + ---------- n_subjects : int, optional number of subjects @@ -553,7 +588,7 @@ def generate_group_sparse_gaussian_graphs( verbosity level (0 means no message). Returns - ======= + ------- subjects : list of numpy.ndarray, shape for each (n_samples, n_features) subjects[n] is the signals for subject n. They are provided as a numpy len(subjects) = n_subjects. n_samples varies according to the subject. @@ -623,8 +658,7 @@ def is_nose_running(): return False # Now check that we have the loader in the call stask stack = inspect.stack() - from nose import loader - loader_file_name = loader.__file__ + loader_file_name = nose.loader.__file__ if loader_file_name.endswith('.pyc'): loader_file_name = loader_file_name[:-1] for _, file_name, _, _, _, _ in stack: @@ -637,7 +671,7 @@ def skip_if_running_nose(msg=''): """ Raise a SkipTest if we appear to be running the nose test loader. Parameters - ========== + ---------- msg: string, optional The message issued when SkipTest is raised """ diff --git a/nilearn/connectome/__init__.py b/nilearn/connectome/__init__.py index 982b101ccd..51262f8196 100644 --- a/nilearn/connectome/__init__.py +++ b/nilearn/connectome/__init__.py @@ -4,11 +4,14 @@ of Gaussian graphical models. 
""" -from .connectivity_matrices import sym_to_vec, ConnectivityMeasure +from .connectivity_matrices import (sym_matrix_to_vec, vec_to_sym_matrix, + sym_to_vec, ConnectivityMeasure, + cov_to_corr, prec_to_partial) from .group_sparse_cov import (GroupSparseCovariance, GroupSparseCovarianceCV, group_sparse_covariance) -__all__ = ['sym_to_vec', 'ConnectivityMeasure', +__all__ = ['sym_matrix_to_vec', 'vec_to_sym_matrix', 'sym_to_vec', + 'ConnectivityMeasure', 'cov_to_corr', 'prec_to_partial', 'GroupSparseCovariance', 'GroupSparseCovarianceCV', 'group_sparse_covariance'] diff --git a/nilearn/connectome/connectivity_matrices.py b/nilearn/connectome/connectivity_matrices.py index dd16269cb5..a2b9ad8b85 100644 --- a/nilearn/connectome/connectivity_matrices.py +++ b/nilearn/connectome/connectivity_matrices.py @@ -1,11 +1,13 @@ import warnings -from math import sqrt +from math import sqrt, floor import numpy as np from scipy import linalg from sklearn.base import BaseEstimator, TransformerMixin, clone from sklearn.covariance import LedoitWolf +from sklearn.utils import deprecated +from .. import signal from .._utils.extmath import is_spd @@ -192,9 +194,12 @@ def _geometric_mean(matrices, init=None, max_iter=10, tol=1e-7): return gmean -def sym_to_vec(symmetric): - """Return the flattened lower triangular part of an array, after - multiplying above the diagonal elements by sqrt(2). +@deprecated("Function 'sym_to_vec' has been renamed to " + "'sym_matrix_to_vec' and will be removed in future releases. ") +def sym_to_vec(symmetric, discard_diagonal=False): + """Return the flattened lower triangular part of an array. + If diagonal is kept, diagonal elements are divided by sqrt(2) to conserve + the norm. Acts on the last two dimensions of the array if not 2-dimensional. @@ -205,18 +210,140 @@ def sym_to_vec(symmetric): symmetric : numpy.ndarray, shape (..., n_features, n_features) Input array. + discard_diagonal : boolean, optional + If True, the values of the diagonal are not returned. + Default is False. + + Returns + ------- + output : numpy.ndarray + The output flattened lower triangular part of symmetric. Shape is + (..., n_features * (n_features + 1) / 2) if discard_diagonal is False + and (..., (n_features - 1) * n_features / 2) otherwise. + """ + return sym_matrix_to_vec(symmetric=symmetric, + discard_diagonal=discard_diagonal) + + +def sym_matrix_to_vec(symmetric, discard_diagonal=False): + """Return the flattened lower triangular part of an array. + + If diagonal is kept, diagonal elements are divided by sqrt(2) to conserve + the norm. + + Acts on the last two dimensions of the array if not 2-dimensional. + + .. versionadded:: 0.3 + + Parameters + ---------- + symmetric : numpy.ndarray or list of numpy arrays, shape\ + (..., n_features, n_features) + Input array. + + discard_diagonal : boolean, optional + If True, the values of the diagonal are not returned. + Default is False. + Returns ------- - output : numpy.ndarray, shape (..., n_features * (n_features + 1) / 2) - The output flattened lower triangular part of symmetric. + output : numpy.ndarray + The output flattened lower triangular part of symmetric. Shape is + (..., n_features * (n_features + 1) / 2) if discard_diagonal is False + and (..., (n_features - 1) * n_features / 2) otherwise. + + """ - scaling = sqrt(2) * np.ones(symmetric.shape[-2:]) - np.fill_diagonal(scaling, 1.) 
+ if discard_diagonal: + # No scaling, we directly return the values + tril_mask = np.tril(np.ones(symmetric.shape[-2:]), k=-1).astype( + np.bool) + return symmetric[..., tril_mask] + scaling = np.ones(symmetric.shape[-2:]) + np.fill_diagonal(scaling, sqrt(2.)) tril_mask = np.tril(np.ones(symmetric.shape[-2:])).astype(np.bool) - return symmetric[..., tril_mask] * scaling[tril_mask] + return symmetric[..., tril_mask] / scaling[tril_mask] + + + def vec_to_sym_matrix(vec, diagonal=None): + """Return the symmetric matrix given its flattened lower triangular part. + + Acts on the last dimension of the array if not 1-dimensional. + The diagonal can be included in vec or given separately. In both cases, note + that diagonal elements are multiplied by sqrt(2). + + .. versionadded:: 0.3 + + Parameters + ---------- + vec : numpy.ndarray or list of numpy arrays, shape \ + (..., n_columns * (n_columns + 1) / 2) or + (..., (n_columns - 1) * n_columns / 2) if diagonal is given separately. + The input array. + + diagonal : numpy.ndarray, shape (..., n_columns), optional + The diagonal array to be stacked onto vec. If None, the diagonal is + assumed to be included in vec. + + Returns + ------- + sym : numpy.ndarray, shape (..., n_columns, n_columns). + The output symmetric matrix. + + Notes + ----- + This function is meant to be the inverse of sym_matrix_to_vec. If you have + discarded the diagonal in sym_matrix_to_vec, you need to provide it + separately to reconstruct the symmetric matrix. For instance this can be + useful for correlation matrices for which we know the diagonal is 1. + + See Also + -------- + nilearn.connectome.sym_matrix_to_vec + """ + n = vec.shape[-1] + # Compute the number of the symmetric matrix columns + # solve n_columns * (n_columns + 1) / 2 = n subject to n_columns > 0 + n_columns = (sqrt(8 * n + 1) - 1.) / 2 + if diagonal is not None: + n_columns += 1 + + if n_columns > floor(n_columns): + raise ValueError( + "Vector of unsuitable shape {0} cannot be transformed to " + "a symmetric matrix.".format(vec.shape)) + + n_columns = int(n_columns) + first_shape = vec.shape[:-1] + if diagonal is not None: + if diagonal.shape[:-1] != first_shape or\ + diagonal.shape[-1] != n_columns: + raise ValueError("diagonal of shape {0} incompatible with vector " + "of shape {1}".format(diagonal.shape, vec.shape)) + + sym = np.zeros(first_shape + (n_columns, n_columns)) + + # Fill lower triangular part + skip_diagonal = (diagonal is not None) + mask = np.tril(np.ones((n_columns, n_columns)), k=-skip_diagonal).astype( + np.bool) + sym[..., mask] = vec + + # Fill upper triangular part + sym.swapaxes(-1, -2)[..., mask] = vec + + # (Fill and) rescale diagonal terms + mask.fill(False) + np.fill_diagonal(mask, True) + if diagonal is not None: + sym[..., mask] = diagonal + + sym[..., mask] *= sqrt(2) + return sym -def _cov_to_corr(covariance): + + +def cov_to_corr(covariance): """Return correlation matrix for a given covariance matrix. Parameters @@ -231,10 +358,13 @@ def _cov_to_corr(covariance): diagonal = np.atleast_2d(1. / np.sqrt(np.diag(covariance))) correlation = covariance * diagonal * diagonal.T + + # Force exact 1. on diagonal + np.fill_diagonal(correlation, 1.) return correlation -def _prec_to_partial(precision): +def prec_to_partial(precision): """Return partial correlation matrix for a given precision matrix. Parameters @@ -247,7 +377,7 @@ def _prec_to_partial(precision): partial_correlation : 2D numpy.ndarray The 2D output partial correlation matrix.
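# A round-trip sketch for the two helpers above: diagonal terms are divided
# by sqrt(2) in vector form and rescaled on reconstruction, so the functions
# are exact inverses when the diagonal is kept.
import numpy as np
from nilearn.connectome import sym_matrix_to_vec, vec_to_sym_matrix

sym = np.array([[1., 2.], [2., 3.]])
vec = sym_matrix_to_vec(sym)   # array([1/sqrt(2), 2., 3/sqrt(2)])
back = vec_to_sym_matrix(vec)  # recovers sym exactly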
""" - partial_correlation = -_cov_to_corr(precision) + partial_correlation = -cov_to_corr(precision) np.fill_diagonal(partial_correlation, 1.) return partial_correlation @@ -261,19 +391,33 @@ class ConnectivityMeasure(BaseEstimator, TransformerMixin): Parameters ---------- cov_estimator : estimator object, optional. - The covariance estimator. + The covariance estimator. By default the LedoitWolf estimator + is used. This implies that correlations are slightly shrunk + towards zero compared to a maximum-likelihood estimate kind : {"correlation", "partial correlation", "tangent",\ "covariance", "precision"}, optional The matrix kind. + vectorize : bool, optional + If True, connectivity matrices are reshaped into 1D arrays and only + their flattened lower triangular parts are returned. + + discard_diagonal : bool, optional + If True, vectorized connectivity coefficients do not include the + matrices diagonal elements. Used only when vectorize is set to True. + Attributes ---------- `cov_estimator_` : estimator object A new covariance estimator with the same parameters as cov_estimator. `mean_` : numpy.ndarray - The mean connectivity for the tangent kind. + The mean connectivity matrix across subjects. For 'tangent' kind, + it is the geometric mean of covariances (a group covariance + matrix that captures information from both correlation and partial + correlation matrices). For other values for "kind", it is the + mean of the corresponding matrices `whitening_` : numpy.ndarray The inverted square-rooted geometric mean of the covariance matrices. @@ -285,10 +429,34 @@ class ConnectivityMeasure(BaseEstimator, TransformerMixin): in post-stroke patients using group-level covariance modeling, MICCAI 2010. """ - def __init__(self, cov_estimator=LedoitWolf(), - kind='covariance'): + def __init__(self, cov_estimator=LedoitWolf(store_precision=False), + kind='covariance', vectorize=False, discard_diagonal=False): self.cov_estimator = cov_estimator self.kind = kind + self.vectorize = vectorize + self.discard_diagonal = discard_diagonal + + def _check_input(self, X): + if not hasattr(X, "__iter__"): + raise ValueError("'subjects' input argument must be an iterable. " + "You provided {0}".format(X.__class__)) + + subjects_types = [type(s) for s in X] + if set(subjects_types) != set([np.ndarray]): + raise ValueError("Each subject must be 2D numpy.ndarray.\n You " + "provided {0}".format(str(subjects_types))) + + subjects_dims = [s.ndim for s in X] + if set(subjects_dims) != set([2]): + raise ValueError("Each subject must be 2D numpy.ndarray.\n You" + "provided arrays of dimensions " + "{0}".format(str(subjects_dims))) + + features_dims = [s.shape[1] for s in X] + if len(set(features_dims)) > 1: + raise ValueError("All subjects must have the same number of " + "features.\nYou provided: " + "{0}".format(str(features_dims))) def fit(self, X, y=None): """Fit the covariance estimator to the given time series for each @@ -296,23 +464,86 @@ def fit(self, X, y=None): Parameters ---------- - X : list of numpy.ndarray, shapes (n_samples, n_features) - The input subjects time series. + X : list of numpy.ndarray, shape for each (n_samples, n_features) + The input subjects time series. The number of samples may differ + from one subject to another. Returns ------- self : ConnectivityMatrix instance The object itself. Useful for chaining operations. 
""" - self.cov_estimator_ = clone(self.cov_estimator) + self._fit_transform(X, do_fit=True) + return self - if self.kind == 'tangent': + def _fit_transform(self, X, do_transform=False, do_fit=False): + """ Internal function to avoid duplication of computation + """ + self._check_input(X) + if do_fit: + self.cov_estimator_ = clone(self.cov_estimator) + + # Compute all the matrices, stored in "connectivities" + if self.kind == 'correlation': + covariances_std = [self.cov_estimator_.fit( + signal._standardize(x, detrend=False, normalize=True) + ).covariance_ for x in X] + connectivities = [cov_to_corr(cov) for cov in covariances_std] + else: covariances = [self.cov_estimator_.fit(x).covariance_ for x in X] - self.mean_ = _geometric_mean(covariances, max_iter=30, tol=1e-7) - self.whitening_ = _map_eigenvalues(lambda x: 1. / np.sqrt(x), - self.mean_) + if self.kind in ('covariance', 'tangent'): + connectivities = covariances + elif self.kind == 'precision': + connectivities = [linalg.inv(cov) for cov in covariances] + elif self.kind == 'partial correlation': + connectivities = [prec_to_partial(linalg.inv(cov)) + for cov in covariances] + else: + raise ValueError('Allowed connectivity kinds are ' + '"correlation", ' + '"partial correlation", "tangent", ' + '"covariance" and "precision", got kind ' + '"{}"'.format(self.kind)) + + # Store the mean + if do_fit: + if self.kind == 'tangent': + self.mean_ = _geometric_mean(covariances, max_iter=30, tol=1e-7) + self.whitening_ = _map_eigenvalues(lambda x: 1. / np.sqrt(x), + self.mean_) + else: + self.mean_ = np.mean(connectivities, axis=0) + # Fight numerical instabilities: make symmetric + self.mean_ = self.mean_ + self.mean_.T + self.mean_ *= .5 + + # Compute the vector we return on transform + if do_transform: + if self.kind == 'tangent': + connectivities = [_map_eigenvalues(np.log, self.whitening_.dot( + cov).dot(self.whitening_)) + for cov in connectivities] + + connectivities = np.array(connectivities) + if self.vectorize: + connectivities = sym_matrix_to_vec( + connectivities, discard_diagonal=self.discard_diagonal) + + return connectivities + + def fit_transform(self, X, y=None): + if self.kind == 'tangent': + # Check that people are applying fit_transform to a group of + # subject + # We can only impose this in fit_transform, as it is legit to + # fit only on a single given reference point + if not len(X) > 1: + raise ValueError("Tangent space parametrization can only " + "be applied to a group of subjects, as it returns " + "deviations to the mean. You provided %r" % X + ) + return self._fit_transform(X, do_fit=True, do_transform=True) - return self def transform(self, X): """Apply transform to covariances matrices to get the connectivity @@ -320,33 +551,74 @@ def transform(self, X): Parameters ---------- - X : list of numpy.ndarray with shapes (n_samples, n_features) - The input subjects time series. + X : list of n_subjects numpy.ndarray with shapes \ + (n_samples, n_features) + The input subjects time series. The number of samples may differ + from one subject to another. Returns ------- - output : numpy.ndarray, shape (n_samples, n_features, n_features) - The transformed connectivity matrices. + output : numpy.ndarray, shape (n_subjects, n_features, n_features) or \ + (n_subjects, n_features * (n_features + 1) / 2) if vectorize \ + is set to True. + The transformed individual connectivities, as matrices or vectors. 
""" - covariances = [self.cov_estimator_.fit(x).covariance_ for x in X] - covariances = np.array(covariances) - if self.kind == 'covariance': - connectivities = covariances - elif self.kind == 'tangent': - connectivities = [_map_eigenvalues(np.log, self.whitening_.dot( - cov).dot(self.whitening_)) - for cov in covariances] - elif self.kind == 'precision': - connectivities = [linalg.inv(cov) for cov in covariances] - elif self.kind == 'partial correlation': - connectivities = [_prec_to_partial(linalg.inv(cov)) - for cov in covariances] - elif self.kind == 'correlation': - connectivities = [_cov_to_corr(cov) for cov in covariances] - else: - raise ValueError('Allowed connectivity kinds are "correlation", ' - '"partial correlation", "tangent", ' - '"covariance" and "precision", got kind ' - '"{}"'.format(self.kind)) + self._check_fitted() + return self._fit_transform(X, do_transform=True) + + def _check_fitted(self): + if not hasattr(self, "cov_estimator_"): + raise ValueError('It seems that {0} has not been fitted. ' + 'You must call fit() before calling ' + 'transform().'.format(self.__class__.__name__) + ) + + def inverse_transform(self, connectivities, diagonal=None): + """Returns connectivity matrices from connectivities, vectorized or not. + + If kind is 'tangent', the covariance matrices are reconstructed. + + Parameters + ---------- + connectivities : list of n_subjects numpy.ndarray with shapes\ + (n_features, n_features) or (n_features * (n_features + 1) / 2,) + or ((n_features - 1) * n_features / 2,) + Connectivities of each subject, vectorized or not. + + diagonal : numpy.ndarray, shape (n_subjects, n_features), optional + The diagonals of the connectivity matrices. + + Returns + ------- + output : numpy.ndarray, shape (n_subjects, n_features, n_features) + The corresponding connectivity matrices. If kind is 'correlation'/ + 'partial correlation', the correlation/partial correlation + matrices are returned. + If kind is 'tangent', the covariance matrices are reconstructed. + """ + self._check_fitted() + + connectivities = np.array(connectivities) + if self.vectorize: + if self.discard_diagonal: + if diagonal is None: + if self.kind in ['correlation', 'partial correlation']: + diagonal = np.ones((connectivities.shape[0], + self.mean_.shape[0])) / sqrt(2.) 
+ else: + raise ValueError("diagonal values have been discarded " + "and are unknown for {0} kind; cannot " + "reconstruct connectivity " + "matrices.".format(self.kind)) + + connectivities = vec_to_sym_matrix(connectivities, + diagonal=diagonal) + + if self.kind == 'tangent': + mean_sqrt = _map_eigenvalues(lambda x: np.sqrt(x), self.mean_) + connectivities = [mean_sqrt.dot( + _map_eigenvalues(np.exp, displacement)).dot(mean_sqrt) + for displacement in connectivities] + connectivities = np.array(connectivities) - return np.array(connectivities) + return connectivities diff --git a/nilearn/connectome/group_sparse_cov.py b/nilearn/connectome/group_sparse_cov.py index 1a2570bf23..2f98023732 100644 --- a/nilearn/connectome/group_sparse_cov.py +++ b/nilearn/connectome/group_sparse_cov.py @@ -5,6 +5,7 @@ # Authors: Philippe Gervais # License: simplified BSD +from distutils.version import LooseVersion import warnings import collections import operator @@ -13,17 +14,17 @@ import numpy as np import scipy.linalg -import sklearn.cross_validation -import sklearn.covariance -from sklearn.utils.extmath import fast_logdet -from sklearn.covariance import empirical_covariance +import sklearn from sklearn.base import BaseEstimator - +from sklearn.covariance import empirical_covariance from sklearn.externals.joblib import Memory, delayed, Parallel +from sklearn.model_selection import check_cv +from sklearn.utils.extmath import fast_logdet from .._utils import CacheMixin from .._utils import logger from .._utils.extmath import is_spd +from .._utils.compat import izip def compute_alpha_max(emp_covs, n_samples): @@ -145,7 +146,7 @@ def group_sparse_covariance(subjects, alpha, max_iter=50, tol=1e-3, verbose=0, but cubic in the number of features (subjects[0].shape[1]). Parameters - ========== + ---------- subjects : list of numpy.ndarray input subjects. Each subject is a 2D array, whose columns contain signals. Each array shape must be (sample number, feature number). @@ -191,7 +192,7 @@ def group_sparse_covariance(subjects, alpha, max_iter=50, tol=1e-3, verbose=0, numerical problems, but increases computation time a lot. Returns - ======= + ------- emp_covs : numpy.ndarray, shape (n_features, n_features, n_subjects) empirical covariance matrices @@ -199,7 +200,7 @@ estimated precision matrices Notes - ===== + ----- The present algorithm is based on: Jean Honorio and Dimitris Samaras. @@ -929,7 +930,7 @@ def fit(self, subjects, y=None): subjects must have the same number of features (i.e. number of columns). Returns - ======= + ------- self: GroupSparseCovarianceCV the object instance itself. """ @@ -942,9 +943,11 @@ def fit(self, subjects, y=None): # can have a different number of samples from the others. cv = [] for k in range(n_subjects): - cv.append(sklearn.cross_validation.check_cv( - self.cv, subjects[k], None, classifier=False)) - + cv.append(check_cv( + self.cv, np.ones(subjects[k].shape[0]), + classifier=False + ).split(subjects[k]) + ) path = list() # List of (alpha, scores, covs) n_alphas = self.alphas @@ -957,13 +960,17 @@ def fit(self, subjects, y=None): alpha_1, _ = compute_alpha_max(emp_covs, n_samples) alpha_0 = 1e-2 * alpha_1 alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), - n_alphas)[::-1] + n_alphas)[::-1] covs_init = itertools.repeat(None) + + # Copying the cv generators to use them n_refinements times.
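# (Note on the change above: sklearn.model_selection.check_cv returns a CV
# splitter whose .split() method yields train/test indices, whereas the old
# sklearn.cross_validation.check_cv returned an iterable of splits directly;
# hence the explicit .split(subjects[k]) call and the itertools.tee copies
# used below.)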
+ cv_ = izip(*cv) + + for i, (this_cv) in enumerate(itertools.tee(cv_, n_refinements)): # Compute the cross-validated loss on the current grid train_test_subjs = [] - for train_test in zip(*cv): + for train_test in this_cv: assert(len(train_test) == n_subjects) train_test_subjs.append(list(zip(*[(subject[train, :], subject[test, :]) diff --git a/nilearn/connectome/tests/test_connectivity_matrices.py b/nilearn/connectome/tests/test_connectivity_matrices.py index 68df0c1f32..125c0a88d3 100644 --- a/nilearn/connectome/tests/test_connectivity_matrices.py +++ b/nilearn/connectome/tests/test_connectivity_matrices.py @@ -7,12 +7,15 @@ from numpy.testing import assert_array_almost_equal, assert_array_equal from nose.tools import assert_raises, assert_equal, assert_true from sklearn.utils import check_random_state -from sklearn.covariance import EmpiricalCovariance +from sklearn.covariance import EmpiricalCovariance, LedoitWolf from nilearn._utils.extmath import is_spd +from nilearn._utils.testing import assert_raises_regex +from nilearn.tests.test_signal import generate_signals from nilearn.connectome.connectivity_matrices import ( _check_square, _check_spd, _map_eigenvalues, _form_symmetric, - _geometric_mean, sym_to_vec, _prec_to_partial, ConnectivityMeasure) + _geometric_mean, sym_matrix_to_vec, vec_to_sym_matrix, prec_to_partial, + ConnectivityMeasure) def grad_geometric_mean(mats, init=None, max_iter=10, tol=1e-7): @@ -70,13 +73,11 @@ def grad_geometric_mean(mats, init=None, max_iter=10, tol=1e-7): def test_check_square(): - """Test _check_square function""" non_square = np.ones((2, 3)) assert_raises(ValueError, _check_square, non_square) def test_check_spd(): - """Test _check_spd function""" non_sym = np.array([[0, 1], [0, 0]]) assert_raises(ValueError, _check_spd, non_sym) @@ -85,7 +86,6 @@ def test_check_spd(): def test_map_eigenvalues(): - """Test _map_eigenvalues function""" # Test on exp map sym = np.ones((2, 2)) sym_exp = exp(1.) 
* np.array([[cosh(1.), sinh(1.)], [sinh(1.), cosh(1.)]]) @@ -103,7 +103,6 @@ def test_geometric_mean_couple(): - """Test _geometric_mean function for two matrices""" n_features = 7 spd1 = np.ones((n_features, n_features)) spd1 = spd1.dot(spd1) + n_features * np.eye(n_features) @@ -118,7 +117,6 @@ def test_geometric_mean_diagonal(): - """Test _geometric_mean function for diagonal matrices""" n_matrices = 20 n_features = 5 diags = [] @@ -133,7 +131,6 @@ def test_geometric_mean_geodesic(): - """Test geometric_mean function for single geodesic matrices""" n_matrices = 10 n_features = 6 sym = np.arange(n_features) / np.linalg.norm(np.arange(n_features)) @@ -246,8 +243,6 @@ def random_non_singular(p, sing_min=1., sing_max=2., random_state=0): def test_geometric_mean_properties(): - """Test _geometric_mean function for random spd matrices - """ n_matrices = 40 n_features = 15 spds = [] @@ -314,9 +309,7 @@ gmean = _geometric_mean(spds, max_iter=max_iter, tol=1e-5) -def test_geometric_mean_checks(): - """Errors check for _geometric_mean function - """ +def test_geometric_mean_errors(): n_features = 5 # Non square input matrix @@ -332,75 +325,273 @@ assert_raises(ValueError, _geometric_mean, [mat2]) -def test_sym_to_vec(): - """Test sym_to_vec function""" +def test_sym_matrix_to_vec(): sym = np.ones((3, 3)) - vec = np.array([1., sqrt(2), 1., sqrt(2), sqrt(2), 1.]) - assert_array_almost_equal(sym_to_vec(sym), vec) + sqrt2 = 1. / sqrt(2.) + vec = np.array([sqrt2, 1., sqrt2, 1., 1., sqrt2]) + assert_array_almost_equal(sym_matrix_to_vec(sym), vec) + + vec = np.array([1., 1., 1.]) + assert_array_almost_equal(sym_matrix_to_vec(sym, discard_diagonal=True), + vec) + + # Check sym_matrix_to_vec is the inverse function of vec_to_sym_matrix + n = 5 + p = n * (n + 1) // 2 + rand_gen = np.random.RandomState(0) + # when diagonal is included + vec = rand_gen.rand(p) + sym = vec_to_sym_matrix(vec) + assert_array_almost_equal(sym_matrix_to_vec(sym), vec) + + # when diagonal given separately + diagonal = rand_gen.rand(n + 1) + sym = vec_to_sym_matrix(vec, diagonal=diagonal) + assert_array_almost_equal(sym_matrix_to_vec(sym, discard_diagonal=True), + vec) + + # multiple matrices case when diagonal is included + vecs = np.asarray([vec, 2. * vec, 0.5 * vec]) + syms = vec_to_sym_matrix(vecs) + assert_array_almost_equal(sym_matrix_to_vec(syms), vecs) + + # multiple matrices case when diagonal is given separately + diagonals = np.asarray([diagonal, 3.
* diagonal, -diagonal]) + syms = vec_to_sym_matrix(vecs, diagonal=diagonals) + assert_array_almost_equal(sym_matrix_to_vec(syms, discard_diagonal=True), + vecs) + + +def test_vec_to_sym_matrix(): + # Check error if unsuitable size + vec = np.ones(31) + assert_raises_regex(ValueError, 'Vector of unsuitable shape', + vec_to_sym_matrix, vec) + + # Check error if given diagonal shape incompatible with vec + vec = np.ones(3) + diagonal = np.zeros(4) + assert_raises_regex(ValueError, 'incompatible with vector', + vec_to_sym_matrix, vec, diagonal) + + # Check output value is correct + vec = np.ones(6, ) + sym = np.array([[sqrt(2), 1., 1.], [1., sqrt(2), 1.], + [1., 1., sqrt(2)]]) + assert_array_almost_equal(vec_to_sym_matrix(vec), sym) + + # Check output value is correct with separate diagonal + vec = np.ones(3, ) + diagonal = np.ones(3) + assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym) + + # Check vec_to_sym_matrix is the inverse function of sym_matrix_to_vec + # when diagonal is included + assert_array_almost_equal(vec_to_sym_matrix(sym_matrix_to_vec(sym)), sym) + + # when diagonal is discarded + vec = sym_matrix_to_vec(sym, discard_diagonal=True) + diagonal = np.diagonal(sym) / sqrt(2) + assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym) def test_prec_to_partial(): - """Test prec_to_partial function""" prec = np.array([[2., -1., 1.], [-1., 2., -1.], [1., -1., 1.]]) partial = np.array([[1., .5, -sqrt(2.) / 2.], [.5, 1., sqrt(2.) / 2.], [-sqrt(2.) / 2., sqrt(2.) / 2., 1.]]) - assert_array_almost_equal(_prec_to_partial(prec), partial) + assert_array_almost_equal(prec_to_partial(prec), partial) + + +def test_connectivity_measure_errors(): + # Raising error for input subjects not iterable + conn_measure = ConnectivityMeasure() + assert_raises(ValueError, conn_measure.fit, 1.)
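+ # (ConnectivityMeasure.fit validates its input: subjects must be an + # iterable of 2D arrays that all share the same number of features, + # as the checks below exercise case by case.)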
+ + # Raising error for input subjects not 2D numpy.ndarrays + assert_raises(ValueError, conn_measure.fit, [np.ones((100, 40)), + np.ones((10,))]) + + # Raising error for input subjects with different number of features + assert_raises(ValueError, conn_measure.fit, + [np.ones((100, 40)), np.ones((100, 41))]) + + # Raising an error for fit_transform with a single subject and + # kind=tangent + conn_measure = ConnectivityMeasure(kind='tangent') + assert_raises(ValueError, conn_measure.fit_transform, + [np.ones((100, 40)), ]) -def test_fit_transform(): - """Test fit_transform method for class ConnectivityMeasure""" + +def test_connectivity_measure_outputs(): n_subjects = 10 n_features = 49 - n_samples = 200 - # Generate signals and compute empirical covariances - covs = [] + # Generate signals and compute covariances + emp_covs = [] + ledoit_covs = [] signals = [] - random_state = check_random_state(0) + ledoit_estimator = LedoitWolf() for k in range(n_subjects): - signal = random_state.randn(n_samples, n_features) + n_samples = 200 + k + signal, _, _ = generate_signals(n_features=n_features, n_confounds=5, + length=n_samples, same_variance=False) signals.append(signal) signal -= signal.mean(axis=0) - covs.append((signal.T).dot(signal) / n_samples) + emp_covs.append((signal.T).dot(signal) / n_samples) + ledoit_covs.append(ledoit_estimator.fit(signal).covariance_) - input_covs = copy.copy(covs) - kinds = ["correlation", "tangent", "precision", + kinds = ["covariance", "correlation", "tangent", "precision", "partial correlation"] + + # Check outputs properties + for cov_estimator, covs in zip([EmpiricalCovariance(), LedoitWolf()], + [emp_covs, ledoit_covs]): + input_covs = copy.copy(covs) + for kind in kinds: + conn_measure = ConnectivityMeasure(kind=kind, + cov_estimator=cov_estimator) + connectivities = conn_measure.fit_transform(signals) + + # Generic + assert_true(isinstance(connectivities, np.ndarray)) + assert_equal(len(connectivities), len(covs)) + + for k, cov_new in enumerate(connectivities): + assert_array_equal(input_covs[k], covs[k]) + assert(is_spd(covs[k], decimal=7)) + + # Positive definiteness if expected and output value checks + if kind == "tangent": + assert_array_almost_equal(cov_new, cov_new.T) + gmean_sqrt = _map_eigenvalues(np.sqrt, + conn_measure.mean_) + assert(is_spd(gmean_sqrt, decimal=7)) + assert(is_spd(conn_measure.whitening_, decimal=7)) + assert_array_almost_equal(conn_measure.whitening_.dot( + gmean_sqrt), np.eye(n_features)) + assert_array_almost_equal(gmean_sqrt.dot( + _map_eigenvalues(np.exp, cov_new)).dot(gmean_sqrt), + covs[k]) + elif kind == "precision": + assert(is_spd(cov_new, decimal=7)) + assert_array_almost_equal(cov_new.dot(covs[k]), + np.eye(n_features)) + elif kind == "correlation": + assert(is_spd(cov_new, decimal=7)) + d = np.sqrt(np.diag(np.diag(covs[k]))) + if cov_estimator == EmpiricalCovariance(): + assert_array_almost_equal(d.dot(cov_new).dot(d), + covs[k]) + assert_array_almost_equal(np.diag(cov_new), + np.ones((n_features))) + elif kind == "partial correlation": + prec = linalg.inv(covs[k]) + d = np.sqrt(np.diag(np.diag(prec))) + assert_array_almost_equal(d.dot(cov_new).dot(d), -prec + + 2 * np.diag(np.diag(prec))) + + # Check the mean_ + for kind in kinds: + conn_measure = ConnectivityMeasure(kind=kind) + conn_measure.fit_transform(signals) + assert_equal((conn_measure.mean_).shape, (n_features, n_features)) + if kind != 'tangent': + assert_array_almost_equal( + conn_measure.mean_, + np.mean(conn_measure.transform(signals), axis=0)) + + # 
Check that the mean isn't modified in transform + conn_measure = ConnectivityMeasure(kind='covariance') + conn_measure.fit(signals[:1]) + mean = conn_measure.mean_ + conn_measure.transform(signals[1:]) + assert_array_equal(mean, conn_measure.mean_) + + # Check vectorization option for kind in kinds: - conn_measure = ConnectivityMeasure(kind=kind, - cov_estimator=EmpiricalCovariance()) + conn_measure = ConnectivityMeasure(kind=kind) connectivities = conn_measure.fit_transform(signals) - - # Generic - assert_true(isinstance(connectivities, np.ndarray)) - assert_equal(len(connectivities), len(covs)) - - for k, cov_new in enumerate(connectivities): - assert_array_equal(input_covs[k], covs[k]) - assert(is_spd(covs[k], decimal=7)) - - # Positive definiteness if expected and output value checks - if kind == "tangent": - assert_array_almost_equal(cov_new, cov_new.T) - gmean_sqrt = _map_eigenvalues(np.sqrt, - conn_measure.mean_) - assert(is_spd(gmean_sqrt, decimal=7)) - assert(is_spd(conn_measure.whitening_, decimal=7)) - assert_array_almost_equal(conn_measure.whitening_.dot( - gmean_sqrt), np.eye(n_features)) - assert_array_almost_equal(gmean_sqrt.dot( - _map_eigenvalues(np.exp, cov_new)).dot(gmean_sqrt), - covs[k]) - elif kind == "precision": - assert(is_spd(cov_new, decimal=7)) - assert_array_almost_equal(cov_new.dot(covs[k]), - np.eye(n_features)) - elif kind == "correlation": - assert(is_spd(cov_new, decimal=7)) - d = np.sqrt(np.diag(np.diag(covs[k]))) - assert_array_almost_equal(d.dot(cov_new).dot(d), covs[k]) - elif kind == "partial correlation": - prec = linalg.inv(covs[k]) - d = np.sqrt(np.diag(np.diag(prec))) - assert_array_almost_equal(d.dot(cov_new).dot(d), -prec + - 2 * np.diag(np.diag(prec))) + conn_measure = ConnectivityMeasure(vectorize=True, kind=kind) + vectorized_connectivities = conn_measure.fit_transform(signals) + assert_array_almost_equal(vectorized_connectivities, + sym_matrix_to_vec(connectivities)) + + # Check not fitted error + assert_raises_regex( + ValueError, 'has not been fitted. 
', + ConnectivityMeasure().inverse_transform, + vectorized_connectivities) + + # Check inverse transformation + kinds.remove('tangent') + for kind in kinds: + # without vectorization: input matrices are returned with no change + conn_measure = ConnectivityMeasure(kind=kind) + connectivities = conn_measure.fit_transform(signals) + assert_array_almost_equal( + conn_measure.inverse_transform(connectivities), connectivities) + + # with vectorization: input vectors are reshaped into matrices + # if diagonal has not been discarded + conn_measure = ConnectivityMeasure(kind=kind, vectorize=True) + vectorized_connectivities = conn_measure.fit_transform(signals) + assert_array_almost_equal( + conn_measure.inverse_transform(vectorized_connectivities), + connectivities) + + # with vectorization if diagonal has been discarded + for kind in ['correlation', 'partial correlation']: + connectivities = ConnectivityMeasure(kind=kind).fit_transform(signals) + conn_measure = ConnectivityMeasure(kind=kind, vectorize=True, + discard_diagonal=True) + vectorized_connectivities = conn_measure.fit_transform(signals) + assert_array_almost_equal( + conn_measure.inverse_transform(vectorized_connectivities), + connectivities) + + for kind in ['covariance', 'precision']: + connectivities = ConnectivityMeasure(kind=kind).fit_transform(signals) + conn_measure = ConnectivityMeasure(kind=kind, vectorize=True, + discard_diagonal=True) + vectorized_connectivities = conn_measure.fit_transform(signals) + diagonal = np.array([np.diagonal(conn) / sqrt(2) for conn in + connectivities]) + inverse_transformed = conn_measure.inverse_transform( + vectorized_connectivities, diagonal=diagonal) + assert_array_almost_equal(inverse_transformed, connectivities) + assert_raises_regex(ValueError, + 'can not reconstruct connectivity matrices', + conn_measure.inverse_transform, + vectorized_connectivities) + + # for 'tangent' kind, covariance matrices are reconstructed + # without vectorization + tangent_measure = ConnectivityMeasure(kind='tangent') + displacements = tangent_measure.fit_transform(signals) + covariances = ConnectivityMeasure(kind='covariance').fit_transform( + signals) + assert_array_almost_equal( + tangent_measure.inverse_transform(displacements), covariances) + + # with vectorization + # when diagonal has not been discarded + tangent_measure = ConnectivityMeasure(kind='tangent', vectorize=True) + vectorized_displacements = tangent_measure.fit_transform(signals) + assert_array_almost_equal( + tangent_measure.inverse_transform(vectorized_displacements), + covariances) + + # when diagonal has been discarded + tangent_measure = ConnectivityMeasure(kind='tangent', vectorize=True, + discard_diagonal=True) + vectorized_displacements = tangent_measure.fit_transform(signals) + diagonal = np.array([np.diagonal(matrix) / sqrt(2) for matrix in + displacements]) + inverse_transformed = tangent_measure.inverse_transform( + vectorized_displacements, diagonal=diagonal) + assert_array_almost_equal(inverse_transformed, covariances) + assert_raises_regex(ValueError, + 'can not reconstruct connectivity matrices', + tangent_measure.inverse_transform, + vectorized_displacements) diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py index fca6a88e7f..91c8cf2f5e 100644 --- a/nilearn/datasets/__init__.py +++ b/nilearn/datasets/__init__.py @@ -2,21 +2,56 @@ Helper functions to download NeuroImaging datasets """ -from .struct import (fetch_icbm152_2009, load_mni152_template, fetch_oasis_vbm) +from .struct import (fetch_icbm152_2009, 
load_mni152_template, + load_mni152_brain_mask, fetch_oasis_vbm, + fetch_icbm152_brain_gm_mask, + MNI152_FILE_PATH, fetch_surf_fsaverage5, + fetch_surf_fsaverage) from .func import (fetch_haxby_simple, fetch_haxby, fetch_nyu_rest, fetch_adhd, fetch_miyawaki2008, fetch_localizer_contrasts, fetch_abide_pcp, - fetch_localizer_calculation_task, fetch_mixed_gambles) + fetch_localizer_button_task, + fetch_localizer_calculation_task, fetch_mixed_gambles, + fetch_megatrawls_netmats, fetch_cobre, + fetch_surf_nki_enhanced) from .atlas import (fetch_atlas_craddock_2012, fetch_atlas_destrieux_2009, fetch_atlas_harvard_oxford, fetch_atlas_msdl, - fetch_atlas_power_2011, fetch_atlas_smith_2009, - fetch_atlas_yeo_2011, fetch_atlas_aal) + fetch_coords_power_2011, + fetch_atlas_smith_2009, + fetch_atlas_yeo_2011, fetch_atlas_aal, + fetch_atlas_basc_multiscale_2015, + fetch_coords_dosenbach_2010, + fetch_atlas_allen_2011, + fetch_atlas_surf_destrieux, + fetch_atlas_talairach, + fetch_atlas_pauli_2017) -__all__ = ['fetch_icbm152_2009', 'load_mni152_template', 'fetch_oasis_vbm', +from .utils import get_data_dirs +from .neurovault import (fetch_neurovault, + fetch_neurovault_ids, + fetch_neurovault_motor_task, + fetch_neurovault_auditory_computation_task +) + +__all__ = ['MNI152_FILE_PATH', 'fetch_icbm152_2009', 'load_mni152_template', + 'fetch_oasis_vbm', 'fetch_haxby_simple', 'fetch_haxby', 'fetch_nyu_rest', 'fetch_adhd', 'fetch_miyawaki2008', 'fetch_localizer_contrasts', + 'fetch_localizer_button_task', 'fetch_abide_pcp', 'fetch_localizer_calculation_task', 'fetch_atlas_craddock_2012', 'fetch_atlas_destrieux_2009', 'fetch_atlas_harvard_oxford', 'fetch_atlas_msdl', - 'fetch_atlas_power_2011', 'fetch_atlas_smith_2009', - 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal'] + 'fetch_coords_power_2011', + 'fetch_atlas_smith_2009', + 'fetch_atlas_allen_2011', + 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal', + 'fetch_megatrawls_netmats', 'fetch_cobre', + 'fetch_surf_nki_enhanced', 'fetch_surf_fsaverage5', + 'fetch_surf_fsaverage', + 'fetch_atlas_basc_multiscale_2015', 'fetch_coords_dosenbach_2010', + 'fetch_neurovault', 'fetch_neurovault_ids', + 'fetch_neurovault_motor_task', + 'fetch_neurovault_auditory_computation_task', + 'load_mni152_brain_mask', 'fetch_icbm152_brain_gm_mask', + 'fetch_atlas_surf_destrieux', 'fetch_atlas_talairach', + 'get_data_dirs'] diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index c2dd3ef558..98ed3e88ff 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -2,18 +2,22 @@ Downloading NeuroImaging datasets: atlas datasets """ import os +import warnings import xml.etree.ElementTree -import numpy as np -from scipy import ndimage +from tempfile import mkdtemp +import json +import shutil +import nibabel as nb +import numpy as np from sklearn.datasets.base import Bunch -#from . import utils from .utils import _get_dataset_dir, _fetch_files, _get_dataset_descr - from .._utils import check_niimg -from ..image import new_img_like from .._utils.compat import _basestring +from ..image import new_img_like + +_TALAIRACH_LEVELS = ['hemisphere', 'lobe', 'gyrus', 'tissue', 'ba'] def fetch_atlas_craddock_2012(data_dir=None, url=None, resume=True, verbose=1): @@ -147,14 +151,17 @@ def fetch_atlas_destrieux_2009(lateralized=True, data_dir=None, url=None, def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, symmetric_split=False, resume=True, verbose=1): - """Load Harvard-Oxford parcellation from FSL if installed or download it. 
+ """Load Harvard-Oxford parcellations from FSL. - This function looks up for Harvard Oxford atlas in the system and load it - if present. If not, it downloads it and stores it in NILEARN_DATA - directory. + This function downloads Harvard Oxford atlas packaged from FSL 5.0 + and stores atlases in NILEARN_DATA folder in home directory. + + This function can also load Harvard Oxford atlas from your local directory + specified by your FSL installed path given in `data_dir` argument. + See documentation for details. Parameters - ========== + ---------- atlas_name: string Name of atlas to load. Can be: cort-maxprob-thr0-1mm, cort-maxprob-thr0-2mm, @@ -167,16 +174,23 @@ def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, sub-prob-1mm, sub-prob-2mm data_dir: string, optional - Path of data directory. It can be FSL installation directory - (which is dependent on your installation). - - symmetric_split: bool, optional - If True, split every symmetric region in left and right parts. - Effectively doubles the number of regions. Default: False. - Not implemented for probabilistic atlas (*-prob-* atlases) + Path of data directory where data will be stored. Optionally, + it can also be a FSL installation directory (which is dependent + on your installation). + Example, if FSL is installed in /usr/share/fsl/ then + specifying as '/usr/share/' can get you Harvard Oxford atlas + from your installed directory. Since we mimic same root directory + as FSL to load it easily from your installation. + + symmetric_split: bool, optional, (default False). + If True, lateralized atlases of cort or sub with maxprob will be + returned. For subcortical types (sub-maxprob), we split every + symmetric region in left and right parts. Effectively doubles the + number of regions. + NOTE Not implemented for full probabilistic atlas (*-prob-* atlases). Returns - ======= + ------- data: sklearn.datasets.base.Bunch dictionary-like object, keys are: @@ -199,28 +213,32 @@ def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, "among:\n{1}".format( atlas_name, '\n'.join(atlas_items))) - url = 'http://www.nitrc.org/frs/download.php/7700/HarvardOxford.tgz' + url = 'http://www.nitrc.org/frs/download.php/9902/HarvardOxford.tgz' # For practical reasons, we mimic the FSL data directory here. 
dataset_name = 'fsl' - # Environment variables - default_paths = [] - for env_var in ['FSL_DIR', 'FSLDIR']: - path = os.getenv(env_var) - if path is not None: - default_paths.extend(path.split(':')) data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, - default_paths=default_paths, verbose=verbose) + verbose=verbose) opts = {'uncompress': True} root = os.path.join('data', 'atlases') - atlas_file = os.path.join(root, 'HarvardOxford', - 'HarvardOxford-' + atlas_name + '.nii.gz') + if atlas_name[0] == 'c': - label_file = 'HarvardOxford-Cortical.xml' + if 'cort-maxprob' in atlas_name and symmetric_split: + split_name = atlas_name.split('cort') + atlas_name = 'cortl' + split_name[1] + label_file = 'HarvardOxford-Cortical-Lateralized.xml' + lateralized = True + else: + label_file = 'HarvardOxford-Cortical.xml' + lateralized = False else: label_file = 'HarvardOxford-Subcortical.xml' + lateralized = False label_file = os.path.join(root, label_file) + atlas_file = os.path.join(root, 'HarvardOxford', + 'HarvardOxford-' + atlas_name + '.nii.gz') + atlas_img, label_file = _fetch_files( data_dir, [(atlas_file, url, opts), (label_file, url, opts)], @@ -231,7 +249,7 @@ def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, names[0] = 'Background' for label in ElementTree.parse(label_file).findall('.//label'): names[int(label.get('index')) + 1] = label.text - names = np.asarray(list(names.values())) + names = list(names.values()) if not symmetric_split: return Bunch(maps=atlas_img, labels=names) @@ -242,38 +260,43 @@ def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, "atlases") atlas_img = check_niimg(atlas_img) + if lateralized: + return Bunch(maps=atlas_img, labels=names) + atlas = atlas_img.get_data() labels = np.unique(atlas) - # ndimage.find_objects output contains None elements for labels - # that do not exist - found_slices = (s for s in ndimage.find_objects(atlas) - if s is not None) + # Build a mask of both halves of the brain middle_ind = (atlas.shape[0] - 1) // 2 - crosses_middle = [s.start < middle_ind and s.stop > middle_ind - for s, _, _ in found_slices] - - # Split every zone crossing the median plane into two parts. - # Assumes that the background label is zero. - half = np.zeros(atlas.shape, dtype=np.bool) - half[:middle_ind, ...] = True - new_label = max(labels) + 1 # Put zeros on the median plane atlas[middle_ind, ...] = 0 - for label, crosses in zip(labels[1:], crosses_middle): - if not crosses: - continue - atlas[np.logical_and(atlas == label, half)] = new_label - new_label += 1 + # Split every zone crossing the median plane into two parts. + left_atlas = atlas.copy() + left_atlas[middle_ind:, ...] = 0 + right_atlas = atlas.copy() + right_atlas[:middle_ind, ...] = 0 - # Duplicate labels for right and left + new_label = 0 + new_atlas = atlas.copy() + # Assumes that the background label is zero. 
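+ # (A region contributing fewer than 5% of its voxels to one side of + # the median plane is treated as unilateral and keeps a single label; + # only regions genuinely straddling the plane receive ', left part' / + # ', right part' names below.)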
new_names = [names[0]] - for n in names[1:]: - new_names.append(n + ', right part') - for n in names[1:]: - new_names.append(n + ', left part') + for label, name in zip(labels[1:], names[1:]): + new_label += 1 + left_elements = (left_atlas == label).sum() + right_elements = (right_atlas == label).sum() + n_elements = float(left_elements + right_elements) + if (left_elements / n_elements < 0.05 or + right_elements / n_elements < 0.05): + new_atlas[atlas == label] = new_label + new_names.append(name) + continue + new_atlas[right_atlas == label] = new_label + new_names.append(name + ', left part') + new_label += 1 + new_atlas[left_atlas == label] = new_label + new_names.append(name + ', right part') - atlas_img = new_img_like(atlas_img, atlas, atlas_img.get_affine()) + atlas_img = new_img_like(atlas_img, new_atlas, atlas_img.affine) return Bunch(maps=atlas_img, labels=new_names) @@ -294,8 +317,14 @@ def fetch_atlas_msdl(data_dir=None, url=None, resume=True, verbose=1): ------- data: sklearn.datasets.base.Bunch Dictionary-like object, the interest attributes are : - - 'labels': str. Path to csv file containing labels. - - 'maps': str. path to nifti file containing regions definition. + + - 'maps': str, path to nifti file containing regions definition. + - 'labels': string list containing the labels of the regions. + - 'region_coords': tuple list (x, y, z) containing coordinates + of each region in MNI space. + - 'networks': string list containing names of the networks. + - 'description': description of the atlas. + + References ---------- @@ -325,12 +354,21 @@ data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) files = _fetch_files(data_dir, files, resume=resume, verbose=verbose) + csv_data = np.recfromcsv(files[0]) + labels = [name.strip() for name in csv_data['name'].tolist()] + labels = [label.decode("utf-8") for label in labels] + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', module='numpy', + category=FutureWarning) + region_coords = csv_data[['x', 'y', 'z']].tolist() + net_names = [net_name.strip() for net_name in csv_data['net_name'].tolist()] fdescr = _get_dataset_descr(dataset_name) - return Bunch(labels=files[0], maps=files[1], description=fdescr) + return Bunch(maps=files[1], labels=labels, region_coords=region_coords, + networks=net_names, description=fdescr) -def fetch_atlas_power_2011(): +def fetch_coords_power_2011(): """Download and load the Power et al. brain atlas composed of 264 ROIs. Returns @@ -364,9 +402,9 @@ Path of the data directory. Used to force data storage in a non- standard location. Default: None (meaning: default) mirror: string, optional - By default, the dataset is downloaded from the original website of the atlas. - Specifying "nitrc" will force download from a mirror, with potentially - higher bandwith. + By default, the dataset is downloaded from the original website of the + atlas. Specifying "nitrc" will force download from a mirror, with + potentially higher bandwidth. url: string, optional Download URL of the dataset. Overwrite the default URL.
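For context, a minimal sketch of how the reworked MSDL bunch is meant to be consumed (attribute names as introduced in this diff; assumes a nilearn installation that includes these changes):

    from nilearn import datasets

    # Fetch the MSDL atlas; the returned Bunch now exposes parsed metadata
    # instead of a raw path to the CSV file.
    msdl = datasets.fetch_atlas_msdl()
    print(msdl.maps)               # path to the Nifti defining the regions
    print(msdl.labels[:3])         # first three region names
    print(msdl.region_coords[:3])  # matching (x, y, z) MNI coordinates
    print(msdl.networks[:3])       # network name of each region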
@@ -405,11 +443,11 @@ Notes ----- For more information about this dataset's structure: - http://www.fmrib.ox.ac.uk/analysis/brainmap+rsns/ + http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/ """ if url is None: if mirror == 'origin': - url = "http://www.fmrib.ox.ac.uk/analysis/brainmap+rsns/" + url = "http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/" elif mirror == 'nitrc': url = [ 'https://www.nitrc.org/frs/download.php/7730/', @@ -562,10 +600,9 @@ def fetch_atlas_aal(version='SPM12', data_dir=None, url=None, resume=True, data: sklearn.datasets.base.Bunch dictionary-like object, keys are: - - "regions": str. path to nifti file containing regions. + - "maps": str. path to nifti file containing regions. - - "labels": dict. labels dictionary with their region id as key and - name as value + - "labels": list of the names of the regions Notes ----- @@ -608,11 +645,543 @@ # We return the labels contained in the xml file as a dictionary xml_tree = xml.etree.ElementTree.parse(labels_file) root = xml_tree.getroot() - labels_dict = {} + labels = [] + indices = [] for label in root.getiterator('label'): - labels_dict[label.find('index').text] = label.find('name').text + indices.append(label.find('index').text) + labels.append(label.find('name').text) - params = {'description': fdescr, 'regions': atlas_img, - 'labels': labels_dict} + params = {'description': fdescr, 'maps': atlas_img, + 'labels': labels, 'indices': indices} return Bunch(**params) + + +def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, + resume=True, verbose=1): + """Downloads and loads multiscale functional brain parcellations + + This atlas includes group brain parcellations generated from + resting-state functional magnetic resonance images from about + 200 young healthy subjects. + + Multiple scales (number of networks) are available, among + 7, 12, 20, 36, 64, 122, 197, 325, 444. The brain parcellations + have been generated using a method called bootstrap analysis of + stable clusters (BASC; Bellec et al., 2010), and the + scales have been selected using a data-driven method called MSTEPS + (Bellec, 2013). + + Note that two versions of the template are available, 'sym' or 'asym'. + The 'asym' type contains brain images that have been registered in the + asymmetric version of the MNI brain template (reflecting that the brain + is asymmetric), while the 'sym' type contains images registered in the + symmetric version of the MNI template. The symmetric template has been + forced to be symmetric anatomically, and is therefore ideally suited to + study homotopic functional connections in fMRI: finding homotopic regions + simply consists of flipping the x-axis of the template. + + .. versionadded:: 0.2.3 + + Parameters + ---------- + version: str, optional + Available versions are 'sym' or 'asym'. By default all scales of + brain parcellations of version 'sym' will be returned. + + data_dir: str, optional + directory where data should be downloaded and unpacked. + + resume: bool + whether to resume the download of a partly-downloaded file. + + verbose: int + verbosity level (0 means no message).
+ + Returns + ------- + data: sklearn.datasets.base.Bunch + dictionary-like object, Keys are: + + - "scale007", "scale012", "scale020", "scale036", "scale064", + "scale122", "scale197", "scale325", "scale444": str, path + to Nifti file of various scales of brain parcellations. + + - "description": details about the data release. + + References + ---------- + Bellec P, Rosa-Neto P, Lyttelton OC, Benali H, Evans AC, Jul. 2010. + Multi-level bootstrap analysis of stable clusters in resting-state fMRI. + NeuroImage 51 (3), 1126-1139. + URL http://dx.doi.org/10.1016/j.neuroimage.2010.02.082 + + Bellec P, Jun. 2013. Mining the Hierarchy of Resting-State Brain Networks: + Selection of Representative Clusters in a Multiscale Structure. + Pattern Recognition in Neuroimaging (PRNI), 2013 pp. 54-57. + + Notes + ----- + For more information on this dataset's structure, see + https://figshare.com/articles/basc/1285615 + """ + versions = ['sym', 'asym'] + if version not in versions: + raise ValueError('The version of Brain parcellations requested "%s" ' + 'does not exist. Please choose one among them %s.' % + (version, str(versions))) + + keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064', + 'scale122', 'scale197', 'scale325', 'scale444'] + + if version == 'sym': + url = "https://ndownloader.figshare.com/files/1861819" + elif version == 'asym': + url = "https://ndownloader.figshare.com/files/1861820" + opts = {'uncompress': True} + + dataset_name = "basc_multiscale_2015" + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, + verbose=verbose) + + folder_name = 'template_cambridge_basc_multiscale_nii_' + version + basenames = ['template_cambridge_basc_multiscale_' + version + + '_' + key + '.nii.gz' for key in keys] + + filenames = [(os.path.join(folder_name, basename), url, opts) + for basename in basenames] + data = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose) + + descr = _get_dataset_descr(dataset_name) + + params = dict(zip(keys, data)) + params['description'] = descr + + return Bunch(**params) + + +def fetch_coords_dosenbach_2010(ordered_regions=True): + """Load the Dosenbach et al. 160 ROIs. These ROIs cover + much of the cerebral cortex and cerebellum and are assigned to 6 + networks. + + Parameters + ---------- + ordered_regions : bool, optional + ROIs from same networks are grouped together and ordered with respect + to their names and their locations (anterior to posterior). + + Returns + ------- + data: sklearn.datasets.base.Bunch + dictionary-like object, contains: + - "rois": coordinates of 160 ROIs in MNI space + - "labels": ROIs labels + - "networks": networks names + + References + ---------- + Dosenbach N.U., Nardos B., et al. "Prediction of individual brain maturity + using fMRI.", 2010, Science 329, 1358-1361. 
+ """ + dataset_name = 'dosenbach_2010' + fdescr = _get_dataset_descr(dataset_name) + package_directory = os.path.dirname(os.path.abspath(__file__)) + csv = os.path.join(package_directory, "data", "dosenbach_2010.csv") + out_csv = np.recfromcsv(csv) + + if ordered_regions: + out_csv = np.sort(out_csv, order=['network', 'name', 'y']) + + # We add the ROI number to its name, since names are not unique + names = out_csv['name'] + numbers = out_csv['number'] + labels = np.array(['{0} {1}'.format(name, number) for (name, number) in + zip(names, numbers)]) + params = dict(rois=out_csv[['x', 'y', 'z']], + labels=labels, + networks=out_csv['network'], description=fdescr) + + return Bunch(**params) + + +def fetch_atlas_allen_2011(data_dir=None, url=None, resume=True, verbose=1): + """Download and return file names for the Allen and MIALAB ICA atlas + (dated 2011). + + The provided images are in MNI152 space. + + Parameters + ---------- + data_dir: str, optional + directory where data should be downloaded and unpacked. + url: str, optional + url of file to download. + resume: bool + whether to resumed download of a partly-downloaded file. + verbose: int + verbosity level (0 means no message). + + Returns + ------- + data: sklearn.datasets.base.Bunch + dictionary-like object, keys are: + + - "maps": T-maps of all 75 unthresholded components. + - "rsn28": T-maps of 28 RSNs included in E. Allen et al. + - "networks": string list containing the names for the 28 RSNs. + - "rsn_indices": dict[rsn_name] -> list of int, indices in the "maps" + file of the 28 RSNs. + - "comps": The aggregate ICA Components. + - "description": details about the data release. + + References + ---------- + E. Allen, et al, "A baseline for the multivariate comparison of resting + state networks," Frontiers in Systems Neuroscience, vol. 5, p. 12, 2011. + + Notes + ----- + Licence: unknown + + See http://mialab.mrn.org/data/index.html for more information + on this dataset. + """ + if url is None: + url = "http://mialab.mrn.org/data/hcp/" + + dataset_name = "allen_rsn_2011" + keys = ("maps", + "rsn28", + "comps") + + opts = {} + files = ["ALL_HC_unthresholded_tmaps.nii", + "RSN_HC_unthresholded_tmaps.nii", + "rest_hcp_agg__component_ica_.nii"] + + labels = [('Basal Ganglia', [21]), + ('Auditory', [17]), + ('Sensorimotor', [7, 23, 24, 38, 56, 29]), + ('Visual', [46, 64, 67, 48, 39, 59]), + ('Default-Mode', [50, 53, 25, 68]), + ('Attentional', [34, 60, 52, 72, 71, 55]), + ('Frontal', [42, 20, 47, 49])] + + networks = [[name] * len(idxs) for name, idxs in labels] + + filenames = [(f, url + f, opts) for f in files] + + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, + verbose=verbose) + sub_files = _fetch_files(data_dir, filenames, resume=resume, + verbose=verbose) + + fdescr = _get_dataset_descr(dataset_name) + + params = [('description', fdescr), + ('rsn_indices', labels), + ('networks', networks)] + params.extend(list(zip(keys, sub_files))) + + return Bunch(**dict(params)) + + +def fetch_atlas_surf_destrieux(data_dir=None, url=None, + resume=True, verbose=1): + """Download and load Destrieux et al, 2010 cortical atlas. + + This atlas returns 76 labels per hemisphere based on sulco-gryal pattnerns + as distributed with Freesurfer in fsaverage5 surface space. + + .. versionadded:: 0.3 + + Parameters + ---------- + data_dir: str, optional + Path of the data directory. Use to force data storage in a non- + standard location. Default: None + + url: str, optional + Download URL of the dataset. Overwrite the default URL. 
+ + resume: bool, optional (default True) + If True, try resuming download if possible. + + verbose: int, optional (default 1) + Defines the level of verbosity of the output. + + Returns + ------- + data: sklearn.datasets.base.Bunch + dictionary-like object, contains: + + - "labels": list + Contains region labels + + - "map_left": numpy.ndarray + Index into 'labels' for each vertex on the + left hemisphere of the fsaverage5 surface + + - "map_right": numpy.ndarray + Index into 'labels' for each vertex on the + right hemisphere of the fsaverage5 surface + + - "description": str + Details about the dataset + + + References + ---------- + Destrieux et al. (2010), Automatic parcellation of human cortical gyri and + sulci using standard anatomical nomenclature. NeuroImage 53, 1-15. + """ + + if url is None: + url = "https://www.nitrc.org/frs/download.php/" + + dataset_name = 'destrieux_surface' + fdescr = _get_dataset_descr(dataset_name) + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, + verbose=verbose) + + # Download annot files, fsaverage surfaces and sulcal information + annot_file = '%s.aparc.a2009s.annot' + annot_url = url + '%i/%s.aparc.a2009s.annot' + annot_nids = {'lh annot': 9343, 'rh annot': 9342} + + annots = [] + for hemi in [('lh', 'left'), ('rh', 'right')]: + + annot = _fetch_files(data_dir, + [(annot_file % (hemi[1]), + annot_url % (annot_nids['%s annot' % hemi[0]], + hemi[0]), + {'move': annot_file % (hemi[1])})], + resume=resume, verbose=verbose)[0] + annots.append(annot) + + annot_left = nb.freesurfer.read_annot(annots[0]) + annot_right = nb.freesurfer.read_annot(annots[1]) + + return Bunch(labels=annot_left[2], map_left=annot_left[0], + map_right=annot_right[0], description=fdescr) + + +def _separate_talairach_levels(atlas_img, labels, verbose=1): + """Separate the multiple annotation levels in talairach raw atlas. + + The Talairach atlas has five levels of annotation: hemisphere, lobe, gyrus, + tissue, brodmann area. They are mixed up in the original atlas: each label + in the atlas corresponds to a 5-tuple containing, for each of these levels, + a value or the string '*' (meaning undefined, background). + + This function disentangles the levels, and stores each on an octet in an + int64 image (the level with the most labels, ba, has 72 labels). + This way, any subset of these levels can be accessed by applying a bitwise + mask. + + In the created image, the least significant octet contains the hemisphere, + the next one the lobe, then gyrus, tissue, and ba. Background is 0. + The labels contain + [('level name', ['labels', 'for', 'this', 'level' ...]), ...], + where the levels are in the order mentioned above. + + The label '*' is replaced by 'Background' for clarity.
+ + """ + labels = np.asarray(labels) + if verbose: + print( + 'Separating talairach atlas levels: {}'.format(_TALAIRACH_LEVELS)) + levels = [] + new_img = np.zeros(atlas_img.shape, dtype=np.int64) + for pos, level in enumerate(_TALAIRACH_LEVELS): + if verbose: + print(level) + level_img = np.zeros(atlas_img.shape, dtype=np.int64) + level_labels = {'*': 0} + for region_nb, region in enumerate(labels[:, pos]): + level_labels.setdefault(region, len(level_labels)) + level_img[atlas_img.get_data() == region_nb] = level_labels[ + region] + # shift this level to its own octet and add it to the new image + level_img <<= 8 * pos + new_img |= level_img + # order the labels so that image values are indices in the list of + # labels for each level + level_labels = list(list( + zip(*sorted(level_labels.items(), key=lambda t: t[1])))[0]) + # rename '*' -> 'Background' + level_labels[0] = 'Background' + levels.append((level, level_labels)) + new_img = new_img_like(atlas_img, data=new_img) + return new_img, levels + + +def _get_talairach_all_levels(data_dir=None, verbose=1): + """Get the path to Talairach atlas and labels + + The atlas is downloaded and the files are created if necessary. + + The image contains all five levels of the atlas, each encoded on 8 bits + (least significant octet contains the hemisphere, the next one the lobe, + then gyrus, tissue, and ba). + + The labels json file contains + [['level name', ['labels', 'for', 'this', 'level' ...]], ...], + where the levels are in the order mentionned above. + + """ + data_dir = _get_dataset_dir( + 'talairach_atlas', data_dir=data_dir, verbose=verbose) + img_file = os.path.join(data_dir, 'talairach.nii') + labels_file = os.path.join(data_dir, 'talairach_labels.json') + if os.path.isfile(img_file) and os.path.isfile(labels_file): + return img_file, labels_file + atlas_url = 'http://www.talairach.org/talairach.nii' + temp_dir = mkdtemp() + try: + temp_file = _fetch_files( + temp_dir, [('talairach.nii', atlas_url, {})], verbose=verbose)[0] + atlas_img = nb.load(temp_file, mmap=False) + atlas_img = check_niimg(atlas_img) + finally: + shutil.rmtree(temp_dir) + labels = atlas_img.header.extensions[0].get_content() + labels = labels.strip().decode('utf-8').split('\n') + labels = [l.split('.') for l in labels] + new_img, level_labels = _separate_talairach_levels( + atlas_img, labels, verbose=verbose) + new_img.to_filename(img_file) + with open(labels_file, 'w') as fp: + json.dump(level_labels, fp) + return img_file, labels_file + + +def fetch_atlas_talairach(level_name, data_dir=None, verbose=1): + """Download the Talairach atlas. + + .. versionadded:: 0.4.0 + + Parameters + ---------- + level_name : {'hemisphere', 'lobe', 'gyrus', 'tissue', 'ba'} + Which level of the atlas to use: the hemisphere, the lobe, the gyrus, + the tissue type or the Brodmann area. + + data_dir : str, optional (default=None) + Path of the data directory. Used to force data storage in a specified + location. + + verbose : int + verbosity level (0 means no message). + + Returns + ------- + sklearn.datasets.base.Bunch + Dictionary-like object, contains: + + - maps: 3D Nifti image, values are indices in the list of labels. + - labels: list of strings. Starts with 'Background'. + - description: a short description of the atlas and some references. 
+ + References + ---------- + http://talairach.org/about.html#Labels + + `Lancaster JL, Woldorff MG, Parsons LM, Liotti M, Freitas CS, Rainey L, + Kochunov PV, Nickerson D, Mikiten SA, Fox PT, "Automated Talairach Atlas + labels for functional brain mapping". Human Brain Mapping 10:120-131, + 2000.` + + `Lancaster JL, Rainey LH, Summerlin JL, Freitas CS, Fox PT, Evans AC, Toga + AW, Mazziotta JC. Automated labeling of the human brain: A preliminary + report on the development and evaluation of a forward-transform method. Hum + Brain Mapp 5, 238-242, 1997.` + """ + if level_name not in _TALAIRACH_LEVELS: + raise ValueError('"level_name" should be one of {}'.format( + _TALAIRACH_LEVELS)) + position = _TALAIRACH_LEVELS.index(level_name) + atlas_file, labels_file = _get_talairach_all_levels(data_dir, verbose) + atlas_img = check_niimg(atlas_file) + with open(labels_file) as fp: + labels = json.load(fp)[position][1] + level_data = (atlas_img.get_data() >> 8 * position) & 255 + atlas_img = new_img_like(atlas_img, data=level_data) + description = _get_dataset_descr( + 'talairach_atlas').decode('utf-8').format(level_name) + return Bunch(maps=atlas_img, labels=labels, description=description) + + +def fetch_atlas_pauli_2017(version='prob', data_dir=None, verbose=1): + """Download the Pauli et al. (2017) atlas, with a total of + 12 subcortical nodes. + + Parameters + ---------- + + version: str, optional (default='prob') + Which version of the atlas should be downloaded. This can be 'prob' + for the probabilistic atlas or 'labels' for the deterministic atlas. + + data_dir : str, optional (default=None) + Path of the data directory. Used to force data storage in a specified + location. + + verbose : int + verbosity level (0 means no message). + + Returns + ------- + sklearn.datasets.base.Bunch + Dictionary-like object, contains: + + - maps: 3D Nifti image, values are indices in the list of labels. + - labels: list of strings. Starts with 'Background'. + - description: a short description of the atlas and some references. + + References + ---------- + https://osf.io/r2hvk/ + + `Pauli, W. M., Nili, A. N., & Tyszka, J. M. (2018). A high-resolution + probabilistic in vivo atlas of human subcortical brain nuclei. + Scientific Data, 5, 180063-13.
http://doi.org/10.1038/sdata.2018.63`` + """ + + if version == 'prob': + url_maps = 'https://osf.io/w8zq2/download' + filename = 'pauli_2017_labels.nii.gz' + elif version == 'labels': + url_maps = 'https://osf.io/5mqfx/download' + filename = 'pauli_2017_prob.nii.gz' + else: + raise NotImplementedError('{} is no valid version for '.format(version) + \ + 'the Pauli atlas') + + url_labels = 'https://osf.io/6qrcb/download' + dataset_name = 'pauli_2017' + + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, + verbose=verbose) + + files = [(filename, + url_maps, + {'move':filename}), + ('labels.txt', + url_labels, + {'move':'labels.txt'})] + atlas_file, labels = _fetch_files(data_dir, files) + + labels = np.loadtxt(labels, dtype=str)[:, 1].tolist() + + fdescr = _get_dataset_descr(dataset_name) + + return Bunch(maps=atlas_file, + labels=labels, + description=fdescr) diff --git a/nilearn/datasets/data/dosenbach_2010.csv b/nilearn/datasets/data/dosenbach_2010.csv new file mode 100644 index 0000000000..e6de285980 --- /dev/null +++ b/nilearn/datasets/data/dosenbach_2010.csv @@ -0,0 +1,161 @@ +number,x,y,z,name,network +1,6,64,3,vmPFC,default +2,29,57,18,aPFC,fronto-parietal +3,-29,57,10,aPFC,fronto-parietal +4,0,51,32,mPFC,default +5,-25,51,27,aPFC,default +6,9,51,16,vmPFC,default +7,-6,50,-1,vmPFC,default +8,27,49,26,aPFC,cingulo-opercular +9,42,48,-3,vent aPFC,fronto-parietal +10,-43,47,2,vent aPFC,fronto-parietal +11,-11,45,17,vmPFC,default +12,39,42,16,vlPFC,fronto-parietal +13,8,42,-5,vmPFC,default +14,9,39,20,ACC,default +15,46,39,-15,vlPFC,default +16,40,36,29,dlPFC,fronto-parietal +17,23,33,47,sup frontal,default +18,34,32,7,vPFC,cingulo-opercular +19,-2,30,27,ACC,cingulo-opercular +20,-16,29,54,sup frontal,default +21,-1,28,40,ACC,fronto-parietal +22,46,28,31,dlPFC,fronto-parietal +23,-52,28,17,vPFC,fronto-parietal +24,-44,27,33,dlPFC,fronto-parietal +25,51,23,8,vFC,cingulo-opercular +26,38,21,-1,ant insula,cingulo-opercular +27,9,20,34,dACC,cingulo-opercular +28,-36,18,2,ant insula,cingulo-opercular +29,40,17,40,dFC,fronto-parietal +30,-6,17,34,basal ganglia,cingulo-opercular +31,0,15,45,mFC,cingulo-opercular +32,58,11,14,frontal,sensorimotor +33,-46,10,14,vFC,cingulo-opercular +34,44,8,34,dFC,fronto-parietal +35,60,8,34,dFC,sensorimotor +36,-42,7,36,dFC,fronto-parietal +37,-55,7,23,vFC,sensorimotor +38,-20,6,7,basal ganglia,cingulo-opercular +39,14,6,7,basal ganglia,cingulo-opercular +40,-48,6,1,vFC,cingulo-opercular +41,10,5,51,pre-SMA,sensorimotor +42,43,1,12,vFC,sensorimotor +43,0,-1,52,SMA,sensorimotor +44,37,-2,-3,mid insula,cingulo-opercular +45,53,-3,32,frontal,sensorimotor +46,58,-3,17,precentral gyrus,sensorimotor +47,-12,-3,13,thalamus,cingulo-opercular +48,-42,-3,11,mid insula,sensorimotor +49,-44,-6,49,precentral gyrus,sensorimotor +50,-26,-8,54,parietal,sensorimotor +51,46,-8,24,precentral gyrus,sensorimotor +52,-54,-9,23,precentral gyrus,sensorimotor +53,44,-11,38,precentral gyrus,sensorimotor +54,-47,-12,36,parietal,sensorimotor +55,33,-12,16,mid insula,sensorimotor +56,-36,-12,15,mid insula,sensorimotor +57,-12,-12,6,thalamus,cingulo-opercular +58,11,-12,6,thalamus,cingulo-opercular +59,32,-12,2,mid insula,cingulo-opercular +60,59,-13,8,temporal,sensorimotor +61,-30,-14,1,mid insula,cingulo-opercular +62,-38,-15,59,parietal,sensorimotor +63,52,-15,-13,inf temporal,default +64,-47,-18,50,parietal,sensorimotor +65,46,-20,45,parietal,sensorimotor +66,-55,-22,38,parietal,sensorimotor +67,-54,-22,22,precentral gyrus,sensorimotor +68,-54,-22,9,temporal,sensorimotor 
+69,41,-23,55,parietal,sensorimotor +70,42,-24,17,post insula,sensorimotor +71,11,-24,2,basal ganglia,cingulo-opercular +72,-59,-25,-15,inf temporal,default +73,1,-26,31,post cingulate,default +74,18,-27,62,parietal,sensorimotor +75,-38,-27,60,parietal,sensorimotor +76,-30,-28,9,post insula,cingulo-opercular +77,-24,-30,64,parietal,sensorimotor +78,51,-30,5,temporal,cingulo-opercular +79,-41,-31,48,post parietal,sensorimotor +80,-4,-31,-4,post cingulate,cingulo-opercular +81,54,-31,-18,fusiform,cingulo-opercular +82,-41,-37,16,temporal,sensorimotor +83,-53,-37,13,temporal,sensorimotor +84,28,-37,-15,fusiform,default +85,-3,-38,45,precuneus,default +86,34,-39,65,sup parietal,sensorimotor +87,8,-40,50,precuneus,cingulo-opercular +88,-41,-40,42,IPL,fronto-parietal +89,58,-41,20,parietal,cingulo-opercular +90,-8,-41,3,post cingulate,default +91,-61,-41,-2,inf temporal,default +92,-28,-42,-11,occipital,default +93,-5,-43,25,post cingulate,default +94,9,-43,25,precuneus,default +95,43,-43,8,temporal,cingulo-opercular +96,54,-44,43,IPL,fronto-parietal +97,-55,-44,30,parietal,cingulo-opercular +98,-28,-44,-25,lat cerebellum,cerebellum +99,-35,-46,48,post parietal,fronto-parietal +100,42,-46,21,sup temporal,cingulo-opercular +101,-48,-47,49,IPL,fronto-parietal +102,-41,-47,29,angular gyrus,cingulo-opercular +103,-59,-47,11,temporal,cingulo-opercular +104,-53,-50,39,IPL,fronto-parietal +105,5,-50,33,precuneus,default +106,-18,-50,1,occipital,occipital +107,44,-52,47,IPL,fronto-parietal +108,-5,-52,17,post cingulate,default +109,-24,-54,-21,lat cerebellum,cerebellum +110,-37,-54,-37,inf cerebellum,cerebellum +111,10,-55,17,post cingulate,default +112,-6,-56,29,precuneus,default +113,-34,-57,-24,lat cerebellum,cerebellum +114,-32,-58,46,IPS,fronto-parietal +115,-11,-58,17,post cingulate,default +116,32,-59,41,IPS,fronto-parietal +117,51,-59,34,angular gyrus,default +118,-34,-60,-5,occipital,occipital +119,36,-60,-8,occipital,occipital +120,-6,-60,-15,med cerebellum,cerebellum +121,-25,-60,-34,inf cerebellum,cerebellum +122,32,-61,-31,inf cerebellum,cerebellum +123,46,-62,5,temporal,occipital +124,-48,-63,35,angular gyrus,default +125,-52,-63,15,TPJ,cingulo-opercular +126,-44,-63,-7,occipital,occipital +127,-16,-64,-21,med cerebellum,cerebellum +128,21,-64,-22,lat cerebellum,cerebellum +129,19,-66,-1,occipital,occipital +130,1,-66,-24,med cerebellum,cerebellum +131,-34,-67,-29,inf cerebellum,cerebellum +132,11,-68,42,precuneus,default +133,17,-68,20,occipital,occipital +134,-36,-69,40,IPS,default +135,39,-71,13,occipital,occipital +136,-9,-72,41,occipital,default +137,45,-72,29,occipital,default +138,-11,-72,-14,med cerebellum,cerebellum +139,29,-73,29,occipital,occipital +140,33,-73,-30,inf cerebellum,cerebellum +141,-2,-75,32,occipital,default +142,-29,-75,28,occipital,occipital +143,5,-75,-11,med cerebellum,cerebellum +144,14,-75,-21,med cerebellum,cerebellum +145,-16,-76,33,occipital,occipital +146,-42,-76,26,occipital,default +147,9,-76,14,occipital,occipital +148,15,-77,32,occipital,occipital +149,20,-78,-2,occipital,occipital +150,-21,-79,-33,inf cerebellum,cerebellum +151,-6,-79,-33,inf cerebellum,cerebellum +152,-5,-80,9,post occipital,occipital +153,29,-81,14,post occipital,occipital +154,33,-81,-2,post occipital,occipital +155,18,-81,-33,inf cerebellum,cerebellum +156,-37,-83,-2,post occipital,occipital +157,-29,-88,8,post occipital,occipital +158,13,-91,2,post occipital,occipital +159,27,-91,2,post occipital,occipital +160,-4,-94,12,post occipital,occipital diff --git 
a/doc/sphinxext/numpy_ext/__init__.py b/nilearn/datasets/data/fsaverage5/__init__.py similarity index 100% rename from doc/sphinxext/numpy_ext/__init__.py rename to nilearn/datasets/data/fsaverage5/__init__.py diff --git a/nilearn/datasets/data/fsaverage5/pial.left.gii.gz b/nilearn/datasets/data/fsaverage5/pial.left.gii.gz new file mode 100644 index 0000000000..b5db6f4085 Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/pial.left.gii.gz differ diff --git a/nilearn/datasets/data/fsaverage5/pial.right.gii.gz b/nilearn/datasets/data/fsaverage5/pial.right.gii.gz new file mode 100644 index 0000000000..16c0b350c7 Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/pial.right.gii.gz differ diff --git a/nilearn/datasets/data/fsaverage5/pial_inflated.left.gii.gz b/nilearn/datasets/data/fsaverage5/pial_inflated.left.gii.gz new file mode 100644 index 0000000000..0f2c47edb9 Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/pial_inflated.left.gii.gz differ diff --git a/nilearn/datasets/data/fsaverage5/pial_inflated.right.gii.gz b/nilearn/datasets/data/fsaverage5/pial_inflated.right.gii.gz new file mode 100644 index 0000000000..5f6820a933 Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/pial_inflated.right.gii.gz differ diff --git a/nilearn/datasets/data/fsaverage5/sulc.left.gii.gz b/nilearn/datasets/data/fsaverage5/sulc.left.gii.gz new file mode 100644 index 0000000000..c8666e2a37 Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/sulc.left.gii.gz differ diff --git a/nilearn/datasets/data/fsaverage5/sulc.right.gii.gz b/nilearn/datasets/data/fsaverage5/sulc.right.gii.gz new file mode 100644 index 0000000000..2ef29aa34f Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/sulc.right.gii.gz differ diff --git a/nilearn/datasets/description/Megatrawls.rst b/nilearn/datasets/description/Megatrawls.rst new file mode 100644 index 0000000000..62a3283f31 --- /dev/null +++ b/nilearn/datasets/description/Megatrawls.rst @@ -0,0 +1,61 @@ +MegaTrawls Network Matrices HCP + + +Notes +----- +Contains network matrices of two types, full correlation and partial +correlation, estimated from each subject's timeseries signals extracted +from groups of ICA nodes or parcellations. In total, 461 functional +connectivity datasets were used to obtain these matrices, which are part +of the HCP MegaTrawls release. + +The numbers of nodes available for download are 25, 50, 100, 200 and 300, +in combination with two variants of timeseries extraction methods, +multiple spatial regression (ts2) and eigen regression (ts3). + +These matrices can be used to predict the relationships between subjects' +functional connectivity datasets and their behavioural measures. Both can be +downloaded from the HCP connectome website under the conditions given in the +disclaimer below. + + +Content +------- + :'dimensions': the number of nodes (dimensionality) requested when fetching the data. + :'timeseries': the timeseries extraction method requested when fetching the data. + :'matrices': the type of matrices requested when fetching the data. + :'correlation_matrices': the network matrices data. + + +References +---------- +For more technical details about predicting the measures, refer to: +Stephen Smith et al, HCP beta-release of the Functional Connectivity MegaTrawl. +April 2015 "HCP500-MegaTrawl" release. +https://db.humanconnectome.org/megatrawl/ + + +Disclaimer +---------- +IMPORTANT: This is open access data.
You must agree to the Terms and Conditions +of use before using this data, available at: +http://humanconnectome.org/data/data-use-terms/open-access.html + +Open Access Data (all imaging data and most of the behavioral data) +is available to those who register an account at ConnectomeDB and agree to +the Open Access Data Use Terms. This includes agreement to comply with +institutional rules and regulations. This means you may need the approval +of your IRB or Ethics Committee to use the data. The released HCP data are +not considered de-identified, since certain combinations of HCP Restricted +Data (available through a separate process) might allow identification of +individuals. Different national, state and local laws may apply and be +interpreted differently, so it is important that you consult with your IRB +or Ethics Committee before beginning your research. If needed and upon +request, the HCP will provide a certificate stating that you have accepted the +HCP Open Access Data Use Terms. Please note that everyone who works with HCP +open access data must review and agree to these terms, including those who are +accessing shared copies of this data. If you are sharing HCP Open Access data, +please advise your co-researchers that they must register with ConnectomeDB +and agree to these terms. + +Register and sign the Open Access Data Use Terms at +ConnectomeDB: https://db.humanconnectome.org/ diff --git a/nilearn/datasets/description/allen_rsn_2011.rst b/nilearn/datasets/description/allen_rsn_2011.rst new file mode 100644 index 0000000000..272627fc9e --- /dev/null +++ b/nilearn/datasets/description/allen_rsn_2011.rst @@ -0,0 +1,31 @@ +Allen 2011 + + +Notes +----- +Collection of resting-state network templates extracted from 600 healthy +subjects' fMRI data. + + +Content +------- + :"maps": T-maps of all 75 unthresholded components. + :"rsn28": T-maps of 28 RSNs included in E. Allen et al. + :"networks": string list containing the names for the 28 RSNs. + :"rsn_indices": dict[rsn_name] -> list of int, indices in the "maps" + file of the 28 RSNs. + :"comps": The aggregate ICA Components. + +References +---------- +For more information on this dataset's structure, see: +http://mialab.mrn.org/data/index.html + +Resting state data from paper E. Allen, et al, "A baseline for the multivariate +comparison of resting state networks," Frontiers in Systems Neuroscience, +vol. 5, p. 12, 2011. http://dx.doi.org/10.3389/fnsys.2011.00002 + +Slices and labels of the RSN ICs: +http://www.frontiersin.org/files/Articles/2093/fnsys-05-00002-HTML/image_m/fnsys-05-00002-g004.jpg + +Licence: unknown. diff --git a/nilearn/datasets/description/basc_multiscale_2015.rst b/nilearn/datasets/description/basc_multiscale_2015.rst new file mode 100644 index 0000000000..3de652cc9a --- /dev/null +++ b/nilearn/datasets/description/basc_multiscale_2015.rst @@ -0,0 +1,133 @@ +An atlas of multiscale brain parcellations + + +Content +------- +This work is a derivative of the Cambridge sample found in the [1000 +functional connectome project] +(http://fcon_1000.projects.nitrc.org/fcpClassic/FcpTable.html) (Liu et +al., 2009), originally released under Creative Commons -- Attribution +Non-Commercial. It includes group brain parcellations generated from +resting-state functional magnetic resonance images for about 200 young +healthy subjects. Multiple scales (number of networks) are available, +and include 7, 12, 20, 36, 64, 122, 197, 325, 444.
The brain parcellations
+have been generated using a method called bootstrap analysis of stable clusters
+(BASC, Bellec et al., 2010) and the scales have been selected using a data-driven
+method called MSTEPS (Bellec, 2013).
+
+
+This release more specifically contains the following files:
+ :'description': a markdown (text) description of the release.
+ :'scale007', 'scale012', 'scale020', 'scale036', 'scale064',
+ 'scale122', 'scale197', 'scale325', 'scale444'
+brain_parcellation_cambridge_basc_multiscale_(sym,asym)_scale(NNN).nii.gz:
+a 3D volume in .nii format at 3 mm isotropic resolution, in the MNI non-linear
+2009a space (http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009).
+Region number i is filled with the value i (background is filled with 0s).
+
+
+Note that two versions of the template are available, ending with either
+nii_sym or nii_asym. The asym flavor contains brain images that have been
+registered in the asymmetric version of the MNI brain template (reflecting
+that the brain is asymmetric), while with the sym flavor they have been
+registered in the symmetric version of the MNI template. The symmetric
+template has been forced to be symmetric anatomically, and is therefore
+ideally suited to study homotopic functional connections in fMRI: finding
+homotopic regions simply consists of flipping the x-axis of the template.
+
+
+Preprocessing
+-------------
+The datasets were analysed using the NeuroImaging Analysis Kit (NIAK
+https://github.com/SIMEXP/niak) version 0.12.14, under CentOS version 6.3 with
+Octave (http://gnu.octave.org) version 3.8.1 and the Minc toolkit
+(http://www.bic.mni.mcgill.ca/ServicesSoftware/ServicesSoftwareMincToolKit)
+version 0.3.18.
+Each fMRI dataset was corrected for inter-slice differences in acquisition time
+and the parameters of rigid-body motion were estimated for each time frame.
+Rigid-body motion was estimated within as well as between runs, using the
+median volume of the first run as a target. The median volume of one selected
+fMRI run for each subject was coregistered with a T1 individual scan using
+Minctracc (Collins and Evans, 1997), which was itself non-linearly transformed
+to the Montreal Neurological Institute (MNI) template (Fonov et al., 2011)
+using the CIVET pipeline (Ad-Dabbagh et al., 2006). The MNI symmetric template
+was generated from the ICBM152 sample of 152 young adults, after 40 iterations
+of non-linear coregistration. The rigid-body transform, fMRI-to-T1 transform
+and T1-to-stereotaxic transform were all combined, and the functional volumes
+were resampled in the MNI space at a 3 mm isotropic resolution. The
+"scrubbing" method of (Power et al., 2012) was used to remove the volumes
+with excessive motion (frame displacement greater than 0.5 mm). A minimum
+number of 60 unscrubbed volumes per run, corresponding to ~180 s of
+acquisition, was then required for further analysis. The following nuisance
+parameters were regressed out from the time series at each voxel: slow time
+drifts (basis of discrete cosines with a 0.01 Hz high-pass cut-off), average
+signals in conservative masks of the white matter and the lateral ventricles
+as well as the first principal components (95% energy) of the
+six rigid-body motion parameters and their squares (Giove et al., 2009). The
+fMRI volumes were finally spatially smoothed with a 6 mm isotropic Gaussian
+blurring kernel.
+
+
+Bootstrap Analysis of Stable Clusters
+-------------------------------------
+Brain parcellations were derived using BASC (Bellec et al., 2010).
A region
+growing algorithm was first applied to reduce the brain into regions of
+roughly equal size, set to 1000 mm3. The BASC used 100 replications of a
+hierarchical clustering with Ward's criterion on resampled individual time
+series, using circular block bootstrap. A consensus clustering (hierarchical
+with Ward's criterion) was generated across all the individual clustering
+replications pooled together, hence generating group clusters. The generation
+of group clusters was itself replicated by bootstrapping subjects 500 times,
+and a (final) consensus clustering (hierarchical with Ward's criterion) was
+generated on the replicated group clusters. The MSTEPS procedure (Bellec,
+2013) was implemented to select a data-driven subset of scales in the
+range 5-500, approximating the group stability matrices up to 5% residual
+energy, through linear interpolation over selected scales. Note that the
+number of scales itself was selected by the MSTEPS procedure in a data-driven
+fashion, and that the numbers of individual, group and final (consensus)
+clusters were not necessarily identical.
+
+
+References
+----------
+Ad-Dabbagh Y, Einarson D, Lyttelton O, Muehlboeck J S, Mok K,
+Ivanov O, Vincent R D, Lepage C, Lerch J, Fombonne E, Evans A C,
+2006. The CIVET Image-Processing Environment: A Fully Automated
+Comprehensive Pipeline for Anatomical Neuroimaging Research.
+In: Corbetta, M. (Ed.), Proceedings of the 12th Annual Meeting
+of the Human Brain Mapping Organization. Neuroimage, Florence, Italy.
+
+Bellec P, Rosa-Neto P, Lyttelton O C, Benali H, Evans A C, Jul. 2010.
+Multi-level bootstrap analysis of stable clusters in resting-state fMRI.
+NeuroImage 51 (3), 1126-1139.
+URL http://dx.doi.org/10.1016/j.neuroimage.2010.02.082
+
+Bellec P, Jun. 2013. Mining the Hierarchy of Resting-State Brain Networks:
+Selection of Representative Clusters in a Multiscale Structure. In: Pattern
+Recognition in Neuroimaging (PRNI), 2013 International Workshop on. pp.
+54-57.
+
+Collins D L, Evans A C, 1997. Animal: validation and applications of
+nonlinear registration-based segmentation. International Journal of
+Pattern Recognition and Artificial Intelligence 11, 1271-1294.
+
+Fonov V, Evans A C, Botteron K, Almli C R, McKinstry R C, Collins D L,
+Jan. 2011. Unbiased average age-appropriate atlases for pediatric
+studies. NeuroImage 54 (1), 313-327.
+URL http://dx.doi.org/10.1016/j.neuroimage.2010.07.033
+
+Giove F, Gili T, Iacovella V, Macaluso E, Maraviglia B, Oct. 2009.
+Images-based suppression of unwanted global signals in resting-state
+functional connectivity studies. Magnetic resonance imaging 27 (8), 1058-1064.
+URL http://dx.doi.org/10.1016/j.mri.2009.06.004
+
+Liu H, Stufflebeam S M, Sepulcre J, Hedden T, Buckner R L, Dec. 2009.
+Evidence from intrinsic activity that asymmetry of the human brain
+is controlled by multiple factors. Proceedings of the National Academy
+of Sciences 106 (48), 20499-20503.
+URL http://dx.doi.org/10.1073/pnas.0908073106
+
+Power J D, Barnes K A, Snyder A Z, Schlaggar B L, Petersen S E, Feb. 2012.
+Spurious but systematic correlations in functional connectivity
+MRI networks arise from subject motion. NeuroImage 59 (3), 2142-2154.
+URL http://dx.doi.org/10.1016/j.neuroimage.2011.10.018 diff --git a/nilearn/datasets/description/cobre.rst b/nilearn/datasets/description/cobre.rst new file mode 100644 index 0000000000..8ec83ac8f2 --- /dev/null +++ b/nilearn/datasets/description/cobre.rst @@ -0,0 +1,100 @@
+COBRE
+
+
+Notes
+-----
+This work is a derivative of the COBRE sample found in the International
+Neuroimaging Data-sharing Initiative
+(http://fcon_1000.projects.nitrc.org/indi/retro/cobre.html), originally
+released under Creative Commons - Attribution Non-Commercial.
+It includes preprocessed resting-state functional magnetic resonance images
+for 72 patients diagnosed with schizophrenia and 74 healthy controls.
+
+Content
+-------
+ :'phenotypic_data.tsv.gz': A gzipped tab-separated values file,
+ with each column representing a phenotypic variable as well as measures
+ of data quality, related to motion. Each row corresponds to one
+ participant, except the first row, which contains the names of the
+ variables.
+ :'keys_phenotypic_data.json': a json file describing each variable found
+ in 'phenotypic_data.tsv.gz'.
+ :'fmri_XXXXXXX.tsv.gz': A gzipped tab-separated values file, with each
+ column representing a confounding variable for the time series of
+ participant XXXXXXX, which is the same participant ID found in
+ 'phenotypic_data.tsv.gz'. Each row corresponds to a time frame, except for
+ the first row, which contains the names of the variables.
+ :'keys_confounds.json': a json file describing each variable found in the
+ files 'fmri_XXXXXXX.tsv.gz'.
+ :'fmri_XXXXXXX.nii.gz': a 3D + t nifti volume at 6 mm isotropic resolution.
+ Each fMRI dataset features 150 volumes.
+
+
+Usage recommendations
+---------------------
+Individual analyses: You may want to remove some time frames with excessive
+motion for each subject; see the confounding variable called 'scrub' in
+'fmri_XXXXXXX.tsv.gz'. Note that after removing these time frames there may
+not be enough usable data left; we recommend a minimum of 60 time frames. A
+fairly large number of confounds have been made available as part of the
+release: slow time drifts, motion parameters, frame displacement, scrubbing,
+average WM/Vent signal, COMPCOR, global signal.
+We strongly recommend regression of slow time drifts.
+Everything else is optional.
+
+Group analyses: There will also be some residual effects of motion, which you
+may want to regress out from connectivity measures at the group level. The
+number of acceptable time frames, as well as a measure of residual motion, can
+be found in the variables 'Frames OK' and 'FD scrubbed' in
+'phenotypic_data.tsv.gz'. Finally, the simplest use case with these data is to
+predict the overall presence of a diagnosis of schizophrenia (values 'Control'
+or 'Patient' in the phenotypic variable 'Subject Type').
+
+
+Preprocessing
+-------------
+The datasets were analysed using the NeuroImaging Analysis Kit (NIAK
+https://github.com/SIMEXP/niak) version 0.17, under CentOS version 6.3 with
+Octave (http://gnu.octave.org) version 4.0.2 and the Minc toolkit
+(http://www.bic.mni.mcgill.ca/ServicesSoftware/ServicesSoftwareMincToolKit)
+version 0.3.18.
+
+Note that a number of confounding variables were estimated and are made
+available as part of the release.
+
+WARNING: no confounds were actually regressed from the data, so this can be
+done interactively by the user, who will be able to explore different
+analytical paths easily.
+
+
+References
+----------
+Ad-Dab’bagh et al., 2006.
The CIVET Image-Processing Environment: A Fully
+Automated Comprehensive Pipeline for Anatomical Neuroimaging Research. In:
+Corbetta, M. (Ed.), Proceedings of the 12th Annual Meeting of the Human Brain
+Mapping Organization. Neuroimage, Florence, Italy.
+
+Bellec, P., et al., 2010. Multi-level bootstrap analysis of stable clusters in
+resting-state fMRI. NeuroImage 51 (3), 1126-1139.
+
+F. Carbonell, P. Bellec, A. Shmuel. Validation of a superposition model of
+global and system-specific resting state activity reveals anti-correlated
+networks. Brain Connectivity 2011 1(6): 496-510.
+
+Chai, X. J., et al., 2012. Anticorrelations in resting state networks without
+global signal regression. NeuroImage 59 (2), 1420-1428.
+
+Collins, D. L., Evans, A. C., 1997. Animal: validation and applications of
+nonlinear registration-based segmentation. International Journal of Pattern
+Recognition and Artificial Intelligence 11, 1271-1294.
+
+Fonov, V., et al., 2011. Unbiased average age-appropriate atlases for
+pediatric studies. NeuroImage 54 (1), 313-327.
+
+Giove, F., et al., 2009. Images-based suppression of unwanted global signals
+in resting-state functional connectivity studies. Magnetic resonance imaging
+27 (8), 1058-1064.
+
+Power, J. D., et al., 2012. Spurious but systematic correlations in functional
+connectivity MRI networks arise from subject motion. NeuroImage 59 (3),
+2142-2154. diff --git a/nilearn/datasets/description/destrieux_surface.rst b/nilearn/datasets/description/destrieux_surface.rst new file mode 100644 index 0000000000..89142006bf --- /dev/null +++ b/nilearn/datasets/description/destrieux_surface.rst @@ -0,0 +1,29 @@
+Destrieux Surface
+
+
+Notes
+-----
+Anatomical parcellation of the cortex based on sulco-gyral patterns
+(Destrieux et al., 2010) distributed with Freesurfer (Dale et al., 1999;
+Fischl et al., 1999) in fsaverage5 standard surface space.
+
+Content
+-------
+ :'annot_left': parcellation of the left hemisphere in freesurfer annot format
+ :'annot_right': parcellation of the right hemisphere in freesurfer annot format
+
+
+References
+----------
+
+Destrieux et al. (2010). Automatic parcellation of human cortical gyri and
+sulci using standard anatomical nomenclature. NeuroImage 53.
+http://dx.doi.org/10.1016/j.neuroimage.2010.06.010
+
+Dale et al. (1999). Cortical surface-based analysis. I. Segmentation and
+surface reconstruction. Neuroimage 9.
+http://dx.doi.org/10.1006/nimg.1998.0395
+
+Fischl et al. (1999). Cortical surface-based analysis. II: Inflation,
+flattening, and a surface-based coordinate system. Neuroimage 9.
+http://dx.doi.org/10.1006/nimg.1998.0396 diff --git a/nilearn/datasets/description/dosenbach_2010.rst b/nilearn/datasets/description/dosenbach_2010.rst new file mode 100644 index 0000000000..6e0dd86565 --- /dev/null +++ b/nilearn/datasets/description/dosenbach_2010.rst @@ -0,0 +1,22 @@
+Dosenbach 2010 atlas
+
+
+Notes
+-----
+160 regions of interest covering much of the cerebral cortex and cerebellum.
+They were obtained from meta-analyses of fMRI activation studies
+and assigned to 6 networks according to a modularity analysis of
+resting-state data.
+
+
+Content
+-------
+ :'rois': Coordinates of the ROIs in MNI space.
+ :'labels': ROI labels.
+ :'networks': Network names.
+
+
+References
+----------
+Dosenbach N.U., Nardos B., et al. "Prediction of individual brain maturity
+using fMRI.", 2010, Science 329, 1358-1361.
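As a companion to the Dosenbach description just above, here is a minimal usage sketch. It assumes the fetcher this description file accompanies is nilearn's fetch_coords_dosenbach_2010 and that the returned Bunch carries the 'rois', 'labels' and 'networks' attributes listed in the Content section; treat these names as assumptions rather than guarantees of this diff.

from nilearn import datasets

# Fetch the 160 ROI coordinates described above (assumed fetcher name).
dosenbach = datasets.fetch_coords_dosenbach_2010()

print(len(dosenbach.rois))     # 160 ROIs with MNI (x, y, z) coordinates
print(dosenbach.labels[:3])    # first few ROI labels
print(dosenbach.networks[:3])  # network assigned to each ROI

Such coordinate-based atlases are typically combined with NiftiSpheresMasker to extract signals around each seed.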
diff --git a/nilearn/datasets/description/fsaverage.rst b/nilearn/datasets/description/fsaverage.rst new file mode 100644 index 0000000000..db4eb1a91e --- /dev/null +++ b/nilearn/datasets/description/fsaverage.rst @@ -0,0 +1,21 @@ +fsaverage + + +Notes +----- +Fsaverage standard surface as distributed with Freesurfer (Fischl et al, 1999) + +Content +------- + :'pial_left': Gifti file, left hemisphere pial surface mesh + :'pial_right': Gifti file, right hemisphere pial surface mesh + :'infl_left': Gifti file, left hemisphere inflated pial surface mesh + :'infl_right': Gifti file, right hemisphere inflated pial + surface mesh + :'sulc_left': Gifti file, left hemisphere sulcal depth data + :'sulc_right': Gifti file, right hemisphere sulcal depth data + +References +---------- +Fischl et al, (1999). High-resolution intersubject averaging and a +coordinate system for the cortical surface. Hum Brain Mapp 8, 272-284. diff --git a/nilearn/datasets/description/fsaverage5.rst b/nilearn/datasets/description/fsaverage5.rst new file mode 100644 index 0000000000..44bd88ab8b --- /dev/null +++ b/nilearn/datasets/description/fsaverage5.rst @@ -0,0 +1,21 @@ +fsaverage5 + + +Notes +----- +Fsaverage5 standard surface as distributed with Freesurfer (Fischl et al, 1999) + +Content +------- + :'pial_left': Gifti file, left hemisphere pial surface mesh + :'pial_right': Gifti file, right hemisphere pial surface mesh + :'infl_left': Gifti file, left hemisphere inflated pial surface mesh + :'infl_right': Gifti file, right hemisphere inflated pial + surface mesh + :'sulc_left': Gifti file, left hemisphere sulcal depth data + :'sulc_right': Gifti file, right hemisphere sulcal depth data + +References +---------- +Fischl et al, (1999). High-resolution intersubject averaging and a +coordinate system for the cortical surface. Hum Brain Mapp 8, 272-284. diff --git a/nilearn/datasets/description/icbm152_2009.rst b/nilearn/datasets/description/icbm152_2009.rst index 7448f18eca..937d15aea5 100644 --- a/nilearn/datasets/description/icbm152_2009.rst +++ b/nilearn/datasets/description/icbm152_2009.rst @@ -37,8 +37,8 @@ VS Fonov, AC Evans, RC McKinstry, CR Almli and DL Collins, to adulthood", NeuroImage, Volume 47, Supplement 1, July 2009, Page S102 Organization for Human Brain Mapping 2009 Annual Meeting. -DL Collins, AP Zijdenbos, WFC Baaré and AC Evans, +DL Collins, AP Zijdenbos, WFC Baare and AC Evans, "ANIMAL+INSECT: Improved Cortical Structure Segmentation", -IPMI Lecture Notes in Computer Science, 1999, Volume 1613/1999, 210–223 +IPMI Lecture Notes in Computer Science, 1999, Volume 1613/1999, 210-223 Licence: unknown. diff --git a/nilearn/datasets/description/msdl_atlas.rst b/nilearn/datasets/description/msdl_atlas.rst index f6113c9e7e..51a5e508f2 100644 --- a/nilearn/datasets/description/msdl_atlas.rst +++ b/nilearn/datasets/description/msdl_atlas.rst @@ -18,10 +18,10 @@ Content References ---------- For more information about this dataset's structure: -https://team.inria.fr/parietal/research/spatial_patterns/spatial-patterns-in-resting-state/ +https://team.inria.fr/parietal/18-2/spatial_patterns/spatial-patterns-in-resting-state/ -Multi-subject dictionary learning to segment an atlas of brain spontaneous activity Gaël Varoquaux, Alexandre Gramfort, Fabian Pedregosa, Vincent Michel, Bertrand Thirion Information Processing in Medical Imaging, 2011, pp. 
562-573, Lecture Notes in Computer Science +Multi-subject dictionary learning to segment an atlas of brain spontaneous activity Gael Varoquaux, Alexandre Gramfort, Fabian Pedregosa, Vincent Michel, Bertrand Thirion Information Processing in Medical Imaging, 2011, pp. 562-573, Lecture Notes in Computer Science -Learning and comparing functional connectomes across subjects. Gaël Varoquaux, R.C. Craddock NeuroImage, 2013 +Learning and comparing functional connectomes across subjects. Gael Varoquaux, R.C. Craddock NeuroImage, 2013 -Licence: usage is unrestricted for non-commercial research purposes. \ No newline at end of file +Licence: usage is unrestricted for non-commercial research purposes. diff --git a/nilearn/datasets/description/neurovault.rst b/nilearn/datasets/description/neurovault.rst new file mode 100644 index 0000000000..124c0ae6d3 --- /dev/null +++ b/nilearn/datasets/description/neurovault.rst @@ -0,0 +1,42 @@ +Neurovault statistical maps + + +Notes +----- +Neurovault is a public repository of unthresholded statistical +maps, parcellations, and atlases of the human brain. You can read +about it and browse the images it contains at www.neurovault.org. + +It is also possible to ask Neurosynth to annotate the maps found on +Neurovault. Neurosynth is a platform for large-scale, automated +synthesis of fMRI data. It can be used to perform decoding. You can +find out more about Neurosynth at www.neurosynth.org. + +Content +------- + :'images': Nifti images representing the statistical maps. + :'images_meta': Dictionaries containing metadata for each image. + :'collections_meta': Dictionaries containing metadata for collections. + :'vocabulary': A list of words retrieved from neurosynth.org + :'word_frequencies': For each image, the weights of the words + from 'vocabulary'. + + +References +---------- +.. [1] Gorgolewski KJ, Varoquaux G, Rivera G, Schwartz Y, Ghosh SS, + Maumet C, Sochat VV, Nichols TE, Poldrack RA, Poline J-B, Yarkoni + T and Margulies DS (2015) NeuroVault.org: a web-based repository + for collecting and sharing unthresholded statistical maps of the + human brain. Front. Neuroinform. 9:8. doi: + 10.3389/fninf.2015.00008 + +.. [2] Yarkoni, Tal, Russell A. Poldrack, Thomas E. Nichols, David + C. Van Essen, and Tor D. Wager. "Large-scale automated synthesis + of human functional neuroimaging data." Nature methods 8, no. 8 + (2011): 665-670. + + +License +------- +All data are distributed under the CC0 license. diff --git a/nilearn/datasets/description/nki_enhanced_surface.rst b/nilearn/datasets/description/nki_enhanced_surface.rst new file mode 100644 index 0000000000..1989fc7f64 --- /dev/null +++ b/nilearn/datasets/description/nki_enhanced_surface.rst @@ -0,0 +1,24 @@ +NKI enhanced surface + + +Notes +----- +Enhanced Nathan Kline Institute-Rockland Sample resting state fMRI data +(TR=645ms) of 102 subjects, preprocessed and projected to the fsaverage5 surface +according to https://github.com/fliem/nki_nilearn. +Contains phenotypical metadata. + +Content +------- + :'func_left': resting-state data for the left hemisphere + :'func_right': resting-state data for the right hemisphere + :'phenotypic': phenotypical metadata + + +References +---------- +:Download: http://fcon_1000.projects.nitrc.org/indi/enhanced/ + +Nooner et al, (2012). The NKI-Rockland Sample: A model for accelerating the +pace of discovery science in psychiatry. Frontiers in neuroscience 6, 152. 
+URL http://dx.doi.org/10.3389/fnins.2012.00152 diff --git a/nilearn/datasets/description/pauli_2017.rst b/nilearn/datasets/description/pauli_2017.rst new file mode 100644 index 0000000000..eaaa8b12ac --- /dev/null +++ b/nilearn/datasets/description/pauli_2017.rst @@ -0,0 +1,21 @@
+In Vivo High Resolution Atlas of the Subcortical Human Brain
+
+
+Notes
+-----
+The purpose of this project is to develop a crowd-sourced In Vivo High Resolution Atlas of the Subcortical Human Brain.
+We invite contributions to this project, both to increase the precision of anatomical labels, and to increase the number of labeled subcortical nuclei.
+
+This resource can be used as a reference atlas for researchers and students alike.
+
+Content
+-------
+ :'maps': Nifti images with the (probabilistic) region definitions
+ :'labels': text file containing the file names
+
+References
+----------
+For more information about this dataset:
+https://osf.io/r2hvk/
+
+Licence: CC-By Attribution 4.0 International diff --git a/nilearn/datasets/description/talairach_atlas.rst b/nilearn/datasets/description/talairach_atlas.rst new file mode 100644 index 0000000000..29a481ed6e --- /dev/null +++ b/nilearn/datasets/description/talairach_atlas.rst @@ -0,0 +1,21 @@
+Talairach atlas
+
+Content
+-------
+ :'maps': 3D Nifti image, values are integers corresponding to indices in the
+ list of labels.
+
+ :'labels': Annotations ({})
+
+References
+----------
+http://talairach.org/about.html#Labels
+
+`Lancaster JL, Woldorff MG, Parsons LM, Liotti M, Freitas CS, Rainey L, Kochunov
+PV, Nickerson D, Mikiten SA, Fox PT, "Automated Talairach Atlas labels for
+functional brain mapping". Human Brain Mapping 10:120-131, 2000.`
+
+`Lancaster JL, Rainey LH, Summerlin JL, Freitas CS, Fox PT, Evans AC, Toga AW,
+Mazziotta JC. Automated labeling of the human brain: A preliminary report on the
+development and evaluation of a forward-transform method. Hum Brain Mapp 5,
+238-242, 1997.` diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 2ede2484b4..4de5288080 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -4,16 +4,24 @@ import warnings import os import re
+import json
 import numpy as np
+import numbers
+
 import nibabel
 from sklearn.datasets.base import Bunch
+from sklearn.utils import deprecated
 from .utils import (_get_dataset_dir, _fetch_files, _get_dataset_descr, _read_md5_sum_file, _tree, _filter_columns)
-
+from .._utils import check_niimg
 from .._utils.compat import BytesIO, _basestring, _urllib
+from .._utils.numpy_conversions import csv_to_array
+from .._utils.exceptions import VisibleDeprecationWarning
+@deprecated("fetch_haxby_simple will be removed in future releases. "
+            "Use 'fetch_haxby' instead.")
 def fetch_haxby_simple(data_dir=None, url=None, resume=True, verbose=1): """Download and load a simple example haxby dataset. @@ -32,7 +40,7 @@ def fetch_haxby_simple(data_dir=None, url=None, resume=True, verbose=1): target data. 'mask': string. Path to nifti mask file. 'session': list of string. Path to text file containing labels
- (can be used for LeaveOneLabelOut cross validation for example).
+ (can be used for LeaveOneGroupOut cross-validation, for example).
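The hunk above swaps the removed scikit-learn LeaveOneLabelOut for LeaveOneGroupOut. As a minimal sketch of what session-wise cross-validation with such labels looks like; the arrays below are synthetic stand-ins, not data from this dataset:

from sklearn.model_selection import LeaveOneGroupOut, cross_val_score
from sklearn.svm import LinearSVC
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(120, 500)                 # stand-in for masked fMRI samples
y = rng.randint(0, 2, size=120)         # stand-in condition labels
session = np.repeat(np.arange(12), 10)  # one group (session) label per sample

# Each cross-validation fold holds out every sample from one session.
cv = LeaveOneGroupOut()
scores = cross_val_score(LinearSVC(), X, y, groups=session, cv=cv)
print(scores.mean())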
References ---------- @@ -78,8 +86,8 @@ def fetch_haxby_simple(data_dir=None, url=None, resume=True, verbose=1): conditions_target=[files[3]], description=fdescr)
-def fetch_haxby(data_dir=None, n_subjects=1, fetch_stimuli=False,
-                url=None, resume=True, verbose=1):
+def fetch_haxby(data_dir=None, n_subjects=None, subjects=(2,),
+                fetch_stimuli=False, url=None, resume=True, verbose=1):
 """Download and load the complete haxby dataset Parameters @@ -91,6 +99,14 @@ def fetch_haxby(data_dir=None, n_subjects=1, fetch_stimuli=False, n_subjects: int, optional Number of subjects, from 1 to 6.
+ NOTE: n_subjects is deprecated from 0.2.6 and will be removed in the next
+ release. Use `subjects` instead.
+
+ subjects : list or int, optional
+ Either a list of subjects or the number of subjects to load, from 1 to
+ 6. By default, the 2nd subject will be loaded. An empty list returns no
+ subject data.
+
 fetch_stimuli: boolean, optional Indicate if stimuli images must be downloaded. They will be presented as a dictionary of categories. @@ -132,10 +148,23 @@ def fetch_haxby(data_dir=None, n_subjects=1, fetch_stimuli=False, Run 8 in subject 5 does not contain any task labels. The anatomical image for subject 6 is unavailable. """
-
- if n_subjects > 6:
- warnings.warn('Warning: there are only 6 subjects')
- n_subjects = 6
+ if n_subjects is not None:
+ warn_str = ("The parameter 'n_subjects' is deprecated from 0.2.6 and "
+ "will be removed in the next nilearn release. Use parameter "
+ "'subjects' instead.")
+ warnings.warn(warn_str, VisibleDeprecationWarning, stacklevel=2)
+ subjects = n_subjects
+
+ if isinstance(subjects, numbers.Number) and subjects > 6:
+ subjects = 6
+
+ if subjects is not None and (isinstance(subjects, list) or
+ isinstance(subjects, tuple)):
+ for sub_id in subjects:
+ if sub_id not in [1, 2, 3, 4, 5, 6]:
+ raise ValueError("You provided invalid subject id {0} in a "
+ "list. Subjects must be selected in "
+ "[1, 2, 3, 4, 5, 6]".format(sub_id))
 dataset_name = 'haxby2001' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, @@ -160,19 +189,28 @@ 'mask8_house_vt.nii.gz', 'anat.nii.gz'] n_files = len(sub_files)
+ if subjects is None:
+ subjects = []
+
+ if isinstance(subjects, numbers.Number):
+ subject_mask = np.arange(1, subjects + 1)
+ else:
+ subject_mask = np.array(subjects)
+
 files = [ (os.path.join('subj%d' % i, sub_file), url + 'subj%d-2010.01.14.tar.gz' % i, {'uncompress': True, 'md5sum': md5sums.get('subj%d-2010.01.14.tar.gz' % i, None)})
- for i in range(1, n_subjects + 1)
+ for i in subject_mask
 for sub_file in sub_files if not (sub_file == 'anat.nii.gz' and i == 6) # no anat for sub. 6 ] files = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
- if n_subjects == 6:
+ if ((isinstance(subjects, numbers.Number) and subjects == 6) or
+ np.any(subject_mask == 6)):
 files.append(None) # None value because subject 6 has no anat kwargs = {} @@ -390,15 +428,16 @@ def fetch_nyu_rest(n_subjects=None, sessions=[1], data_dir=None, resume=True, session=session, description=fdescr)
-def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True,
+def fetch_adhd(n_subjects=30, data_dir=None, url=None, resume=True,
 verbose=1): """Download and load the ADHD resting-state dataset. Parameters ---------- n_subjects: int, optional
- The number of subjects to load. If None is given, all the
- 40 subjects are used.
+ The number of subjects to load, from a maximum of 40 subjects.
+ By default, 30 subjects will be loaded.
If None is given,
+ all 40 subjects will be loaded.
 data_dir: string, optional Path of the data directory. Used to force data storage in a specified @@ -406,7 +445,7 @@ def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True, url: string, optional Override download URL. Used for test only (or if you set up a mirror of
- the data).
+ the data). Default: None
 Returns ------- @@ -459,7 +498,7 @@ def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True, phenotypic = _fetch_files(data_dir, [phenotypic], resume=resume, verbose=verbose)[0]
- ## Load the csv file
+ # Load the csv file
 phenotypic = np.genfromtxt(phenotypic, names=True, delimiter=',', dtype=None) @@ -503,6 +542,9 @@ def fetch_miyawaki2008(data_dir=None, url=None, resume=True, verbose=1): - 'mask': string Path to nifti mask file to define target volume in visual cortex
+ - 'background': string
+ Path to a nifti file containing a background image for plotting
+ miyawaki images.
 References ---------- @@ -524,7 +566,7 @@ def fetch_miyawaki2008(data_dir=None, url=None, resume=True, verbose=1): """ url = 'https://www.nitrc.org/frs/download.php' \
- '/5899/miyawaki2008.tgz?i_agree=1&download_now=1'
+ '/8486/miyawaki2008.tgz?i_agree=1&download_now=1'
 opts = {'uncompress': True} # Dataset files @@ -605,6 +647,10 @@ def fetch_miyawaki2008(data_dir=None, url=None, resume=True, verbose=1): verbose=verbose) files = _fetch_files(data_dir, file_names, resume=resume, verbose=verbose)
+ # Fetch the background image
+ bg_img = _fetch_files(data_dir, [('bg.nii.gz', url, opts)], resume=resume,
+ verbose=verbose)[0]
+
 fdescr = _get_dataset_descr(dataset_name) # Return the data @@ -613,6 +659,7 @@ def fetch_miyawaki2008(data_dir=None, url=None, resume=True, verbose=1): label=files[32:64], mask=files[64], mask_roi=files[65:],
+ background=bg_img,
 description=fdescr) @@ -710,8 +757,8 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, "visual click vs visual sentences", "auditory&visual motor vs cognitive processing"}
- n_subjects: int, optional
- The number of subjects to load. If None is given,
+ n_subjects: int or list, optional
+ The number or list of subjects to load. If None is given,
 all 94 subjects are used. get_tmaps: boolean @@ -758,13 +805,19 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, individual functional cognitive networks." BMC neuroscience 8.1 (2007): 91.
+ See Also
+ ---------
+ nilearn.datasets.fetch_localizer_calculation_task
+ nilearn.datasets.fetch_localizer_button_task
+
 """ if isinstance(contrasts, _basestring): raise ValueError('Contrasts should be a list of strings, but ' 'a single string was given: "%s"' % contrasts) if n_subjects is None: n_subjects = 94 # 94 subjects available
- if (n_subjects > 94) or (n_subjects < 1):
+ if (isinstance(n_subjects, numbers.Number) and
+ ((n_subjects > 94) or (n_subjects < 1))):
 warnings.warn("Wrong value for \'n_subjects\' (%d).
The maximum " "value will be used instead (\'n_subjects=94\')") n_subjects = 94 # 94 subjects available @@ -847,8 +900,15 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, # is generated on the remote server) # - Local (cached) version of the files can be checked for each contrast opts = {'uncompress': True} - subject_ids = ["S%02d" % s for s in range(1, n_subjects + 1)] - subject_id_max = subject_ids[-1] + + if isinstance(n_subjects, numbers.Number): + subject_mask = np.arange(1, n_subjects + 1) + subject_id_max = "S%02d" % n_subjects + else: + subject_mask = np.array(n_subjects) + subject_id_max = "S%02d" % np.max(n_subjects) + n_subjects = len(n_subjects) + subject_ids = ["S%02d" % s for s in subject_mask] data_types = ["c map"] if get_tmaps: data_types.append("t map") @@ -938,7 +998,7 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, files = files[:-1] # join_by sorts the output along the key csv_data = join_by('subject_id', csv_data, csv_data2, - usemask=False, asrecarray=True)[:n_subjects] + usemask=False, asrecarray=True)[subject_mask - 1] if get_anats: anats = files[-n_subjects:] files = files[:-n_subjects] @@ -952,14 +1012,10 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, ext_vars=csv_data, description=fdescr) -def fetch_localizer_calculation_task(n_subjects=None, data_dir=None, url=None, +def fetch_localizer_calculation_task(n_subjects=1, data_dir=None, url=None, verbose=1): """Fetch calculation task contrast maps from the localizer. - This function is only a caller for the fetch_localizer_contrasts in order - to simplify examples reading and understanding. - The 'calculation (auditory and visual cue)' contrast is used. - Parameters ---------- n_subjects: int, optional @@ -983,6 +1039,18 @@ def fetch_localizer_calculation_task(n_subjects=None, data_dir=None, url=None, Dictionary-like object, the interest attributes are : 'cmaps': string list, giving paths to nifti contrast maps + Notes + ------ + + This function is only a caller for the fetch_localizer_contrasts in order + to simplify examples reading and understanding. + The 'calculation (auditory and visual cue)' contrast is used. + + See Also + --------- + nilearn.datasets.fetch_localizer_button_task + nilearn.datasets.fetch_localizer_contrasts + """ data = fetch_localizer_contrasts(["calculation (auditory and visual cue)"], n_subjects=n_subjects, @@ -995,6 +1063,57 @@ def fetch_localizer_calculation_task(n_subjects=None, data_dir=None, url=None, return data +def fetch_localizer_button_task(n_subjects=[2, ], data_dir=None, url=None, + get_anats=False, verbose=1): + """Fetch left vs right button press contrast maps from the localizer. + + Parameters + ---------- + n_subjects: int or list, optional + The number or list of subjects to load. If None is given, + all 94 subjects are used. + + data_dir: string, optional + Path of the data directory. Used to force data storage in a specified + location. + + url: string, optional + Override download URL. Used for test only (or if you setup a mirror of + the data). + + get_anats: boolean + Whether individual structural images should be fetched or not. + + verbose: int, optional + verbosity level (0 means no message). 
+
+ Returns
+ -------
+ data: Bunch
+ Dictionary-like object, the interest attributes are :
+ 'cmaps': string list, giving paths to nifti contrast maps
+
+ Notes
+ ------
+
+ This function is only a wrapper around fetch_localizer_contrasts,
+ provided to make examples easier to read and understand.
+ The 'left vs right button press' contrast is used.
+
+ See Also
+ ---------
+ nilearn.datasets.fetch_localizer_calculation_task
+ nilearn.datasets.fetch_localizer_contrasts
+
+ """
+ data = fetch_localizer_contrasts(["left vs right button press"],
+ n_subjects=n_subjects,
+ get_tmaps=True, get_masks=False,
+ get_anats=get_anats, data_dir=data_dir,
+ url=url, resume=True, verbose=verbose)
+ return data
+
+
 def fetch_abide_pcp(data_dir=None, n_subjects=None, pipeline='cpac', band_pass_filtering=False, global_signal_regression=False, derivatives=['func_preproc'], @@ -1014,7 +1133,8 @@ def fetch_abide_pcp(data_dir=None, n_subjects=None, pipeline='cpac', n_subjects: int, optional The number of subjects to load. If None is given,
- all 94 subjects are used.
+ all available subjects are used (this number depends on the
+ preprocessing pipeline used).
 pipeline: string, optional Possible pipelines are "ccs", "cpac", "dparsf" and "niak" @@ -1077,6 +1197,10 @@ def fetch_abide_pcp(data_dir=None, n_subjects=None, pipeline='cpac', classification of autism: ABIDE results." Frontiers in human neuroscience 7 (2013). """
+ # People keep getting it wrong and submitting a string instead of a
+ # list of strings. We'll make their life easier
+ if isinstance(derivatives, _basestring):
+ derivatives = [derivatives, ]
 # Parameter check for derivative in derivatives: @@ -1106,11 +1230,11 @@ 'ABIDE_Initiative') if quality_checked:
- kwargs['qc_rater_1'] = 'OK'
- kwargs['qc_anat_rater_2'] = ['OK', 'maybe']
- kwargs['qc_func_rater_2'] = ['OK', 'maybe']
- kwargs['qc_anat_rater_3'] = 'OK'
- kwargs['qc_func_rater_3'] = 'OK'
+ kwargs['qc_rater_1'] = b'OK'
+ kwargs['qc_anat_rater_2'] = [b'OK', b'maybe']
+ kwargs['qc_func_rater_2'] = [b'OK', b'maybe']
+ kwargs['qc_anat_rater_3'] = b'OK'
+ kwargs['qc_func_rater_3'] = b'OK'
 # Fetch the phenotypic file and load it csv = 'Phenotypic_V1_0b_preprocessed1.csv' @@ -1155,10 +1279,14 @@ results['phenotypic'] = pheno for derivative in derivatives: ext = '.1D' if derivative.startswith('rois') else '.nii.gz'
- files = [(file_id + '_' + derivative + ext,
- '/'.join([url, derivative, file_id + '_' + derivative + ext]),
- {}) for file_id in file_ids]
- files = _fetch_files(data_dir, files, verbose=verbose)
+ files = []
+ for file_id in file_ids:
+ file_ = [(
+ file_id + '_' + derivative + ext,
+ '/'.join([url, derivative, file_id + '_' + derivative + ext]),
+ {}
+ )]
+ files.append(_fetch_files(data_dir, file_, verbose=verbose)[0])
 # Load derivatives if needed if ext == '.1D': files = [np.loadtxt(f) for f in files] @@ -1177,7 +1305,7 @@ def _load_mixed_gambles(zmap_imgs): for zmap_img in zmap_imgs: # load subject data this_X = zmap_img.get_data()
- affine = zmap_img.get_affine()
+ affine = zmap_img.affine
 finite_mask = np.all(np.isfinite(this_X), axis=-1) this_mask = np.logical_and(np.all(this_X != 0, axis=-1), finite_mask) @@ -1200,7 +1328,7 @@ def _load_mixed_gambles(zmap_imgs): mask.append(this_mask) y = np.array(y) X = np.concatenate(X, axis=-1)
- mask = np.sum(mask, axis=0) > .5 * len(zmap_imgs)
+ mask = np.sum(mask, axis=0) > .5 * len(mask)
 mask =
np.logical_and(mask, np.all(np.isfinite(X), axis=-1)) X = X[mask, :].T tmp = np.zeros(list(mask.shape) + [len(X)]) @@ -1274,8 +1402,448 @@ def fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True, data_dir = _get_dataset_dir('jimura_poldrack_2012_zmaps', data_dir=data_dir) zmap_fnames = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
- data = Bunch(zmaps=zmap_fnames)
+ subject_id = np.repeat(np.arange(n_subjects), 6 * 8)
+ data = Bunch(zmaps=zmap_fnames,
+ subject_id=subject_id)
 if not return_raw_data:
- X, y, mask_img = _load_mixed_gambles(map(nibabel.load, data.zmaps))
+ X, y, mask_img = _load_mixed_gambles(check_niimg(data.zmaps,
+ return_iterator=True))
 data.zmaps, data.gain, data.mask_img = X, y, mask_img return data
+
+
+def fetch_megatrawls_netmats(dimensionality=100, timeseries='eigen_regression',
+ matrices='partial_correlation', data_dir=None,
+ resume=True, verbose=1):
+ """Download and return network matrices data from the HCP MegaTrawls release.
+
+ This data can be used to predict relationships between imaging data and
+ non-imaging behavioural measures such as age, sex, education, etc.
+ The network matrices are estimated from functional connectivity
+ datasets of 461 subjects. Full technical details are given in [1] and [2].
+
+ .. versionadded:: 0.2.2
+
+ Parameters
+ ----------
+ dimensionality: int, optional
+ Valid inputs are 25, 50, 100, 200, 300. By default, network matrices
+ estimated using Group ICA brain parcellations of 100 components/dimensions
+ will be returned.
+
+ timeseries: str, optional
+ Valid inputs are 'multiple_spatial_regression' or 'eigen_regression'. By
+ default ('eigen_regression'), matrices estimated from the first principal
+ eigen component timeseries signals extracted from each subject's
+ parcellations will be returned. With 'multiple_spatial_regression',
+ matrices estimated from spatial-regressor-based timeseries signals
+ extracted from each subject's parcellations will be returned.
+
+ matrices: str, optional
+ Valid inputs are 'full_correlation' or 'partial_correlation'. By default,
+ partial correlation matrices will be returned; otherwise, full
+ correlation matrices will be returned.
+
+ data_dir: str, default is None, optional
+ Path of the data directory. Used to force data storage in a specified
+ location.
+
+ resume: bool, default is True
+ If True, try resuming the download of a partially downloaded file.
+
+ verbose: int, default is 1
+ Verbosity level (0 means no message).
+
+ Returns
+ -------
+ data: Bunch
+ dictionary-like object, the attributes are :
+
+ - 'dimensions': int, the dimensionality given as input.
+
+ - 'timeseries': str, the timeseries method given as input.
+
+ - 'matrices': str, the type of matrices given as input.
+
+ - 'correlation_matrices': ndarray, the correlation matrices of the
+ requested type. Array size will depend on the given
+ dimensions (n, n).
+ - 'description': data description
+
+ References
+ ----------
+ [1] Stephen Smith et al, HCP beta-release of the Functional Connectivity
+ MegaTrawl.
+ April 2015 "HCP500-MegaTrawl" release.
+ https://db.humanconnectome.org/megatrawl/
+
+ [2] Smith, S.M. et al. Nat. Neurosci. 18, 1565-1567 (2015).
+
+ [3] N.Filippini, et al. Distinct patterns of brain activity in young
+ carriers of the APOE-e4 allele.
+ Proc Natl Acad Sci USA (PNAS), 106:7209-7214, 2009.
+
+ [4] S.Smith, et al. Methods for network modelling from high quality rfMRI data.
+ Meeting of the Organization for Human Brain Mapping. 2014
+
+ [5] J.X. O'Reilly et al. Distinct and overlapping functional zones in the
+ cerebellum defined by resting state functional connectivity.
+ Cerebral Cortex, 2009.
+
+ Note: See description for terms & conditions on data usage.
+
+ """
+ url = "http://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz"
+ opts = {'uncompress': True}
+
+ error_message = "Invalid {0} input provided: {1}. Choose one of {2}"
+ # standard dataset terms
+ dimensionalities = [25, 50, 100, 200, 300]
+ if dimensionality not in dimensionalities:
+ raise ValueError(error_message.format('dimensionality', dimensionality,
+ dimensionalities))
+ timeseries_methods = ['multiple_spatial_regression', 'eigen_regression']
+ if timeseries not in timeseries_methods:
+ raise ValueError(error_message.format('timeseries', timeseries,
+ timeseries_methods))
+ output_matrices_names = ['full_correlation', 'partial_correlation']
+ if matrices not in output_matrices_names:
+ raise ValueError(error_message.format('matrices', matrices,
+ output_matrices_names))
+
+ dataset_name = 'Megatrawls'
+ data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
+ description = _get_dataset_descr(dataset_name)
+
+ timeseries_map = dict(multiple_spatial_regression='ts2', eigen_regression='ts3')
+ matrices_map = dict(full_correlation='Znet1.txt', partial_correlation='Znet2.txt')
+ filepath = [(os.path.join(
+ '3T_Q1-Q6related468_MSMsulc_d%d_%s' % (dimensionality, timeseries_map[timeseries]),
+ matrices_map[matrices]), url, opts)]
+
+ # Fetch all the files
+ files = _fetch_files(data_dir, filepath, resume=resume, verbose=verbose)
+
+ # Load the files into arrays
+ correlation_matrices = csv_to_array(files[0])
+
+ return Bunch(
+ dimensions=dimensionality,
+ timeseries=timeseries,
+ matrices=matrices,
+ correlation_matrices=correlation_matrices,
+ description=description)
+
+
+def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1):
+ """Fetch COBRE datasets preprocessed using NIAK 0.17 under CentOS
+ version 6.3 with Octave version 4.0.2 and the Minc toolkit version 0.3.18.
+
+ Downloads and returns COBRE preprocessed resting-state fMRI datasets,
+ covariates and phenotypic information such as demographics, clinical
+ variables, and a measure of frame displacement FD (an average FD over all
+ the time frames left after censoring).
+
+ Each subject `fmri_XXXXXXX.nii.gz` is a 3D+t nifti volume (150 volumes).
+ WARNING: no confounds were actually regressed from the data, so this can be
+ done interactively by the user, who will be able to explore different
+ analytical paths easily.
+
+ For each subject, there is an `fmri_XXXXXXX.tsv` file which contains
+ covariates such as motion parameters and mean CSF signal that should be
+ regressed out of the functional data.
+
+ `keys_confounds.json`: a json file that describes each variable mentioned
+ in the files `fmri_XXXXXXX.tsv.gz`. It also contains a list of time frames
+ that have been removed from the time series by censoring for high motion.
+
+ `phenotypic_data.tsv` contains the clinical variables, which are explained
+ in `keys_phenotypic_data.json`.
+
+ .. versionadded:: 0.3
+
+ Parameters
+ ----------
+ n_subjects: int, optional
+ The number of subjects to load, from a maximum of 146 subjects.
+ By default, 10 subjects will be loaded.
If n_subjects=None, + all subjects will be loaded. + + data_dir: str, optional + Path to the data directory. Used to force data storage in a + specified location. Default: None + + url: str, optional + Override download url. Used for test only (or if you setup a + mirror of the data). Default: None + + verbose: int, optional + Verbosity level (0 means no message). + + Returns + ------- + data: Bunch + Dictionary-like object, the attributes are: + + - 'func': string list + Paths to Nifti images. + - 'confounds': string list + Paths to .tsv files of each subject, confounds. + - 'phenotypic': numpy.recarray + Contains data of clinical variables, sex, age, FD. + - 'description': data description of the release and references. + - 'desc_con': str + description of the confounds variables + - 'desc_phenotypic': str + description of the phenotypic variables. + + Notes + ----- + See `more information about datasets structure + `_ + """ + + if url is None: + # Here we use the file that provides URL for all others + url = 'https://api.figshare.com/v2/articles/4197885' + dataset_name = 'cobre' + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, + verbose=verbose) + fdescr = _get_dataset_descr(dataset_name) + + # First, fetch the file that references all individual URLs + files = _fetch_files(data_dir, [("4197885", url, {})], + verbose=verbose)[0] + + files = json.load(open(files, 'r')) + files = files['files'] + # Index files by name + files_ = {} + for f in files: + files_[f['name']] = f + files = files_ + + # Fetch the phenotypic file and load it + csv_name_gz = 'phenotypic_data.tsv.gz' + csv_name = os.path.splitext(csv_name_gz)[0] + csv_file_phen = _fetch_files( + data_dir, [(csv_name, files[csv_name_gz]['download_url'], + {'md5': files[csv_name_gz].get('md5', None), + 'move': csv_name_gz, + 'uncompress': True})], + verbose=verbose)[0] + + # Load file in filename to numpy arrays + names = ['ID', 'Current Age', 'Gender', 'Handedness', 'Subject Type', + 'Diagnosis', 'Frames OK', 'FD', 'FD Scrubbed'] + + csv_array_phen = np.recfromcsv(csv_file_phen, names=names, + skip_header=True, delimiter='\t') + + # Check number of subjects + max_subjects = len(csv_array_phen) + if n_subjects is None: + n_subjects = max_subjects + + if n_subjects > max_subjects: + warnings.warn('Warning: there are only %d subjects' % max_subjects) + n_subjects = max_subjects + + sz_count = list(csv_array_phen['subject_type']).count(b'Patient') + ct_count = list(csv_array_phen['subject_type']).count(b'Control') + + n_sz = np.round(float(n_subjects) / max_subjects * sz_count).astype(int) + n_ct = np.round(float(n_subjects) / max_subjects * ct_count).astype(int) + + # First, restrict the csv files to the adequate number of subjects + sz_ids = csv_array_phen[csv_array_phen['subject_type'] == + b'Patient']['id'][:n_sz] + ct_ids = csv_array_phen[csv_array_phen['subject_type'] == + b'Control']['id'][:n_ct] + ids = np.hstack([sz_ids, ct_ids]) + csv_array_phen = csv_array_phen[np.in1d(csv_array_phen['id'], ids)] + + # Call fetch_files once per subject. 
+
+ func = []
+ con = []
+ for i in ids:
+ f = 'fmri_00' + str(i) + '.nii.gz'
+ c_gz = 'fmri_00' + str(i) + '.tsv.gz'
+ c = os.path.splitext(c_gz)[0]
+
+ f, c = _fetch_files(
+ data_dir,
+ [(f, files[f]['download_url'], {'md5': files[f].get('md5', None),
+ 'move': f}),
+ (c, files[c_gz]['download_url'],
+ {'md5': files[c_gz].get('md5', None),
+ 'move': c_gz, 'uncompress': True})
+ ],
+ verbose=verbose)
+ func.append(f)
+ con.append(c)
+
+ # Fetch the complementary files
+ keys_con = "keys_confounds.json"
+ keys_phen = "keys_phenotypic_data.json"
+
+ csv_keys_con, csv_keys_phen = _fetch_files(
+ data_dir,
+ [(keys_con, files[keys_con]['download_url'],
+ {'md5': files[keys_con].get('md5', None), 'move': keys_con}),
+ (keys_phen, files[keys_phen]['download_url'],
+ {'md5': files[keys_phen].get('md5', None), 'move': keys_phen})
+ ],
+ verbose=verbose)
+
+ files_keys_con = open(csv_keys_con, 'r').read()
+ files_keys_phen = open(csv_keys_phen, 'r').read()
+
+ return Bunch(func=func, confounds=con, phenotypic=csv_array_phen,
+ description=fdescr, desc_con=files_keys_con,
+ desc_phenotypic=files_keys_phen)
+
+
+def fetch_surf_nki_enhanced(n_subjects=10, data_dir=None,
+ url=None, resume=True, verbose=1):
+ """Download and load the NKI enhanced resting-state dataset,
+ preprocessed and projected to the fsaverage5 space surface.
+
+ .. versionadded:: 0.3
+
+ Parameters
+ ----------
+ n_subjects: int, optional
+ The number of subjects to load, from a maximum of 102 subjects.
+ By default, 10 subjects will be loaded. If None is given,
+ all 102 subjects will be loaded.
+
+ data_dir: str, optional
+ Path of the data directory. Used to force data storage in a specified
+ location. Default: None
+
+ url: str, optional
+ Override download URL. Used for test only (or if you set up a mirror of
+ the data). Default: None
+
+ resume: bool, optional (default True)
+ If True, try resuming download if possible.
+
+ verbose: int, optional (default 1)
+ Defines the level of verbosity of the output.
+
+ Returns
+ -------
+ data: sklearn.datasets.base.Bunch
+ Dictionary-like object, the interest attributes are :
+ - 'func_left': Paths to Gifti files containing resting state
+ time series left hemisphere
+ - 'func_right': Paths to Gifti files containing resting state
+ time series right hemisphere
+ - 'phenotypic': array containing tuple with subject ID, age,
+ dominant hand and sex for each subject.
+ - 'description': data description of the release and references.
+
+ References
+ ----------
+ :Download: http://fcon_1000.projects.nitrc.org/indi/enhanced/
+
+ Nooner et al, (2012). The NKI-Rockland Sample: A model for accelerating the
+ pace of discovery science in psychiatry. Frontiers in neuroscience 6, 152.
+ URL http://dx.doi.org/10.3389/fnins.2012.00152 + + """ + + if url is None: + url = 'https://www.nitrc.org/frs/download.php/' + + # Preliminary checks and declarations + dataset_name = 'nki_enhanced_surface' + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, + verbose=verbose) + ids = ['A00028185', 'A00033747', 'A00035072', 'A00035827', 'A00035840', + 'A00037112', 'A00037511', 'A00038998', 'A00039391', 'A00039431', + 'A00039488', 'A00040524', 'A00040623', 'A00040944', 'A00043299', + 'A00043520', 'A00043677', 'A00043722', 'A00045589', 'A00050998', + 'A00051063', 'A00051064', 'A00051456', 'A00051457', 'A00051477', + 'A00051513', 'A00051514', 'A00051517', 'A00051528', 'A00051529', + 'A00051539', 'A00051604', 'A00051638', 'A00051658', 'A00051676', + 'A00051678', 'A00051679', 'A00051726', 'A00051774', 'A00051796', + 'A00051835', 'A00051882', 'A00051925', 'A00051927', 'A00052070', + 'A00052117', 'A00052118', 'A00052126', 'A00052180', 'A00052197', + 'A00052214', 'A00052234', 'A00052307', 'A00052319', 'A00052499', + 'A00052502', 'A00052577', 'A00052612', 'A00052639', 'A00053202', + 'A00053369', 'A00053456', 'A00053474', 'A00053546', 'A00053576', + 'A00053577', 'A00053578', 'A00053625', 'A00053626', 'A00053627', + 'A00053874', 'A00053901', 'A00053927', 'A00053949', 'A00054038', + 'A00054153', 'A00054173', 'A00054358', 'A00054482', 'A00054532', + 'A00054533', 'A00054534', 'A00054621', 'A00054895', 'A00054897', + 'A00054913', 'A00054929', 'A00055061', 'A00055215', 'A00055352', + 'A00055353', 'A00055542', 'A00055738', 'A00055763', 'A00055806', + 'A00056097', 'A00056098', 'A00056164', 'A00056372', 'A00056452', + 'A00056489', 'A00056949'] + + nitrc_ids = range(8260, 8470) + max_subjects = len(ids) + if n_subjects is None: + n_subjects = max_subjects + if n_subjects > max_subjects: + warnings.warn('Warning: there are only %d subjects' % max_subjects) + n_subjects = max_subjects + ids = ids[:n_subjects] + nitrc_ids = nitrc_ids[:n_subjects] + + # Dataset description + fdescr = _get_dataset_descr(dataset_name) + + # First, get the metadata + phenotypic_file = 'NKI_enhanced_surface_phenotypics.csv' + phenotypic = (phenotypic_file, url + '8470/pheno_nki_nilearn.csv', + {'move': phenotypic_file}) + + phenotypic = _fetch_files(data_dir, [phenotypic], resume=resume, + verbose=verbose)[0] + + # Load the csv file + phenotypic = np.genfromtxt(phenotypic, skip_header=True, + names=['Subject', 'Age', + 'Dominant Hand', 'Sex'], + delimiter=',', dtype=['U9', '>> from nilearn.datasets.neurovault import IsNull + >>> null = IsNull() + >>> null == 0 + True + >>> null == '' + True + >>> null == None + True + >>> null == 'a' + False + + """ + def __eq__(self, other): + return not bool(other) + + +class NotNull(_SpecialValue): + """Special value used to filter terms. + + An instance of this class will always be equal to, and only to, + any non-zero value of any type (by non-zero we mean for which bool + returns True). + + See Also + -------- + nilearn.datasets.neurovault.IsNull, + nilearn.datasets.neurovault.NotEqual, + nilearn.datasets.neurovault.GreaterOrEqual, + nilearn.datasets.neurovault.GreaterThan, + nilearn.datasets.neurovault.LessOrEqual, + nilearn.datasets.neurovault.LessThan, + nilearn.datasets.neurovault.IsIn, + nilearn.datasets.neurovault.NotIn, + nilearn.datasets.neurovault.Contains, + nilearn.datasets.neurovault.NotContains, + nilearn.datasets.neurovault.Pattern. 
+ + Examples + -------- + >>> from nilearn.datasets.neurovault import NotNull + >>> not_null = NotNull() + >>> not_null == 0 + False + >>> not_null == '' + False + >>> not_null == None + False + >>> not_null == 'a' + True + + """ + def __eq__(self, other): + return bool(other) + + +class NotEqual(_SpecialValue): + """Special value used to filter terms. + + An instance of this class is constructed with `NotEqual(obj)`. It + will always be equal to, and only to, any value for which + ``obj == value`` is ``False``. + + Parameters + ---------- + negated : object + The object from which a candidate should be different in order + to pass through the filter. + + See Also + -------- + nilearn.datasets.neurovault.IsNull, + nilearn.datasets.neurovault.NotNull, + nilearn.datasets.neurovault.GreaterOrEqual, + nilearn.datasets.neurovault.GreaterThan, + nilearn.datasets.neurovault.LessOrEqual, + nilearn.datasets.neurovault.LessThan, + nilearn.datasets.neurovault.IsIn, + nilearn.datasets.neurovault.NotIn, + nilearn.datasets.neurovault.Contains, + nilearn.datasets.neurovault.NotContains, + nilearn.datasets.neurovault.Pattern. + + Examples + -------- + >>> from nilearn.datasets.neurovault import NotEqual + >>> not_0 = NotEqual(0) + >>> not_0 == 0 + False + >>> not_0 == '0' + True + + """ + def __init__(self, negated): + self.negated_ = negated + self.repr_arg_ = self.negated_ + + def __eq__(self, other): + return not self.negated_ == other + + +class _OrderComp(_SpecialValue): + """Base class for special values based on order comparisons.""" + def __init__(self, bound): + self.bound_ = bound + self._cast = type(bound) + self.repr_arg_ = self.bound_ + + def __eq__(self, other): + try: + return self._eq_impl(self._cast(other)) + except (TypeError, ValueError): + return False + + +class GreaterOrEqual(_OrderComp): + """Special value used to filter terms. + + An instance of this class is constructed with `GreaterOrEqual(obj)`. It + will always be equal to, and only to, any value for which + ``obj <= value`` is ``True``. + + Parameters + ---------- + bound : object + The object to which a candidate should be superior or equal in + order to pass through the filter. + + See Also + -------- + nilearn.datasets.neurovault.IsNull, + nilearn.datasets.neurovault.NotNull, + nilearn.datasets.neurovault.NotEqual, + nilearn.datasets.neurovault.GreaterThan, + nilearn.datasets.neurovault.LessOrEqual, + nilearn.datasets.neurovault.LessThan, + nilearn.datasets.neurovault.IsIn, + nilearn.datasets.neurovault.NotIn, + nilearn.datasets.neurovault.Contains, + nilearn.datasets.neurovault.NotContains, + nilearn.datasets.neurovault.Pattern. + + Examples + -------- + >>> from nilearn.datasets.neurovault import GreaterOrEqual + >>> nonnegative = GreaterOrEqual(0.) + >>> nonnegative == -.1 + False + >>> nonnegative == 0 + True + >>> nonnegative == .1 + True + + """ + def _eq_impl(self, other): + return self.bound_ <= other + + +class GreaterThan(_OrderComp): + """Special value used to filter terms. + + An instance of this class is constructed with `GreaterThan(obj)`. It + will always be equal to, and only to, any value for which + ``obj < value`` is ``True``. + + Parameters + ---------- + bound : object + The object to which a candidate should be strictly superior in + order to pass through the filter. 
+ + See Also + -------- + nilearn.datasets.neurovault.IsNull, + nilearn.datasets.neurovault.NotNull, + nilearn.datasets.neurovault.NotEqual, + nilearn.datasets.neurovault.GreaterOrEqual, + nilearn.datasets.neurovault.LessOrEqual, + nilearn.datasets.neurovault.LessThan, + nilearn.datasets.neurovault.IsIn, + nilearn.datasets.neurovault.NotIn, + nilearn.datasets.neurovault.Contains, + nilearn.datasets.neurovault.NotContains, + nilearn.datasets.neurovault.Pattern. + + Examples + -------- + >>> from nilearn.datasets.neurovault import GreaterThan + >>> positive = GreaterThan(0.) + >>> positive == 0. + False + >>> positive == 1. + True + >>> positive == -1. + False + + """ + def _eq_impl(self, other): + return self.bound_ < other + + +class LessOrEqual(_OrderComp): + """Special value used to filter terms. + + An instance of this class is constructed with `LessOrEqual(obj)`. It + will always be equal to, and only to, any value for which + ``value <= obj`` is ``True``. + + Parameters + ---------- + bound : object + The object to which a candidate should be inferior or equal in + order to pass through the filter. + + See Also + -------- + nilearn.datasets.neurovault.IsNull, + nilearn.datasets.neurovault.NotNull, + nilearn.datasets.neurovault.NotEqual, + nilearn.datasets.neurovault.GreaterOrEqual, + nilearn.datasets.neurovault.GreaterThan, + nilearn.datasets.neurovault.LessThan, + nilearn.datasets.neurovault.IsIn, + nilearn.datasets.neurovault.NotIn, + nilearn.datasets.neurovault.Contains, + nilearn.datasets.neurovault.NotContains, + nilearn.datasets.neurovault.Pattern. + + Examples + -------- + >>> from nilearn.datasets.neurovault import LessOrEqual + >>> nonpositive = LessOrEqual(0.) + >>> nonpositive == -1. + True + >>> nonpositive == 0. + True + >>> nonpositive == 1. + False + + """ + def _eq_impl(self, other): + return other <= self.bound_ + + +class LessThan(_OrderComp): + """Special value used to filter terms. + + An instance of this class is constructed with `LessThan(obj)`. It + will always be equal to, and only to, any value for which + ``value < obj`` is ``True``. + + Parameters + ---------- + bound : object + The object to which a candidate should be strictly inferior in + order to pass through the filter. + + See Also + -------- + nilearn.datasets.neurovault.IsNull, + nilearn.datasets.neurovault.NotNull, + nilearn.datasets.neurovault.NotEqual, + nilearn.datasets.neurovault.GreaterOrEqual, + nilearn.datasets.neurovault.GreaterThan, + nilearn.datasets.neurovault.LessOrEqual, + nilearn.datasets.neurovault.IsIn, + nilearn.datasets.neurovault.NotIn, + nilearn.datasets.neurovault.Contains, + nilearn.datasets.neurovault.NotContains, + nilearn.datasets.neurovault.Pattern. + + Examples + -------- + >>> from nilearn.datasets.neurovault import LessThan + >>> negative = LessThan(0.) + >>> negative == -1. + True + >>> negative == 0. + False + >>> negative == 1. + False + + """ + def _eq_impl(self, other): + return other < self.bound_ + + +class IsIn(_SpecialValue): + """Special value used to filter terms. + + An instance of this class is constructed with + `IsIn(*accepted)`. It will always be equal to, and only to, any + value for which ``value in accepted`` is ``True``. + + Parameters + ---------- + accepted : container + A value will pass through the filter if it is present in + `accepted`. 
+
+    See Also
+    --------
+    nilearn.datasets.neurovault.IsNull,
+    nilearn.datasets.neurovault.NotNull,
+    nilearn.datasets.neurovault.NotEqual,
+    nilearn.datasets.neurovault.GreaterOrEqual,
+    nilearn.datasets.neurovault.GreaterThan,
+    nilearn.datasets.neurovault.LessOrEqual,
+    nilearn.datasets.neurovault.LessThan,
+    nilearn.datasets.neurovault.NotIn,
+    nilearn.datasets.neurovault.Contains,
+    nilearn.datasets.neurovault.NotContains,
+    nilearn.datasets.neurovault.Pattern.
+
+    Examples
+    --------
+    >>> from nilearn.datasets.neurovault import IsIn
+    >>> vowels = IsIn('a', 'e', 'i', 'o', 'u', 'y')
+    >>> 'a' == vowels
+    True
+    >>> vowels == 'b'
+    False
+
+    """
+    def __init__(self, *accepted):
+        self.accepted_ = accepted
+
+    def __eq__(self, other):
+        return other in self.accepted_
+
+    def __repr__(self):
+        return '{0}{1!r}'.format(
+            self.__class__.__name__, self.accepted_)
+
+
+class NotIn(_SpecialValue):
+    """Special value used to filter terms.
+
+    An instance of this class is constructed with
+    `NotIn(*rejected)`. It will always be equal to, and only to, any
+    value for which ``value in rejected`` is ``False``.
+
+    Parameters
+    ----------
+    rejected : container
+        A value will pass through the filter if it is absent from
+        `rejected`.
+
+    See Also
+    --------
+    nilearn.datasets.neurovault.IsNull,
+    nilearn.datasets.neurovault.NotNull,
+    nilearn.datasets.neurovault.NotEqual,
+    nilearn.datasets.neurovault.GreaterOrEqual,
+    nilearn.datasets.neurovault.GreaterThan,
+    nilearn.datasets.neurovault.LessOrEqual,
+    nilearn.datasets.neurovault.LessThan,
+    nilearn.datasets.neurovault.IsIn,
+    nilearn.datasets.neurovault.Contains,
+    nilearn.datasets.neurovault.NotContains,
+    nilearn.datasets.neurovault.Pattern.
+
+    Examples
+    --------
+    >>> from nilearn.datasets.neurovault import NotIn
+    >>> consonants = NotIn('a', 'e', 'i', 'o', 'u', 'y')
+    >>> 'b' == consonants
+    True
+    >>> consonants == 'a'
+    False
+
+    """
+    def __init__(self, *rejected):
+        self.rejected_ = rejected
+
+    def __eq__(self, other):
+        return other not in self.rejected_
+
+    def __repr__(self):
+        return '{0}{1!r}'.format(
+            self.__class__.__name__, self.rejected_)
+
+
+class Contains(_SpecialValue):
+    """Special value used to filter terms.
+
+    An instance of this class is constructed with
+    `Contains(*must_be_contained)`. It will always be equal to, and
+    only to, any value for which ``item in value`` is ``True`` for
+    every item in ``must_be_contained``.
+
+    Parameters
+    ----------
+    must_be_contained : container
+        A value will pass through the filter if it contains all the
+        items in ``must_be_contained``.
+
+    See Also
+    --------
+    nilearn.datasets.neurovault.IsNull,
+    nilearn.datasets.neurovault.NotNull,
+    nilearn.datasets.neurovault.NotEqual,
+    nilearn.datasets.neurovault.GreaterOrEqual,
+    nilearn.datasets.neurovault.GreaterThan,
+    nilearn.datasets.neurovault.LessOrEqual,
+    nilearn.datasets.neurovault.LessThan,
+    nilearn.datasets.neurovault.IsIn,
+    nilearn.datasets.neurovault.NotIn,
+    nilearn.datasets.neurovault.NotContains,
+    nilearn.datasets.neurovault.Pattern.
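+
+    Notes
+    -----
+    A candidate that is not a container never matches: for example,
+    ``Contains('a') == 12`` is ``False``, since an integer cannot
+    contain anything.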
+
+    Examples
+    --------
+    >>> from nilearn.datasets.neurovault import Contains
+    >>> contains = Contains('house', 'face')
+    >>> 'face vs house' == contains
+    True
+    >>> 'smiling face vs frowning face' == contains
+    False
+
+    """
+    def __init__(self, *must_be_contained):
+        self.must_be_contained_ = must_be_contained
+
+    def __eq__(self, other):
+        if not isinstance(other, Container):
+            return False
+        for item in self.must_be_contained_:
+            if item not in other:
+                return False
+        return True
+
+    def __repr__(self):
+        return '{0}{1!r}'.format(
+            self.__class__.__name__, self.must_be_contained_)
+
+
+class NotContains(_SpecialValue):
+    """Special value used to filter terms.
+
+    An instance of this class is constructed with
+    `NotContains(*must_not_be_contained)`. It will always be equal
+    to, and only to, any value for which ``item in value`` is
+    ``False`` for every item in ``must_not_be_contained``.
+
+    Parameters
+    ----------
+    must_not_be_contained : container
+        A value will pass through the filter if it does not contain
+        any of the items in ``must_not_be_contained``.
+
+    See Also
+    --------
+    nilearn.datasets.neurovault.IsNull,
+    nilearn.datasets.neurovault.NotNull,
+    nilearn.datasets.neurovault.NotEqual,
+    nilearn.datasets.neurovault.GreaterOrEqual,
+    nilearn.datasets.neurovault.GreaterThan,
+    nilearn.datasets.neurovault.LessOrEqual,
+    nilearn.datasets.neurovault.LessThan,
+    nilearn.datasets.neurovault.IsIn,
+    nilearn.datasets.neurovault.NotIn,
+    nilearn.datasets.neurovault.Contains,
+    nilearn.datasets.neurovault.Pattern.
+
+    Examples
+    --------
+    >>> from nilearn.datasets.neurovault import NotContains
+    >>> no_garbage = NotContains('bad', 'test')
+    >>> no_garbage == 'test image'
+    False
+    >>> no_garbage == 'good image'
+    True
+
+    """
+    def __init__(self, *must_not_be_contained):
+        self.must_not_be_contained_ = must_not_be_contained
+
+    def __eq__(self, other):
+        if not isinstance(other, Container):
+            return False
+        for item in self.must_not_be_contained_:
+            if item in other:
+                return False
+        return True
+
+    def __repr__(self):
+        return '{0}{1!r}'.format(
+            self.__class__.__name__, self.must_not_be_contained_)
+
+
+class Pattern(_SpecialValue):
+    """Special value used to filter terms.
+
+    An instance of this class is constructed with
+    `Pattern(pattern[, flags])`. It will always be equal to, and only
+    to, any value for which ``re.match(pattern, value, flags)``
+    returns a match (i.e. is not ``None``).
+
+    Parameters
+    ----------
+    pattern : str
+        The pattern to try to match to candidates.
+
+    flags : int, optional (default=0)
+        Value for ``re.match`` `flags` parameter,
+        e.g. ``re.IGNORECASE``. The default (0) is also the default
+        used by ``re.match``.
+
+    See Also
+    --------
+    nilearn.datasets.neurovault.IsNull,
+    nilearn.datasets.neurovault.NotNull,
+    nilearn.datasets.neurovault.NotEqual,
+    nilearn.datasets.neurovault.GreaterOrEqual,
+    nilearn.datasets.neurovault.GreaterThan,
+    nilearn.datasets.neurovault.LessOrEqual,
+    nilearn.datasets.neurovault.LessThan,
+    nilearn.datasets.neurovault.IsIn,
+    nilearn.datasets.neurovault.NotIn,
+    nilearn.datasets.neurovault.Contains,
+    nilearn.datasets.neurovault.NotContains.
+
+    Documentation for the standard library ``re`` module.
+
+    Examples
+    --------
+    >>> from nilearn.datasets.neurovault import Pattern
+    >>> poker = Pattern(r'[0-9akqj]{5}$')
+    >>> 'ak05q' == poker
+    True
+    >>> 'ak05e' == poker
+    False
+
+    """
+    def __init__(self, pattern, flags=0):
+        # Don't use re.compile because compiled patterns
+        # can't be deepcopied.
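+        # The standard library keeps its own cache of compiled
+        # patterns, so matching on the raw string stays cheap.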
+        self.pattern_ = pattern
+        self.flags_ = flags
+
+    def __eq__(self, other):
+        if not isinstance(other, _basestring) or re.match(
+                self.pattern_, other, self.flags_) is None:
+            return False
+        return True
+
+    def __repr__(self):
+        return '{0}(pattern={1!r}, flags={2})'.format(
+            self.__class__.__name__, self.pattern_, self.flags_)
+
+
+def _empty_filter(arg):
+    """Placeholder for a filter which always returns True.
+
+    This is the default ``image_filter`` and ``collection_filter``
+    argument for ``fetch_neurovault``.
+
+    """
+    return True
+
+
+class ResultFilter(object):
+    """Easily create callable (local) filters for ``fetch_neurovault``.
+
+    Constructed from a mapping of key-value pairs (optional) and a
+    callable filter (also optional), instances of this class are meant
+    to be used as ``image_filter`` or ``collection_filter`` parameters
+    for ``fetch_neurovault``.
+
+    Such filters can be combined using their methods ``AND``, ``OR``,
+    ``XOR``, and ``NOT``, with the usual semantics.
+
+    Key-value pairs can be added by treating a ``ResultFilter`` as a
+    dictionary: after evaluating ``res_filter[key] = value``, only
+    metadata such that ``metadata[key] == value`` can pass through the
+    filter.
+
+    Parameters
+    ----------
+    query_terms : dict, optional (default=None)
+        A ``metadata`` dictionary will be blocked by the filter if it
+        does not respect ``metadata[key] == value`` for all
+        ``key``, ``value`` pairs in `query_terms`. If ``None``, the
+        empty dictionary is used.
+
+    callable_filter : callable, optional (default=_empty_filter)
+        A ``metadata`` dictionary will be blocked by the filter if
+        `callable_filter` does not return ``True`` for ``metadata``.
+
+    As an alternative to the `query_terms` dictionary parameter,
+    key, value pairs can be passed as keyword arguments.
+
+    Attributes
+    ----------
+    query_terms_ : dict
+        In order to pass through the filter, metadata must satisfy
+        ``metadata[key] == value`` for each ``key``, ``value`` pair in
+        `query_terms_`.
+
+    callable_filters_ : list of callables
+        In addition to ``(key, value)`` pairs, we can use this
+        attribute to specify more elaborate requirements. Called with
+        a dict representing metadata for an image or collection, each
+        element of this list returns ``True`` if the metadata should
+        pass through the filter and ``False`` otherwise.
+
+    A dict of metadata will only pass through the filter if it
+    satisfies all the `query_terms` AND all the elements of
+    `callable_filters_`.
+
+    See Also
+    --------
+    nilearn.datasets.neurovault.IsNull,
+    nilearn.datasets.neurovault.NotNull,
+    nilearn.datasets.neurovault.NotEqual,
+    nilearn.datasets.neurovault.GreaterOrEqual,
+    nilearn.datasets.neurovault.GreaterThan,
+    nilearn.datasets.neurovault.LessOrEqual,
+    nilearn.datasets.neurovault.LessThan,
+    nilearn.datasets.neurovault.IsIn,
+    nilearn.datasets.neurovault.NotIn,
+    nilearn.datasets.neurovault.Contains,
+    nilearn.datasets.neurovault.NotContains,
+    nilearn.datasets.neurovault.Pattern.
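+
+    Notes
+    -----
+    The combination methods (``AND``, ``OR``, ``XOR`` and ``NOT``)
+    operate on deep copies of the filters they combine, so mutating a
+    filter after combining it does not affect the combined filter.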
+
+    Examples
+    --------
+    >>> from nilearn.datasets.neurovault import ResultFilter
+    >>> filt = ResultFilter(a=0).AND(ResultFilter(b=1).OR(ResultFilter(b=2)))
+    >>> filt({'a': 0, 'b': 1})
+    True
+    >>> filt({'a': 0, 'b': 0})
+    False
+
+    """
+
+    def __init__(self, query_terms=None,
+                 callable_filter=_empty_filter, **kwargs):
+        if query_terms is None:
+            query_terms = {}
+        query_terms = dict(query_terms, **kwargs)
+        self.query_terms_ = query_terms
+        self.callable_filters_ = [callable_filter]
+
+    def __call__(self, candidate):
+        """Return True if candidate satisfies the requirements.
+
+        Parameters
+        ----------
+        candidate : dict
+            A dictionary representing metadata for a file or a
+            collection, to be filtered.
+
+        Returns
+        -------
+        bool
+            ``True`` if `candidate` passes through the filter and ``False``
+            otherwise.
+
+        """
+        for key, value in self.query_terms_.items():
+            if not (value == candidate.get(key)):
+                return False
+        for callable_filter in self.callable_filters_:
+            if not callable_filter(candidate):
+                return False
+        return True
+
+    def OR(self, other_filter):
+        """Combine two filters with a logical or."""
+        filt1, filt2 = deepcopy(self), deepcopy(other_filter)
+        new_filter = ResultFilter(
+            callable_filter=lambda r: filt1(r) or filt2(r))
+        return new_filter
+
+    def AND(self, other_filter):
+        """Combine two filters with a logical and."""
+        filt1, filt2 = deepcopy(self), deepcopy(other_filter)
+        new_filter = ResultFilter(
+            callable_filter=lambda r: filt1(r) and filt2(r))
+        return new_filter
+
+    def XOR(self, other_filter):
+        """Combine two filters with an exclusive or."""
+        filt1, filt2 = deepcopy(self), deepcopy(other_filter)
+        new_filter = ResultFilter(
+            callable_filter=lambda r: filt1(r) != filt2(r))
+        return new_filter
+
+    def NOT(self):
+        """Negate this filter."""
+        filt = deepcopy(self)
+        new_filter = ResultFilter(
+            callable_filter=lambda r: not filt(r))
+        return new_filter
+
+    def __getitem__(self, item):
+        """Get item from query_terms_"""
+        return self.query_terms_[item]
+
+    def __setitem__(self, item, value):
+        """Set item in query_terms_"""
+        self.query_terms_[item] = value
+
+    def __delitem__(self, item):
+        """Remove item from query_terms_"""
+        if item in self.query_terms_:
+            del self.query_terms_[item]
+
+    def add_filter(self, callable_filter):
+        """Add a function to the callable_filters_.
+
+        After a call add_filter(additional_filt), in addition to all
+        the previous requirements, a candidate must also satisfy
+        additional_filt(candidate) in order to pass through the
+        filter.
+
+        """
+        self.callable_filters_.append(callable_filter)
+
+    def __str__(self):
+        return self.__class__.__name__
+
+
+# Utilities for composing queries and interacting with
+# neurovault and neurosynth
+
+class _TemporaryDirectory(object):
+    """Context manager that provides a temporary directory.
+
+    A temporary directory is created on ``__enter__``
+    and removed on ``__exit__``.
+
+    Attributes
+    ----------
+    temp_dir_ : str or None
+        location of temporary directory or None if not created.
+
+    """
+
+    def __init__(self):
+        self.temp_dir_ = None
+
+    def __enter__(self):
+        self.temp_dir_ = mkdtemp()
+        return self.temp_dir_
+
+    def __exit__(self, *args):
+        if self.temp_dir_ is None:
+            return
+        shutil.rmtree(self.temp_dir_)
+        self.temp_dir_ = None
+
+
+def _print_if(message, level, threshold_level,
+              with_traceback=False):
+    """Print a message if its importance level does not exceed a threshold.
+
+    Parameters
+    ----------
+    message : str
+        the message to print if `level` does not exceed
+        `threshold_level`.
+
+    level : int
+        importance of the message.
+
+    threshold_level : int
+        the message is printed if `level` is less than or equal to
+        `threshold_level`.
+
+    with_traceback : bool, optional (default=False)
+        if `message` is printed, also print the last traceback.
+
+    """
+    if level > threshold_level:
+        return
+    print(message)
+    if with_traceback:
+        traceback.print_exc()
+
+
+def _append_filters_to_query(query, filters):
+    """Encode a dict or sequence of key-value pairs into a URL query string.
+
+    Parameters
+    ----------
+    query : str
+        URL to which the filters should be appended.
+
+    filters : dict or sequence of pairs
+        Filters to append to the URL.
+
+    Returns
+    -------
+    str
+        The query with filters appended to it.
+
+    Notes
+    -----
+    If one of the `filters` keys is 'id', we get the url that points
+    directly to that id,
+    e.g. 'http://neurovault.org/api/collections/40', and the other
+    filters are ignored.
+
+    """
+    if not filters:
+        return query
+    if 'id' in filters:
+        return urljoin(query, str(filters['id']))
+    new_query = urljoin(
+        query, '?{0}'.format(urlencode(filters)))
+    return new_query
+
+
+def _get_encoding(resp):
+    """Get the encoding of an HTTP response.
+
+    Parameters
+    ----------
+    resp : http.client.HTTPResponse
+        Response whose encoding we want to find out.
+
+    Returns
+    -------
+    str
+        str representing the encoding, e.g. 'utf-8'.
+
+    Raises
+    ------
+    ValueError
+        If the response does not specify an encoding.
+
+    """
+    try:
+        charset = resp.headers.get_content_charset()
+        if charset is not None:
+            return charset
+    except AttributeError:
+        pass
+    content_type = resp.headers.get('Content-Type', '')
+    match = re.search(r'charset=\b(.+)\b', content_type)
+    if match is None:
+        raise ValueError(
+            'HTTP response encoding not found; headers: {0}'.format(
+                resp.headers))
+    return match.group(1)
+
+
+def _get_batch(query, prefix_msg='', timeout=10., verbose=3):
+    """Given a URL, get the HTTP response and transform it into a Python dict.
+
+    The URL is used to send an HTTP GET request and the response is
+    transformed into a dictionary.
+
+    Parameters
+    ----------
+    query : str
+        The URL from which to get data.
+
+    prefix_msg : str, optional (default='')
+        Prefix for all log messages.
+
+    timeout : float, optional (default=10.)
+        Timeout in seconds.
+
+    verbose : int, optional (default=3)
+        an integer in [0, 1, 2, 3] to control the verbosity level.
+
+    Returns
+    -------
+    batch : dict
+        Python dict representing the response's content.
+
+    Raises
+    ------
+    urllib.error.URLError
+        If there was a problem opening the URL.
+
+    ValueError
+        If the response could not be decoded, or did not contain
+        either 'id' (single result), or 'results' and 'count' (actual
+        batch).
+
+    Notes
+    -----
+    urllib.error.HTTPError is a subclass of URLError.
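+
+    A response containing a single result (an 'id' key at the top
+    level) is wrapped into a batch of size one.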
+ + """ + request = Request(query) + request.add_header('Connection', 'Keep-Alive') + opener = build_opener() + _print_if('{0}getting new batch: {1}'.format( + prefix_msg, query), _DEBUG, verbose) + try: + resp = opener.open(request, timeout=timeout) + + except Exception: + _print_if('Could not download batch from {0}'.format(query), + _ERROR, verbose, with_traceback=True) + raise + try: + encoding = _get_encoding(resp) + content = resp.read() + batch = json.loads(content.decode(encoding)) + except(URLError, ValueError): + _print_if('Could not decypher batch from {0}'.format(query), + _ERROR, verbose, with_traceback=True) + raise + finally: + resp.close() + if 'id' in batch: + batch = {'count': 1, 'results': [batch]} + for key in ['results', 'count']: + if batch.get(key) is None: + msg = ('Could not find required key "{0}" ' + 'in batch retrieved from {1}'.format(key, query)) + _print_if(msg, _ERROR, verbose) + raise ValueError(msg) + + return batch + + +def _scroll_server_results(url, local_filter=_empty_filter, + query_terms=None, max_results=None, + batch_size=None, prefix_msg='', verbose=3): + """Download list of metadata from Neurovault. + + Parameters + ---------- + url : str + The base url (without the filters) from which to get data. + + local_filter : callable, optional (default=_empty_filter) + Used to filter the results based on their metadata: + must return True if the result is to be kept and False otherwise. + Is called with the dict containing the metadata as sole argument. + + query_terms : dict, sequence of pairs or None, optional (default=None) + Key-value pairs to add to the base url in order to form query. + If ``None``, nothing is added to the url. + + max_results: int or None, optional (default=None) + Maximum number of results to fetch; if ``None``, all available data + that matches the query is fetched. + + batch_size: int or None, optional (default=None) + Neurovault returns the metadata for hits corresponding to a query + in batches. batch_size is used to choose the (maximum) number of + elements in a batch. If None, ``_DEFAULT_BATCH_SIZE`` is used. + + prefix_msg: str, optional (default='') + Prefix for all log messages. + + verbose : int, optional (default=3) + an integer in [0, 1, 2, 3] to control the verbosity level. + + Yields + ------ + result : dict + A result in the retrieved batch. + + None + Once for each batch that could not be downloaded or decoded, + to indicate a failure. + + """ + query = _append_filters_to_query(url, query_terms) + if batch_size is None: + batch_size = _DEFAULT_BATCH_SIZE + query = '{0}{1}limit={2}&offset={{0}}'.format( + query, ('&' if '?' in query else '?'), batch_size) + downloaded = 0 + n_available = None + while(max_results is None or downloaded < max_results): + new_query = query.format(downloaded) + try: + batch = _get_batch(new_query, prefix_msg, verbose=verbose) + except Exception: + yield None + batch = None + if batch is not None: + batch_size = len(batch['results']) + downloaded += batch_size + _print_if('{0}batch size: {1}'.format(prefix_msg, batch_size), + _DEBUG, verbose) + if n_available is None: + n_available = batch['count'] + max_results = (n_available if max_results is None + else min(max_results, n_available)) + for result in batch['results']: + if local_filter(result): + yield result + + +def _yield_from_url_list(url_list, verbose=3): + """Get metadata coming from an explicit list of URLs. 
+
+    This is different from ``_scroll_server_results``, which is used
+    to get all the metadata that matches certain filters.
+
+    Parameters
+    ----------
+    url_list : Container of str
+        URLs from which to get data.
+
+    verbose : int, optional (default=3)
+        an integer in [0, 1, 2, 3] to control the verbosity level.
+
+    Yields
+    ------
+    content : dict
+        The metadata from one URL.
+
+    None
+        Once for each URL that resulted in an error, to signify failure.
+
+    """
+    for url in url_list:
+        try:
+            batch = _get_batch(url, verbose=verbose)
+        except Exception:
+            yield None
+            batch = None
+        if batch is not None:
+            yield batch['results'][0]
+
+
+def _simple_download(url, target_file, temp_dir, verbose=3):
+    """Wrapper around ``utils._fetch_file``.
+
+    This allows specifying the target file name.
+
+    Parameters
+    ----------
+    url : str
+        URL of the file to download.
+
+    target_file : str
+        Location of the downloaded file on filesystem.
+
+    temp_dir : str
+        Location of sandbox directory used by ``_fetch_file``.
+
+    verbose : int, optional (default=3)
+        an integer in [0, 1, 2, 3] to control the verbosity level.
+
+    Returns
+    -------
+    target_file : str
+        The location in which the file was downloaded.
+
+    Raises
+    ------
+    URLError, ValueError
+        If an error occurred when downloading the file.
+
+    See Also
+    --------
+    nilearn.datasets._utils._fetch_file
+
+    Notes
+    -----
+    It can happen that an HTTP error that occurs inside
+    ``_fetch_file`` gets transformed into an ``AttributeError`` when
+    we try to set the ``reason`` attribute of the exception raised;
+    here we replace it with a ``URLError``.
+
+    """
+    _print_if('Downloading file: {0}'.format(url), _DEBUG, verbose)
+    try:
+        downloaded = _fetch_file(url, temp_dir, resume=False,
+                                 overwrite=True, verbose=0)
+    except Exception as e:
+        _print_if('Problem downloading file from {0}'.format(url),
+                  _ERROR, verbose)
+
+        # reason is a property of urllib.error.HTTPError objects,
+        # but these objects don't have a setter for it, so
+        # an HTTPError raised in _fetch_file might be transformed
+        # into an AttributeError when we try to set its reason attribute
+        if (isinstance(e, AttributeError) and
+                e.args[0] == "can't set attribute"):
+            raise URLError(
+                'HTTPError raised in nilearn.datasets._fetch_file: '
+                '{0}'.format(traceback.format_exc()))
+        raise
+    shutil.move(downloaded, target_file)
+    _print_if(
+        'Download succeeded, downloaded to: {0}'.format(target_file),
+        _DEBUG, verbose)
+    return target_file
+
+
+def neurosynth_words_vectorized(word_files, verbose=3, **kwargs):
+    """Load Neurosynth data from disk into an (n images, voc size) matrix.
+
+    Neurosynth data is saved on disk as ``{word: weight}``
+    dictionaries for each image; this function reads them and returns a
+    vocabulary list and a term weight matrix.
+
+    Parameters
+    ----------
+    word_files : Container
+        The paths to the files from which to read word weights (each
+        is supposed to contain the Neurosynth response for a
+        particular image).
+
+    verbose : int, optional (default=3)
+        an integer in [0, 1, 2, 3] to control the verbosity level.
+
+    Keyword arguments are passed on to
+    ``sklearn.feature_extraction.DictVectorizer``.
+
+    Returns
+    -------
+    frequencies : numpy.ndarray
+        An (n images, vocabulary size) array. Each row corresponds to
+        an image, and each column corresponds to a word. The words are
+        in the same order as in returned value `vocabulary`, so that
+        `frequencies[i, j]` corresponds to the weight of
+        `vocabulary[j]` for image ``i``. This matrix is computed by
+        an ``sklearn.feature_extraction.DictVectorizer`` instance.
+
+    vocabulary : list of str
+        A list of all the words encountered in the word files.
+
+    See Also
+    --------
+    sklearn.feature_extraction.DictVectorizer
+
+    """
+    _print_if('Computing word features.', _INFO, verbose)
+    words = []
+    voc_empty = True
+    for file_name in word_files:
+        try:
+            with open(file_name, 'rb') as word_file:
+                info = json.loads(word_file.read().decode('utf-8'))
+            words.append(info['data']['values'])
+            if info['data']['values'] != {}:
+                voc_empty = False
+        except Exception:
+            _print_if(
+                'Could not load words from file {0}; error: {1}'.format(
+                    file_name, traceback.format_exc()),
+                _ERROR, verbose)
+            words.append({})
+    if voc_empty:
+        warnings.warn('No word weight could be loaded, '
+                      'vectorizing Neurosynth words failed.')
+        return None, None
+    vectorizer = DictVectorizer(**kwargs)
+    frequencies = vectorizer.fit_transform(words).toarray()
+    vocabulary = np.asarray(vectorizer.feature_names_)
+    _print_if('Computing word features done; vocabulary size: {0}'.format(
+        vocabulary.size), _INFO, verbose)
+    return frequencies, vocabulary
+
+
+def _remove_none_strings(metadata):
+    """Replace strings representing a null value with ``None``.
+
+    Some collections and images in Neurovault, for some fields, use the
+    string "None", "None / Other", or "null", instead of having ``null``
+    in the json file; we replace these strings with ``None`` so that
+    they are consistent with the rest and behave correctly when we
+    want to select or filter out null values.
+
+    Parameters
+    ----------
+    metadata : dict
+        Metadata to transform.
+
+    Returns
+    -------
+    metadata : dict
+        Original metadata in which strings representing null values
+        have been replaced by ``None``.
+
+    """
+    metadata = metadata.copy()
+    for key, value in metadata.items():
+        if (isinstance(value, _basestring) and
+                re.match(r'($|n/?a$|none|null)', value, re.IGNORECASE)):
+            metadata[key] = None
+    return metadata
+
+
+def _write_metadata(metadata, file_name):
+    """Save metadata to disk.
+
+    Absolute paths are not written; they are recomputed using the
+    relative paths when data is loaded again, so that the paths remain
+    valid if the Neurovault directory has been moved.
+
+    Parameters
+    ----------
+    metadata : dict
+        Dictionary representing metadata for a file or a
+        collection. Any key containing 'absolute' is ignored.
+
+    file_name : str
+        Path to the file in which to write the data.
+
+    """
+    metadata = dict([(k, v) for k, v in metadata.items() if
+                     'absolute' not in k])
+    with open(file_name, 'wb') as metadata_file:
+        metadata_file.write(json.dumps(metadata).encode('utf-8'))
+
+
+def _add_absolute_paths(root_dir, metadata, force=True):
+    """Add absolute paths to a dictionary containing relative paths.
+
+    Parameters
+    ----------
+    root_dir : str
+        The root of the data directory, to prepend to relative paths
+        in order to form absolute paths.
+
+    metadata : dict
+        Dictionary containing metadata for a file or a collection. Any
+        key containing 'relative' is understood to be mapped to a
+        relative path and the corresponding absolute path is added to
+        the dictionary.
+
+    force : bool, optional (default=True)
+        If ``True``, if an absolute path is already present in the
+        metadata, it is replaced with the recomputed value. If
+        ``False``, already specified absolute paths have priority.
+
+    Returns
+    -------
+    metadata : dict
+        The metadata enriched with absolute paths.
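+
+    Examples
+    --------
+    A minimal illustration (assuming a POSIX path separator):
+
+    >>> meta = {'relative_path': 'collection_1'}
+    >>> meta = _add_absolute_paths('/tmp/neurovault', meta)
+    >>> meta['absolute_path']
+    '/tmp/neurovault/collection_1'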
+ + """ + absolute_paths = {} + for name, value in metadata.items(): + match = re.match(r'(.*)relative_path(.*)', name) + if match is not None: + abs_name = '{0}absolute_path{1}'.format(*match.groups()) + absolute_paths[abs_name] = os.path.join(root_dir, value) + if not absolute_paths: + return metadata + new_metadata = metadata.copy() + set_func = new_metadata.__setitem__ if force else new_metadata.setdefault + for name, value in absolute_paths.items(): + set_func(name, value) + return new_metadata + + +def _json_from_file(file_name): + """Load a json file encoded with UTF-8.""" + with open(file_name, 'rb') as dumped: + loaded = json.loads(dumped.read().decode('utf-8')) + return loaded + + +def _json_add_collection_dir(file_name, force=True): + """Load a json file and add is parent dir to resulting dict.""" + loaded = _json_from_file(file_name) + set_func = loaded.__setitem__ if force else loaded.setdefault + dir_path = os.path.dirname(file_name) + set_func('absolute_path', dir_path) + set_func('relative_path', os.path.basename(dir_path)) + return loaded + + +def _json_add_im_files_paths(file_name, force=True): + """Load a json file and add image and words paths.""" + loaded = _json_from_file(file_name) + set_func = loaded.__setitem__ if force else loaded.setdefault + dir_path = os.path.dirname(file_name) + dir_relative_path = os.path.basename(dir_path) + image_file_name = 'image_{0}.nii.gz'.format(loaded['id']) + words_file_name = 'neurosynth_words_for_image_{0}.json'.format( + loaded['id']) + set_func('relative_path', os.path.join(dir_relative_path, image_file_name)) + if os.path.isfile(os.path.join(dir_path, words_file_name)): + set_func('ns_words_relative_path', + os.path.join(dir_relative_path, words_file_name)) + loaded = _add_absolute_paths( + os.path.dirname(dir_path), loaded, force=force) + return loaded + + +def _download_collection(collection, download_params): + """Create directory and download metadata for a collection. + + Parameters + ---------- + collection : dict + Collection metadata. + + download_params : dict + General information about download session, containing e.g. the + data directory (see `_read_download_params` and + `_prepare_download_params for details`) + + Returns + ------- + collection : dict + Collection metadata, with local path added to it. + + """ + if collection is None: + return None + collection = _remove_none_strings(collection) + collection_id = collection['id'] + collection_name = 'collection_{0}'.format(collection_id) + collection_dir = os.path.join(download_params['nv_data_dir'], + collection_name) + collection['relative_path'] = collection_name + collection['absolute_path'] = collection_dir + if not os.path.isdir(collection_dir): + os.makedirs(collection_dir) + metadata_file_path = os.path.join(collection_dir, + 'collection_metadata.json') + _write_metadata(collection, metadata_file_path) + return collection + + +def _fetch_collection_for_image(image_info, download_params): + """Find the collection metadata for an image. + + If necessary, the collection metadata is downloaded and its + directory is created. + + Parameters + ---------- + image_info : dict + Image metadata. + + download_params : dict + General information about download session, containing e.g. the + data directory (see `_read_download_params` and + `_prepare_download_params for details`) + + Returns + ------- + collection : dict + The collection metadata. 
+ + """ + collection_id = image_info['collection_id'] + collection_relative_path = 'collection_{0}'.format(collection_id) + collection_absolute_path = os.path.join( + download_params['nv_data_dir'], collection_relative_path) + if not os.path.isdir(collection_absolute_path): + col_batch = _get_batch(urljoin( + _NEUROVAULT_COLLECTIONS_URL, str(collection_id)), + verbose=download_params['verbose']) + collection = _download_collection( + col_batch['results'][0], download_params) + else: + collection = _json_add_collection_dir(os.path.join( + collection_absolute_path, 'collection_metadata.json')) + + return collection + + +def _download_image_nii_file(image_info, collection, download_params): + """Download an image (.nii.gz) file from Neurovault. + + Parameters + ---------- + image_info : dict + Image metadata. + + collection : dict + Corresponding collection metadata. + + download_params : dict + General information about download session, containing e.g. the + data directory (see `_read_download_params` and + `_prepare_download_params for details`) + + Returns + ------- + image_info : dict + Image metadata with local paths added to it. + + collection : dict + Corresponding collection metadata with local paths added to it. + + """ + image_info = image_info.copy() + image_id = image_info['id'] + image_url = image_info['file'] + image_file_name = 'image_{0}.nii.gz'.format(image_id) + image_relative_path = os.path.join( + collection['relative_path'], image_file_name) + image_absolute_path = os.path.join( + collection['absolute_path'], image_file_name) + _simple_download( + image_url, image_absolute_path, + download_params['temp_dir'], verbose=download_params['verbose']) + image_info['absolute_path'] = image_absolute_path + image_info['relative_path'] = image_relative_path + return image_info, collection + + +def _check_has_words(file_name): + if not os.path.isfile(file_name): + return False + info = _remove_none_strings(_json_from_file(file_name)) + try: + assert len(info['data']['values']) + return True + except (AttributeError, TypeError, AssertionError): + pass + os.remove(file_name) + return False + + +def _download_image_terms(image_info, collection, download_params): + """Download Neurosynth words for an image. + + Parameters + ---------- + image_info : dict + Image metadata. + + collection : dict + Corresponding collection metadata. + + download_params : dict + General information about download session, containing e.g. the + data directory (see `_read_download_params` and + `_prepare_download_params for details`) + + Returns + ------- + image_info : dict + Image metadata with neurosynth words file path added to it. + + collection : dict + Corresponding collection metadata. 
+ + """ + + if not download_params['fetch_neurosynth_words']: + return image_info, collection + + ns_words_file_name = 'neurosynth_words_for_image_{0}.json'.format( + image_info['id']) + image_info = image_info.copy() + image_info['ns_words_relative_path'] = os.path.join( + collection['relative_path'], ns_words_file_name) + image_info['ns_words_absolute_path'] = os.path.join( + collection['absolute_path'], ns_words_file_name) + + if os.path.isfile(image_info['ns_words_absolute_path']): + return image_info, collection + + query = urljoin(_NEUROSYNTH_FETCH_WORDS_URL, + '?neurovault={0}'.format(image_info['id'])) + try: + _simple_download(query, image_info['ns_words_absolute_path'], + download_params['temp_dir'], + verbose=download_params['verbose']) + assert _check_has_words(image_info['ns_words_absolute_path']) + except(URLError, ValueError, AssertionError): + message = 'Could not fetch words for image {0}'.format( + image_info['id']) + if not download_params.get('allow_neurosynth_failure', True): + raise RuntimeError(message) + _print_if( + message, _ERROR, download_params['verbose'], with_traceback=True) + + return image_info, collection + + +def _download_image(image_info, download_params): + """Download a Neurovault image. + + If necessary, create the corresponding collection's directory and + download the collection's metadata. + + Parameters + ---------- + image_info : dict + Image metadata. + + download_params : dict + General information about download session, containing e.g. the + data directory (see `_read_download_params` and + `_prepare_download_params for details`) + + Returns + ------- + image_info : dict + Image metadata with local paths added to it. + + """ + if image_info is None: + return None + image_info = _remove_none_strings(image_info) + + # image_info = self._image_hook(image_info) + collection = _fetch_collection_for_image( + image_info, download_params) + image_info, collection = _download_image_nii_file( + image_info, collection, download_params) + image_info, collection = _download_image_terms( + image_info, collection, download_params) + metadata_file_path = os.path.join( + collection['absolute_path'], 'image_{0}_metadata.json'.format( + image_info['id'])) + _write_metadata(image_info, metadata_file_path) + + return image_info + + +def _update_image(image_info, download_params): + """Update local metadata for an image. + + If required and necessary, download the Neurosynth tags. + + Parameters + ---------- + image_info : dict + Image metadata. + + download_params : dict + General information about download session, containing e.g. the + data directory (see `_read_download_params` and + `_prepare_download_params for details`) + + Returns + ------- + image_info : dict + Image metadata. + + """ + if not download_params['write_ok']: + return image_info + collection = _fetch_collection_for_image( + image_info, download_params) + image_info, collection = _download_image_terms( + image_info, collection, download_params) + metadata_file_path = os.path.join( + os.path.dirname(image_info['absolute_path']), + 'image_{0}_metadata.json'.format(image_info['id'])) + _write_metadata(image_info, metadata_file_path) + return image_info + + +def _update(image_info, collection, download_params): + """Update local metadata for an image and its collection.""" + image_info = _update_image(image_info, download_params) + return image_info, collection + + +def _scroll_local(download_params): + """Iterate over local neurovault data. 
+
+    Parameters
+    ----------
+    download_params : dict
+        General information about the download session, containing e.g.
+        the data directory (see `_read_download_params` and
+        `_prepare_download_params` for details).
+
+    Yields
+    ------
+    image : dict
+        Metadata for an image.
+
+    collection : dict
+        Metadata for the corresponding collection.
+
+    """
+    _print_if('Reading local neurovault data.', _DEBUG,
+              download_params['verbose'])
+
+    collections = glob(
+        os.path.join(
+            download_params['nv_data_dir'], '*', 'collection_metadata.json'))
+
+    good_collections = (col for col in
+                        (_json_add_collection_dir(col) for col in collections)
+                        if download_params['local_collection_filter'](col))
+    for collection in good_collections:
+        images = glob(os.path.join(
+            collection['absolute_path'], 'image_*_metadata.json'))
+
+        good_images = (img for img in
+                       (_json_add_im_files_paths(img) for img in images)
+                       if download_params['local_image_filter'](img))
+        for image in good_images:
+            image, collection = _update(image, collection, download_params)
+            download_params['visited_images'].add(image['id'])
+            download_params['visited_collections'].add(collection['id'])
+            yield image, collection
+
+
+def _scroll_collection(collection, download_params):
+    """Iterate over the content of a collection on the Neurovault server.
+
+    Images that are found and match filter criteria are downloaded.
+
+    Parameters
+    ----------
+    collection : dict
+        Metadata for the collection.
+
+    download_params : dict
+        General information about the download session, containing e.g.
+        the data directory (see `_read_download_params` and
+        `_prepare_download_params` for details).
+
+    Yields
+    ------
+    image : dict
+        Metadata for an image.
+
+    Notes
+    -----
+    ``image`` can be ``None`` to signify a failed download.
+
+    """
+    if collection is None:
+        yield None
+        return
+    n_im_in_collection = 0
+    fails_in_collection = 0
+    query = urljoin(_NEUROVAULT_COLLECTIONS_URL,
+                    '{0}/images/'.format(collection['id']))
+    images = _scroll_server_results(
+        query, query_terms=download_params['image_terms'],
+        local_filter=download_params['image_filter'],
+        prefix_msg='Scroll images from collection {0}: '.format(
+            collection['id']), batch_size=download_params['batch_size'],
+        verbose=download_params['verbose'])
+
+    for image in images:
+        if image is None:
+            yield None
+            continue
+        try:
+            image = _download_image(image, download_params)
+            fails_in_collection = 0
+            n_im_in_collection += 1
+            yield image
+        except Exception:
+            fails_in_collection += 1
+            _print_if(
+                '_scroll_collection: bad image: {0}'.format(image),
+                _ERROR, download_params['verbose'], with_traceback=True)
+            yield None
+        if fails_in_collection == download_params['max_fails_in_collection']:
+            _print_if('Too many bad images in collection {0}: '
+                      '{1} bad images.'.format(
+                          collection['id'], fails_in_collection),
+                      _ERROR, download_params['verbose'])
+            return
+    _print_if(
+        'On neurovault.org: '
+        '{0} image{1} matched query in collection {2}'.format(
+            (n_im_in_collection if n_im_in_collection else 'no'),
+            ('s' if n_im_in_collection > 1 else ''), collection['id']),
+        _INFO, download_params['verbose'])
+
+
+def _scroll_filtered(download_params):
+    """Iterate over Neurovault data that matches specified filters.
+
+    Images and collections which match the filters provided in the
+    download parameters are fetched from the server.
+
+    Parameters
+    ----------
+    download_params : dict
+        General information about the download session, containing e.g.
+        the data directory (see `_read_download_params` and
+        `_prepare_download_params` for details).
+
+    Yields
+    ------
+    image : dict
+        Metadata for an image.
+
+    collection : dict
+        Metadata for the corresponding collection.
+
+    Notes
+    -----
+    ``image``, ``collection`` can be ``None``, ``None`` to signify a
+    failed download.
+
+    """
+    _print_if('Reading server neurovault data.',
+              _DEBUG, download_params['verbose'])
+
+    download_params['collection_filter'] = ResultFilter(
+        {'id': NotIn(*download_params['visited_collections'])}).AND(
+            download_params['collection_filter'])
+
+    download_params['image_filter'] = ResultFilter(
+        {'id': NotIn(*download_params['visited_images'])}).AND(
+            download_params['image_filter'])
+
+    collections = _scroll_server_results(
+        _NEUROVAULT_COLLECTIONS_URL,
+        query_terms=download_params['collection_terms'],
+        local_filter=download_params['collection_filter'],
+        prefix_msg='Scroll collections: ',
+        batch_size=download_params['batch_size'],
+        verbose=download_params['verbose'])
+
+    for collection in collections:
+        collection = _download_collection(collection, download_params)
+        collection_content = _scroll_collection(collection, download_params)
+        for image in collection_content:
+            yield image, collection
+
+
+def _scroll_collection_ids(download_params):
+    """Download a specific list of collections from Neurovault.
+
+    The collections listed in the download parameters, and all
+    the images they contain, are downloaded.
+
+    Parameters
+    ----------
+    download_params : dict
+        General information about the download session, containing e.g.
+        the data directory (see `_read_download_params` and
+        `_prepare_download_params` for details).
+
+    Yields
+    ------
+    image : dict
+        Metadata for an image.
+
+    collection : dict
+        Metadata for the corresponding collection.
+
+    Notes
+    -----
+    ``image``, ``collection`` can be ``None``, ``None`` to signify a
+    failed download.
+
+    """
+    collection_urls = [
+        urljoin(_NEUROVAULT_COLLECTIONS_URL, str(col_id)) for
+        col_id in download_params['wanted_collection_ids']]
+
+    if collection_urls:
+        _print_if('Reading server neurovault data.',
+                  _DEBUG, download_params['verbose'])
+
+    collections = _yield_from_url_list(
+        collection_urls, verbose=download_params['verbose'])
+    for collection in collections:
+        collection = _download_collection(collection, download_params)
+        for image in _scroll_collection(collection, download_params):
+            yield image, collection
+
+
+def _scroll_image_ids(download_params):
+    """Download a specific list of images from Neurovault.
+
+    The images listed in the download parameters, and the metadata for
+    the collections they belong to, are downloaded.
+
+    Parameters
+    ----------
+    download_params : dict
+        General information about the download session, containing e.g.
+        the data directory (see `_read_download_params` and
+        `_prepare_download_params` for details).
+
+    Yields
+    ------
+    image : dict
+        Metadata for an image.
+
+    collection : dict
+        Metadata for the corresponding collection.
+
+    Notes
+    -----
+    ``image``, ``collection`` can be ``None``, ``None`` to signify a
+    failed download.
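+
+    The collection metadata is read from the directory in which the
+    image was saved, so no extra server request is needed for it.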
+ + """ + + image_urls = [urljoin(_NEUROVAULT_IMAGES_URL, str(im_id)) for + im_id in download_params['wanted_image_ids']] + + images = _yield_from_url_list( + image_urls, verbose=download_params['verbose']) + for image in images: + try: + image = _download_image(image, download_params) + collection = _json_add_collection_dir(os.path.join( + os.path.dirname(image['absolute_path']), + 'collection_metadata.json')) + except Exception: + image, collection = None, None + yield image, collection + + +def _scroll_explicit(download_params): + """Download specific lists of collections and images from Neurovault. + + Parameters + ---------- + download_params : dict + General information about download session, containing e.g. the + data directory (see `_read_download_params` and + `_prepare_download_params for details`) + + Yields + ------ + image : dict + Metadata for an image. + + collection : dict + Metadata for the corresponding collection. + + Notes + ----- + ``image``, ``collection`` can be ``None``, ``None`` to signify a + failed download. + + """ + + download_params['wanted_collection_ids'] = set( + download_params['wanted_collection_ids'] or []).difference( + download_params['visited_collections']) + for image, collection in _scroll_collection_ids(download_params): + if image is not None: + download_params['visited_images'].add(image['id']) + yield image, collection + + download_params['wanted_image_ids'] = set( + download_params['wanted_image_ids'] or []).difference( + download_params['visited_images']) + + for image, collection in _scroll_image_ids(download_params): + yield image, collection + + +def _print_progress(found, download_params, level=_INFO): + """Print number of images fetched so far.""" + _print_if('Already fetched {0} image{1}'.format( + found, ('s' if found > 1 else '')), + level, download_params['verbose']) + + +def _scroll(download_params): + """Iterate over Neurovault data. + + Relevant images and collections are loaded from local disk, then + from neurovault.org + + Parameters + ---------- + download_params : dict + General information about download session, containing e.g. the + data directory (see `_read_download_params` and + `_prepare_download_params for details`) + + Yields + ------ + image : dict + Metadata for an image. + + collection : dict + Metadata for the corresponding collection. + + Notes + ----- + Stops if: + - All available images have been fetched. + - Or a max number of images has been specified by user and + reached. + - Or too many downloads have failed in a row. 
+ + """ + scroll_modes = {'filtered': _scroll_filtered, 'explicit': _scroll_explicit} + if download_params['max_images'] == 0: + return + found = 0 + + if download_params['download_mode'] != 'overwrite': + for image, collection in _scroll_local(download_params): + found = len(download_params['visited_images']) + _print_progress(found, download_params, _DEBUG) + yield image, collection + if found == download_params['max_images']: + break + _print_if('{0} image{1} found on local disk.'.format( + ('No' if not found else found), ('s' if found > 1 else '')), + _INFO, download_params['verbose']) + + if download_params['download_mode'] == 'offline': + return + if found == download_params['max_images']: + return + server_data = scroll_modes[download_params['scroll_mode']](download_params) + n_consecutive_fails = 0 + for image, collection in server_data: + if image is None or collection is None: + n_consecutive_fails += 1 + else: + n_consecutive_fails = 0 + found += 1 + _print_progress(found, download_params) + yield image, collection + + if n_consecutive_fails >= download_params['max_consecutive_fails']: + warnings.warn('Neurovault download stopped early: ' + 'too many downloads failed in a row ({0})'.format( + n_consecutive_fails)) + return + if found == download_params['max_images']: + return + + +# Utilities for providing defaults and transforming input and output + +def _split_terms(terms, available_on_server): + """Isolate term filters that can be applied by server.""" + terms_ = dict(terms) + server_terms = dict([(k, terms_.pop(k)) for k in + available_on_server if k in terms_ and + (isinstance(terms_[k], _basestring) or + isinstance(terms_[k], int))]) + return terms_, server_terms + + +def _move_unknown_terms_to_local_filter(terms, local_filter, + available_on_server): + """Move filters handled by the server inside URL. + + Some filters are available on the server and can be inserted into + the URL query. The rest will have to be applied on metadata + locally. + + """ + local_terms, server_terms = _split_terms(terms, available_on_server) + local_filter = ResultFilter(query_terms=local_terms).AND(local_filter) + return server_terms, local_filter + + +def basic_collection_terms(): + """Return a term filter that excludes empty collections.""" + return {'number_of_images': NotNull()} + + +def basic_image_terms(): + """Filter that selects unthresholded F, T and Z maps in mni space + + More precisely, an image is excluded if one of the following is + true: + + - It is not in MNI space. + - Its metadata field "is_valid" is cleared. + - It is thresholded. + - Its map type is one of "ROI/mask", "anatomical", or "parcellation". + - Its image type is "atlas" + + """ + return {'not_mni': False, 'is_valid': True, 'is_thresholded': False, + 'map_type': NotIn('ROI/mask', 'anatomical', 'parcellation'), + 'image_type': NotEqual('atlas')} + + +def _move_col_id(im_terms, col_terms): + """Reposition 'collection_id' term. + + If the collection id was specified in image filters, move it to + the collection filters for efficiency. + + This makes specifying the collection id as a keyword argument for + ``fetch_neurovault`` efficient. 
+ + """ + if 'collection_id' not in im_terms: + return im_terms, col_terms + im_terms = copy(im_terms) + col_terms = copy(col_terms) + if 'id' not in col_terms: + col_terms['id'] = im_terms.pop('collection_id') + elif col_terms['id'] == im_terms['collection_id']: + col_terms['id'] = im_terms.pop('collection_id') + else: + warnings.warn('You specified contradictory collection ids, ' + 'one in the image filters and one in the ' + 'collection filters') + return im_terms, col_terms + + +def _read_download_params( + data_dir, download_mode='download_new', collection_terms=None, + collection_filter=_empty_filter, image_terms=None, + image_filter=_empty_filter, wanted_collection_ids=None, + wanted_image_ids=None, max_images=None, + max_consecutive_fails=_MAX_CONSECUTIVE_FAILS, + max_fails_in_collection=_MAX_FAILS_IN_COLLECTION, + batch_size=None, verbose=3, fetch_neurosynth_words=False, + vectorize_words=True): + + """Create a dictionary containing download information. + + """ + download_params = {} + download_params['verbose'] = verbose + download_mode = download_mode.lower() + if download_mode not in ['overwrite', 'download_new', 'offline']: + raise ValueError( + 'supported download modes are overwrite,' + ' download_new, offline; got {0}'.format(download_mode)) + download_params['download_mode'] = download_mode + if collection_terms is None: + collection_terms = {} + if image_terms is None: + image_terms = {} + if max_images is not None and max_images < 0: + max_images = None + download_params['nv_data_dir'] = data_dir + download_params['collection_terms'] = dict(collection_terms) + download_params['collection_filter'] = collection_filter + download_params['image_terms'] = dict(image_terms) + download_params['image_filter'] = image_filter + download_params['visited_images'] = set() + download_params['visited_collections'] = set() + download_params['max_images'] = max_images + download_params['max_consecutive_fails'] = max_consecutive_fails + download_params['max_fails_in_collection'] = max_fails_in_collection + download_params['batch_size'] = batch_size + download_params['wanted_image_ids'] = wanted_image_ids + download_params['wanted_collection_ids'] = wanted_collection_ids + download_params['fetch_neurosynth_words'] = fetch_neurosynth_words + download_params['write_ok'] = os.access( + download_params['nv_data_dir'], os.W_OK) + download_params['vectorize_words'] = vectorize_words + return download_params + + +def _prepare_explicit_ids_download_params(download_params): + """Prepare the download parameters if explicit ids are specified.""" + if download_params.get('wanted_image_ids') is None: + download_params['wanted_image_ids'] = [] + if download_params.get('wanted_collection_ids') is None: + download_params['wanted_collection_ids'] = [] + download_params['max_images'] = None + download_params['scroll_mode'] = 'explicit' + download_params['image_terms'] = {} + download_params['image_filter'] = _empty_filter + download_params['collection_terms'] = {} + download_params['collection_filter'] = _empty_filter + download_params['local_collection_filter'] = _empty_filter + download_params['local_image_filter'] = ResultFilter( + {'id': IsIn(*download_params['wanted_image_ids'])}).OR( + ResultFilter( + collection_id=IsIn( + *download_params['wanted_collection_ids']))) + return download_params + + +def _prepare_filtered_download_params(download_params): + """Prepare the download parameters if filters are used.""" + (download_params['image_terms'], + download_params['collection_terms']) = _move_col_id( 
+        download_params['image_terms'], download_params['collection_terms'])
+
+    (download_params['collection_terms'],
+     download_params['collection_filter']
+     ) = _move_unknown_terms_to_local_filter(
+        download_params['collection_terms'],
+        download_params['collection_filter'],
+        _COL_FILTERS_AVAILABLE_ON_SERVER)
+
+    (download_params['image_terms'],
+     download_params['image_filter']) = _move_unknown_terms_to_local_filter(
+        download_params['image_terms'], download_params['image_filter'],
+        _IM_FILTERS_AVAILABLE_ON_SERVER)
+
+    download_params['local_collection_filter'] = ResultFilter(
+        **download_params['collection_terms']).AND(
+            download_params['collection_filter'])
+    download_params['local_image_filter'] = ResultFilter(
+        **download_params['image_terms']).AND(
+            download_params['image_filter'])
+
+    download_params['scroll_mode'] = 'filtered'
+    return download_params
+
+
+def _prepare_download_params(download_params):
+    """Adjust the download parameters.
+
+    Information for the downloaders is added. The result depends on
+    whether we are downloading a set of collections and images
+    explicitly specified by the user (by id), or we are downloading
+    all the collections and images that match certain filters.
+
+    """
+    if (download_params['wanted_collection_ids'] is not None or
+            download_params['wanted_image_ids'] is not None):
+        return _prepare_explicit_ids_download_params(download_params)
+    return _prepare_filtered_download_params(download_params)
+
+
+def _result_list_to_bunch(result_list, download_params):
+    """Transform a list of results into a Bunch.
+
+    If necessary, a vocabulary list and a matrix of vectorized tags are
+    added.
+
+    """
+    if not result_list:
+        images_meta, collections_meta = [], []
+    else:
+        images_meta, collections_meta = zip(*result_list)
+        images_meta = list(images_meta)
+        collections_meta = list(collections_meta)
+    images = [im_meta.get('absolute_path') for im_meta in images_meta]
+    result = Bunch(images=images, images_meta=images_meta,
+                   collections_meta=collections_meta,
+                   description=_get_dataset_descr('neurovault'))
+    if (download_params['fetch_neurosynth_words'] and
+            download_params['vectorize_words']):
+        (result['word_frequencies'],
+         result['vocabulary']) = neurosynth_words_vectorized(
+            [meta.get('ns_words_absolute_path') for
+             meta in images_meta], verbose=download_params['verbose'])
+    return result
+
+
+# High-level functions that provide access to neurovault and neurosynth.
+# _fetch_neurovault_implementation does the work, and two interfaces
+# are available:
+#     fetch_neurovault, to filter results based on metadata
+#     fetch_neurovault_ids, to ask for specific images or collections
+
+def _fetch_neurovault_implementation(
+        max_images=_DEFAULT_MAX_IMAGES,
+        collection_terms=basic_collection_terms(),
+        collection_filter=_empty_filter, image_terms=basic_image_terms(),
+        image_filter=_empty_filter, collection_ids=None, image_ids=None,
+        mode='download_new', data_dir=None, fetch_neurosynth_words=False,
+        vectorize_words=True, verbose=3, **kwarg_image_filters):
+    """Download data from neurovault.org and neurosynth.org."""
+    image_terms = dict(image_terms, **kwarg_image_filters)
+    neurovault_data_dir = _get_dataset_dir('neurovault', data_dir)
+    if mode != 'offline' and not os.access(neurovault_data_dir, os.W_OK):
+        warnings.warn("You don't have write access to neurovault dir: {0}; "
+                      "fetch_neurovault is working offline.".format(
+                          neurovault_data_dir))
+        mode = 'offline'
+
+    download_params = _read_download_params(
+        neurovault_data_dir, download_mode=mode,
+        collection_terms=collection_terms,
+        collection_filter=collection_filter, image_terms=image_terms,
+        image_filter=image_filter, wanted_collection_ids=collection_ids,
+        wanted_image_ids=image_ids, max_images=max_images, verbose=verbose,
+        fetch_neurosynth_words=fetch_neurosynth_words,
+        vectorize_words=vectorize_words)
+    download_params = _prepare_download_params(download_params)
+
+    with _TemporaryDirectory() as temp_dir:
+        download_params['temp_dir'] = temp_dir
+        scroller = list(_scroll(download_params))
+
+    return _result_list_to_bunch(scroller, download_params)
+
+
+def fetch_neurovault(
+        max_images=_DEFAULT_MAX_IMAGES,
+        collection_terms=basic_collection_terms(),
+        collection_filter=_empty_filter,
+        image_terms=basic_image_terms(),
+        image_filter=_empty_filter,
+        mode='download_new', data_dir=None,
+        fetch_neurosynth_words=False, vectorize_words=True,
+        verbose=3, **kwarg_image_filters):
+    """Download data from neurovault.org that match certain criteria.
+
+    Any downloaded data is saved on the local disk and subsequent
+    calls to this function will first look for the data locally before
+    querying the server for more if necessary.
+
+    We explore the metadata for Neurovault collections and images,
+    keeping those that match a certain set of criteria, until we have
+    skimmed through the whole database or until an (optional) maximum
+    number of images to fetch has been reached.
+
+    Parameters
+    ----------
+    max_images : int, optional (default=100)
+        Maximum number of images to fetch.
+
+    collection_terms : dict, optional (default=basic_collection_terms())
+        Key, value pairs used to filter collection
+        metadata. Collections for which
+        ``collection_metadata['key'] == value`` is not ``True`` for
+        every key, value pair will be discarded.
+        See documentation for ``basic_collection_terms`` for a
+        description of the default selection criteria.
+
+    collection_filter : Callable, optional (default=_empty_filter)
+        Collections for which `collection_filter(collection_metadata)`
+        is ``False`` will be discarded.
+
+    image_terms : dict, optional (default=basic_image_terms())
+        Key, value pairs used to filter image metadata. Images for
+        which ``image_metadata['key'] == value`` is not ``True`` for
+        every key, value pair will be discarded.
+        See documentation for ``basic_image_terms`` for a
+        description of the default selection criteria.
+ + image_filter : Callable, optional (default=_empty_filter) + Images for which `image_filter(image_metadata)` is ``False`` + will be discarded. + + mode : {'download_new', 'overwrite', 'offline'} + When to fetch an image from the server rather than the local + disk. + + - 'download_new' (the default) means download only files that + are not already on disk (regardless of modify date). + - 'overwrite' means ignore files on disk and overwrite them. + - 'offline' means load only data from disk; don't query server. + + data_dir : str, optional (default=None) + The directory we want to use for nilearn data. A subdirectory + named "neurovault" will contain neurovault data. + + fetch_neurosynth_words : bool, optional (default=False) + whether to collect words from Neurosynth. + + vectorize_words : bool, optional (default=True) + If neurosynth words are downloaded, create a matrix of word + counts and add it to the result. Also add to the result a + vocabulary list. See ``sklearn.CountVectorizer`` for more info. + + verbose : int, optional (default=3) + an integer in [0, 1, 2, 3] to control the verbosity level. + + kwarg_image_filters + Keyword arguments are understood to be filter terms for + images, so for example ``map_type='Z map'`` means only + download Z-maps; ``collection_id=35`` means download images + from collection 35 only. + + Returns + ------- + Bunch + A dict-like object which exposes its items as attributes. It contains: + + - 'images', the paths to downloaded files. + - 'images_meta', the metadata for the images in a list of + dictionaries. + - 'collections_meta', the metadata for the + collections. + - 'description', a short description of the Neurovault dataset. + + If `fetch_neurosynth_words` and `vectorize_words` were set, it + also contains: + + - 'vocabulary', a list of words + - 'word_frequencies', the weight of the words returned by + neurosynth.org for each image, such that the weight of word + `vocabulary[j]` for the image found in `images[i]` is + `word_frequencies[i, j]` + + See Also + -------- + nilearn.datasets.fetch_neurovault_ids + Fetch collections and images from Neurovault by explicitly specifying + their ids. + + Notes + ----- + Images and collections from disk are fetched before remote data. + + Some helpers are provided in the ``neurovault`` module to express + filtering criteria more concisely: + + ``ResultFilter``, ``IsNull``, ``NotNull``, ``NotEqual``, + ``GreaterOrEqual``, ``GreaterThan``, ``LessOrEqual``, + ``LessThan``, ``IsIn``, ``NotIn``, ``Contains``, + ``NotContains``, ``Pattern``. + + If you pass a single value to match against the collection id + (whether as the 'id' field of the collection metadata or as the + 'collection_id' field of the image metadata), the server is + directly queried for that collection, so + ``fetch_neurovault(collection_id=40)`` is as efficient as + ``fetch_neurovault(collection_ids=[40])`` (but in the former + version the other filters will still be applied). This is not true + for the image ids. If you pass a single value to match against any + of the fields listed in ``_COL_FILTERS_AVAILABLE_ON_SERVER``, + i.e., 'DOI', 'name', and 'owner', these filters can be + applied by the server, limiting the amount of metadata we have to + download: filtering on those fields makes the fetching faster + because the filtering takes place on the server side. + + In `download_new` mode, if a file exists on disk, it is not + downloaded again, even if the version on the server is newer. 
Use + `overwrite` mode to force a new download (you can filter on the + field ``modify_date`` to re-download the files that are newer on + the server - see Examples section). + + Tries to yield `max_images` images; stops early if we have fetched + all the images matching the filters or if too many images fail to + be downloaded in a row. + + References + ---------- + + .. [1] Gorgolewski KJ, Varoquaux G, Rivera G, Schwartz Y, Ghosh SS, + Maumet C, Sochat VV, Nichols TE, Poldrack RA, Poline J-B, + Yarkoni T and Margulies DS (2015) NeuroVault.org: a web-based + repository for collecting and sharing unthresholded + statistical maps of the human brain. Front. Neuroinform. 9:8. + doi: 10.3389/fninf.2015.00008 + + .. [2] Yarkoni, Tal, Russell A. Poldrack, Thomas E. Nichols, David + C. Van Essen, and Tor D. Wager. "Large-scale automated synthesis + of human functional neuroimaging data." Nature methods 8, no. 8 + (2011): 665-670. + + Examples + -------- + To download **all** the collections and images from Neurovault:: + + fetch_neurovault(max_images=None, collection_terms={}, image_terms={}) + + To further limit the default selection to collections which + specify a DOI (which reference a published paper, as they may be + more likely to contain good images):: + + fetch_neurovault( + max_images=None, + collection_terms=dict(basic_collection_terms(), DOI=NotNull())) + + To update all the images (matching the default filters):: + + fetch_neurovault( + max_images=None, mode='overwrite', + modify_date=GreaterThan(newest)) + + """ + if max_images == _DEFAULT_MAX_IMAGES: + _print_if( + 'fetch_neurovault: using default value of {0} for max_images. ' + 'Set max_images to another value or None ' + 'if you want more images.'.format(_DEFAULT_MAX_IMAGES), + _INFO, verbose) + # Users may get confused if they write their image_filter function + # and the default filters contained in image_terms still apply, so we + # issue a warning. + if image_filter != _empty_filter and image_terms == basic_image_terms(): + warnings.warn( + "You specified a value for `image_filter` but the " + "default filters in `image_terms` still apply. " + "If you want to disable them, pass `image_terms={}`") + if (collection_filter != _empty_filter + and collection_terms == basic_collection_terms()): + warnings.warn( + "You specified a value for `collection_filter` but the " + "default filters in `collection_terms` still apply. " + "If you want to disable them, pass `collection_terms={}`") + + return _fetch_neurovault_implementation( + max_images=max_images, collection_terms=collection_terms, + collection_filter=collection_filter, image_terms=image_terms, + image_filter=image_filter, mode=mode, + data_dir=data_dir, + fetch_neurosynth_words=fetch_neurosynth_words, + vectorize_words=vectorize_words, verbose=verbose, + **kwarg_image_filters) + + +def fetch_neurovault_ids( + collection_ids=(), image_ids=(), mode='download_new', data_dir=None, + fetch_neurosynth_words=False, vectorize_words=True, verbose=3): + """Download specific images and collections from neurovault.org. + + Any downloaded data is saved on the local disk and subsequent + calls to this function will first look for the data locally before + querying the server for more if necessary. + + This is the fast way to get the data from the server if we already + know which images or collections we want. + + Parameters + ---------- + + collection_ids : Container, optional (default=()) + The ids of whole collections to be downloaded. 
+
+    image_ids : Container, optional (default=())
+        The ids of particular images to be downloaded. The metadata for the
+        corresponding collections is also downloaded.
+
+    mode : {'download_new', 'overwrite', 'offline'}
+        When to fetch an image from the server rather than the local
+        disk.
+
+        - 'download_new' (the default) means download only files that
+          are not already on disk (regardless of modify date).
+        - 'overwrite' means ignore files on disk and overwrite them.
+        - 'offline' means load only data from disk; don't query server.
+
+    data_dir : str, optional (default=None)
+        The directory we want to use for nilearn data. A subdirectory
+        named "neurovault" will contain neurovault data.
+
+    fetch_neurosynth_words : bool, optional (default=False)
+        Whether to collect words from Neurosynth.
+
+    vectorize_words : bool, optional (default=True)
+        If neurosynth words are downloaded, create a matrix of word
+        counts and add it to the result. Also add to the result a
+        vocabulary list. See ``sklearn.CountVectorizer`` for more info.
+
+    verbose : int, optional (default=3)
+        An integer in [0, 1, 2, 3] to control the verbosity level.
+
+    Returns
+    -------
+    Bunch
+        A dict-like object which exposes its items as attributes. It contains:
+
+            - 'images', the paths to downloaded files.
+            - 'images_meta', the metadata for the images in a list of
+              dictionaries.
+            - 'collections_meta', the metadata for the collections.
+            - 'description', a short description of the Neurovault dataset.
+
+        If `fetch_neurosynth_words` and `vectorize_words` were set, it
+        also contains:
+
+            - 'vocabulary', a list of words
+            - 'word_frequencies', the weight of the words returned by
+              neurosynth.org for each image, such that the weight of word
+              `vocabulary[j]` for the image found in `images[i]` is
+              `word_frequencies[i, j]`
+
+    See Also
+    --------
+    nilearn.datasets.fetch_neurovault
+        Fetch data from Neurovault, but use filters on metadata to select
+        images and collections rather than giving explicit lists of ids.
+
+    Notes
+    -----
+    Images and collections from disk are fetched before remote data.
+
+    In `download_new` mode, if a file exists on disk, it is not
+    downloaded again, even if the version on the server is newer. Use
+    `overwrite` mode to force a new download.
+
+    Stops early if too many images fail to be downloaded in a row.
+
+    References
+    ----------
+
+    .. [1] Gorgolewski KJ, Varoquaux G, Rivera G, Schwartz Y, Ghosh SS,
+       Maumet C, Sochat VV, Nichols TE, Poldrack RA, Poline J-B,
+       Yarkoni T and Margulies DS (2015) NeuroVault.org: a web-based
+       repository for collecting and sharing unthresholded
+       statistical maps of the human brain. Front. Neuroinform. 9:8.
+       doi: 10.3389/fninf.2015.00008
+
+    .. [2] Yarkoni, Tal, Russell A. Poldrack, Thomas E. Nichols, David
+       C. Van Essen, and Tor D. Wager. "Large-scale automated synthesis
+       of human functional neuroimaging data." Nature methods 8, no. 8
+       (2011): 665-670.
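+
+    Examples
+    --------
+    A minimal sketch of both styles of request (the ids below are
+    arbitrary examples, not a recommendation)::
+
+        # Fetch one specific image.
+        data = fetch_neurovault_ids(image_ids=[111])
+        # Fetch all images of one collection, plus that same image.
+        data = fetch_neurovault_ids(collection_ids=[307], image_ids=[111])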
+
+    """
+    return _fetch_neurovault_implementation(
+        mode=mode,
+        collection_ids=collection_ids, image_ids=image_ids,
+        data_dir=data_dir,
+        fetch_neurosynth_words=fetch_neurosynth_words,
+        vectorize_words=vectorize_words, verbose=verbose)
+
+
+def fetch_neurovault_motor_task(data_dir=None, verbose=1):
+    """Fetch left vs right button press group contrast map from NeuroVault.
+
+    Parameters
+    ----------
+    data_dir: string, optional
+        Path of the data directory. Used to force data storage in a specified
+        location.
+
+    verbose: int, optional
+        Verbosity level (0 means no message).
+
+    Returns
+    -------
+    data: Bunch
+        A dict-like object which exposes its items as attributes. It contains:
+            - 'images', the paths to downloaded files.
+            - 'images_meta', the metadata for the images in a list of
+              dictionaries.
+            - 'collections_meta', the metadata for the collections.
+            - 'description', a short description of the Neurovault dataset.
+
+    Notes
+    -----
+    This function is only a thin wrapper around fetch_neurovault_ids, meant
+    to make the examples easier to read and understand.
+    The 'left vs right button press' contrast is used:
+    https://neurovault.org/images/10426/
+
+    See Also
+    --------
+    nilearn.datasets.fetch_neurovault_ids
+    nilearn.datasets.fetch_neurovault
+    nilearn.datasets.fetch_neurovault_auditory_computation_task
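+
+    Examples
+    --------
+    A minimal usage sketch; the Bunch documented above exposes the local
+    path of the downloaded map::
+
+        motor = fetch_neurovault_motor_task()
+        # motor.images[0] is the path to the downloaded contrast map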
+
+    """
+    data = fetch_neurovault_ids(image_ids=[10426], data_dir=data_dir,
+                                verbose=verbose)
+    return data
+
+
+def fetch_neurovault_auditory_computation_task(data_dir=None, verbose=1):
+    """Fetch a contrast map from NeuroVault showing
+    the effect of mental subtraction upon auditory instructions.
+
+    Parameters
+    ----------
+    data_dir: string, optional
+        Path of the data directory. Used to force data storage in a specified
+        location.
+
+    verbose: int, optional
+        Verbosity level (0 means no message).
+
+    Returns
+    -------
+    data: Bunch
+        A dict-like object which exposes its items as attributes. It contains:
+            - 'images', the paths to downloaded files.
+            - 'images_meta', the metadata for the images in a list of
+              dictionaries.
+            - 'collections_meta', the metadata for the collections.
+            - 'description', a short description of the Neurovault dataset.
+
+    Notes
+    -----
+    This function is only a thin wrapper around fetch_neurovault_ids, meant
+    to make the examples easier to read and understand.
+    The 'auditory_calculation_vs_baseline' contrast is used:
+    https://neurovault.org/images/32980/
+
+    See Also
+    --------
+    nilearn.datasets.fetch_neurovault_ids
+    nilearn.datasets.fetch_neurovault
+    nilearn.datasets.fetch_neurovault_motor_task
+
+    """
+    data = fetch_neurovault_ids(image_ids=[32980], data_dir=data_dir,
+                                verbose=verbose)
+    return data
diff --git a/nilearn/datasets/struct.py b/nilearn/datasets/struct.py
index 051623d059..434ee268f9 100644
--- a/nilearn/datasets/struct.py
+++ b/nilearn/datasets/struct.py
@@ -4,11 +4,21 @@
 import warnings
 import os
 import numpy as np
+from scipy import ndimage
 from sklearn.datasets.base import Bunch
 
-from .utils import _get_dataset_dir, _fetch_files, _get_dataset_descr
+from .utils import (_get_dataset_dir, _fetch_files,
+                    _get_dataset_descr, _uncompress_file)
 
-from .._utils import check_niimg
+from .._utils import check_niimg, niimg
+from .._utils.exceptions import VisibleDeprecationWarning
+from ..image import new_img_like
+
+_package_directory = os.path.dirname(os.path.abspath(__file__))
+# Useful for the very simple examples
+MNI152_FILE_PATH = os.path.join(_package_directory, "data",
+                                "avg152T1_brain.nii.gz")
+FSAVERAGE5_PATH = os.path.join(_package_directory, "data", "fsaverage5")
 
 
 def fetch_icbm152_2009(data_dir=None, url=None, resume=True, verbose=1):
@@ -111,11 +121,93 @@ def load_mni152_template():
        Human Brain Mapping 2009 Annual Meeting,
        DOI: 10.1016/S1053-8119(09)70884-5
     """
-    package_directory = os.path.dirname(os.path.abspath(__file__))
-    path = os.path.join(package_directory, "data", "avg152T1_brain.nii.gz")
+    return check_niimg(MNI152_FILE_PATH)
+
+
+def load_mni152_brain_mask():
+    """Load brain mask from MNI152 T1 template
+
+    .. versionadded:: 0.2.5
+
+    Returns
+    -------
+    mask_img: Nifti-like mask image corresponding to grey and white matter.
+
+    References
+    ----------
+    Refer to the load_mni152_template function for more information about
+    the MNI152 T1 template.
+
+    See Also
+    --------
+    nilearn.datasets.load_mni152_template: for details about the version of
+    the MNI152 T1 template and related information.
+    """
+    # Load MNI template
+    target_img = load_mni152_template()
+    mask_voxels = (target_img.get_data() > 0).astype(int)
+    mask_img = new_img_like(target_img, mask_voxels)
+    return mask_img
+
+
+def fetch_icbm152_brain_gm_mask(data_dir=None, threshold=0.2, resume=True,
+                                verbose=1):
+    """Downloads the ICBM152 template first, then loads the 'gm' mask image.
+
+    .. versionadded:: 0.2.5
+
+    Parameters
+    ----------
+    data_dir: str, optional
+        Path of the data directory. Used to force storage in a specified
+        location. Defaults to None.
+
+    threshold: float, optional
+        Values of the grey matter template above this threshold are
+        included in the mask image. Defaults to 0.2.
+
+    resume: bool, optional
+        If True, try resuming partially downloaded data. Defaults to True.
+
+    verbose: int, optional
+        Verbosity level (0 means no message).
+
+    Returns
+    -------
+    gm_mask_img: Nifti image
+        Corresponding to brain grey matter from ICBM152 template.
+
+    Notes
+    -----
+    This function relies on the ICBM152 templates, from which the grey
+    matter template is selected and thresholded at 0.2. A binary closing
+    operation is then applied to obtain a more compact mask image.
+
+    Note: It is advised to check the mask image with your own data
+    processing.
+
+    See Also
+    --------
+    nilearn.datasets.fetch_icbm152_2009: for details regarding the ICBM152
+    template.
+
+    nilearn.datasets.load_mni152_template: for details about the version of
+    the MNI152 template and related information.
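+
+    Examples
+    --------
+    A minimal sketch; raising the threshold (0.5 here is just an
+    illustration) gives a stricter grey matter mask::
+
+        gm_mask_img = fetch_icbm152_brain_gm_mask(threshold=0.5)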
+
+    """
+    # Fetching ICBM152 grey matter mask image
+    icbm = fetch_icbm152_2009(data_dir=data_dir, resume=resume,
+                              verbose=verbose)
+    gm = icbm['gm']
+    gm_img = check_niimg(gm)
+    gm_data = niimg._safe_get_data(gm_img)
+
+    # Keep only the values above the threshold
+    gm_mask = (gm_data > threshold)
 
-    # XXX Should we load the image here?
-    return check_niimg(path)
+    gm_mask = ndimage.binary_closing(gm_mask, iterations=2)
+    gm_mask_img = new_img_like(gm_img, gm_mask)
+    return gm_mask_img
 
 
 def fetch_oasis_vbm(n_subjects=None, dartel_version=True, data_dir=None,
@@ -332,3 +424,144 @@ def fetch_oasis_vbm(n_subjects=None, dartel_version=True, data_dir=None,
                  ext_vars=csv_data,
                  data_usage_agreement=data_usage_agreement,
                  description=fdescr)
+
+
+def fetch_surf_fsaverage(mesh='fsaverage5', data_dir=None):
+    """ Download a Freesurfer fsaverage surface
+
+    Parameters
+    ----------
+    mesh: str, optional (default='fsaverage5')
+        Which mesh to fetch.
+        'fsaverage5': the low-resolution fsaverage5 mesh (10242 nodes)
+        'fsaverage': the high-resolution fsaverage mesh (163842 nodes)
+        (high-resolution fsaverage will result in more computation time
+        and memory usage)
+
+    data_dir: str, optional (default=None)
+        Path of the data directory. Used to force data storage in a
+        specified location.
+
+    Returns
+    -------
+    data: sklearn.datasets.base.Bunch
+        Dictionary-like object, the interesting attributes are:
+         - 'pial_left': Gifti file, left hemisphere pial surface mesh
+         - 'pial_right': Gifti file, right hemisphere pial surface mesh
+         - 'infl_left': Gifti file, left hemisphere inflated pial
+           surface mesh
+         - 'infl_right': Gifti file, right hemisphere inflated pial
+           surface mesh
+         - 'sulc_left': Gifti file, left hemisphere sulcal depth data
+         - 'sulc_right': Gifti file, right hemisphere sulcal depth data
+
+    References
+    ----------
+    Fischl et al, (1999). High-resolution intersubject averaging and a
+    coordinate system for the cortical surface. Hum Brain Mapp 8, 272-284.
+
+    """
+    meshes = {'fsaverage5': _fetch_surf_fsaverage5,
+              'fsaverage': _fetch_surf_fsaverage}
+    if mesh not in meshes:
+        raise ValueError(
+            "'mesh' should be one of {}; {!r} was provided".format(
+                list(meshes.keys()), mesh))
+    return meshes[mesh](data_dir=data_dir)
+
+
+def _fetch_surf_fsaverage(data_dir=None):
+    dataset_dir = _get_dataset_dir('fsaverage', data_dir=data_dir)
+    url = 'https://www.nitrc.org/frs/download.php/10846/fsaverage.tar.gz'
+    if not os.path.isdir(os.path.join(dataset_dir, 'fsaverage')):
+        _fetch_files(dataset_dir, [('fsaverage.tar.gz', url, {})])
+        _uncompress_file(os.path.join(dataset_dir, 'fsaverage.tar.gz'))
+    result = {
+        name: os.path.join(dataset_dir, 'fsaverage', '{}.gii'.format(name))
+        for name in ['pial_right', 'sulc_right', 'sulc_left', 'pial_left']}
+    result['infl_left'] = os.path.join(
+        dataset_dir, 'fsaverage', 'inflated_left.gii')
+    result['infl_right'] = os.path.join(
+        dataset_dir, 'fsaverage', 'inflated_right.gii')
+
+    result['description'] = str(_get_dataset_descr('fsaverage'))
+    return Bunch(**result)
+
+
+def fetch_surf_fsaverage5(data_dir=None, url=None, resume=True, verbose=1):
+    """ Deprecated since version 0.4.3
+
+    Use fetch_surf_fsaverage instead.
+
+    Parameters
+    ----------
+    data_dir: str, optional (default=None)
+        Path of the data directory. Used to force data storage in a
+        specified location.
+
+    Returns
+    -------
+    data: sklearn.datasets.base.Bunch
+        Dictionary-like object, the interesting attributes are:
+         - 'pial_left': Gifti file, left hemisphere pial surface mesh
+         - 'pial_right': Gifti file, right hemisphere pial surface mesh
+         - 'infl_left': Gifti file, left hemisphere inflated pial
+           surface mesh
+         - 'infl_right': Gifti file, right hemisphere inflated pial
+           surface mesh
+         - 'sulc_left': Gifti file, left hemisphere sulcal depth data
+         - 'sulc_right': Gifti file, right hemisphere sulcal depth data
+
+    References
+    ----------
+    Fischl et al, (1999). High-resolution intersubject averaging and a
+    coordinate system for the cortical surface. Hum Brain Mapp 8, 272-284.
+
+    """
+    warnings.warn("fetch_surf_fsaverage5 has been deprecated and will "
+                  "be removed in a future release. "
+                  "Use fetch_surf_fsaverage(mesh='fsaverage5')",
+                  VisibleDeprecationWarning, stacklevel=2)
+    return fetch_surf_fsaverage(mesh='fsaverage5', data_dir=data_dir)
+
+
+def _fetch_surf_fsaverage5(data_dir=None, url=None, resume=True, verbose=1):
+    """Helper function to ship fsaverage5 surfaces and sulcal information
+    with Nilearn.
+
+    The source data comes from NITRC, based on PR #1016. It was manually
+    downloaded, gzipped, and shipped with Nilearn, as discussed in
+    issue #1705.
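+
+    Given the '%s.%s.gii.gz' naming pattern used below, the shipped files
+    are expected to be laid out like, for instance::
+
+        <FSAVERAGE5_PATH>/pial.left.gii.gz
+        <FSAVERAGE5_PATH>/pial_inflated.right.gii.gz
+        <FSAVERAGE5_PATH>/sulc.left.gii.gz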
+ """ + + dataset_name = 'fsaverage5' + + # Dataset description + fdescr = _get_dataset_descr(dataset_name) + + # Download fsaverage surfaces and sulcal information + surface_file = '%s.%s.gii.gz' + surface_path = os.path.join(FSAVERAGE5_PATH, surface_file) + + pials = [] + infls = [] + sulcs = [] + for hemi in ['left', 'right']: + # pial + pial_path = surface_path % ('pial', hemi) + pials.append(pial_path) + + # pial_inflated + pial_infl_path = surface_path % ('pial_inflated', hemi) + infls.append(pial_infl_path) + + # sulcal + sulc = surface_path % ('sulc', hemi) + sulcs.append(sulc) + + return Bunch(pial_left=pials[0], + pial_right=pials[1], + infl_left=infls[0], + infl_right=infls[1], + sulc_left=sulcs[0], + sulc_right=sulcs[1], + description=fdescr) diff --git a/nilearn/datasets/tests/data/craddock_2011_parcellations.tar.gz b/nilearn/datasets/tests/data/craddock_2011_parcellations.tar.gz new file mode 100644 index 0000000000..c758ea3a9a Binary files /dev/null and b/nilearn/datasets/tests/data/craddock_2011_parcellations.tar.gz differ diff --git a/nilearn/datasets/tests/data/pymvpa-exampledata.tar.bz2 b/nilearn/datasets/tests/data/pymvpa-exampledata.tar.bz2 deleted file mode 100644 index 8ce3f1a16e..0000000000 Binary files a/nilearn/datasets/tests/data/pymvpa-exampledata.tar.bz2 and /dev/null differ diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index 30bc90a506..b9c9673f9f 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -6,20 +6,25 @@ import os import shutil +import itertools + import numpy as np -from tempfile import mkdtemp import nibabel -from nose import with_setup -from nose.tools import assert_true, assert_equal, assert_not_equal +from distutils.version import LooseVersion + +from nose import with_setup, SkipTest +from nose.tools import (assert_true, assert_equal, assert_raises, + assert_not_equal) +from numpy.testing import assert_array_equal from nilearn._utils.testing import assert_raises_regex from . import test_utils as tst -from nilearn._utils.compat import _basestring +from nilearn._utils.compat import _basestring, _urllib -from nilearn.datasets import utils, atlas, struct +from nilearn.datasets import utils, atlas def setup_mock(): @@ -92,6 +97,64 @@ def test_get_dataset_dir(): 'test', test_file, verbose=0) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_downloader(): + + # Sandboxing test + # =============== + + # When nilearn downloads a file, everything is first downloaded in a + # temporary directory (sandbox) and moved to the "real" data directory if + # all files are present. In case of error, the sandbox is deleted. + + # To test this feature, we do as follow: + # - create the data dir with a file that has a specific content + # - try to download the dataset but make it fail on purpose (by requesting a + # file that is not in the archive) + # - check that the previously created file is untouched : + # - if sandboxing is faulty, the file would be replaced by the file of the + # archive + # - if sandboxing works, the file must be untouched. + + local_url = "file:" + _urllib.request.pathname2url( + os.path.join(tst.datadir, "craddock_2011_parcellations.tar.gz")) + datasetdir = os.path.join(tst.tmpdir, 'craddock_2012') + os.makedirs(datasetdir) + + # Create a dummy file. 
+
+    local_url = "file:" + _urllib.request.pathname2url(
+        os.path.join(tst.datadir, "craddock_2011_parcellations.tar.gz"))
+    datasetdir = os.path.join(tst.tmpdir, 'craddock_2012')
+    os.makedirs(datasetdir)
+
+    # Create a dummy file. If sandboxing is successful, it won't be
+    # overwritten.
+    dummy = open(os.path.join(datasetdir, 'random_all.nii.gz'), 'w')
+    dummy.write('stuff')
+    dummy.close()
+
+    opts = {'uncompress': True}
+    files = [
+        ('random_all.nii.gz', local_url, opts),
+        # The following file does not exist. It will cause the fetching
+        # procedure to abort.
+        ('bald.nii.gz', local_url, opts)
+    ]
+
+    assert_raises(IOError, utils._fetch_files,
+                  os.path.join(tst.tmpdir, 'craddock_2012'), files,
+                  verbose=0)
+    dummy = open(os.path.join(datasetdir, 'random_all.nii.gz'), 'r')
+    stuff = dummy.read(5)
+    dummy.close()
+    assert_equal(stuff, 'stuff')
+
+    # Downloading test
+    # ================
+
+    # Now, we use the regular downloading feature. This will overwrite
+    # the dummy file created before.
+
+    atlas.fetch_atlas_craddock_2012(data_dir=tst.tmpdir, url=local_url)
+    dummy = open(os.path.join(datasetdir, 'random_all.nii.gz'), 'r')
+    stuff = dummy.read()
+    dummy.close()
+    assert_equal(stuff, '')
+
+
 @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
 def test_fail_fetch_atlas_harvard_oxford():
     # specify non-existing atlas item
@@ -109,20 +172,90 @@ def test_fail_fetch_atlas_harvard_oxford():
     os.makedirs(nifti_dir)
 
     target_atlas_nii = os.path.join(nifti_dir, target_atlas_fname)
-    struct.load_mni152_template().to_filename(target_atlas_nii)
+
+    # Create a false atlas
+    atlas_data = np.zeros((10, 10, 10), dtype=int)
+
+    # Create an interhemispheric map
+    atlas_data[:, :2, :] = 1
+
+    # Create a left map
+    atlas_data[:5, 3:5, :] = 2
+
+    # Create a right map, with one voxel on the left side
+    atlas_data[5:, 7:9, :] = 3
+    atlas_data[4, 7, 0] = 3
+
+    nibabel.Nifti1Image(atlas_data, np.eye(4) * 3).to_filename(
+        target_atlas_nii)
 
     dummy = open(os.path.join(ho_dir, 'HarvardOxford-Cortical.xml'), 'w')
-    dummy.write(" "
-                ""
-                "")
+    dummy.write("<?xml version='1.0' encoding='us-ascii'?>\n"
+                "<data>\n"
+                '<label index="0">R1</label>\n'
+                '<label index="1">R2</label>\n'
+                '<label index="2">R3</label>\n'
+                "</data>")
     dummy.close()
 
+    # When symmetric_split=False (the default), the atlas fetcher should
+    # return maps as a string and n_labels=4 including the background,
+    # since we rely on the XML file to retrieve the labels.
+    ho_wo_symm = atlas.fetch_atlas_harvard_oxford(target_atlas,
+                                                  data_dir=tst.tmpdir)
+    assert_true(isinstance(ho_wo_symm.maps, _basestring))
+    assert_true(isinstance(ho_wo_symm.labels, list))
+    assert_equal(ho_wo_symm.labels[0], "Background")
+    assert_equal(ho_wo_symm.labels[1], "R1")
+    assert_equal(ho_wo_symm.labels[2], "R2")
+    assert_equal(ho_wo_symm.labels[3], "R3")
+
+    # This section tests the lateralized version, i.e. symmetric_split=True.
+
+    # Dummy xml file for lateralized control of cortical atlas images
+    # shipped with FSL 5.0. In this version the cortical "maxprob" atlases
+    # are already lateralized, but the "fullprob" and the subcortical
+    # atlases are not.
+
+    # So, we test the fetcher with symmetric_split=True by creating a new
+    # dummy local file, fetching it, and checking the output variables
+    # accordingly.
+    dummy2 = open(os.path.join(ho_dir,
+                               'HarvardOxford-Cortical-Lateralized.xml'), 'w')
+    dummy2.write("<?xml version='1.0' encoding='us-ascii'?>\n"
+                 "<data>\n"
+                 '<label index="0">Left R1</label>\n'
+                 '<label index="1">Right R1</label>\n'
+                 '<label index="2">Left R2</label>\n'
+                 '<label index="3">Right R2</label>\n'
+                 '<label index="4">Left R3</label>\n'
+                 '<label index="5">Right R3</label>\n'
+                 "</data>")
+    dummy2.close()
+
+    # Here, with symmetric_split=True, the atlas maps are returned as a
+    # nibabel Nifti image, not a string. With the symmetric split, the
+    # number of labels is larger than without it, and the labels contain
+    # "Left" and "Right" tags.
+
+    # Create the dummy image file too, with 'cortl' specified for the
+    # symmetric split.
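+    # ('cortl' appears to be the name under which the fetcher looks up
+    # lateralized cortical maxprob atlases when symmetric_split=True.)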
+ split_atlas_fname = 'HarvardOxford-' + 'cortl-maxprob-thr0-1mm' + '.nii.gz' + nifti_target_split = os.path.join(nifti_dir, split_atlas_fname) + nibabel.Nifti1Image(atlas_data, np.eye(4) * 3).to_filename( + nifti_target_split) ho = atlas.fetch_atlas_harvard_oxford(target_atlas, - data_dir=tst.tmpdir) + data_dir=tst.tmpdir, + symmetric_split=True) - assert_true(isinstance(nibabel.load(ho.maps), nibabel.Nifti1Image)) - assert_true(isinstance(ho.labels, np.ndarray)) - assert_true(len(ho.labels) > 0) + assert_true(isinstance(ho.maps, nibabel.Nifti1Image)) + assert_true(isinstance(ho.labels, list)) + assert_equal(len(ho.labels), 7) + assert_equal(ho.labels[0], "Background") + assert_equal(ho.labels[1], "Left R1") + assert_equal(ho.labels[2], "Right R1") + assert_equal(ho.labels[3], "Left R2") + assert_equal(ho.labels[4], "Right R2") + assert_equal(ho.labels[5], "Left R3") + assert_equal(ho.labels[6], "Right R3") @with_setup(setup_mock, teardown_mock) @@ -169,8 +302,8 @@ def test_fetch_atlas_smith_2009(): assert_not_equal(bunch.description, '') -def test_fetch_atlas_power_2011(): - bunch = atlas.fetch_atlas_power_2011() +def test_fetch_coords_power_2011(): + bunch = atlas.fetch_coords_power_2011() assert_equal(len(bunch.rois), 264) assert_not_equal(bunch.description, '') @@ -206,8 +339,24 @@ def test_fetch_atlas_destrieux_2009(): @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_atlas_msdl(): + datadir = os.path.join(tst.tmpdir, 'msdl_atlas') + os.mkdir(datadir) + os.mkdir(os.path.join(datadir, 'MSDL_rois')) + data_dir = os.path.join(datadir, 'MSDL_rois', 'msdl_rois_labels.csv') + csv = np.rec.array([(1.5, 1.5, 1.5, 'Aud', 'Aud'), + (1.2, 1.3, 1.4, 'DMN', 'DMN')], + dtype=[('x', '" "") dataset = atlas.fetch_atlas_aal(data_dir=tst.tmpdir, verbose=0) - assert_true(isinstance(dataset.regions, _basestring)) - assert_true(isinstance(dataset.labels, dict)) + assert_true(isinstance(dataset.maps, _basestring)) + assert_true(isinstance(dataset.labels, list)) + assert_true(isinstance(dataset.indices, list)) assert_equal(len(tst.mock_url_request.urls), 1) assert_raises_regex(ValueError, 'The version of AAL requested "FLS33"', @@ -247,3 +397,149 @@ def test_fetch_atlas_aal(): data_dir=tst.tmpdir, verbose=0) assert_not_equal(dataset.description, '') + + +@with_setup(setup_mock, teardown_mock) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_atlas_basc_multiscale_2015(): + # default version='sym' + data_sym = atlas.fetch_atlas_basc_multiscale_2015(data_dir=tst.tmpdir, + verbose=0) + # version='asym' + data_asym = atlas.fetch_atlas_basc_multiscale_2015(version='asym', + verbose=0, + data_dir=tst.tmpdir) + + keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064', + 'scale122', 'scale197', 'scale325', 'scale444'] + + dataset_name = 'basc_multiscale_2015' + name_sym = 'template_cambridge_basc_multiscale_nii_sym' + basenames_sym = ['template_cambridge_basc_multiscale_sym_' + + key + '.nii.gz' for key in keys] + for key, basename_sym in zip(keys, basenames_sym): + assert_equal(data_sym[key], os.path.join(tst.tmpdir, dataset_name, + name_sym, basename_sym)) + + name_asym = 'template_cambridge_basc_multiscale_nii_asym' + basenames_asym = ['template_cambridge_basc_multiscale_asym_' + + key + '.nii.gz' for key in keys] + for key, basename_asym in zip(keys, basenames_asym): + assert_equal(data_asym[key], os.path.join(tst.tmpdir, dataset_name, + name_asym, basename_asym)) + + assert_equal(len(data_sym), 10) + 
assert_raises_regex(ValueError, + 'The version of Brain parcellations requested "aym"', + atlas.fetch_atlas_basc_multiscale_2015, version="aym", + data_dir=tst.tmpdir, verbose=0) + + assert_equal(len(tst.mock_url_request.urls), 2) + assert_not_equal(data_sym.description, '') + assert_not_equal(data_asym.description, '') + + +def test_fetch_coords_dosenbach_2010(): + bunch = atlas.fetch_coords_dosenbach_2010() + assert_equal(len(bunch.rois), 160) + assert_equal(len(bunch.labels), 160) + assert_equal(len(np.unique(bunch.networks)), 6) + assert_not_equal(bunch.description, '') + np.testing.assert_array_equal(bunch.networks, np.sort(bunch.networks)) + + bunch = atlas.fetch_coords_dosenbach_2010(ordered_regions=False) + assert_true(np.any(bunch.networks != np.sort(bunch.networks))) + + +@with_setup(setup_mock, teardown_mock) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_atlas_allen_2011(): + bunch = atlas.fetch_atlas_allen_2011(data_dir=tst.tmpdir, verbose=0) + keys = ("maps", + "rsn28", + "comps") + + filenames = ["ALL_HC_unthresholded_tmaps.nii", + "RSN_HC_unthresholded_tmaps.nii", + "rest_hcp_agg__component_ica_.nii"] + + assert_equal(len(tst.mock_url_request.urls), 3) + for key, fn in zip(keys, filenames): + assert_equal(bunch[key], os.path.join(tst.tmpdir, 'allen_rsn_2011', fn)) + + assert_not_equal(bunch.description, '') + + +@with_setup(setup_mock, teardown_mock) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_atlas_surf_destrieux(data_dir=tst.tmpdir, verbose=0): + + # Old nibabel versions does not support 'write_annot' + if LooseVersion(nibabel.__version__) <= LooseVersion('1.2.0'): + raise SkipTest + + data_dir = os.path.join(tst.tmpdir, 'destrieux_surface') + os.mkdir(data_dir) + # Create mock annots + for hemi in ('left', 'right'): + nibabel.freesurfer.write_annot( + os.path.join(data_dir, + '%s.aparc.a2009s.annot' % hemi), + np.arange(4), np.zeros((4, 5)), 5 * ['a'], + ) + + bunch = atlas.fetch_atlas_surf_destrieux(data_dir=tst.tmpdir, verbose=0) + # Our mock annots have 4 labels + assert_equal(len(bunch.labels), 4) + assert_equal(bunch.map_left.shape, (4, )) + assert_equal(bunch.map_right.shape, (4, )) + assert_not_equal(bunch.description, '') + + +def _get_small_fake_talairach(): + labels = ['*', 'b', 'a'] + all_labels = itertools.product(*(labels,) * 5) + labels_txt = '\n'.join(map('.'.join, all_labels)) + extensions = nibabel.nifti1.Nifti1Extensions([ + nibabel.nifti1.Nifti1Extension( + 'afni', labels_txt.encode('utf-8')) + ]) + img = nibabel.Nifti1Image( + np.arange(243).reshape((3, 9, 9)), + np.eye(4), nibabel.Nifti1Header(extensions=extensions)) + return img, all_labels + + +def _mock_talairach_fetch_files(data_dir, *args, **kwargs): + img, all_labels = _get_small_fake_talairach() + file_name = os.path.join(data_dir, 'talairach.nii') + img.to_filename(file_name) + return [file_name] + + +@with_setup(setup_mock, teardown_mock) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_atlas_talairach(data_dir=tst.tmpdir): + atlas._fetch_files = _mock_talairach_fetch_files + level_values = np.ones((81, 3)) * [0, 1, 2] + talairach = atlas.fetch_atlas_talairach('hemisphere', data_dir=tst.tmpdir) + assert_array_equal(talairach.maps.get_data().ravel(), + level_values.T.ravel()) + assert_array_equal(talairach.labels, ['Background', 'b', 'a']) + talairach = atlas.fetch_atlas_talairach('ba', data_dir=tst.tmpdir) + assert_array_equal(talairach.maps.get_data().ravel(), + level_values.ravel()) + assert_raises(ValueError, 
atlas.fetch_atlas_talairach, 'bad_level') + +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_atlas_pauli_2017(): + data_dir = os.path.join(tst.tmpdir, 'pauli_2017') + + data = atlas.fetch_atlas_pauli_2017('labels', data_dir) + assert_equal(len(data.labels), 16) + + values = nibabel.load(data.maps).get_data() + assert_equal(len(np.unique(values)), 17) + + data = atlas.fetch_atlas_pauli_2017('prob', data_dir) + assert_equal(nibabel.load(data.maps).shape[-1], 16) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index a866616040..f060ef42d6 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -6,18 +6,19 @@ import os import numpy as np -from tempfile import mkdtemp +import json import nibabel +import gzip from sklearn.utils import check_random_state from nose import with_setup -from nose.tools import (assert_true, assert_equal, assert_raises, - assert_not_equal) +from nose.tools import assert_true, assert_equal, assert_not_equal from . import test_utils as tst from nilearn.datasets import utils, func +from nilearn._utils.testing import assert_raises_regex -from nilearn._utils.compat import _basestring, _urllib +from nilearn._utils.compat import _basestring def setup_mock(): @@ -28,74 +29,50 @@ def teardown_mock(): return tst.teardown_mock(utils, func) -@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) -def test_fetch_haxby_simple(): - local_url = "file:" + _urllib.request.pathname2url(os.path.join(tst.datadir, - "pymvpa-exampledata.tar.bz2")) - haxby = func.fetch_haxby_simple(data_dir=tst.tmpdir, url=local_url, - verbose=0) - datasetdir = os.path.join(tst.tmpdir, 'haxby2001_simple', 'pymvpa-exampledata') - for key, file in [ - ('session_target', 'attributes.txt'), - ('func', 'bold.nii.gz'), - ('conditions_target', 'attributes_literal.txt')]: - assert_equal(haxby[key], [os.path.join(datasetdir, file)]) - assert_true(os.path.exists(os.path.join(datasetdir, file))) - - assert_equal(haxby['mask'], os.path.join(datasetdir, 'mask.nii.gz')) - - -@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) -def test_fail_fetch_haxby_simple(): - # Test a dataset fetching failure to validate sandboxing - local_url = "file:" + _urllib.request.pathname2url(os.path.join(tst.datadir, - "pymvpa-exampledata.tar.bz2")) - datasetdir = os.path.join(tst.tmpdir, 'haxby2001_simple', 'pymvpa-exampledata') - os.makedirs(datasetdir) - # Create a dummy file. If sandboxing is successful, it won't be overwritten - dummy = open(os.path.join(datasetdir, 'attributes.txt'), 'w') - dummy.write('stuff') - dummy.close() - - path = 'pymvpa-exampledata' - - opts = {'uncompress': True} - files = [ - (os.path.join(path, 'attributes.txt'), local_url, opts), - # The following file does not exists. 
It will cause an abortion of - # the fetching procedure - (os.path.join(path, 'bald.nii.gz'), local_url, opts) - ] - - assert_raises(IOError, utils._fetch_files, - os.path.join(tst.tmpdir, 'haxby2001_simple'), files, - verbose=0) - dummy = open(os.path.join(datasetdir, 'attributes.txt'), 'r') - stuff = dummy.read(5) - dummy.close() - assert_equal(stuff, 'stuff') - - @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_haxby(): for i in range(1, 6): - haxby = func.fetch_haxby(data_dir=tst.tmpdir, n_subjects=i, + haxby = func.fetch_haxby(data_dir=tst.tmpdir, subjects=[i], verbose=0) # subject_data + (md5 + mask if first subj) assert_equal(len(tst.mock_url_request.urls), 1 + 2 * (i == 1)) - assert_equal(len(haxby.func), i) - assert_equal(len(haxby.anat), i) - assert_equal(len(haxby.session_target), i) + assert_equal(len(haxby.func), 1) + assert_equal(len(haxby.anat), 1) + assert_equal(len(haxby.session_target), 1) assert_true(haxby.mask is not None) - assert_equal(len(haxby.mask_vt), i) - assert_equal(len(haxby.mask_face), i) - assert_equal(len(haxby.mask_house), i) - assert_equal(len(haxby.mask_face_little), i) - assert_equal(len(haxby.mask_house_little), i) + assert_equal(len(haxby.mask_vt), 1) + assert_equal(len(haxby.mask_face), 1) + assert_equal(len(haxby.mask_house), 1) + assert_equal(len(haxby.mask_face_little), 1) + assert_equal(len(haxby.mask_house_little), 1) tst.mock_url_request.reset() assert_not_equal(haxby.description, '') + # subjects with list + subjects = [1, 2, 6] + haxby = func.fetch_haxby(data_dir=tst.tmpdir, subjects=subjects, + verbose=0) + assert_equal(len(haxby.func), len(subjects)) + assert_equal(len(haxby.mask_house_little), len(subjects)) + assert_equal(len(haxby.anat), len(subjects)) + assert_true(haxby.anat[2] is None) + assert_true(isinstance(haxby.mask, _basestring)) + assert_equal(len(haxby.mask_face), len(subjects)) + assert_equal(len(haxby.session_target), len(subjects)) + assert_equal(len(haxby.mask_vt), len(subjects)) + assert_equal(len(haxby.mask_face_little), len(subjects)) + + subjects = ['a', 8] + message = "You provided invalid subject id {0} in a list" + + for sub_id in subjects: + assert_raises_regex(ValueError, + message.format(sub_id), + func.fetch_haxby, + data_dir=tst.tmpdir, + subjects=[sub_id]) + @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) @@ -131,14 +108,14 @@ def test_fetch_adhd(): sub1 = [3902469, 7774305, 3699991] sub2 = [2014113, 4275075, 1019436, - 3154996, 3884955, 27034, - 4134561, 27018, 6115230, - 27037, 8409791, 27011] + 3154996, 3884955, 27034, + 4134561, 27018, 6115230, + 27037, 8409791, 27011] sub3 = [3007585, 8697774, 9750701, - 10064, 21019, 10042, + 10064, 21019, 10042, 10128, 2497695, 4164316, - 1552181, 4046678, 23012] - sub4 = [1679142, 1206380, 23008, + 1552181, 4046678, 23012] + sub4 = [1679142, 1206380, 23008, 4016887, 1418396, 2950754, 3994098, 3520880, 1517058, 9744150, 1562298, 3205761, 3624598] @@ -164,6 +141,7 @@ def test_miyawaki2008(): assert_equal(len(dataset.label), 32) assert_true(isinstance(dataset.mask, _basestring)) assert_equal(len(dataset.mask_roi), 38) + assert_true(isinstance(dataset.background, _basestring)) assert_equal(len(tst.mock_url_request.urls), 1) assert_not_equal(dataset.description, '') @@ -284,6 +262,33 @@ def test_fetch_localizer_contrasts(): assert_equal(len(dataset.tmaps), 94) assert_not_equal(dataset.description, '') + # grab a given list of subjects + dataset2 = 
func.fetch_localizer_contrasts(["checkerboard"], + n_subjects=[2, 3, 5], + data_dir=tst.tmpdir, + url=local_url, + get_anats=True, + get_masks=True, + get_tmaps=True, + verbose=0) + + # Check that we are getting only 3 subjects + assert_equal(dataset2.ext_vars.size, 3) + assert_equal(len(dataset2.anats), 3) + assert_equal(len(dataset2.cmaps), 3) + assert_equal(len(dataset2.masks), 3) + assert_equal(len(dataset2.tmaps), 3) + np.testing.assert_array_equal(dataset2.ext_vars, + dataset.ext_vars[[1, 2, 4]]) + np.testing.assert_array_equal(dataset2.anats, + np.array(dataset.anats)[[1, 2, 4]]) + np.testing.assert_array_equal(dataset2.cmaps, + np.array(dataset.cmaps)[[1, 2, 4]]) + np.testing.assert_array_equal(dataset2.masks, + np.array(dataset.masks)[[1, 2, 4]]) + np.testing.assert_array_equal(dataset2.tmaps, + np.array(dataset.tmaps)[[1, 2, 4]]) + @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) @@ -301,8 +306,8 @@ def test_fetch_localizer_calculation_task(): verbose=0) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.cmaps[0], _basestring)) - assert_equal(dataset.ext_vars.size, 94) - assert_equal(len(dataset.cmaps), 94) + assert_equal(dataset.ext_vars.size, 1) + assert_equal(len(dataset.cmaps), 1) # 20 subjects dataset = func.fetch_localizer_calculation_task(n_subjects=20, @@ -316,6 +321,37 @@ def test_fetch_localizer_calculation_task(): assert_not_equal(dataset.description, '') +@with_setup(setup_mock, teardown_mock) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_localizer_button_task(): + local_url = "file://" + tst.datadir + ids = np.asarray(['S%2d' % i for i in range(94)]) + ids = ids.view(dtype=[('subject_id', 'S3')]) + tst.mock_fetch_files.add_csv('cubicwebexport.csv', ids) + tst.mock_fetch_files.add_csv('cubicwebexport2.csv', ids) + + # Disabled: cannot be tested without actually fetching covariates CSV file + # All subjects + dataset = func.fetch_localizer_button_task(data_dir=tst.tmpdir, + url=local_url, + verbose=0) + assert_true(isinstance(dataset.ext_vars, np.recarray)) + assert_true(isinstance(dataset.cmaps[0], _basestring)) + assert_equal(dataset.ext_vars.size, 1) + assert_equal(len(dataset.cmaps), 1) + + # 20 subjects + dataset = func.fetch_localizer_button_task(n_subjects=20, + data_dir=tst.tmpdir, + url=local_url, + verbose=0) + assert_true(isinstance(dataset.ext_vars, np.recarray)) + assert_true(isinstance(dataset.cmaps[0], _basestring)) + assert_equal(dataset.ext_vars.size, 20) + assert_equal(len(dataset.cmaps), 20) + assert_not_equal(dataset.description, '') + + @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_abide_pcp(): @@ -334,6 +370,11 @@ def test_fetch_abide_pcp(): assert_equal(len(dataset.func_preproc), 400) assert_not_equal(dataset.description, '') + # Smoke test using only a string, rather than a list of strings + dataset = func.fetch_abide_pcp(data_dir=tst.tmpdir, url=local_url, + quality_checked=False, verbose=0, + derivatives='func_preproc') + def test__load_mixed_gambles(): rng = check_random_state(42) @@ -349,7 +390,7 @@ def test__load_mixed_gambles(): assert_equal(len(zmaps), len(gain)) -@with_setup(setup_mock) +@with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_mixed_gambles(): local_url = "file://" + os.path.join(tst.datadir, @@ -362,3 +403,215 @@ def test_fetch_mixed_gambles(): assert_equal(mgambles["zmaps"][0], os.path.join(datasetdir, 
"zmaps", "sub001_zmaps.nii.gz")) assert_equal(len(mgambles["zmaps"]), n_subjects) + + +def test_check_parameters_megatrawls_datasets(): + # testing whether the function raises the same error message + # if invalid input parameters are provided + message = "Invalid {0} input is provided: {1}." + + for invalid_input_dim in [1, 5, 30]: + assert_raises_regex(ValueError, + message.format('dimensionality', invalid_input_dim), + func.fetch_megatrawls_netmats, + dimensionality=invalid_input_dim) + + for invalid_input_timeserie in ['asdf', 'time', 'st2']: + assert_raises_regex(ValueError, + message.format('timeseries', invalid_input_timeserie), + func.fetch_megatrawls_netmats, + timeseries=invalid_input_timeserie) + + for invalid_output_name in ['net1', 'net2']: + assert_raises_regex(ValueError, + message.format('matrices', invalid_output_name), + func.fetch_megatrawls_netmats, + matrices=invalid_output_name) + + +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_megatrawls_netmats(): + # smoke test to see that files are fetched and read properly + # since we are loading data present in it + files_dir = os.path.join(tst.tmpdir, 'Megatrawls', '3T_Q1-Q6related468_MSMsulc_d100_ts3') + os.makedirs(files_dir) + with open(os.path.join(files_dir, 'Znet2.txt'), 'w') as net_file: + net_file.write("1") + + files_dir2 = os.path.join(tst.tmpdir, 'Megatrawls', '3T_Q1-Q6related468_MSMsulc_d300_ts2') + os.makedirs(files_dir2) + with open(os.path.join(files_dir2, 'Znet1.txt'), 'w') as net_file2: + net_file2.write("1") + + megatrawl_netmats_data = func.fetch_megatrawls_netmats(data_dir=tst.tmpdir) + + # expected number of returns in output name should be equal + assert_equal(len(megatrawl_netmats_data), 5) + # check if returned bunch should not be empty + # dimensions + assert_not_equal(megatrawl_netmats_data.dimensions, '') + # timeseries + assert_not_equal(megatrawl_netmats_data.timeseries, '') + # matrices + assert_not_equal(megatrawl_netmats_data.matrices, '') + # correlation matrices + assert_not_equal(megatrawl_netmats_data.correlation_matrices, '') + # description + assert_not_equal(megatrawl_netmats_data.description, '') + + # check if input provided for dimensions, timeseries, matrices to be same + # to user settings + netmats_data = func.fetch_megatrawls_netmats(data_dir=tst.tmpdir, + dimensionality=300, + timeseries='multiple_spatial_regression', + matrices='full_correlation') + assert_equal(netmats_data.dimensions, 300) + assert_equal(netmats_data.timeseries, 'multiple_spatial_regression') + assert_equal(netmats_data.matrices, 'full_correlation') + + +@with_setup(setup_mock, teardown_mock) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_cobre(): + ids_n = [40000, 40001, 40002, 40003, 40004, 40005, 40006, 40007, 40008, + 40009, 40010, 40011, 40012, 40013, 40014, 40015, 40016, 40017, + 40018, 40019, 40020, 40021, 40022, 40023, 40024, 40025, 40026, + 40027, 40028, 40029, 40030, 40031, 40032, 40033, 40034, 40035, + 40036, 40037, 40038, 40039, 40040, 40041, 40042, 40043, 40044, + 40045, 40046, 40047, 40048, 40049, 40050, 40051, 40052, 40053, + 40054, 40055, 40056, 40057, 40058, 40059, 40060, 40061, 40062, + 40063, 40064, 40065, 40066, 40067, 40068, 40069, 40071, 40072, + 40073, 40074, 40075, 40076, 40077, 40078, 40079, 40080, 40081, + 40082, 40084, 40085, 40086, 40087, 40088, 40089, 40090, 40091, + 40092, 40093, 40094, 40095, 40096, 40097, 40098, 40099, 40100, + 40101, 40102, 40103, 40104, 40105, 40106, 40107, 40108, 40109, + 40110, 40111, 40112, 40113, 40114, 40115, 
40116, 40117, 40118, + 40119, 40120, 40121, 40122, 40123, 40124, 40125, 40126, 40127, + 40128, 40129, 40130, 40131, 40132, 40133, 40134, 40135, 40136, + 40137, 40138, 40139, 40140, 40141, 40142, 40143, 40144, 40145, + 40146, 40147] + + ids = np.asarray(ids_n, dtype='|U17') + + current_age = np.ones(len(ids), dtype=' 2) + assert_false(filter_1({'c': 2, 'd': 3})) + assert_true(filter_1({'c': 2, 'd': 3, 'e': 4})) + + +def test_result_filter_combinations(): + filter_0 = neurovault.ResultFilter(a=0, b=1) + filter_1 = neurovault.ResultFilter(c=2, d=3) + + filter_0_and_1 = filter_0.AND(filter_1) + assert_true(filter_0_and_1({'a': 0, 'b': 1, 'c': 2, 'd': 3})) + assert_false(filter_0_and_1({'a': 0, 'b': 1, 'c': 2, 'd': None})) + assert_false(filter_0_and_1({'a': None, 'b': 1, 'c': 2, 'd': 3})) + + filter_0_or_1 = filter_0.OR(filter_1) + assert_true(filter_0_or_1({'a': 0, 'b': 1, 'c': 2, 'd': 3})) + assert_true(filter_0_or_1({'a': 0, 'b': 1, 'c': 2, 'd': None})) + assert_true(filter_0_or_1({'a': None, 'b': 1, 'c': 2, 'd': 3})) + assert_false(filter_0_or_1({'a': None, 'b': 1, 'c': 2, 'd': None})) + + filter_0_xor_1 = filter_0.XOR(filter_1) + assert_false(filter_0_xor_1({'a': 0, 'b': 1, 'c': 2, 'd': 3})) + assert_true(filter_0_xor_1({'a': 0, 'b': 1, 'c': 2, 'd': None})) + assert_true(filter_0_xor_1({'a': None, 'b': 1, 'c': 2, 'd': 3})) + assert_false(filter_0_xor_1({'a': None, 'b': 1, 'c': 2, 'd': None})) + + not_filter_0 = filter_0.NOT() + assert_true(not_filter_0({})) + assert_false(not_filter_0({'a': 0, 'b': 1})) + + filter_2 = neurovault.ResultFilter( + {'a': neurovault.NotNull()}).AND(lambda d: len(d) < 2) + assert_true(filter_2({'a': 'a'})) + assert_false(filter_2({'a': ''})) + assert_false(filter_2({'a': 'a', 'b': 0})) + + filt = neurovault.ResultFilter( + a=0).AND(neurovault.ResultFilter(b=1).OR(neurovault.ResultFilter(b=2))) + assert_true(filt({'a': 0, 'b': 1})) + assert_false(filt({'a': 0, 'b': 0})) + + +def _fail(*args, **kwargs): + raise neurovault.URLError('problem') + + +class _FailingDownloads(): + + def __init__(self): + self.original_fetch = None + + def __enter__(self): + self.original_fetch = neurovault._fetch_file + neurovault._fetch_file = _fail + + def __exit__(self, *args): + neurovault._fetch_file = self.original_fetch + + +@ignore_connection_errors +def test_simple_download(): + with _TestTemporaryDirectory() as temp_dir: + downloaded_file = neurovault._simple_download( + 'http://neurovault.org/media/images/35/Fig3B_zstat1.nii.gz', + os.path.join(temp_dir, 'image_35.nii.gz'), temp_dir) + assert_true(os.path.isfile(downloaded_file)) + with _FailingDownloads(): + assert_raises(neurovault.URLError, + neurovault._simple_download, 'http://', + os.path.join(temp_dir, 'bad.nii.gz'), temp_dir) + + +def test_neurosynth_words_vectorized(): + n_im = 5 + with _TestTemporaryDirectory() as temp_dir: + words_files = [ + os.path.join(temp_dir, 'words_for_image_{0}.json'.format(i)) for + i in range(n_im)] + words = [str(i) for i in range(n_im)] + for i, file_name in enumerate(words_files): + word_weights = np.zeros(n_im) + word_weights[i] = 1 + words_dict = {'data': + {'values': + dict([(k, v) for + k, v in zip(words, word_weights)])}} + with open(file_name, 'wb') as words_file: + words_file.write(json.dumps(words_dict).encode('utf-8')) + freq, voc = neurovault.neurosynth_words_vectorized(words_files) + assert_equal(freq.shape, (n_im, n_im)) + assert((freq.sum(axis=0) == np.ones(n_im)).all()) + assert_warns(UserWarning, neurovault.neurosynth_words_vectorized, + (os.path.join(temp_dir, 
'no_words_here.json'),)) + + +def test_write_read_metadata(): + metadata = {'relative_path': 'collection_1', + 'absolute_path': os.path.join('tmp', 'collection_1')} + with _TestTemporaryDirectory() as temp_dir: + neurovault._write_metadata( + metadata, os.path.join(temp_dir, 'metadata.json')) + with open(os.path.join(temp_dir, 'metadata.json'), 'rb') as meta_file: + written_metadata = json.loads(meta_file.read().decode('utf-8')) + assert_true('relative_path' in written_metadata) + assert_false('absolute_path' in written_metadata) + read_metadata = neurovault._add_absolute_paths('tmp', written_metadata) + assert_equal(read_metadata['absolute_path'], + os.path.join('tmp', 'collection_1')) + + +def test_add_absolute_paths(): + meta = {'col_relative_path': 'collection_1', + 'col_absolute_path': os.path.join( + 'dir_0', 'neurovault', 'collection_1')} + meta = neurovault._add_absolute_paths(os.path.join('dir_1', 'neurovault'), + meta, force=False) + assert_equal(meta['col_absolute_path'], + os.path.join('dir_0', 'neurovault', 'collection_1')) + meta = neurovault._add_absolute_paths(os.path.join('dir_1', 'neurovault'), + meta, force=True) + assert_equal(meta['col_absolute_path'], + os.path.join('dir_1', 'neurovault', 'collection_1')) + meta = {'id': 0} + meta_transformed = neurovault._add_absolute_paths( + os.path.join('dir_1', 'neurovault'), meta, force=True) + assert_equal(meta, meta_transformed) + + +def test_json_add_collection_dir(): + with _TestTemporaryDirectory() as data_temp_dir: + coll_dir = os.path.join(data_temp_dir, 'collection_1') + os.makedirs(coll_dir) + coll_file_name = os.path.join(coll_dir, 'collection_1.json') + with open(coll_file_name, 'wb') as coll_file: + coll_file.write(json.dumps({'id': 1}).encode('utf-8')) + loaded = neurovault._json_add_collection_dir(coll_file_name) + assert_equal(loaded['absolute_path'], coll_dir) + assert_equal(loaded['relative_path'], 'collection_1') + + +def test_json_add_im_files_paths(): + with _TestTemporaryDirectory() as data_temp_dir: + coll_dir = os.path.join(data_temp_dir, 'collection_1') + os.makedirs(coll_dir) + im_file_name = os.path.join(coll_dir, 'image_1.json') + with open(im_file_name, 'wb') as im_file: + im_file.write(json.dumps({'id': 1}).encode('utf-8')) + loaded = neurovault._json_add_im_files_paths(im_file_name) + assert_equal(loaded['relative_path'], + os.path.join('collection_1', 'image_1.nii.gz')) + assert_true(loaded.get('neurosynth_words_relative_path') is None) + + +def test_split_terms(): + terms, server_terms = neurovault._split_terms( + {'DOI': neurovault.NotNull(), + 'name': 'my_name', 'unknown_term': 'something'}, + neurovault._COL_FILTERS_AVAILABLE_ON_SERVER) + assert_equal(terms, + {'DOI': neurovault.NotNull(), 'unknown_term': 'something'}) + assert_equal(server_terms, {'name': 'my_name'}) + + +def test_move_unknown_terms_to_local_filter(): + terms, new_filter = neurovault._move_unknown_terms_to_local_filter( + {'a': 0, 'b': 1}, neurovault.ResultFilter(), ('a',)) + assert_equal(terms, {'a': 0}) + assert_false(new_filter({'b': 0})) + assert_true(new_filter({'b': 1})) + + +def test_move_col_id(): + im_terms, col_terms = neurovault._move_col_id( + {'collection_id': 1, 'not_mni': False}, {}) + assert_equal(im_terms, {'not_mni': False}) + assert_equal(col_terms, {'id': 1}) + + assert_warns(UserWarning, neurovault._move_col_id, + {'collection_id': 1, 'not_mni': False}, {'id': 2}) + + +def test_download_image_terms(): + with _TestTemporaryDirectory() as temp_dir: + image_info = {'id': 'a'} + collection = {'relative_path': 
'collection', + 'absolute_path': os.path.join(temp_dir, 'collection')} + os.makedirs(collection['absolute_path']) + download_params = {'temp_dir': temp_dir, 'verbose': 3, + 'fetch_neurosynth_words': True} + with _FailingDownloads(): + neurovault._download_image_terms( + image_info, collection, download_params) + download_params['allow_neurosynth_failure'] = False + assert_raises(RuntimeError, + neurovault._download_image_terms, + image_info, collection, download_params) + with open(os.path.join( + collection['absolute_path'], + 'neurosynth_words_for_image_a.json'), 'w'): + pass + neurovault._download_image_terms( + image_info, collection, download_params) + + +def test_download_image(): + image = neurovault._download_image(None, {}) + assert image is None + + +def test_fetch_neurovault(): + with _TestTemporaryDirectory() as temp_dir: + # check that nothing is downloaded in offline mode + data = neurovault.fetch_neurovault( + mode='offline', data_dir=temp_dir) + assert_equal(len(data.images), 0) + # try to download an image + data = neurovault.fetch_neurovault( + max_images=1, fetch_neurosynth_words=True, + mode='overwrite', data_dir=temp_dir) + # specifying a filter while leaving the default term + # filters in place should raise a warning. + assert_warns(UserWarning, neurovault.fetch_neurovault, + image_filter=lambda x: True, max_images=1, + mode='offline') + # if neurovault was available one image matching + # default filters should have been downloaded + if data.images: + assert_equal(len(data.images), 1) + meta = data.images_meta[0] + assert_false(meta['not_mni']) + assert_true(meta['is_valid']) + assert_false(meta['not_mni']) + assert_false(meta['is_thresholded']) + assert_false(meta['map_type'] in [ + 'ROI/mask', 'anatomical', 'parcellation']) + assert_false(meta['image_type'] == 'atlas') + + # using a data directory we can't write into should raise a + # warning unless mode is 'offline' + os.chmod(temp_dir, stat.S_IREAD | stat.S_IEXEC) + os.chmod(os.path.join(temp_dir, 'neurovault'), + stat.S_IREAD | stat.S_IEXEC) + if os.access(os.path.join(temp_dir, 'neurovault'), os.W_OK): + return + assert_warns(UserWarning, neurovault.fetch_neurovault, + data_dir=temp_dir) + + +def test_fetch_neurovault_ids(): + # test using explicit id list instead of filters, and downloading + # an image which has no collection dir or metadata yet. 
+ with _TestTemporaryDirectory() as data_dir: + assert_raises(ValueError, neurovault.fetch_neurovault_ids, mode='bad') + data = neurovault.fetch_neurovault_ids( + image_ids=[111], collection_ids=[307], data_dir=data_dir) + if len(data.images) == 2: + assert_equal([img['id'] for img in data['images_meta']], + [1750, 111]) + assert_equal(os.path.dirname(data['images'][0]), + data['collections_meta'][0]['absolute_path']) + # check image can be loaded again from disk + data = neurovault.fetch_neurovault_ids( + image_ids=[111], data_dir=data_dir, mode='offline') + assert_equal(len(data.images), 1) + # check that download_new mode forces overwrite + modified_meta = data['images_meta'][0] + assert_equal(modified_meta['figure'], '3A') + modified_meta['figure'] = '3B' + # mess it up on disk + meta_path = os.path.join( + os.path.dirname(modified_meta['absolute_path']), + 'image_111_metadata.json') + with open(meta_path, 'wb') as meta_f: + meta_f.write(json.dumps(modified_meta).encode('UTF-8')) + # fresh download + data = neurovault.fetch_neurovault_ids( + image_ids=[111], data_dir=data_dir, mode='download_new') + data = neurovault.fetch_neurovault_ids( + image_ids=[111], data_dir=data_dir, mode='offline') + # should not have changed + assert_equal(data['images_meta'][0]['figure'], '3B') + data = neurovault.fetch_neurovault_ids( + image_ids=[111], data_dir=data_dir, mode='overwrite') + data = neurovault.fetch_neurovault_ids( + image_ids=[111], data_dir=data_dir, mode='offline') + # should be back to the original version + assert_equal(data['images_meta'][0]['figure'], '3A') diff --git a/nilearn/datasets/tests/test_struct.py b/nilearn/datasets/tests/test_struct.py index 66833d092b..f94a99833e 100644 --- a/nilearn/datasets/tests/test_struct.py +++ b/nilearn/datasets/tests/test_struct.py @@ -6,8 +6,8 @@ import os import shutil +import nibabel import numpy as np -from tempfile import mkdtemp from nose import with_setup from nose.tools import assert_true, assert_equal, assert_not_equal @@ -141,4 +141,37 @@ def test_load_mni152_template(): # All subjects template_nii = struct.load_mni152_template() assert_equal(template_nii.shape, (91, 109, 91)) - assert_equal(template_nii.get_header().get_zooms(), (2.0, 2.0, 2.0)) + assert_equal(template_nii.header.get_zooms(), (2.0, 2.0, 2.0)) + + +def test_load_mni152_brain_mask(): + brain_mask = struct.load_mni152_brain_mask() + assert_true(isinstance(brain_mask, nibabel.Nifti1Image)) + # standard MNI template shape + assert_equal(brain_mask.shape, (91, 109, 91)) + + +@with_setup(setup_mock, teardown_mock) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_icbm152_brain_gm_mask(): + dataset = struct.fetch_icbm152_2009(data_dir=tst.tmpdir, verbose=0) + struct.load_mni152_template().to_filename(dataset.gm) + grey_matter_img = struct.fetch_icbm152_brain_gm_mask(data_dir=tst.tmpdir, + verbose=0) + assert_true(isinstance(grey_matter_img, nibabel.Nifti1Image)) + + +@with_setup(setup_mock, teardown_mock) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_surf_fsaverage(): + # for mesh in ['fsaverage5', 'fsaverage']: + for mesh in ['fsaverage']: + + dataset = struct.fetch_surf_fsaverage( + mesh, data_dir=tst.tmpdir) + + keys = {'pial_left', 'pial_right', 'infl_left', 'infl_right', + 'sulc_left', 'sulc_right'} + + assert keys.issubset(set(dataset.keys())) + assert_not_equal(dataset.description, '') diff --git a/nilearn/datasets/tests/test_utils.py b/nilearn/datasets/tests/test_utils.py index 06736d3b61..74c62dfaae 100644 --- 
a/nilearn/datasets/tests/test_utils.py +++ b/nilearn/datasets/tests/test_utils.py @@ -20,7 +20,6 @@ from nilearn._utils.testing import (mock_request, wrap_chunk_read_, FetchFilesMock, assert_raises_regex) - currdir = os.path.dirname(os.path.abspath(__file__)) datadir = os.path.join(currdir, 'data') tmpdir = None @@ -263,6 +262,9 @@ def test_filter_columns(): f = datasets.utils._filter_columns(values, {'STR': b'b'}) assert_equal(np.sum(f), 167) + f = datasets.utils._filter_columns(values, {'STR': u'b'}) + assert_equal(np.sum(f), 167) + f = datasets.utils._filter_columns(values, {'INT': 1, 'STR': b'b'}) assert_equal(np.sum(f), 84) @@ -272,35 +274,46 @@ def test_filter_columns(): def test_uncompress(): - # Create dummy file - fd, temp = mkstemp() - os.close(fd) - # Create a zipfile + # for each kind of compression, we create: + # - a temporary directory (dtemp) + # - a compressed object (ztemp) + # - a temporary file-like object to compress into ztemp + # we then uncompress the ztemp object into dtemp under the name ftemp + # and check if ftemp exists dtemp = mkdtemp() ztemp = os.path.join(dtemp, 'test.zip') - with contextlib.closing(zipfile.ZipFile(ztemp, 'w')) as testzip: - testzip.write(temp) - datasets.utils._uncompress_file(ztemp, verbose=0) - assert(os.path.exists(os.path.join(dtemp, temp))) - shutil.rmtree(dtemp) - - dtemp = mkdtemp() - ztemp = os.path.join(dtemp, 'test.tar') - with contextlib.closing(tarfile.open(ztemp, 'w')) as tar: - tar.add(temp) - datasets.utils._uncompress_file(ztemp, verbose=0) - assert(os.path.exists(os.path.join(dtemp, temp))) - shutil.rmtree(dtemp) - - dtemp = mkdtemp() - ztemp = os.path.join(dtemp, 'test.gz') - f = gzip.open(ztemp, 'wb') - f.close() - datasets.utils._uncompress_file(ztemp, verbose=0) - assert(os.path.exists(os.path.join(dtemp, temp))) - shutil.rmtree(dtemp) - - os.remove(temp) + ftemp = 'test' + try: + with contextlib.closing(zipfile.ZipFile(ztemp, 'w')) as testzip: + testzip.writestr(ftemp, ' ') + datasets.utils._uncompress_file(ztemp, verbose=0) + assert (os.path.exists(os.path.join(dtemp, ftemp))) + shutil.rmtree(dtemp) + + dtemp = mkdtemp() + ztemp = os.path.join(dtemp, 'test.tar') + + # Create dummy file in the dtemp folder, so that the finally statement + # can easily remove it + fd, temp = mkstemp(dir=dtemp) + os.close(fd) + with contextlib.closing(tarfile.open(ztemp, 'w')) as tar: + tar.add(temp, arcname=ftemp) + datasets.utils._uncompress_file(ztemp, verbose=0) + assert (os.path.exists(os.path.join(dtemp, ftemp))) + shutil.rmtree(dtemp) + + dtemp = mkdtemp() + ztemp = os.path.join(dtemp, 'test.gz') + gzip.open(ztemp, 'wb').close() + datasets.utils._uncompress_file(ztemp, verbose=0) + # test.gz gets uncompressed into test + assert (os.path.exists(os.path.join(dtemp, 'test'))) + shutil.rmtree(dtemp) + finally: + # all temp files are created into dtemp except temp + if os.path.exists(dtemp): + shutil.rmtree(dtemp) @with_setup(setup_mock, teardown_mock) @@ -326,6 +339,7 @@ def test_fetch_file_overwrite(): with open(fil, 'r') as fp: assert_equal(fp.read(), 'some content') + # Overwrite existing file. # Overwrite existing file. 
fil = datasets.utils._fetch_file(url='http://foo/', data_dir=tmpdir, verbose=0, overwrite=True) @@ -335,7 +349,6 @@ def test_fetch_file_overwrite(): assert_equal(fp.read(), '') - @with_setup(setup_mock, teardown_mock) @with_setup(setup_tmpdata, teardown_tmpdata) def test_fetch_files_overwrite(): diff --git a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index 353da74a15..14324f47ad 100644 --- a/nilearn/datasets/utils.py +++ b/nilearn/datasets/utils.py @@ -63,14 +63,11 @@ def readlinkabs(link): return os.path.join(os.path.dirname(link), path) -def _chunk_report_(cur_chunk_size, bytes_so_far, total_size, initial_size, t0): +def _chunk_report_(bytes_so_far, total_size, initial_size, t0): """Show downloading percentage. Parameters ---------- - cur_chunk_size: int - Number of bytes downloaded on current iteration (0=>end of download) - bytes_so_far: int Number of downloaded bytes @@ -103,7 +100,7 @@ def _chunk_report_(cur_chunk_size, bytes_so_far, total_size, initial_size, t0): # Trailing whitespace is to erase extra char when message length # varies sys.stderr.write( - "\rDownloaded %d of %d bytes (%0.2f%%, %s remaining)" + "\rDownloaded %d of %d bytes (%.1f%%, %s remaining)" % (bytes_so_far, total_size, total_percent * 100, _format_time(time_remaining))) @@ -153,13 +150,18 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, total_size = None bytes_so_far = initial_size - t0 = time.time() + t0 = time_last_display = time.time() while True: chunk = response.read(chunk_size) bytes_so_far += len(chunk) - if report_hook: - _chunk_report_(len(chunk), bytes_so_far, total_size, initial_size, t0) - + time_last_read = time.time() + if (report_hook and + # Refresh report every half second or when download is + # finished. + (time_last_read > time_last_display + 0.5 or not chunk)): + _chunk_report_(bytes_so_far, + total_size, initial_size, t0) + time_last_display = time_last_read if chunk: local_file.write(chunk) else: @@ -168,6 +170,56 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, return +def get_data_dirs(data_dir=None): + """ Returns the directories in which nilearn looks for data. + + This is typically useful for the end-user to check where the data is + downloaded and stored. + + Parameters + ---------- + data_dir: string, optional + Path of the data directory. Used to force data storage in a specified + location. Default: None + + Returns + ------- + paths: list of strings + Paths of the dataset directories. + + Notes + ----- + This function retrieves the datasets directories using the following + priority : + 1. defaults system paths + 2. the keyword argument data_dir + 3. the global environment variable NILEARN_SHARED_DATA + 4. the user environment variable NILEARN_DATA + 5. nilearn_data in the user home folder + """ + # We build an array of successive paths by priority + # The boolean indicates if it is a pre_dir: in that case, we won't add the + # dataset name to the path. 
+ paths = [] + + # Check data_dir which force storage in a specific location + if data_dir is not None: + paths.extend(data_dir.split(os.pathsep)) + + # If data_dir has not been specified, then we crawl default locations + if data_dir is None: + global_data = os.getenv('NILEARN_SHARED_DATA') + if global_data is not None: + paths.extend(global_data.split(os.pathsep)) + + local_data = os.getenv('NILEARN_DATA') + if local_data is not None: + paths.extend(local_data.split(os.pathsep)) + + paths.append(os.path.expanduser('~/nilearn_data')) + return paths + + def _get_dataset_dir(dataset_name, data_dir=None, default_paths=None, verbose=1): """ Create if necessary and returns data directory of given dataset. @@ -203,29 +255,13 @@ def _get_dataset_dir(dataset_name, data_dir=None, default_paths=None, 4. the user environment variable NILEARN_DATA 5. nilearn_data in the user home folder """ - # We build an array of successive paths by priority - # The boolean indicates if it is a pre_dir: in that case, we won't add the - # dataset name to the path. paths = [] - - # Search given environment variables + # Search possible data-specific system paths if default_paths is not None: for default_path in default_paths: paths.extend([(d, True) for d in default_path.split(os.pathsep)]) - # Check data_dir which force storage in a specific location - if data_dir is not None: - paths.extend([(d, False) for d in data_dir.split(os.pathsep)]) - else: - global_data = os.getenv('NILEARN_SHARED_DATA') - if global_data is not None: - paths.extend([(d, False) for d in global_data.split(os.pathsep)]) - - local_data = os.getenv('NILEARN_DATA') - if local_data is not None: - paths.extend([(d, False) for d in local_data.split(os.pathsep)]) - - paths.append((os.path.expanduser('~/nilearn_data'), False)) + paths.extend([(d, False) for d in get_data_dirs(data_dir=data_dir)]) if verbose > 2: print('Dataset search paths: %s' % paths) @@ -292,8 +328,11 @@ def _uncompress_file(file_, delete_archive=True, verbose=1): processed = False if zipfile.is_zipfile(file_): z = zipfile.ZipFile(file_) - z.extractall(data_dir) + z.extractall(path=data_dir) z.close() + if delete_archive: + os.remove(file_) + file_ = filename processed = True elif ext == '.gz' or header.startswith(b'\x1f\x8b'): import gzip @@ -308,17 +347,17 @@ def _uncompress_file(file_, delete_archive=True, verbose=1): if delete_archive: os.remove(file_) file_ = filename - filename, ext = os.path.splitext(file_) processed = True - if tarfile.is_tarfile(file_): + if os.path.isfile(file_) and tarfile.is_tarfile(file_): with contextlib.closing(tarfile.open(file_, "r")) as tar: tar.extractall(path=data_dir) + if delete_archive: + os.remove(file_) processed = True if not processed: raise IOError( "[Uncompress] unknown archive file format: %s" % file_) - if delete_archive: - os.remove(file_) + if verbose > 0: sys.stderr.write('.. 
done.\n') except Exception as e: @@ -355,7 +394,6 @@ def _filter_column(array, col, criteria): not isinstance(criteria, bytes) and not isinstance(criteria, tuple) and isinstance(criteria, collections.Iterable)): - filter = np.zeros(array.shape[0], dtype=np.bool) for criterion in criteria: filter = np.logical_or(filter, @@ -372,6 +410,10 @@ def _filter_column(array, col, criteria): filter = array[col] <= criteria[1] return np.logical_and(filter, array[col] >= criteria[0]) + # Handle strings with different encodings + if isinstance(criteria, (_basestring, bytes)): + criteria = np.array(criteria).astype(array[col].dtype) + return array[col] == criteria @@ -531,15 +573,11 @@ def _fetch_file(url, data_dir, resume=True, overwrite=False, dt = time.time() - t0 if verbose > 0: # Complete the reporting hook - sys.stderr.write(' ...done. (%i seconds, %i min)\n' % (dt, dt // 60)) - except (_urllib.error.HTTPError, _urllib.error.URLError) as e: - if 'Error while fetching' not in str(e): - # For some odd reason, the error message gets doubled up - # (possibly from the re-raise), so only add extra info - # if it's not already there. - e.reason = ("%s| Error while fetching file %s; " - "dataset fetching aborted." % ( - str(e.reason), file_name)) + sys.stderr.write(' ...done. ({0:.0f} seconds, {1:.0f} min)\n' + .format(dt, dt // 60)) + except (_urllib.error.HTTPError, _urllib.error.URLError): + sys.stderr.write("Error while fetching file %s; dataset " + "fetching aborted." % (file_name)) raise finally: if local_file is not None: @@ -558,8 +596,8 @@ def _get_dataset_descr(ds_name): fname = ds_name try: - with open(os.path.join(module_path, 'description', fname + '.rst'))\ - as rst_file: + with open(os.path.join(module_path, 'description', fname + '.rst'), + 'rb') as rst_file: descr = rst_file.read() except IOError: descr = '' diff --git a/nilearn/decoding/__init__.py b/nilearn/decoding/__init__.py index 50826353a2..caac41de05 100644 --- a/nilearn/decoding/__init__.py +++ b/nilearn/decoding/__init__.py @@ -4,3 +4,5 @@ from .searchlight import SearchLight from .space_net import SpaceNetClassifier, SpaceNetRegressor + +__all__ = ['SearchLight', 'SpaceNetClassifier', 'SpaceNetRegressor'] diff --git a/nilearn/decoding/searchlight.py b/nilearn/decoding/searchlight.py index 02a487c164..705e099c13 100644 --- a/nilearn/decoding/searchlight.py +++ b/nilearn/decoding/searchlight.py @@ -20,19 +20,20 @@ import sklearn from sklearn.externals.joblib import Parallel, delayed, cpu_count from sklearn import svm -from sklearn.cross_validation import cross_val_score from sklearn.base import BaseEstimator from .. import masking from ..image.resampling import coord_transform from ..input_data.nifti_spheres_masker import _apply_mask_and_get_affinity from .._utils.compat import _basestring +from .._utils import check_niimg_4d +from sklearn.model_selection import cross_val_score ESTIMATOR_CATALOG = dict(svc=svm.LinearSVC, svr=svm.SVR) -def search_light(X, y, estimator, A, scoring=None, cv=None, n_jobs=-1, - verbose=0): +def search_light(X, y, estimator, A, groups=None, scoring=None, + cv=None, n_jobs=-1, verbose=0): """Function for computing a search_light Parameters @@ -50,6 +51,10 @@ def search_light(X, y, estimator, A, scoring=None, cv=None, n_jobs=-1, adjacency matrix. Defines for each feature the neigbhoring features following a given structure of the data. + groups : array-like, optional + group label for each sample for cross validation. 
default None + NOTE: will have no effect for scikit learn < 0.18 + scoring : string or callable, optional The scoring strategy to use. See the scikit-learn documentation for possible values. @@ -78,7 +83,7 @@ def search_light(X, y, estimator, A, scoring=None, cv=None, n_jobs=-1, scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_group_iter_search_light)( A.rows[list_i], - estimator, X, y, scoring, cv, + estimator, X, y, groups, scoring, cv, thread_id + 1, A.shape[0], verbose) for thread_id, list_i in enumerate(group_iter)) return np.concatenate(scores) @@ -111,7 +116,7 @@ def __iter__(self): yield list_i -def _group_iter_search_light(list_rows, estimator, X, y, +def _group_iter_search_light(list_rows, estimator, X, y, groups, scoring, cv, thread_id, total, verbose=0): """Function for grouped iterations of search_light @@ -130,6 +135,8 @@ def _group_iter_search_light(list_rows, estimator, X, y, y : array-like target variable to predict. + groups : array-like, optional + group label for each sample for cross validation. scoring : string or callable, optional Scoring strategy to use. See the scikit-learn documentation. If callable, takes as arguments the fitted estimator, the @@ -158,11 +165,8 @@ def _group_iter_search_light(list_rows, estimator, X, y, t0 = time.time() for i, row in enumerate(list_rows): kwargs = dict() - if not LooseVersion(sklearn.__version__) < LooseVersion('0.15'): - kwargs['scoring'] = scoring - elif scoring is not None: - warnings.warn('Scikit-learn version is too old. ' - 'scoring argument ignored', stacklevel=2) + kwargs['scoring'] = scoring + kwargs['groups'] = groups par_scores[i] = np.mean(cross_val_score(estimator, X[:, row], y, cv=cv, n_jobs=1, **kwargs)) @@ -196,11 +200,11 @@ class SearchLight(BaseEstimator): Parameters ----------- mask_img : Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html boolean image giving location of voxels containing usable signals. process_mask_img : Niimg-like object, optional - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html boolean image giving voxels on which searchlight should be computed. @@ -260,21 +264,29 @@ def __init__(self, mask_img, process_mask_img=None, radius=2., self.cv = cv self.verbose = verbose - def fit(self, imgs, y): + def fit(self, imgs, y, groups=None): """Fit the searchlight Parameters ---------- imgs : Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html 4D image. y : 1D array-like Target variable to predict. Must have exactly as many elements as 3D images in img. + groups : array-like, optional + group label for each sample for cross validation. Must have + exactly as many elements as 3D images in img. 
default None + NOTE: will have no effect for scikit learn < 0.18 + """ + # check if image is 4D + imgs = check_niimg_4d(imgs) + # Get the seeds process_mask_img = self.process_mask_img if self.process_mask_img is None: @@ -291,13 +303,13 @@ def fit(self, imgs, y): X, A = _apply_mask_and_get_affinity( process_mask_coords, imgs, self.radius, True, - mask_img=self.process_mask_img) + mask_img=self.mask_img) estimator = self.estimator if isinstance(estimator, _basestring): estimator = ESTIMATOR_CATALOG[estimator]() - scores = search_light(X, y, estimator, A, + scores = search_light(X, y, estimator, A, groups, self.scoring, self.cv, self.n_jobs, self.verbose) scores_3D = np.zeros(process_mask.shape) diff --git a/nilearn/decoding/space_net.py b/nilearn/decoding/space_net.py index 6c808a477d..55fab8d730 100644 --- a/nilearn/decoding/space_net.py +++ b/nilearn/decoding/space_net.py @@ -11,6 +11,8 @@ # THIRION Bertrand # License: simplified BSD +from distutils.version import LooseVersion +import sklearn import warnings import numbers import time @@ -18,88 +20,36 @@ from functools import partial import numpy as np from scipy import stats, ndimage -from sklearn.base import RegressorMixin, clone +from sklearn.base import RegressorMixin from sklearn.utils.extmath import safe_sparse_dot -from sklearn.linear_model.base import LinearModel, center_data +from sklearn.utils import check_array +from sklearn.linear_model.base import LinearModel from sklearn.feature_selection import (SelectPercentile, f_regression, f_classif) from sklearn.externals.joblib import Memory, Parallel, delayed -from sklearn.cross_validation import check_cv from sklearn.preprocessing import LabelBinarizer from sklearn.metrics import accuracy_score +from ..input_data.masker_validation import check_embedded_nifti_masker +from .._utils.param_validation import _adjust_screening_percentile +from sklearn.utils import check_X_y +from sklearn.model_selection import check_cv +from sklearn.linear_model.base import _preprocess_data as center_data from .._utils.compat import _basestring -from .._utils.fixes import atleast2d_or_csr from .._utils.cache_mixin import CacheMixin -from ..input_data import NiftiMasker from .objective_functions import _unmask from .space_net_solvers import (tvl1_solver, _graph_net_logistic, _graph_net_squared_loss) -# Volume of a standard (MNI152) brain mask in mm^3 -MNI152_BRAIN_VOLUME = 1827243. - - -def _get_mask_volume(mask_img): - """Computes the volume of a brain mask in mm^3 - - Parameters - ---------- - mask_img : nibabel image object - Input image whose voxel dimensions are to be computed. - - Returns - ------- - vol : float - The computed volume. - """ - vox_dims = mask_img.get_header().get_zooms()[:3] - return 1. * np.prod(vox_dims) * mask_img.get_data().astype(np.bool).sum() - - -def _adjust_screening_percentile(screening_percentile, mask_img, - verbose=0): - original_screening_percentile = screening_percentile - # correct screening_percentile according to the volume of the data mask - mask_volume = _get_mask_volume(mask_img) - if mask_volume > MNI152_BRAIN_VOLUME: - warnings.warn( - "Brain mask is bigger than the volume of a standard " - "humain brain. SpaceNet is probably not tuned to " - "be used on such data.", stacklevel=2) - elif mask_volume < .005 * MNI152_BRAIN_VOLUME: - warnings.warn( - "Brain mask is smaller than .5% of the volume " - "humain brain. 
SpaceNet is probably not tuned to" - "be used on such data.", stacklevel=2) - - if screening_percentile < 100: - screening_percentile = screening_percentile * ( - MNI152_BRAIN_VOLUME / mask_volume) - screening_percentile = min(screening_percentile, 100) - # if screening_percentile is 100, we don't do anything - - if verbose > 1: - print("Mask volume = %gmm^3 = %gcm^3" % ( - mask_volume, mask_volume / 1.e3)) - print("Standard brain volume = %gmm^3 = %gcm^3" % ( - MNI152_BRAIN_VOLUME, MNI152_BRAIN_VOLUME / 1.e3)) - print("Original screening-percentile: %g" % ( - original_screening_percentile)) - print("Volume-corrected screening-percentile: %g" % ( - screening_percentile)) - return screening_percentile - - def _crop_mask(mask): """Crops input mask to produce tighter (i.e smaller) bounding box with the same support (active voxels)""" idx = np.where(mask) if idx[0].size == 0: raise ValueError("Empty mask: if you have given a mask, it is " - "empty, and if you have not given a mask, the " - "mask-extraction routines have failed. Please " - "provide an appropriate mask.") + "empty, and if you have not given a mask, the " + "mask-extraction routines have failed. Please " + "provide an appropriate mask.") i_min = max(idx[0].min() - 1, 0) i_max = idx[0].max() j_min = max(idx[1].min() - 1, 0) @@ -123,7 +73,7 @@ def _univariate_feature_screening( Response Vector. mask: ndarray or booleans, shape (nx, ny, nz) - Mask definining brain Rois. + Mask defining brain Rois. is_classif: bool Flag telling whether the learning task is classification or regression. @@ -248,7 +198,7 @@ class _EarlyStoppingCallback(object): """Out-of-bag early stopping A callable that returns True when the test error starts - rising. We use a Spearman correlation (btween X_test.w and y_test) + rising. We use a Spearman correlation (between X_test.w and y_test) for scoring. """ @@ -511,7 +461,7 @@ class BaseSpaceNet(LinearModel, RegressorMixin, CacheMixin): penalty : string, optional (default 'graph-net') Penalty to used in the model. Can be 'graph-net' or 'tv-l1'. - loss : string, optional (default "mse") + loss : string, optional (default None) Loss to be used in the model. Must be an one of "mse", or "logistic". is_classif : bool, optional (default False) @@ -538,14 +488,14 @@ class BaseSpaceNet(LinearModel, RegressorMixin, CacheMixin): Length of the path. For example, ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` - mask : filename, niimg, NiftiMasker instance, optional default None) + mask : filename, niimg, NiftiMasker instance, optional (default None) Mask to be used on data. If an instance of masker is passed, then its mask will be used. If no mask is it will be computed automatically by a NiftiMasker. target_affine : 3x3 or 4x4 matrix, optional (default None) This parameter is passed to image.resample_img. An important use-case - of this parameter is for downsamping the input data to a coarser + of this parameter is for downsampling the input data to a coarser resolution (to speed of the model fit). Please see the related documentation for details. @@ -553,13 +503,13 @@ class BaseSpaceNet(LinearModel, RegressorMixin, CacheMixin): This parameter is passed to image.resample_img. Please see the related documentation for details. - low_pass : False or float, optional, (default None) + low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related - documentation for details. 
+ documentation for details. - high_pass : False or float, optional (default None) - This parameter is passed to signal. Clean. Please see the related - documentation for details. + high_pass: None or float, optional + This parameter is passed to signal.clean. Please see the related + documentation for details. t_r : float, optional (default None) This parameter is passed to signal.clean. Please see the related @@ -568,7 +518,7 @@ class BaseSpaceNet(LinearModel, RegressorMixin, CacheMixin): screening_percentile : float in the interval [0, 100]; Optional ( default 20) Percentile value for ANOVA univariate feature selection. A value of - 100 means 'keep all features'. This percentile is is expressed + 100 means 'keep all features'. This percentile is expressed w.r.t the volume of a standard (MNI152) brain, and so is corrected at runtime to correspond to the volume of the user-supplied mask (which is typically smaller). If '100' is given, all the features @@ -586,7 +536,7 @@ class BaseSpaceNet(LinearModel, RegressorMixin, CacheMixin): Defines the iterations for the solver. tol : float, optional (default 5e-4) - Defines the tolerance for convergence for the backend fista solver. + Defines the tolerance for convergence for the backend FISTA solver. verbose : int, optional (default 1) Verbosity level. @@ -614,12 +564,34 @@ class BaseSpaceNet(LinearModel, RegressorMixin, CacheMixin): Attributes ---------- - `alpha_` : float - Best alpha found by cross-validation. + `all_coef_` : ndarray, shape (n_l1_ratios, n_folds, n_features) + Coefficients for all folds and features. - `coef_` : ndarray, shape (n_classes-1, n_features) + `alpha_grids_` : ndarray, shape (n_folds, n_alphas) + Alpha values considered for selection of the best ones + (saved in `best_model_params_`) + + `best_model_params_` : ndarray, shape (n_folds, n_parameter) + Best model parameters (alpha, l1_ratio) saved for the different + cross-validation folds. + + `classes_` : ndarray of labels (`n_classes_`) + Labels of the classes (for classification problems) + + `n_classes_` : int + Number of classes (for classification problems) + + `coef_` : ndarray, shape + (1, n_features) for 2 class classification problems (i.e. n_classes = 2) + (n_classes, n_features) for n_classes > 2 Coefficient of the features in the decision function. + `coef_img_` : nifti image + Masked model coefficients + + `mask_` : ndarray 3D + An array containing the values of the mask image. + `masker_` : instance of NiftiMasker The nifti masker used to mask the data. @@ -628,20 +600,39 @@ this attribute is the mask image computed automatically from the data `X`. - `intercept_` : narray, shape (nclasses -1,) - Intercept (a.k.a. bias) added to the decision function. - It is available only when parameter intercept is set to True. + `memory_` : joblib memory cache + + `intercept_` : ndarray, shape + (1,) for 2 class classification problems (i.e. n_classes = 2) + (n_classes,) for n_classes > 2 + Intercept (a.k.a. bias) added to the decision function. + It is available only when parameter intercept is set to True. `cv_` : list of pairs of lists - Each pair are are the list of indices for the train and test - samples for the corresponding fold. + Each pair is the list of indices for the train and test samples + for the corresponding fold.
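The volume correction applied to screening_percentile, described above (the helper now lives in nilearn._utils.param_validation), can be made concrete with a hedged arithmetic sketch; the mask volume below is hypothetical:

    MNI152_BRAIN_VOLUME = 1827243.  # mm^3, the hard-coded standard volume
    mask_volume = 600000.           # mm^3, a smaller user-supplied mask
    screening_percentile = 20.
    corrected = min(screening_percentile * MNI152_BRAIN_VOLUME / mask_volume,
                    100)
    print(round(corrected, 1))      # 60.9: more features kept in a small mask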
- `cv_scores_` : ndarray, shape (n_alphas, n_folds) or (n_l1_ratios, n_alphas, n_folds) + `cv_scores_` : ndarray, shape (n_folds, n_alphas) or (n_l1_ratios, n_folds, n_alphas) Scores (misclassification) for each alpha, and on each fold `screening_percentile_` : float Screening percentile corrected according to volume of mask, relative to the volume of standard brain. + + `w_` : ndarray, shape + (1, n_features + 1) for 2 class classification problems (i.e n_classes = 2) + (n_classes, n_features + 1) for n_classes > 2, and (n_features,) for + regression + Model weights + + `ymean_` : array, shape (n_samples,) + Mean of prediction targets + + `Xmean_` : array, shape (n_features,) + Mean of X across samples + + `Xstd_` : array, shape (n_features,) + Standard deviation of X across samples """ SUPPORTED_PENALTIES = ["graph-net", "tv-l1"] SUPPORTED_LOSSES = ["mse", "logistic"] @@ -650,10 +641,10 @@ def __init__(self, penalty="graph-net", is_classif=False, loss=None, l1_ratios=.5, alphas=None, n_alphas=10, mask=None, target_affine=None, target_shape=None, low_pass=None, high_pass=None, t_r=None, max_iter=1000, tol=5e-4, - memory=Memory(None), memory_level=1, - standardize=True, verbose=1, n_jobs=1, eps=1e-3, - cv=8, fit_intercept=True, screening_percentile=20., - debias=False): + memory=None, memory_level=1, standardize=True, verbose=1, + mask_args=None, + n_jobs=1, eps=1e-3, cv=8, fit_intercept=True, + screening_percentile=20., debias=False): self.penalty = penalty self.is_classif = is_classif self.loss = loss @@ -678,13 +669,14 @@ def __init__(self, penalty="graph-net", is_classif=False, loss=None, self.t_r = t_r self.target_affine = target_affine self.target_shape = target_shape + self.mask_args = mask_args # sanity check on params self.check_params() def check_params(self): """Makes sure parameters are sane""" - if not self.l1_ratios is None: + if self.l1_ratios is not None: l1_ratios = self.l1_ratios if isinstance(l1_ratios, numbers.Number): l1_ratios = [l1_ratios] @@ -695,7 +687,7 @@ def check_params(self): l1_ratio)) elif l1_ratio == 0. or l1_ratio == 1.: warnings.warn( - ("Specified l1_ratio = %g. It's adived to only " + ("Specified l1_ratio = %g. It's advised to only " "specify values of l1_ratio strictly between 0 " "and 1." % l1_ratio)) if not (0. <= self.screening_percentile <= 100.): @@ -714,7 +706,7 @@ def check_params(self): ",".join(self.SUPPORTED_LOSSES[:-1]), "," if len( self.SUPPORTED_LOSSES) > 2 else "", self.SUPPORTED_LOSSES[-1], self.loss)) - if not self.loss is None and not self.is_classif and ( + if self.loss is not None and not self.is_classif and ( self.loss == "logistic"): raise ValueError( ("'logistic' loss is only available for classification " @@ -738,7 +730,7 @@ def fit(self, X, y): Parameters ---------- X : list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg + See http://nilearn.github.io/manipulating_images/input_output.html Data on which model is to be fitted. If this is a list, the affine is considered the same for all. 
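A hedged end-to-end sketch of the fit/predict cycle documented above; `func_img` (a 4D Niimg with one volume per sample) and `labels` are hypothetical placeholders:

    from nilearn.decoding import SpaceNetClassifier

    decoder = SpaceNetClassifier(penalty='graph-net',
                                 screening_percentile=20., cv=8)
    decoder.fit(func_img, labels)      # masks the data, then cross-validates
    print(decoder.best_model_params_)  # best (alpha, l1_ratio) per fold
    y_pred = decoder.predict(func_img)
    coef_img = decoder.coef_img_       # weights mapped back to brain space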
@@ -761,22 +753,16 @@ def fit(self, X, y): tic = time.time() # nifti masking - if isinstance(self.mask, NiftiMasker): - self.masker_ = clone(self.mask) - else: - self.masker_ = NiftiMasker(mask_img=self.mask, - target_affine=self.target_affine, - target_shape=self.target_shape, - standardize=self.standardize, - low_pass=self.low_pass, - high_pass=self.high_pass, - mask_strategy='epi', t_r=self.t_r, - memory=self.memory_) + self.masker_ = check_embedded_nifti_masker(self, multi_subject=False) X = self.masker_.fit_transform(X) + X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float, + multi_output=True, y_numeric=not self.is_classif) + # misc self.Xmean_ = X.mean(axis=0) self.Xstd_ = X.std(axis=0) + self.Xstd_[self.Xstd_ < 1e-8] = 1 self.mask_img_ = self.masker_.mask_img_ self.mask_ = self.mask_img_.get_data().astype(np.bool) n_samples, _ = X.shape @@ -787,7 +773,7 @@ def fit(self, X, y): alphas = self.alphas if isinstance(alphas, numbers.Number): alphas = [alphas] - if not self.loss is None: + if self.loss is not None: loss = self.loss elif self.is_classif: loss = "logistic" @@ -808,10 +794,10 @@ def fit(self, X, y): # generate fold indices case1 = (None in [alphas, l1_ratios]) and self.n_alphas > 1 - case2 = (not alphas is None) and min(len(l1_ratios), len(alphas)) > 1 + case2 = (alphas is not None) and min(len(l1_ratios), len(alphas)) > 1 if case1 or case2: - self.cv_ = list(check_cv(self.cv, X=X, y=y, - classifier=self.is_classif)) + self.cv_ = list(check_cv( + self.cv, y=y, classifier=self.is_classif).split(X, y)) else: # no cross-validation needed, user supplied all params self.cv_ = [(np.arange(n_samples), [])] @@ -833,13 +819,12 @@ def fit(self, X, y): y = y[:, 0] # scores & mean weights map over all folds - self.cv_scores_ = [[] for _ in range(n_problems)] + self.cv_scores_ = [[] for i in range(n_problems)] w = np.zeros((n_problems, X.shape[1] + 1)) self.all_coef_ = np.ndarray((n_problems, n_folds, X.shape[1])) self.screening_percentile_ = _adjust_screening_percentile( - self.screening_percentile, self.mask_img_, - verbose=self.verbose) + self.screening_percentile, self.mask_img_, verbose=self.verbose) # main loop: loop on classes and folds solver_params = dict(tol=self.tol, max_iter=self.max_iter) @@ -867,6 +852,7 @@ def fit(self, X, y): # misc self.cv_scores_ = np.array(self.cv_scores_) + self.best_model_params_ = np.array(self.best_model_params_) self.alpha_grids_ = np.array(self.alpha_grids_) self.ymean_ /= n_folds if not self.is_classif: @@ -913,7 +899,7 @@ class would be predicted. if not self.is_classif: return LinearModel.decision_function(self, X) - X = atleast2d_or_csr(X) + X = check_array(X) n_features = self.coef_.shape[1] if X.shape[1] != n_features: raise ValueError("X has %d features per sample; expecting %d" @@ -929,7 +915,7 @@ def predict(self, X): Parameters ---------- X : list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg + See http://nilearn.github.io/manipulating_images/input_output.html Data on prediction is to be made. If this is a list, the affine is considered the same for all. @@ -994,7 +980,7 @@ class SpaceNetClassifier(BaseSpaceNet): Length of the path. For example, ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. - mask : filename, niimg, NiftiMasker instance, optional default None) + mask : filename, niimg, NiftiMasker instance, optional (default None) Mask to be used on data. If an instance of masker is passed, then its mask will be used. 
If no mask is it will be computed automatically by a MultiNiftiMasker with default parameters. @@ -1007,13 +993,13 @@ class SpaceNetClassifier(BaseSpaceNet): This parameter is passed to image.resample_img. Please see the related documentation for details. - low_pass : False or float, optional, (default None) + low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related - documentation for details. + documentation for details. - high_pass : False or float, optional (default None) - This parameter is passed to signal. Clean. Please see the related - documentation for details. + high_pass: None or float, optional + This parameter is passed to signal.clean. Please see the related + documentation for details. t_r : float, optional (default None) This parameter is passed to signal.clean. Please see the related @@ -1056,7 +1042,7 @@ class SpaceNetClassifier(BaseSpaceNet): Rough estimator of the amount of memory used by caching. Higher value means more memory for caching. - cv : int, a cv generator instance, or None (default 10) + cv : int, a cv generator instance, or None (default 8) The input specifying which cross-validation generator to use. It can be an integer, in which case it is the number of folds in a KFold, None, in which case 3 fold is used, or another object, that @@ -1067,34 +1053,74 @@ Attributes ---------- - `alpha_` : float - Best alpha found by cross-validation. + `all_coef_` : ndarray, shape (n_l1_ratios, n_folds, n_features) + Coefficients for all folds and features. + + `alpha_grids_` : ndarray, shape (n_folds, n_alphas) + Alpha values considered for selection of the best ones + (saved in `best_model_params_`) + + `best_model_params_` : ndarray, shape (n_folds, n_parameter) + Best model parameters (alpha, l1_ratio) saved for the different + cross-validation folds. + + `classes_` : ndarray of labels (`n_classes_`) + Labels of the classes - `coef_` : array, shape = [n_classes-1, n_features] + `n_classes_` : int + Number of classes + + `coef_` : ndarray, shape + (1, n_features) for 2 class classification problems (i.e. n_classes = 2) + (n_classes, n_features) for n_classes > 2 Coefficient of the features in the decision function. + `coef_img_` : nifti image + Masked model coefficients + + `mask_` : ndarray 3D + An array containing the values of the mask image. + `masker_` : instance of NiftiMasker The nifti masker used to mask the data. `mask_img_` : Nifti like image - The mask of the data. If no mask was given at masker creation, contains - the automatically computed mask. + The mask of the data. If no mask was supplied by the user, + this attribute is the mask image computed automatically from the + data `X`. - `intercept_` : array, shape = [n_classes-1] + `memory_` : joblib memory cache + + `intercept_` : ndarray, shape + (1, ) for 2 class classification problems (i.e. n_classes = 2) + (n_classes, ) for n_classes > 2 Intercept (a.k.a. bias) added to the decision function. It is available only when parameter intercept is set to True. `cv_` : list of pairs of lists - Each pair are are the list of indices for the train and test + Each pair is the list of indices for the train and test samples for the corresponding fold. - `cv_scores_` : 2d array of shape (n_alphas, n_folds) - Scores (misclassification) for each alpha, and on each fold.
+ `cv_scores_` : ndarray, shape (n_folds, n_alphas) or (n_l1_ratios, n_folds, n_alphas) + Scores (misclassification) for each alpha, and on each fold `screening_percentile_` : float Screening percentile corrected according to volume of mask, relative to the volume of standard brain. + `w_` : ndarray, shape + (1, n_features + 1) for 2 class classification problems (i.e n_classes = 2) + (n_classes, n_features + 1) for n_classes > 2 + Model weights + + `ymean_` : array, shape (n_samples,) + Mean of prediction targets + + `Xmean_` : array, shape (n_features,) + Mean of X across samples + + `Xstd_` : array, shape (n_features,) + Standard deviation of X across samples """ def __init__(self, penalty="graph-net", loss="logistic", @@ -1133,7 +1159,7 @@ def score(self, X, y): Parameters ---------- X : list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg + See http://nilearn.github.io/manipulating_images/input_output.html Data on which model is to be fitted. If this is a list, the affine is considered the same for all. @@ -1151,7 +1177,7 @@ def score(self, X, y): class SpaceNetRegressor(BaseSpaceNet): """Regression learners with sparsity and spatial priors. - `SpaceNetClassifier` implements Graph-Net and TV-L1 priors / penalties + `SpaceNetRegressor` implements Graph-Net and TV-L1 priors / penalties for regression problems. Thus, the penalty is a sum an L1 term and a spatial term. The aim of such a hybrid prior is to obtain weights maps which are structured (due to the spatial prior) and sparse (enforced @@ -1182,7 +1208,7 @@ class SpaceNetRegressor(BaseSpaceNet): Length of the path. For example, ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` - mask : filename, niimg, NiftiMasker instance, optional default None) + mask : filename, niimg, NiftiMasker instance, optional (default None) Mask to be used on data. If an instance of masker is passed, then its mask will be used. If no mask is it will be computed automatically by a MultiNiftiMasker with default parameters. @@ -1195,12 +1221,12 @@ class SpaceNetRegressor(BaseSpaceNet): This parameter is passed to image.resample_img. Please see the related documentation for details. - low_pass : False or float, optional, (default None) + low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details - high_pass : False or float, optional (default None) - This parameter is passed to signal. Clean. Please see the related + high_pass: None or float, optional + This parameter is passed to signal.clean. Please see the related documentation for details t_r : float, optional (default None) @@ -1243,7 +1269,7 @@ class SpaceNetRegressor(BaseSpaceNet): Rough estimator of the amount of memory used by caching. Higher value means more memory for caching. - cv : int, a cv generator instance, or None (default 10) + cv : int, a cv generator instance, or None (default 8) The input specifying which cross-validation generator to use. It can be an integer, in which case it is the number of folds in a KFold, None, in which case 3 fold is used, or another object, that @@ -1254,29 +1280,61 @@ class SpaceNetRegressor(BaseSpaceNet): Attributes ---------- - `alpha_` : float - Best alpha found by cross-validation + `all_coef_` : ndarray, shape (n_l1_ratios, n_folds, n_features) + Coefficients for all folds and features. 
- `coef_` : array, shape = [n_classes-1, n_features] + `alpha_grids_` : ndarray, shape (n_folds, n_alphas) + Alpha values considered for selection of the best ones + (saved in `best_model_params_`) + + `best_model_params_` : ndarray, shape (n_folds, n_parameter) + Best model parameters (alpha, l1_ratio) saved for the different + cross-validation folds. + + `coef_` : ndarray, shape (n_features,) Coefficient of the features in the decision function. + `coef_img_` : nifti image + Masked model coefficients + + `mask_` : ndarray 3D + An array containing the values of the mask image. + `masker_` : instance of NiftiMasker The nifti masker used to mask the data. `mask_img_` : Nifti like image - The mask of the data. If no mask was given at masker creation, contains - the automatically computed mask. + The mask of the data. If no mask was supplied by the user, this + attribute is the mask image computed automatically from the data `X`. - `intercept_` : array, shape = [n_classes-1] + `memory_` : joblib memory cache + + `intercept_` : ndarray, shape (1,) Intercept (a.k.a. bias) added to the decision function. It is available only when parameter intercept is set to True. - `cv_scores_` : 2d array of shape (n_alphas, n_folds) + `cv_` : list of pairs of lists + Each pair is the list of indices for the train and test + samples for the corresponding fold. + + `cv_scores_` : ndarray, shape (n_folds, n_alphas) or (n_l1_ratios, n_folds, n_alphas) Scores (misclassification) for each alpha, and on each fold `screening_percentile_` : float Screening percentile corrected according to volume of mask, relative to the volume of standard brain. + + `w_` : ndarray, shape (n_features,) + Model weights + + `ymean_` : array, shape (n_samples,) + Mean of prediction targets + + `Xmean_` : array, shape (n_features,) + Mean of X across samples + + `Xstd_` : array, shape (n_features,) + Standard deviation of X across samples """ def __init__(self, penalty="graph-net", l1_ratios=.5, alphas=None, diff --git a/nilearn/decoding/tests/test_graph_net.py b/nilearn/decoding/tests/test_graph_net.py index 2cf7e9b67a..9a7ddb1a65 100644 --- a/nilearn/decoding/tests/test_graph_net.py +++ b/nilearn/decoding/tests/test_graph_net.py @@ -29,7 +29,7 @@ def _make_data(task="regression", size=4): task=task) X_, _ = to_niimgs(X, [size] * 3) mask_ = nibabel.Nifti1Image(mask.astype(np.float), - X_.get_affine()) + X_.affine) return X, y, w, mask, mask_, X_ X, y, w, mask, mask_, X_ = _make_data() diff --git a/nilearn/decoding/tests/test_same_api.py b/nilearn/decoding/tests/test_same_api.py index 0c65958f7f..5a06b33467 100644 --- a/nilearn/decoding/tests/test_same_api.py +++ b/nilearn/decoding/tests/test_same_api.py @@ -28,7 +28,7 @@ def _make_data(rng=None, masked=False, dim=(2, 2, 2)): if rng is None: rng = check_random_state(42) mask = np.ones(dim).astype(np.bool) - mask[rng.rand() < .7] = 0 + mask[rng.rand(*dim) < .7] = 0 w = np.zeros(dim) w[dim[0] // 2:, dim[1] // 2:, :dim[2] // 2] = 1 n = 5 diff --git a/nilearn/decoding/tests/test_searchlight.py b/nilearn/decoding/tests/test_searchlight.py index b7f3a46070..ccbac9d7b7 100644 --- a/nilearn/decoding/tests/test_searchlight.py +++ b/nilearn/decoding/tests/test_searchlight.py @@ -4,9 +4,11 @@ # Author: Alexandre Abraham # License: simplified BSD -from nose.tools import assert_equal import numpy as np import nibabel +import sklearn +from distutils.version import LooseVersion +from nose.tools import assert_equal from nilearn.decoding import searchlight @@ -20,7 +22,7 @@ def test_searchlight(): mask = 
np.ones((5, 5, 5), np.bool) mask_img = nibabel.Nifti1Image(mask.astype(np.int), np.eye(4)) # Create a condition array - cond = np.arange(frames, dtype=int) > frames // 2 + cond = np.arange(frames, dtype=int) > (frames // 2) # Create an activation pixel. data[2, 2, 2, :] = 0 @@ -28,9 +30,8 @@ def test_searchlight(): data_img = nibabel.Nifti1Image(data, np.eye(4)) # Define cross validation - from sklearn.cross_validation import check_cv - # avoid using KFold for compatibility with sklearn 0.10-0.13 - cv = check_cv(4, cond) + from sklearn.model_selection import KFold + cv = KFold(n_splits=4) n_jobs = 1 # Run Searchlight with different radii @@ -42,8 +43,18 @@ def test_searchlight(): assert_equal(np.where(sl.scores_ == 1)[0].size, 1) assert_equal(sl.scores_[2, 2, 2], 1.) - # Medium radius : little ball selected + # The voxel selected in process_mask_img is too far from the signal + process_mask = np.zeros((5, 5, 5), np.bool) + process_mask[0, 0, 0] = True + process_mask_img = nibabel.Nifti1Image(process_mask.astype(np.int), + np.eye(4)) + sl = searchlight.SearchLight(mask_img, process_mask_img=process_mask_img, + radius=0.5, n_jobs=n_jobs, + scoring='accuracy', cv=cv) + sl.fit(data_img, cond) + assert_equal(np.where(sl.scores_ == 1)[0].size, 0) + # Medium radius : little ball selected sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1, n_jobs=n_jobs, scoring='accuracy', cv=cv) sl.fit(data_img, cond) @@ -62,3 +73,41 @@ def test_searchlight(): sl.fit(data_img, cond) assert_equal(np.where(sl.scores_ == 1)[0].size, 33) assert_equal(sl.scores_[2, 2, 2], 1.) + + # group cross validation + try: + from sklearn.model_selection import LeaveOneGroupOut + gcv = LeaveOneGroupOut() + except ImportError: + # won't import model selection if it's not there. + # the groups variable should have no effect. + gcv = cv + + groups = np.random.permutation(np.arange(frames, dtype=int) > + (frames // 2)) + sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1, + n_jobs=n_jobs, scoring='accuracy', cv=gcv) + sl.fit(data_img, cond, groups) + assert_equal(np.where(sl.scores_ == 1)[0].size, 7) + assert_equal(sl.scores_[2, 2, 2], 1.) + + # adding superfluous group variable + sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1, + n_jobs=n_jobs, scoring='accuracy', cv=cv) + sl.fit(data_img, cond, groups) + assert_equal(np.where(sl.scores_ == 1)[0].size, 7) + assert_equal(sl.scores_[2, 2, 2], 1.) 
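A hedged sketch of the new `groups` argument, mirroring the group cross-validation test above; the data is random noise, so the scores themselves are meaningless:

    import numpy as np
    import nibabel
    from sklearn.model_selection import LeaveOneGroupOut
    from nilearn.decoding import SearchLight

    rng = np.random.RandomState(0)
    data_img = nibabel.Nifti1Image(rng.rand(5, 5, 5, 20), np.eye(4))
    mask_img = nibabel.Nifti1Image(np.ones((5, 5, 5), dtype=np.int8),
                                   np.eye(4))
    cond = np.tile([0, 1], 10)      # binary condition
    groups = np.repeat([0, 1], 10)  # e.g. one label per session

    sl = SearchLight(mask_img, radius=1., scoring='accuracy',
                     cv=LeaveOneGroupOut())
    sl.fit(data_img, cond, groups)  # groups is forwarded to cross_val_score
    print(sl.scores_.shape)         # (5, 5, 5), one score per voxel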
+ + # Check whether searchlight works on list of 3D images + rand = np.random.RandomState(0) + data = rand.rand(5, 5, 5) + data_img = nibabel.Nifti1Image(data, affine=np.eye(4)) + imgs = [data_img, data_img, data_img, data_img, data_img, data_img] + + # labels + y = [0, 1, 0, 1, 0, 1] + + # run searchlight on list of 3D images + sl = searchlight.SearchLight(mask_img) + sl.fit(imgs, y) + diff --git a/nilearn/decoding/tests/test_space_net.py b/nilearn/decoding/tests/test_space_net.py index d60d666be9..938ec1e648 100644 --- a/nilearn/decoding/tests/test_space_net.py +++ b/nilearn/decoding/tests/test_space_net.py @@ -1,12 +1,9 @@ -import os -import warnings import itertools from functools import partial from nose import SkipTest from nose.tools import (assert_equal, assert_true, assert_false, assert_raises) import numpy as np -import nibabel from sklearn.datasets import load_iris from sklearn.utils import extmath from sklearn.linear_model import Lasso @@ -15,10 +12,10 @@ from sklearn.metrics import accuracy_score from nilearn._utils.testing import assert_raises_regex, assert_warns from nilearn.decoding.space_net import ( - _EarlyStoppingCallback, _space_net_alpha_grid, MNI152_BRAIN_VOLUME, - path_scores, BaseSpaceNet, _crop_mask, _univariate_feature_screening, - _get_mask_volume, SpaceNetClassifier, SpaceNetRegressor, - _adjust_screening_percentile) + _EarlyStoppingCallback, _space_net_alpha_grid, path_scores, BaseSpaceNet, + _crop_mask, _univariate_feature_screening, SpaceNetClassifier, + SpaceNetRegressor) +from nilearn._utils.param_validation import _adjust_screening_percentile from nilearn.decoding.space_net_solvers import (_graph_net_logistic, _graph_net_squared_loss) @@ -112,8 +109,12 @@ def test_params_correctly_propagated_in_constructors(): def test_screening_space_net(): + for verbose in [0, 2]: + screening_percentile = assert_warns(UserWarning, + _adjust_screening_percentile, 10, + mask, verbose) screening_percentile = assert_warns(UserWarning, - _adjust_screening_percentile, 10, mask) + _adjust_screening_percentile, 10, mask) # We gave here a very small mask, judging by standards of brain size # thus the screening_percentile_ corrected for brain size should # be 100% @@ -283,16 +284,6 @@ def test_univariate_feature_screening(dim=(11, 12, 13), n_samples=10): assert_true(n_features_ <= n_features) -def test_get_mask_volume(): - # Test that hard-coded standard mask volume can be corrected computed - if os.path.isfile(mni152_brain_mask): - assert_equal(MNI152_BRAIN_VOLUME, _get_mask_volume(nibabel.load( - mni152_brain_mask))) - else: - warnings.warn("Couldn't find %s (for testing)" % ( - mni152_brain_mask)) - - def test_space_net_classifier_subclass(): for penalty, alpha, l1_ratio, verbose in itertools.product( ["graph-net", "tv-l1"], [.4, .01], [.5, 1.], [True, False]): @@ -354,3 +345,20 @@ def test_space_net_one_alpha_no_crash(): for model in [SpaceNetRegressor, SpaceNetClassifier]: model(n_alphas=1, mask=mask).fit(X, y) model(alphas=None, n_alphas=2, mask=mask).fit(X, y) + + +def test_checking_inputs_length(): + iris = load_iris() + X, y = iris.data, iris.target + y = 2 * (y > 0) - 1 + X_, mask = to_niimgs(X, (2, 2, 2)) + + # Remove ten samples from y + y = y[:-10] + + for model in [SpaceNetRegressor, SpaceNetClassifier]: + + assert_raises(ValueError, model(mask=mask, + alphas=1. 
/ .01 / X.shape[0], + l1_ratios=1., tol=1e-10, + screening_percentile=100.).fit, X_, y) diff --git a/nilearn/decomposition/base.py b/nilearn/decomposition/base.py index 8e621056bf..aef70a7219 100644 --- a/nilearn/decomposition/base.py +++ b/nilearn/decomposition/base.py @@ -4,21 +4,87 @@ """ from __future__ import division from math import ceil - import itertools +import glob +from distutils.version import LooseVersion + import numpy as np + from scipy import linalg -from sklearn.base import BaseEstimator +import sklearn +import nilearn +from sklearn.base import BaseEstimator, TransformerMixin from sklearn.externals.joblib import Memory, Parallel, delayed from sklearn.linear_model import LinearRegression from sklearn.utils import check_random_state -from sklearn.utils.extmath import randomized_svd +from sklearn.utils.extmath import randomized_svd, svd_flip from .._utils.cache_mixin import CacheMixin, cache from .._utils.niimg import _safe_get_data +from .._utils.niimg_conversions import _resolve_globbing +from .._utils.compat import _basestring from ..input_data import NiftiMapsMasker from ..input_data.masker_validation import check_embedded_nifti_masker +def fast_svd(X, n_components, random_state=None): + """ Automatically switch between randomized and LAPACK SVD (heuristic + of scikit-learn). + + Parameters + ========== + X: array, shape (n_samples, n_features) + The data to decompose + + n_components: integer + The order of the dimensionality of the truncated SVD + + random_state: int or RandomState + Pseudo-random number generator state used for random sampling. + + Returns + ======== + + U: array, shape (n_samples, n_components) + The first matrix of the truncated SVD + + S: array, shape (n_components) + The second matrix of the truncated SVD + + V: array, shape (n_components, n_features) + The last matrix of the truncated SVD + + """ + random_state = check_random_state(random_state) + # Small problem: just compute the full SVD + if max(X.shape) <= 500: + svd_solver = 'full' + elif n_components >= 1 and n_components < .8 * min(X.shape): + svd_solver = 'randomized' + # This is also the case of n_components in (0,1) + else: + svd_solver = 'full' + + # Call different fits for either full or randomized SVD + if svd_solver == 'full': + U, S, V = linalg.svd(X, full_matrices=False) + # flip eigenvectors' sign to enforce deterministic output + U, V = svd_flip(U, V) + # The copies are there to free the reference on the non-reduced + # data, and hence clear memory early + U = U[:, :n_components].copy() + S = S[:n_components] + V = V[:n_components].copy() + else: + n_iter = 'auto' + + U, S, V = randomized_svd(X, n_components=n_components, + n_iter=n_iter, + flip_sign=True, + random_state=random_state) + return U, S, V + + + def mask_and_reduce(masker, imgs, confounds=None, reduction_ratio='auto', @@ -39,7 +105,7 @@ def mask_and_reduce(masker, imgs, Instance used to mask provided data. imgs: list of 4D Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html List of subject data to mask, reduce and stack.
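A hedged usage sketch of `fast_svd` as defined above; with max(X.shape) above 500 and few requested components, the randomized solver is selected automatically:

    import numpy as np
    from nilearn.decomposition.base import fast_svd

    rng = np.random.RandomState(0)
    X = rng.randn(200, 10000)  # 10 < .8 * min(X.shape), so randomized SVD
    U, S, V = fast_svd(X, n_components=10, random_state=0)
    print(U.shape, S.shape, V.shape)  # (200, 10) (10,) (10, 10000)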
confounds: CSV file path or 2D matrix, optional @@ -113,9 +179,11 @@ def mask_and_reduce(masker, imgs, for subject_data in data_list] n_samples = np.sum(subject_n_samples) - n_voxels = np.sum(_safe_get_data(masker.mask_img_)) + n_voxels = int(np.sum(_safe_get_data(masker.mask_img_))) + dtype = (np.float64 if data_list[0].dtype.type is np.float64 + else np.float32) data = np.empty((n_samples, n_voxels), order='F', - dtype='float64') + dtype=dtype) current_position = 0 for i, next_position in enumerate(np.cumsum(subject_n_samples)): @@ -149,26 +217,17 @@ def _mask_and_reduce_single(masker, else: n_samples = int(ceil(data_n_samples * reduction_ratio)) - if n_samples <= data_n_samples // 4: - U, S, _ = cache(randomized_svd, memory, + U, S, V = cache(fast_svd, memory, memory_level=memory_level, func_memory_level=3)(this_data.T, n_samples, - transpose=True, random_state=random_state) - U = U.T - else: - U, S, _ = cache(linalg.svd, memory, - memory_level=memory_level, - func_memory_level=3)(this_data.T, - full_matrices=False) - U = U.T[:n_samples].copy() - S = S[:n_samples] + U = U.T.copy() U = U * S[:, np.newaxis] return U -class BaseDecomposition(BaseEstimator, CacheMixin): +class BaseDecomposition(BaseEstimator, CacheMixin, TransformerMixin): """Base class for matrix factorization based decomposition estimators. Handles mask logic, provides transform and inverse_transform methods @@ -201,11 +260,11 @@ class BaseDecomposition(BaseEstimator, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details - low_pass: False or float, optional + low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details - high_pass: False or float, optional + high_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details @@ -221,12 +280,15 @@ class BaseDecomposition(BaseEstimator, CacheMixin): This parameter is passed to image.resample_img. Please see the related documentation for details. - mask_strategy: {'background', 'epi'}, optional + mask_strategy: {'background', 'epi' or 'template'}, optional The strategy used to compute the mask: use 'background' if your - images present a clear homogeneous background, and 'epi' if they - are raw EPI images. Depending on this value, the mask will be - computed from masking.compute_background_mask or - masking.compute_epi_mask. Default is 'background'. + images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'epi'. mask_args: dict, optional If mask is None, these are additional parameters passed to @@ -253,7 +315,7 @@ class BaseDecomposition(BaseEstimator, CacheMixin): Attributes ---------- `mask_img_` : Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html The mask of the data. If no mask was given at masker creation, contains the automatically computed mask. 
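For the new 'template' strategy documented above, the mask computation should be roughly equivalent to calling the helper directly; a hedged sketch, where `mean_epi_img` is a hypothetical functional image:

    from nilearn import masking

    # Resamples the MNI152 gray-matter template to the image's field of
    # view and thresholds it into a binary mask.
    mask_img = masking.compute_gray_matter_mask(mean_epi_img, threshold=.5)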
@@ -289,22 +351,44 @@ def __init__(self, n_components=20, self.verbose = verbose def fit(self, imgs, y=None, confounds=None): - """Base fit for decomposition estimators : compute the embedded masker + """Compute the mask and the components across subjects Parameters ---------- imgs: list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Data on which the mask is calculated. If this is a list, the affine is considered the same for all. + confounds : list of CSV file paths or 2D matrices + This parameter is passed to nilearn.signal.clean. Please see the + related documentation for details. Should match with the list + of imgs given. + + Returns + ------- + self : object + Returns the instance itself. Contains attributes listed + at the object level. + """ - if hasattr(imgs, '__iter__') and len(imgs) == 0: + # Base fit for decomposition estimators : compute the embedded masker + + if isinstance(imgs, _basestring): + if nilearn.EXPAND_PATH_WILDCARDS and glob.has_magic(imgs): + imgs = _resolve_globbing(imgs) + + if isinstance(imgs, _basestring) or not hasattr(imgs, '__iter__'): + # these classes are meant for list of 4D images + # (multi-subject), we want it to work also on a single + # subject, so we hack it. + imgs = [imgs, ] + + if len(imgs) == 0: # Common error that arises from a null glob. Capture # it early and raise a helpful message raise ValueError('Need one or more Niimg-like objects as input, ' 'an empty list was given.') - self.masker_ = check_embedded_nifti_masker(self) # Avoid warning with imgs != None @@ -315,19 +399,26 @@ def fit(self, imgs, y=None, confounds=None): self.masker_.fit() self.mask_img_ = self.masker_.mask_img_ + # mask_and_reduce step for decomposition estimators i.e. + # MultiPCA, CanICA and Dictionary Learning + if self.verbose: + print("[{0}] Loading data".format(self.__class__.__name__)) + data = mask_and_reduce( + self.masker_, imgs, confounds=confounds, + n_components=self.n_components, + random_state=self.random_state, + memory=self.memory, + memory_level=max(0, self.memory_level + 1), + n_jobs=self.n_jobs) + self._raw_fit(data) + return self def _check_components_(self): if not hasattr(self, 'components_'): - if self.__class__.__name__ == 'BaseDecomposition': - raise ValueError("Object has no components_ attribute. " - "This may be because " - "BaseDecomposition is directly " - "being used.") - else: - raise ValueError("Object has no components_ attribute. " - "This is probably because fit has not " - "been called.") + raise ValueError("Object has no components_ attribute. " + "This is probably because fit has not " + "been called.") def transform(self, imgs, confounds=None): """Project the data into a reduced representation @@ -335,7 +426,7 @@ def transform(self, imgs, confounds=None): Parameters ---------- imgs: iterable of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Data to be projected confounds: CSV file path or 2D matrix @@ -381,7 +472,7 @@ def inverse_transform(self, loadings): if not hasattr(self, 'components_'): ValueError('Object has no components_ attribute. 
This is either ' 'because fit has not been called or because' - '_DecompositionEstimator has direcly been used') + '_DecompositionEstimator has directly been used') self._check_components_() components_img_ = self.masker_.inverse_transform(self.components_) nifti_maps_masker = NiftiMapsMasker( @@ -412,7 +503,7 @@ def score(self, imgs, confounds=None): Parameters ---------- imgs: iterable of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Data to be scored confounds: CSV file path or 2D matrix diff --git a/nilearn/decomposition/canica.py b/nilearn/decomposition/canica.py index 949e9d6911..ef6d77d63a 100644 --- a/nilearn/decomposition/canica.py +++ b/nilearn/decomposition/canica.py @@ -27,15 +27,10 @@ class CanICA(MultiPCA): it will be computed automatically by a MultiNiftiMasker with default parameters. - data: array-like, shape = [[n_samples, n_features], ...] - Training vector, where n_samples is the number of samples, - n_features is the number of features. There is one vector per - subject. - n_components: int - Number of components to extract + Number of components to extract. By default n_components=20. - smoothing_fwhm: float, optional + smoothing_fwhm: float, optional, default 6mm If smoothing_fwhm is not None, it gives the size in millimeters of the spatial smoothing to apply to the signal. @@ -43,17 +38,22 @@ class CanICA(MultiPCA): Indicate if a Canonical Correlation Analysis must be run after the PCA. - standardize: boolean, optional + standardize: boolean, optional, default True If standardize is True, the time-series are centered and normed: their variance is put to 1 in the time dimension. + detrend : boolean, optional, default True + If detrend is True, the time-series will be detrended before + components extraction. + threshold: None, 'auto' or float If None, no thresholding is applied. If 'auto', then we apply a thresholding that will keep the n_voxels more intense voxels across all the maps, n_voxels being the number of voxels in a brain volume. A float value indicates the ratio of voxels to keep (2. means that the maps will together - have 2 x n_voxels non-zero voxels ). + have 2 x n_voxels non-zero voxels). The float value + must be between 0. and n_components. n_init: int, optional The number of times the fastICA algorithm is restarted @@ -81,6 +81,22 @@ class CanICA(MultiPCA): This parameter is passed to signal.clean. Please see the related documentation for details + mask_strategy: {'background', 'epi' or 'template'}, optional + The strategy used to compute the mask: use 'background' if your + images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'epi'. + + mask_args: dict, optional + If mask is None, these are additional parameters passed to + masking.compute_background_mask or masking.compute_epi_mask + to fine-tune mask computation. Please see the related documentation + for details. + memory: instance of joblib.Memory or string Used to cache the masking process. By default, no caching is done.
If a string is given, it is the @@ -97,6 +113,31 @@ class CanICA(MultiPCA): verbose: integer, optional Indicate the level of verbosity. By default, nothing is printed + Attributes + ---------- + `components_` : 2D numpy array (n_components x n-voxels) + Masked ICA components extracted from the input images. They can be + unmasked thanks to the `masker_` attribute. + + Deprecated since version 0.4.1. Use `components_img_` instead. + + `components_img_` : 4D Nifti image + 4D image giving the extracted ICA components. Each 3D image is a + component. + + New in version 0.4.1. + + `masker_` : instance of MultiNiftiMasker + Masker used to filter and mask data as first step. If an instance of + MultiNiftiMasker is given in `mask` parameter, + this is a copy of it. Otherwise, a masker is created using the value + of `mask` and other NiftiMasker related parameters as initialization. + + `mask_img_` : Niimg-like object + See http://nilearn.github.io/manipulating_images/input_output.html + The mask of the data. If no mask was given at masker creation, contains + the automatically computed mask. + References ---------- * G. Varoquaux et al. "A group model for stable multi-subject ICA on @@ -132,19 +173,26 @@ def __init__(self, mask=None, n_components=20, smoothing_fwhm=6, mask_strategy=mask_strategy, mask_args=mask_args, memory=memory, memory_level=memory_level, n_jobs=n_jobs, verbose=verbose) + + if isinstance(threshold, float) and threshold > n_components: + raise ValueError("Threshold must not be higher than number " + "of maps. " + "Number of maps is %s and you provided " + "threshold=%s" % + (str(n_components), str(threshold))) self.threshold = threshold self.n_init = n_init - def _unmix_components(self): + def _unmix_components(self, components): """Core function of CanICA that rotates components_ to maximize independence""" - random_state = check_random_state(self.random_state) seeds = random_state.randint(np.iinfo(np.int32).max, size=self.n_init) + # Note: fastICA is very unstable, hence we use 64bit on it results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(self._cache(fastica, func_memory_level=2)) - (self.components_.T, whiten=True, fun='cube', + (components.astype(np.float64), whiten=True, fun='cube', random_state=seed) for seed in seeds) @@ -170,16 +218,19 @@ def _unmix_components(self): abs_ica_maps, 100. - (100. / len(ica_maps)) * ratio) ica_maps[abs_ica_maps < threshold] = 0. - self.components_ = ica_maps + # We make sure that we keep the dtype of components + self.components_ = ica_maps.astype(self.components_.dtype) # flip signs in each component so that peak is +ve for component in self.components_: if component.max() < -component.min(): component *= -1 + if hasattr(self, "masker_"): + self.components_img_ = self.masker_.inverse_transform(self.components_) # Overriding MultiPCA._raw_fit overrides MultiPCA.fit behavior def _raw_fit(self, data): - """Helper function that direcly process unmasked data. + """Helper function that directly processes unmasked data. Useful when called by another estimator that has already unmasked data.
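
Two user-visible effects of the CanICA changes above: a float threshold is now validated against n_components at construction time, and fit populates components_img_ directly, so no manual inverse_transform is needed. A brief usage sketch; the file paths and glob pattern are hypothetical placeholders:

    from nilearn.decomposition import CanICA

    # CanICA(n_components=4, threshold=5.) now fails fast with a ValueError,
    # since a float threshold may not exceed the number of maps.
    canica = CanICA(n_components=4, threshold='auto', random_state=0)

    # Globbing patterns are resolved at fit time when
    # nilearn.EXPAND_PATH_WILDCARDS is enabled; a list of images works too.
    canica.fit('/data/sub-*_func.nii.gz')

    # New in 0.4.1: the unmasked maps are available as a 4D image.
    canica.components_img_.to_filename('canica_maps.nii.gz')
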
@@ -190,6 +241,6 @@ def _raw_fit(self, data): Unmasked data to process """ - MultiPCA._raw_fit(self, data) - self._unmix_components() + components = MultiPCA._raw_fit(self, data) + self._unmix_components(components) return self diff --git a/nilearn/decomposition/dict_learning.py b/nilearn/decomposition/dict_learning.py index 9b015b8538..e31b2e734f 100644 --- a/nilearn/decomposition/dict_learning.py +++ b/nilearn/decomposition/dict_learning.py @@ -14,18 +14,15 @@ import numpy as np import sklearn -from sklearn.base import TransformerMixin from sklearn.decomposition import dict_learning_online from sklearn.externals.joblib import Memory from sklearn.linear_model import Ridge -from .base import BaseDecomposition, mask_and_reduce +from .base import BaseDecomposition from .canica import CanICA - -if LooseVersion(sklearn.__version__) >= LooseVersion('0.17'): - # check_input=False is an optimization available only in sklearn >=0.17 - sparse_encode_args = {'check_input': False} +# check_input=False is an optimization available in sklearn. +sparse_encode_args = {'check_input': False} def _compute_loadings(components, data): @@ -39,7 +36,7 @@ def _compute_loadings(components, data): return loadings -class DictLearning(BaseDecomposition, TransformerMixin): +class DictLearning(BaseDecomposition): """Perform a map learning algorithm based on spatial component sparsity, over a CanICA initialization. This yields more stable maps than CanICA. @@ -54,17 +51,20 @@ class DictLearning(BaseDecomposition, TransformerMixin): parameters. n_components: int - Number of components to extract + Number of components to extract. By default n_components=20. + + batch_size : int, optional, default=20 + The number of samples to take in each batch. - n_epochs: float - Number of epochs the algorithm should run on the data + n_epochs: float, default=1 + Number of epochs the algorithm should run on the data. - alpha: float, optional, default=1 - Sparsity controlling parameter + alpha: float, optional, default=10 + Sparsity controlling parameter. dict_init: Niimg-like object, optional Initial estimation of dictionary maps. Would be computed from CanICA if - not provided + not provided. reduction_ratio: 'auto' or float between 0. and 1. - Between 0. or 1. : controls data reduction in the temporal domain. @@ -72,17 +72,29 @@ class DictLearning(BaseDecomposition, TransformerMixin): - if set to 'auto', estimator will set the number of components per reduced session to be n_components. + method : {'lars', 'cd'}, default='cd' + Coding method used by sklearn backend. Below are the possible values. + lars: uses the least angle regression method to solve the lasso problem + (linear_model.lars_path) + cd: uses the coordinate descent method to compute the + Lasso solution (linear_model.Lasso). Lars will be faster if + the estimated components are sparse. + random_state: int or RandomState Pseudo number generator state used for random sampling. - smoothing_fwhm: float, optional + smoothing_fwhm: float, optional, default=4mm If smoothing_fwhm is not None, it gives the size in millimeters of the spatial smoothing to apply to the signal. - standardize : boolean, optional + standardize : boolean, optional, default=True If standardize is True, the time-series are centered and normed: their variance is put to 1 in the time dimension. + detrend : boolean, optional, default=True + If detrend is True, the time-series will be detrended before + components extraction. 
+ target_affine: 3x3 or 4x4 matrix, optional This parameter is passed to image.resample_img. Please see the related documentation for details. @@ -93,15 +105,31 @@ class DictLearning(BaseDecomposition, TransformerMixin): low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related - documentation for details + documentation for details. high_pass: None or float, optional This parameter is passed to signal.clean. Please see the related - documentation for details + documentation for details. t_r: float, optional This parameter is passed to signal.clean. Please see the related - documentation for details + documentation for details. + + mask_strategy: {'background', 'epi' or 'template'}, optional + The strategy used to compute the mask: use 'background' if your + images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'epi'. + + mask_args: dict, optional + If mask is None, these are additional parameters passed to + masking.compute_background_mask or masking.compute_epi_mask + to fine-tune mask computation. Please see the related documentation + for details. memory: instance of joblib.Memory or string Used to cache the masking process. @@ -117,45 +145,62 @@ class DictLearning(BaseDecomposition, TransformerMixin): 'all CPUs', -2 'all CPUs but one', and so on. verbose: integer, optional - Indicate the level of verbosity. By default, nothing is printed + Indicate the level of verbosity. By default, nothing is printed. + + Attributes + ---------- + `components_` : 2D numpy array (n_components x n-voxels) + Masked dictionary components extracted from the input images. + + Deprecated since version 0.4.1. Use `components_img_` instead + + `components_img_` : 4D Nifti image + 4D image giving the extracted components. Each 3D image is a component. + + New in version 0.4.1. + + `masker_` : instance of MultiNiftiMasker + Masker used to filter and mask data as first step. If an instance of + MultiNiftiMasker is given in `mask` parameter, + this is a copy of it. Otherwise, a masker is created using the value + of `mask` and other NiftiMasker related parameters as initialization. + + `mask_img_` : Niimg-like object + See http://nilearn.github.io/manipulating_images/input_output.html + The mask of the data. If no mask was given at masker creation, contains + the automatically computed mask. References ---------- - * Gael Varoquaux et al. - Multi-subject dictionary learning to segment an atlas of brain - spontaneous activity - Information Processing in Medical Imaging, 2011, pp. 562-573, - Lecture Notes in Computer Science + * Arthur Mensch, Gael Varoquaux, Bertrand Thirion, + Compressed online dictionary learning for fast resting-state fMRI + decomposition. + IEEE 13th International Symposium on Biomedical Imaging (ISBI), 2016. + pp. 
1282-1285 """ def __init__(self, n_components=20, n_epochs=1, alpha=10, reduction_ratio='auto', dict_init=None, - random_state=None, - mask=None, smoothing_fwhm=4, - standardize=True, detrend=True, - low_pass=None, high_pass=None, t_r=None, - target_affine=None, target_shape=None, - mask_strategy='epi', mask_args=None, - memory=Memory(cachedir=None), memory_level=0, - n_jobs=1, verbose=0, - ): + random_state=None, batch_size=20, method="cd", mask=None, + smoothing_fwhm=4, standardize=True, detrend=True, + low_pass=None, high_pass=None, t_r=None, target_affine=None, + target_shape=None, mask_strategy='epi', mask_args=None, + n_jobs=1, verbose=0, memory=Memory(cachedir=None), + memory_level=0): BaseDecomposition.__init__(self, n_components=n_components, - random_state=random_state, - mask=mask, + random_state=random_state, mask=mask, smoothing_fwhm=smoothing_fwhm, - standardize=standardize, - detrend=detrend, + standardize=standardize, detrend=detrend, low_pass=low_pass, high_pass=high_pass, - t_r=t_r, - target_affine=target_affine, + t_r=t_r, target_affine=target_affine, target_shape=target_shape, mask_strategy=mask_strategy, - mask_args=mask_args, - memory=memory, - memory_level=memory_level, - n_jobs=n_jobs, + mask_args=mask_args, memory=memory, + memory_level=memory_level, n_jobs=n_jobs, verbose=verbose) self.n_epochs = n_epochs + self.batch_size = batch_size + self.method = method self.alpha = alpha self.reduction_ratio = reduction_ratio self.dict_init = dict_init @@ -169,13 +214,9 @@ def _init_dict(self, data): do_cca=True, threshold=float(self.n_components), n_init=1, # mask parameter is not useful as we bypass masking - mask=self.masker_, - random_state=self.random_state, - memory=self.memory, - memory_level=self.memory_level, - n_jobs=self.n_jobs, - verbose=self.verbose - ) + mask=self.masker_, random_state=self.random_state, + memory=self.memory, memory_level=self.memory_level, + n_jobs=self.n_jobs, verbose=self.verbose) with warnings.catch_warnings(): warnings.simplefilter("ignore", UserWarning) # We use protected function _raw_fit as data @@ -188,78 +229,38 @@ def _init_dict(self, data): self.components_init_ = components def _init_loadings(self, data): - self._loadings_init = self._cache(_compute_loadings, - func_memory_level=2)( - self.components_init_, - data) + self.loadings_init_ = self._cache(_compute_loadings)( + self.components_init_, data) - def fit(self, imgs, y=None, confounds=None): - """Compute the mask and the ICA maps across subjects + def _raw_fit(self, data): + """Helper function that directly processes unmasked data Parameters ---------- - imgs: list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. - Data on which PCA must be calculated. If this is a list, - the affine is considered the same for all. - - confounds: CSV file path or 2D matrix - This parameter is passed to nilearn.signal.clean.
Please see the - related documentation for details """ - # Base logic for decomposition estimators - BaseDecomposition.fit(self, imgs) - - if self.verbose: - print('[DictLearning] Loading data') - data = mask_and_reduce(self.masker_, imgs, confounds, - reduction_ratio=self.reduction_ratio, - n_components=self.n_components, - random_state=self.random_state, - memory_level=max(0, self.memory_level - 1), - n_jobs=self.n_jobs, - memory=self.memory) if self.verbose: print('[DictLearning] Learning initial components') self._init_dict(data) - self._raw_fit(data) - - def _raw_fit(self, data): - """Compute the mask and the maps across subjects, using raw_data. Can - only be called directly is dict_init and mask_img, or - components_init_ is provided - - Parameters - ---------- - data: ndarray, - Shape (n_samples, n_features) - """ - n_samples, n_features = data.shape + _, n_features = data.shape if self.verbose: print('[DictLearning] Computing initial loadings') self._init_loadings(data) - dict_init = self._loadings_init + dict_init = self.loadings_init_ - n_iter = ((n_features - 1) // 20 + 1) * self.n_epochs + n_iter = ((n_features - 1) // self.batch_size + 1) * self.n_epochs if self.verbose: print('[DictLearning] Learning dictionary') - self.components_, dictionary = self._cache(dict_learning_online, - func_memory_level=2)( - data.T, - self.n_components, - alpha=self.alpha, - n_iter=n_iter, - batch_size=20, - method='cd', - dict_init=dict_init, - verbose=max(0, self.verbose - 1), - random_state=self.random_state, - return_code=True, - shuffle=True, + self.components_, _ = self._cache(dict_learning_online)( + data.T, self.n_components, alpha=self.alpha, n_iter=n_iter, + batch_size=self.batch_size, method=self.method, + dict_init=dict_init, verbose=max(0, self.verbose - 1), + random_state=self.random_state, return_code=True, shuffle=True, n_jobs=1) self.components_ = self.components_.T # Unit-variance scaling @@ -267,12 +268,13 @@ def _raw_fit(self, data): S[S == 0] = 1 self.components_ /= S[:, np.newaxis] - # flip signs in each composant so that positive part is l1 larger - # than negative part - # Empirically this yield more positive looking maps - # than with setting the max to be positive + # Flip signs in each component so that positive part is l1 larger + # than negative part. Empirically this yields more positive looking maps + # than with setting the max to be positive. for component in self.components_: if np.sum(component > 0) < np.sum(component < 0): component *= -1 + if hasattr(self, "masker_"): + self.components_img_ = self.masker_.inverse_transform(self.components_) return self diff --git a/nilearn/decomposition/multi_pca.py b/nilearn/decomposition/multi_pca.py index a975ea5f94..1cc60f6229 100644 --- a/nilearn/decomposition/multi_pca.py +++ b/nilearn/decomposition/multi_pca.py @@ -5,12 +5,11 @@ import numpy as np from sklearn.externals.joblib import Memory from sklearn.utils.extmath import randomized_svd -from sklearn.base import TransformerMixin -from .base import BaseDecomposition, mask_and_reduce +from .base import BaseDecomposition -class MultiPCA(BaseDecomposition, TransformerMixin): +class MultiPCA(BaseDecomposition): """Perform Multi Subject Principal Component Analysis. Perform a PCA on each subject, stack the results, and reduce them @@ -20,7 +19,7 @@ class MultiPCA(BaseDecomposition, TransformerMixin): Parameters ---------- n_components: int - Number of components to extract + Number of components to extract.
By default n_components=20. do_cca: boolean, optional Indicate if a Canonical Correlation Analysis must be run after the @@ -39,10 +38,30 @@ class MultiPCA(BaseDecomposition, TransformerMixin): it will be computed automatically by a MultiNiftiMasker with default parameters. + mask_strategy: {'background', 'epi' or 'template'}, optional + The strategy used to compute the mask: use 'background' if your + images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'epi'. + + mask_args: dict, optional + If mask is None, these are additional parameters passed to + masking.compute_background_mask or masking.compute_epi_mask + to fine-tune mask computation. Please see the related documentation + for details. + standardize : boolean, optional If standardize is True, the time-series are centered and normed: their variance is put to 1 in the time dimension. + detrend : boolean, optional + If detrend is True, the time-series will be detrended before + components extraction. + target_affine: 3x3 or 4x4 matrix, optional This parameter is passed to image.resample_img. Please see the related documentation for details. @@ -51,11 +70,11 @@ class MultiPCA(BaseDecomposition, TransformerMixin): This parameter is passed to image.resample_img. Please see the related documentation for details. - low_pass: False or float, optional + low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details - high_pass: False or float, optional + high_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details @@ -88,7 +107,7 @@ class MultiPCA(BaseDecomposition, TransformerMixin): of `mask` and other NiftiMasker related parameters as initialization. `mask_img_` : Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html The mask of the data. If no mask was given at masker creation, contains the automatically computed mask. @@ -96,6 +115,17 @@ class MultiPCA(BaseDecomposition, TransformerMixin): Array of masked extracted components. They can be unmasked thanks to the `masker_` attribute. + Deprecated since version 0.4.1. Use `components_img_` instead. + + `components_img_` : 4D Nifti image + 4D image giving the extracted PCA components. Each 3D image is a + component. + + New in version 0.4.1. + + `variance_` : numpy array (n_components,) + The amount of variance explained by each of the selected components. + """ def __init__(self, n_components=20, @@ -131,44 +161,21 @@ def __init__(self, n_components=20, n_jobs=n_jobs, verbose=verbose) - def fit(self, imgs, y=None, confounds=None): - """Compute the mask and the components - - Parameters - ---------- - imgs: list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. - Data on which the PCA must be calculated. If this is a list, - the affine is considered the same for all. - - confounds: CSV file path or 2D matrix - This parameter is passed to nilearn.signal.clean. 
Please see the - related documentation for details - - """ - BaseDecomposition.fit(self, imgs) - - data = mask_and_reduce(self.masker_, imgs, - confounds=confounds, - n_components=self.n_components, - random_state=self.random_state, - memory=self.memory, - memory_level=max(0, self.memory_level - 1), - n_jobs=self.n_jobs) - self._raw_fit(data) - return self - def _raw_fit(self, data): - """Helper function that direcly process unmasked data""" + """Helper function that directly processes unmasked data""" if self.do_cca: S = np.sqrt(np.sum(data ** 2, axis=1)) S[S == 0] = 1 data /= S[:, np.newaxis] - self.components_, self.variance_, _ = self._cache( + components_, self.variance_, _ = self._cache( randomized_svd, func_memory_level=2)( data.T, n_components=self.n_components, transpose=True, random_state=self.random_state, n_iter=3) if self.do_cca: data *= S[:, np.newaxis] - self.components_ = self.components_.T + self.components_ = components_.T + if hasattr(self, "masker_"): + self.components_img_ = self.masker_.inverse_transform( + components_.T) + return components_ diff --git a/nilearn/decomposition/tests/test_base.py b/nilearn/decomposition/tests/test_base.py index a419e1f4ee..6e36782eda 100644 --- a/nilearn/decomposition/tests/test_base.py +++ b/nilearn/decomposition/tests/test_base.py @@ -1,10 +1,41 @@ import numpy as np +from scipy import linalg from nose.tools import assert_true import nibabel from numpy.testing import assert_equal, assert_array_almost_equal from nilearn._utils.testing import assert_raises_regex from nilearn.input_data import MultiNiftiMasker from nilearn.decomposition.base import BaseDecomposition, mask_and_reduce +from nilearn.decomposition.base import fast_svd + + +def test_fast_svd(): + n_samples = 100 + k = 10 + + rng = np.random.RandomState(42) + + # We need to use n_features > 500 to trigger the randomized_svd + for n_features in (30, 100, 550): + # generate a matrix X of approximate effective rank `k` and no noise + # component (very structured signal): + U = rng.normal(size=(n_samples, k)) + V = rng.normal(size=(k, n_features)) + X = np.dot(U, V) + assert_equal(X.shape, (n_samples, n_features)) + + # compute the singular values of X using the slow exact method + U_, s_, V_ = linalg.svd(X, full_matrices=False) + + Ur, Sr, Vr = fast_svd(X, k, random_state=0) + assert_equal(Vr.shape, (k, n_features)) + assert_equal(Ur.shape, (n_samples, k)) + + # check the singular vectors too (while not checking the sign) + assert_array_almost_equal( + np.abs(np.diag(np.corrcoef(V_[:k], Vr)))[:k], + np.ones(k)) + def test_mask_reducer(): shape = (6, 8, 10, 5) @@ -58,43 +89,3 @@ def test_mask_reducer(): data2 = mask_and_reduce(masker, [imgs[0]] * 2, n_components=3, random_state=0) assert_array_almost_equal(np.tile(data1, (2, 1)), data2) - - -def test_base_decomposition(): - shape = (6, 8, 10, 5) - affine = np.eye(4) - rng = np.random.RandomState(0) - data = [] - for i in range(8): - this_data = rng.normal(size=shape) - # Create fake activation to get non empty mask - this_data[2:4, 2:4, 2:4, :] += 10 - data.append(nibabel.Nifti1Image(this_data, affine)) - mask = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine) - masker = MultiNiftiMasker(mask_img=mask) - base_decomposition = BaseDecomposition(mask=masker, n_components=3) - base_decomposition.fit(data) - assert_true(base_decomposition.mask_img_ == mask) - assert_true(base_decomposition.mask_img_ == - base_decomposition.masker_.mask_img_) - - # Testing fit on data - masker = MultiNiftiMasker() - base_decomposition =
BaseDecomposition(mask=masker, n_components=3) - base_decomposition.fit(data) - assert_true(base_decomposition.mask_img_ == - base_decomposition.masker_.mask_img_) - - assert_raises_regex(ValueError, - "Object has no components_ attribute. " - "This may be because " - "BaseDecomposition is directly " - "being used.", - base_decomposition.transform, data) - assert_raises_regex(ValueError, - 'Need one or more Niimg-like objects as input, ' - 'an empty list was given.', - base_decomposition.fit, []) - - -# Score is tested in multi_pca diff --git a/nilearn/decomposition/tests/test_canica.py b/nilearn/decomposition/tests/test_canica.py index a39073dc00..016ccf72c7 100644 --- a/nilearn/decomposition/tests/test_canica.py +++ b/nilearn/decomposition/tests/test_canica.py @@ -5,9 +5,12 @@ from nose.tools import assert_true, assert_raises import nibabel -from nilearn._utils.testing import assert_less_equal +from nilearn._utils.testing import (assert_less_equal, write_tmp_imgs, + assert_raises_regex) from nilearn.decomposition.canica import CanICA +from nilearn.input_data import MultiNiftiMasker from nilearn.image import iter_img +from nilearn.decomposition.tests.test_multi_pca import _tmp_dir def _make_data_from_components(components, affine, shape, rng=None, @@ -15,12 +18,19 @@ def _make_data_from_components(components, affine, shape, rng=None, data = [] if rng is None: rng = np.random.RandomState(0) + background = -.01 * rng.normal(size=shape) - 2 + background = background[..., np.newaxis] for _ in range(n_subjects): this_data = np.dot(rng.normal(size=(40, 4)), components) this_data += .01 * rng.normal(size=this_data.shape) # Get back into 3D for CanICA this_data = np.reshape(this_data, (40,) + shape) this_data = np.rollaxis(this_data, 0, 4) + # Put the border of the image to zero, to mimic a brain image + this_data[:5] = background[:5] + this_data[-5:] = background[-5:] + this_data[:, :5] = background[:, :5] + this_data[:, -5:] = background[:, -5:] data.append(nibabel.Nifti1Image(this_data, affine)) return data @@ -50,7 +60,7 @@ def _make_canica_components(shape): def _make_canica_test_data(rng=None, n_subjects=8, noisy=False): if rng is None: rng = np.random.RandomState(0) - shape = (20, 20, 1) + shape = (30, 30, 5) affine = np.eye(4) components = _make_canica_components(shape) if noisy: # Creating noisy non positive data @@ -62,7 +72,15 @@ def _make_canica_test_data(rng=None, n_subjects=8, noisy=False): # Create a "multi-subject" dataset data = _make_data_from_components(components, affine, shape, rng=rng, n_subjects=n_subjects) - mask_img = nibabel.Nifti1Image(np.ones(shape, dtype=np.int8), affine) + mask = np.ones(shape) + mask[:5] = 0 + mask[-5:] = 0 + mask[:, :5] = 0 + mask[:, -5:] = 0 + mask[..., -2:] = 0 + mask[..., :2] = 0 + + mask_img = nibabel.Nifti1Image(mask, affine) return data, mask_img, components, rng @@ -73,13 +91,15 @@ def test_canica_square_img(): canica = CanICA(n_components=4, random_state=rng, mask=mask_img, smoothing_fwhm=0., n_init=50) canica.fit(data) - maps = canica.masker_.inverse_transform(canica.components_).get_data() + maps = canica.components_img_.get_data() maps = np.rollaxis(maps, 3, 0) # FIXME: This could be done more efficiently, e.g. 
thanks to hungarian # Find pairs of matching components # compute the cross-correlation matrix between components - K = np.corrcoef(components, maps.reshape(4, 400))[4:, :4] + mask = mask_img.get_data() != 0 + K = np.corrcoef(components[:, mask.ravel()], + maps[:, mask])[4:, :4] # K should be a permutation matrix, hence its coefficients # should all be close to 0 1 or -1 K_abs = np.abs(K) @@ -91,6 +111,17 @@ def test_canica_square_img(): assert_raises(TypeError, canica.fit) +def test_canica_single_subject(): + # Check that canica runs on a single-subject dataset + data, mask_img, components, rng = _make_canica_test_data(n_subjects=1) + + # We do a large number of inits to be sure to find the good match + canica = CanICA(n_components=4, random_state=rng, + smoothing_fwhm=0., n_init=1) + # This is a smoke test: we just check that things run + canica.fit(data[0]) + + def test_component_sign(): # We should have a heuristic that flips the sign of components in # CanICA to have more positive values than negative values, for @@ -103,7 +134,85 @@ def test_component_sign(): canica = CanICA(n_components=4, random_state=rng, mask=mask_img) for _ in range(3): canica.fit(data) - for mp in iter_img(canica.masker_.inverse_transform( - canica.components_)): + for mp in iter_img(canica.components_img_): mp = mp.get_data() assert_less_equal(-mp.min(), mp.max()) + + +def test_threshold_bound(): + # Smoke test to make sure an error is raised when threshold + # is higher than number of components + assert_raises(ValueError, CanICA, n_components=4, threshold=5.) + + +def test_masker_attributes_with_fit(): + # Test base module at sub-class + data, mask_img, components, rng = _make_canica_test_data(n_subjects=3) + # Passing mask_img + canica = CanICA(n_components=3, mask=mask_img, random_state=0) + canica.fit(data) + assert_true(canica.mask_img_ == mask_img) + assert_true(canica.mask_img_ == canica.masker_.mask_img_) + # Passing masker + masker = MultiNiftiMasker(mask_img=mask_img) + canica = CanICA(n_components=3, mask=masker, random_state=0) + canica.fit(data) + assert_true(canica.mask_img_ == canica.masker_.mask_img_) + canica = CanICA(mask=mask_img, n_components=3) + assert_raises_regex(ValueError, + "Object has no components_ attribute. " + "This is probably because fit has not been called", + canica.transform, data) + # Test if raises an error when empty list of provided. 
+ assert_raises_regex(ValueError, + 'Need one or more Niimg-like objects as input, ' + 'an empty list was given.', + canica.fit, []) + # Test passing masker arguments to estimator + canica = CanICA(n_components=3, + target_affine=np.eye(4), + target_shape=(6, 8, 10), + mask_strategy='background') + canica.fit(data) + + +def test_components_img(): + data, mask_img, _, _ = _make_canica_test_data(n_subjects=3) + n_components = 3 + canica = CanICA(n_components=n_components, mask=mask_img) + canica.fit(data) + components_img = canica.components_img_ + assert_true(isinstance(components_img, nibabel.Nifti1Image)) + check_shape = data[0].shape[:3] + (n_components,) + assert_true(components_img.shape, check_shape) + + +def test_with_globbing_patterns_with_single_subject(): + # single subject + data, mask_img, _, _ = _make_canica_test_data(n_subjects=1) + n_components = 3 + canica = CanICA(n_components=n_components, mask=mask_img) + with write_tmp_imgs(data[0], create_files=True, use_wildcards=True) as img: + input_image = _tmp_dir() + img + canica.fit(input_image) + components_img = canica.components_img_ + assert_true(isinstance(components_img, nibabel.Nifti1Image)) + # n_components = 3 + check_shape = data[0].shape[:3] + (3,) + assert_true(components_img.shape, check_shape) + + +def test_with_globbing_patterns_with_multi_subjects(): + # Multi subjects + data, mask_img, _, _ = _make_canica_test_data(n_subjects=3) + n_components = 3 + canica = CanICA(n_components=n_components, mask=mask_img) + with write_tmp_imgs(data[0], data[1], data[2], create_files=True, + use_wildcards=True) as img: + input_image = _tmp_dir() + img + canica.fit(input_image) + components_img = canica.components_img_ + assert_true(isinstance(components_img, nibabel.Nifti1Image)) + # n_components = 3 + check_shape = data[0].shape[:3] + (3,) + assert_true(components_img.shape, check_shape) diff --git a/nilearn/decomposition/tests/test_dict_learning.py b/nilearn/decomposition/tests/test_dict_learning.py index 73f2a5ca94..40229f0ae7 100644 --- a/nilearn/decomposition/tests/test_dict_learning.py +++ b/nilearn/decomposition/tests/test_dict_learning.py @@ -1,16 +1,22 @@ import numpy as np +import nibabel -from nilearn._utils.testing import assert_less_equal +from nose.tools import assert_true +from nilearn._utils.testing import (assert_less_equal, assert_raises_regex, + write_tmp_imgs) from nilearn.decomposition.dict_learning import DictLearning from nilearn.decomposition.tests.test_canica import _make_canica_test_data from nilearn.image import iter_img from nilearn.input_data import NiftiMasker +from nilearn.decomposition.tests.test_multi_pca import _tmp_dir def test_dict_learning(): data, mask_img, components, rng = _make_canica_test_data(n_subjects=8) - mask = NiftiMasker(mask_img=mask_img).fit() - dict_init = mask.inverse_transform(components) + masker = NiftiMasker(mask_img=mask_img).fit() + mask = mask_img.get_data() != 0 + flat_mask = mask.ravel() + dict_init = masker.inverse_transform(components[:, flat_mask]) dict_learning = DictLearning(n_components=4, random_state=0, dict_init=dict_init, mask=mask_img, @@ -24,22 +30,23 @@ def test_dict_learning(): for estimator in [dict_learning, dict_learning_auto_init]: estimator.fit(data) - maps[estimator] = estimator.masker_. 
\ - inverse_transform(estimator.components_).get_data() - maps[estimator] = np.reshape(np.rollaxis(maps[estimator], 3, 0), - (4, 400)) + maps[estimator] = estimator.components_img_.get_data() + maps[estimator] = np.reshape( + np.rollaxis(maps[estimator], 3, 0)[:, mask], + (4, flat_mask.sum())) + masked_components = components[:, flat_mask] for this_dict_learning in [dict_learning]: these_maps = maps[this_dict_learning] - S = np.sqrt(np.sum(components ** 2, axis=1)) + S = np.sqrt(np.sum(masked_components ** 2, axis=1)) S[S == 0] = 1 - components /= S[:, np.newaxis] + masked_components /= S[:, np.newaxis] S = np.sqrt(np.sum(these_maps ** 2, axis=1)) S[S == 0] = 1 these_maps /= S[:, np.newaxis] - K = np.abs(components.dot(these_maps.T)) + K = np.abs(masked_components.dot(these_maps.T)) recovered_maps = np.sum(K > 0.9) assert(recovered_maps >= 2) @@ -66,7 +73,79 @@ def test_component_sign(): mask=mask_img, smoothing_fwhm=0., alpha=1) dict_learning.fit(data) - for mp in iter_img(dict_learning.masker_.inverse_transform( - dict_learning.components_)): + for mp in iter_img(dict_learning.components_img_): mp = mp.get_data() assert_less_equal(np.sum(mp[mp <= 0]), np.sum(mp[mp > 0])) + + +def test_masker_attributes_with_fit(): + # Test base module at sub-class + data, mask_img, components, rng = _make_canica_test_data(n_subjects=3) + # Passing mask_img + dict_learning = DictLearning(n_components=3, mask=mask_img, random_state=0) + dict_learning.fit(data) + assert_true(dict_learning.mask_img_ == mask_img) + assert_true(dict_learning.mask_img_ == dict_learning.masker_.mask_img_) + # Passing masker + masker = NiftiMasker(mask_img=mask_img) + dict_learning = DictLearning(n_components=3, mask=masker, random_state=0) + dict_learning.fit(data) + assert_true(dict_learning.mask_img_ == dict_learning.masker_.mask_img_) + dict_learning = DictLearning(mask=mask_img, n_components=3) + assert_raises_regex(ValueError, + "Object has no components_ attribute. " + "This is probably because fit has not been called", + dict_learning.transform, data) + # Test if raises an error when empty list of provided. 
+ assert_raises_regex(ValueError, + 'Need one or more Niimg-like objects as input, ' + 'an empty list was given.', + dict_learning.fit, []) + # Test passing masker arguments to estimator + dict_learning = DictLearning(n_components=3, + target_affine=np.eye(4), + target_shape=(6, 8, 10), + mask_strategy='background') + dict_learning.fit(data) + + +def test_components_img(): + data, mask_img, _, _ = _make_canica_test_data(n_subjects=3) + n_components = 3 + dict_learning = DictLearning(n_components=n_components, mask=mask_img) + dict_learning.fit(data) + components_img = dict_learning.components_img_ + assert_true(isinstance(components_img, nibabel.Nifti1Image)) + check_shape = data[0].shape + (n_components,) + assert_true(components_img.shape, check_shape) + + +def test_with_globbing_patterns_with_single_subject(): + # single subject + data, mask_img, _, _ = _make_canica_test_data(n_subjects=1) + n_components = 3 + dictlearn = DictLearning(n_components=n_components, mask=mask_img) + with write_tmp_imgs(data[0], create_files=True, use_wildcards=True) as img: + input_image = _tmp_dir() + img + dictlearn.fit(input_image) + components_img = dictlearn.components_img_ + assert_true(isinstance(components_img, nibabel.Nifti1Image)) + # n_components = 3 + check_shape = data[0].shape[:3] + (3,) + assert_true(components_img.shape, check_shape) + + +def test_with_globbing_patterns_with_multi_subjects(): + # multi subjects + data, mask_img, _, _ = _make_canica_test_data(n_subjects=3) + n_components = 3 + dictlearn = DictLearning(n_components=n_components, mask=mask_img) + with write_tmp_imgs(data[0], data[1], data[2], create_files=True, + use_wildcards=True) as img: + input_image = _tmp_dir() + img + dictlearn.fit(input_image) + components_img = dictlearn.components_img_ + assert_true(isinstance(components_img, nibabel.Nifti1Image)) + # n_components = 3 + check_shape = data[0].shape[:3] + (3,) + assert_true(components_img.shape, check_shape) diff --git a/nilearn/decomposition/tests/test_multi_pca.py b/nilearn/decomposition/tests/test_multi_pca.py index 6bc30debbc..40a86bf5ee 100644 --- a/nilearn/decomposition/tests/test_multi_pca.py +++ b/nilearn/decomposition/tests/test_multi_pca.py @@ -1,7 +1,8 @@ """ Test the multi-PCA module """ - +import os +import tempfile import numpy as np from nose.tools import assert_raises, assert_true import nibabel @@ -9,7 +10,14 @@ from nilearn.decomposition.multi_pca import MultiPCA from nilearn.input_data import MultiNiftiMasker, NiftiMasker -from nilearn._utils.testing import assert_raises_regex +from nilearn._utils.testing import assert_raises_regex, write_tmp_imgs + + +def _tmp_dir(): + """For testing globbing patterns in input images + """ + tmp_dir = tempfile.tempdir + os.sep + return tmp_dir def test_multi_pca(): @@ -30,10 +38,14 @@ def test_multi_pca(): mask_img = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine) multi_pca = MultiPCA(mask=mask_img, n_components=3, random_state=0) + # fit to the data and test for masker attributes + multi_pca.fit(data) + assert_true(multi_pca.mask_img_ == mask_img) + assert_true(multi_pca.mask_img_ == multi_pca.masker_.mask_img_) # Test that the components are the same if we put twice the same data, and # that fit output is deterministic - components1 = multi_pca.fit(data).components_ + components1 = multi_pca.components_ components2 = multi_pca.fit(data).components_ components3 = multi_pca.fit(2 * data).components_ np.testing.assert_array_equal(components1, components2) @@ -50,6 +62,12 @@ def test_multi_pca(): 
multi_pca = MultiPCA() assert_raises(ValueError, multi_pca.fit, data[:2]) + # Test fit on data with the use of a masker + masker = MultiNiftiMasker() + multi_pca = MultiPCA(mask=masker, n_components=3) + multi_pca.fit(data) + assert_true(multi_pca.mask_img_ == multi_pca.masker_.mask_img_) + # Smoke test the use of a masker and without CCA multi_pca = MultiPCA(mask=MultiNiftiMasker(mask_args=dict(opening=0)), do_cca=False, n_components=3) @@ -66,7 +84,17 @@ def test_multi_pca(): "Object has no components_ attribute. " "This is probably because fit has not been called", multi_pca.transform, data) - + # Test if raises an error when empty list of provided. + assert_raises_regex(ValueError, + 'Need one or more Niimg-like objects as input, ' + 'an empty list was given.', + multi_pca.fit, []) + # Test passing masker arguments to estimator + multi_pca = MultiPCA(target_affine=affine, + target_shape=shape[:3], + n_components=3, + mask_strategy='background') + multi_pca.fit(data) def test_multi_pca_score(): @@ -115,3 +143,66 @@ def test_multi_pca_score(): assert_equal(s.shape, (5,)) assert_true(np.all(s <= 1)) assert_true(np.all(0 <= s)) + + +def test_components_img(): + shape = (6, 8, 10, 5) + affine = np.eye(4) + rng = np.random.RandomState(0) + + # Create a "multi-subject" dataset + data = [] + for i in range(8): + this_data = rng.normal(size=shape) + # Create fake activation to get non empty mask + this_data[2:4, 2:4, 2:4, :] += 10 + data.append(nibabel.Nifti1Image(this_data, affine)) + + mask_img = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine) + n_components = 3 + multi_pca = MultiPCA(mask=mask_img, n_components=n_components, + random_state=0) + # fit to the data and test for components images + multi_pca.fit(data) + components_img = multi_pca.components_img_ + assert_true(isinstance(components_img, nibabel.Nifti1Image)) + check_shape = data[0].shape[:3] + (n_components,) + assert_equal(components_img.shape, check_shape) + assert_equal(len(components_img.shape), 4) + + +def test_with_globbing_patterns_with_single_image(): + # With single image + data_4d = np.zeros((40, 40, 40, 3)) + data_4d[20, 20, 20] = 1 + img_4d = nibabel.Nifti1Image(data_4d, affine=np.eye(4)) + multi_pca = MultiPCA(n_components=3) + + with write_tmp_imgs(img_4d, create_files=True, use_wildcards=True) as img: + input_image = _tmp_dir() + img + multi_pca.fit(input_image) + components_img = multi_pca.components_img_ + assert_true(isinstance(components_img, nibabel.Nifti1Image)) + # n_components = 3 + check_shape = img_4d.shape[:3] + (3,) + assert_equal(components_img.shape, check_shape) + assert_equal(len(components_img.shape), 4) + + +def test_with_globbing_patterns_with_multiple_images(): + # With multiple images + data_4d = np.zeros((40, 40, 40, 3)) + data_4d[20, 20, 20] = 1 + img_4d = nibabel.Nifti1Image(data_4d, affine=np.eye(4)) + multi_pca = MultiPCA(n_components=3) + + with write_tmp_imgs(img_4d, img_4d, create_files=True, + use_wildcards=True) as imgs: + input_image = _tmp_dir() + imgs + multi_pca.fit(input_image) + components_img = multi_pca.components_img_ + assert_true(isinstance(components_img, nibabel.Nifti1Image)) + # n_components = 3 + check_shape = img_4d.shape[:3] + (3,) + assert_equal(components_img.shape, check_shape) + assert_equal(len(components_img.shape), 4) diff --git a/nilearn/image/__init__.py b/nilearn/image/__init__.py index 6d7c299338..833f1f684a 100644 --- a/nilearn/image/__init__.py +++ b/nilearn/image/__init__.py @@ -2,14 +2,18 @@ Mathematical operations working on Niimg-like 
objects like -a (3+n)-D block of data, and an affine. """ -from .resampling import resample_img, reorder_img +from .resampling import resample_img, resample_to_img, reorder_img, \ + coord_transform from .image import high_variance_confounds, smooth_img, crop_img, \ - mean_img, swap_img_hemispheres, index_img, iter_img, threshold_img + mean_img, swap_img_hemispheres, index_img, iter_img, threshold_img, \ + math_img, load_img, clean_img, largest_connected_component_img from .image import new_img_like # imported this way to avoid circular imports from .._utils.niimg_conversions import concat_niimgs as concat_imgs from .._utils.niimg import copy_img -__all__ = ['resample_img', 'high_variance_confounds', 'smooth_img', - 'crop_img', 'mean_img', 'reorder_img', +__all__ = ['resample_img', 'resample_to_img', 'high_variance_confounds', + 'smooth_img', 'crop_img', 'mean_img', 'reorder_img', 'swap_img_hemispheres', 'concat_imgs', 'copy_img', - 'index_img', 'iter_img', 'new_img_like', 'threshold_img'] + 'index_img', 'iter_img', 'new_img_like', 'threshold_img', + 'math_img', 'load_img', 'clean_img', + 'largest_connected_component_img', 'coord_transform'] diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 756f853a1b..ee8adc0162 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -7,7 +7,7 @@ # License: simplified BSD import collections -from distutils.version import LooseVersion +import warnings import numpy as np from scipy import ndimage @@ -31,13 +31,13 @@ def high_variance_confounds(imgs, n_confounds=5, percentile=2., variance. Parameters - ========== + ---------- imgs: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html 4D image. mask_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html If provided, confounds are extracted from voxels inside the mask. If not provided, all voxels are used. @@ -53,12 +53,12 @@ def high_variance_confounds(imgs, n_confounds=5, percentile=2., If True, detrend signals before processing. Returns - ======= + ------- v: numpy.ndarray highest variance confounds. Shape: (number of scans, n_confounds) Notes - ====== + ------ This method is related to what has been published in the literature as 'CompCor' (Behzadi NeuroImage 2007). @@ -71,7 +71,7 @@ def high_variance_confounds(imgs, n_confounds=5, percentile=2., highest singular values. See also - ======== + -------- nilearn.signal.high_variance_confounds """ from .. import masking @@ -87,8 +87,8 @@ def high_variance_confounds(imgs, n_confounds=5, percentile=2., sigs = np.reshape(sigs, (-1, sigs.shape[-1])).T return signal.high_variance_confounds(sigs, n_confounds=n_confounds, - percentile=percentile, - detrend=detrend) + percentile=percentile, + detrend=detrend) def _fast_smooth_array(arr): @@ -100,18 +100,18 @@ def _fast_smooth_array(arr): normalisation to preserve the local average value. Parameters - ========== + ---------- arr: numpy.ndarray 4D array, with image number as last dimension. 3D arrays are also accepted. Returns - ======= + ------- smoothed_arr: numpy.ndarray Smoothed array. - Note - ==== + Notes + ----- Rather than calling this function directly, users are encouraged to call the high-level function :func:`smooth_img` with fwhm='fast'. 
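
The fwhm argument of smooth_img selects between the three regimes documented above; a short sketch with a hypothetical input path:

    from nilearn.image import smooth_img

    func = 'sub-01_bold.nii.gz'                 # hypothetical input image
    smoothed = smooth_img(func, fwhm=6.)        # 6 mm isotropic Gaussian
    quick = smooth_img(func, fwhm='fast')       # cheap [0.2, 1, 0.2] filter
    finite_only = smooth_img(func, fwhm=None)   # only zero out non-finite values
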
@@ -146,7 +146,7 @@ def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True): Apply a Gaussian filter along the three first dimensions of arr. Parameters - ========== + ---------- arr: numpy.ndarray 4D array, with image number as last dimension. 3D arrays are also accepted. @@ -176,14 +176,21 @@ def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True): is performed in-place. Returns - ======= + ------- filtered_arr: numpy.ndarray arr, filtered. Notes - ===== + ----- This function is most efficient with arr in C order. """ + # Here, we have to investigate use cases of fwhm. Particularly, if fwhm=0. + # See issue #1537 + if fwhm == 0.: + warnings.warn("The parameter 'fwhm' for smoothing is specified " + "as {0}. Converting to None (no smoothing option)" + .format(fwhm)) + fwhm = None if arr.dtype.kind == 'i': if arr.dtype == np.int64: @@ -221,9 +228,9 @@ def smooth_img(imgs, fwhm): In all cases, non-finite values in input image are replaced by zeros. Parameters - ========== + ---------- imgs: Niimg-like object or iterable of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Image(s) to smooth. fwhm: scalar, numpy.ndarray, 'fast' or None @@ -234,10 +241,13 @@ def smooth_img(imgs, fwhm): a filter [0.2, 1, 0.2] in each direction and a normalisation to preserve the scale. If fwhm is None, no filtering is performed (useful when just removal - of non-finite values is needed) + of non-finite values is needed). + + In corner case situations, fwhm is simply kept to None when fwhm is + specified as fwhm=0. Returns - ======= + ------- filtered_img: nibabel.Nifti1Image or list of. Input image, filtered. If imgs is an iterable, then filtered_img is a list. @@ -255,7 +265,7 @@ def smooth_img(imgs, fwhm): ret = [] for img in imgs: img = check_niimg(img) - affine = img.get_affine() + affine = img.affine filtered = _smooth_array(img.get_data(), affine, fwhm=fwhm, ensure_finite=True, copy=True) ret.append(new_img_like(img, filtered, affine, copy_header=True)) @@ -273,9 +283,9 @@ def _crop_img_to(img, slices, copy=True): accordingly Parameters - ========== + ---------- img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Img to be cropped. If slices has less entries than img has dimensions, the slices will be applied to the first len(slices) dimensions @@ -290,16 +300,16 @@ def _crop_img_to(img, slices, copy=True): Default: True Returns - ======= + ------- cropped_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Cropped version of the input image """ img = check_niimg(img) data = img.get_data() - affine = img.get_affine() + affine = img.affine cropped_data = data[slices] if copy: @@ -326,9 +336,9 @@ def crop_img(img, rtol=1e-8, copy=True): avoid sampling issues later on. Parameters - ========== + ---------- img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html img to be cropped. rtol: float @@ -340,7 +350,7 @@ def crop_img(img, rtol=1e-8, copy=True): Specifies whether cropped data is copied or not. 
Returns - ======= + ------- cropped_img: image Cropped version of the input image """ @@ -373,7 +383,7 @@ def _compute_mean(imgs, target_affine=None, imgs = check_niimg(imgs) mean_data = _safe_get_data(imgs) - affine = imgs.get_affine() + affine = imgs.affine # Free memory ASAP imgs = None if not mean_data.ndim in (3, 4): @@ -388,7 +398,7 @@ def _compute_mean(imgs, target_affine=None, nibabel.Nifti1Image(mean_data, affine), target_affine=target_affine, target_shape=target_shape, copy=False) - affine = mean_data.get_affine() + affine = mean_data.affine mean_data = mean_data.get_data() if smooth: @@ -408,10 +418,10 @@ def mean_img(imgs, target_affine=None, target_shape=None, computed separately, and the resulting mean is computed after. Parameters - ========== + ---------- imgs: Niimg-like object or iterable of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Images to mean. target_affine: numpy.ndarray, optional @@ -432,10 +442,14 @@ def mean_img(imgs, target_affine=None, target_shape=None, 'all CPUs'. Returns - ======= + ------- mean: nibabel.Nifti1Image mean image + See Also + -------- + nilearn.image.math_img : For more general operations on images + """ if (isinstance(imgs, _basestring) or not isinstance(imgs, collections.Iterable)): @@ -476,7 +490,7 @@ def swap_img_hemispheres(img): Parameters ---------- img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Images to swap. Returns @@ -502,7 +516,7 @@ def swap_img_hemispheres(img): img = reorder_img(img) # create swapped nifti object - out_img = new_img_like(img, img.get_data()[::-1], img.get_affine(), + out_img = new_img_like(img, img.get_data()[::-1], img.affine, copy_header=True) return out_img @@ -517,7 +531,7 @@ def index_img(imgs, index): Parameters ---------- imgs: 4D Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html index: Any type compatible with numpy array indexing Used for indexing the 4D data array in the fourth dimension. @@ -526,8 +540,32 @@ def index_img(imgs, index): ------- output: nibabel.Nifti1Image + See Also + -------- + nilearn.image.concat_imgs + nilearn.image.iter_img + + Examples + -------- + First we concatenate two mni152 images to create a 4D-image:: + + >>> from nilearn import datasets + >>> from nilearn.image import concat_imgs, index_img + >>> joint_mni_image = concat_imgs([datasets.load_mni152_template(), + ... datasets.load_mni152_template()]) + >>> print(joint_mni_image.shape) + (91, 109, 91, 2) + + We can now select one slice from the last dimension of this 4D-image:: + + >>> single_mni_image = index_img(joint_mni_image, 1) + >>> print(single_mni_image.shape) + (91, 109, 91) """ imgs = check_niimg_4d(imgs) + # duck-type for pandas arrays, and select the 'values' attr + if hasattr(index, 'values') and hasattr(index, 'iloc'): + index = index.values.flatten() return _index_img(imgs, index) @@ -537,11 +575,16 @@ def iter_img(imgs): Parameters ---------- imgs: 4D Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. 
+ See http://nilearn.github.io/manipulating_images/input_output.html Returns ------- output: iterator of 3D nibabel.Nifti1Image + + See Also + -------- + nilearn.image.index_img + """ return check_niimg_4d(imgs, return_iterator=True) @@ -576,34 +619,44 @@ def new_img_like(ref_niimg, data, affine=None, copy_header=False): and hasattr(ref_niimg, '__iter__')): ref_niimg = ref_niimg[0] if not (hasattr(ref_niimg, 'get_data') - and hasattr(ref_niimg,'get_affine')): + and hasattr(ref_niimg, 'affine')): if isinstance(ref_niimg, _basestring): ref_niimg = nibabel.load(ref_niimg) else: raise TypeError(('The reference image should be a niimg, %r ' - 'was passed') % orig_ref_niimg ) + 'was passed') % orig_ref_niimg) if affine is None: - affine = ref_niimg.get_affine() + affine = ref_niimg.affine if data.dtype == bool: default_dtype = np.int8 - if (LooseVersion(nibabel.__version__) >= LooseVersion('1.2.0') and - isinstance(ref_niimg, nibabel.freesurfer.mghformat.MGHImage)): + if isinstance(ref_niimg, nibabel.freesurfer.mghformat.MGHImage): default_dtype = np.uint8 data = as_ndarray(data, dtype=default_dtype) header = None if copy_header: - header = copy.deepcopy(ref_niimg.get_header()) + header = copy.deepcopy(ref_niimg.header) header['scl_slope'] = 0. header['scl_inter'] = 0. - header['glmax'] = 0. + # 'glmax' is removed for Nifti2Image. Modify only if 'glmax' is + # available in header. See issue #1611 + if 'glmax' in header: + header['glmax'] = 0. header['cal_max'] = np.max(data) if data.size > 0 else 0. - header['cal_max'] = np.min(data) if data.size > 0 else 0. - return ref_niimg.__class__(data, affine, header=header) + header['cal_min'] = np.min(data) if data.size > 0 else 0. + klass = ref_niimg.__class__ + if klass is nibabel.Nifti1Pair: + # Nifti1Pair is an internal class, without a to_filename, + # we shouldn't return it + klass = nibabel.Nifti1Image + return klass(data, affine, header=header) def threshold_img(img, threshold, mask_img=None): - """ Thresholds the given input image based on specific strategy. + """ Threshold the given input image, mostly statistical or atlas images. + + Thresholding can be done based on direct image intensities or selection + threshold with given percentile. .. versionadded:: 0.2 @@ -635,10 +688,11 @@ def threshold_img(img, threshold, mask_img=None): from .. import masking img = check_niimg(img) - img_data = _safe_get_data(img).copy() - affine = img.get_affine() + img_data = _safe_get_data(img, ensure_finite=True) + affine = img.affine if mask_img is not None: + mask_img = check_niimg_3d(mask_img) if not _check_same_fov(img, mask_img): mask_img = resampling.resample_img(mask_img, target_affine=affine, target_shape=img.shape[:3], @@ -660,3 +714,258 @@ def threshold_img(img, threshold, mask_img=None): threshold_img = new_img_like(img, img_data, affine) return threshold_img + + +def math_img(formula, **imgs): + """Interpret a numpy based string formula using niimg in named parameters. + + .. versionadded:: 0.2.3 + + Parameters + ---------- + formula: str + The mathematical formula to apply to image internal data. It can use + numpy imported as 'np'. + imgs: images (Nifti1Image or file names) + Keyword arguments corresponding to the variables in the formula as + Nifti images. All input images should have the same geometry (shape, + affine). + + Returns + ------- + return_img: Nifti1Image + Result of the formula as a Nifti image. Note that the dimension of the + result image can be smaller than the input image. The affine is the + same as the input image. 
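The 'glmax' guard above is what lets copy_header=True work with Nifti2 headers, which no longer carry that field; a small sketch assuming only the public new_img_like helper::

    import numpy as np
    import nibabel
    from nilearn.image import new_img_like

    ref = nibabel.Nifti2Image(np.zeros((4, 4, 4)), np.eye(4))
    # Would previously fail with a KeyError on 'glmax' for Nifti2 headers
    img = new_img_like(ref, np.ones((4, 4, 4)), copy_header=True)
    print(img.__class__.__name__)  # Nifti2Image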
+ + See Also + -------- + nilearn.image.mean_img : To simply compute the mean of multiple images + + Examples + -------- + Let's load an image using nilearn datasets module:: + + >>> from nilearn import datasets + >>> anatomical_image = datasets.load_mni152_template() + + Now we can use any numpy function on this image:: + + >>> from nilearn.image import math_img + >>> log_img = math_img("np.log(img)", img=anatomical_image) + + We can also apply mathematical operations on several images:: + + >>> result_img = math_img("img1 + img2", + ... img1=anatomical_image, img2=log_img) + + Notes + ----- + + This function is the Python equivalent of ImCal in SPM or fslmaths + in FSL. + + """ + try: + # Check that input images are valid niimg and have a compatible shape + # and affine. + niimgs = [] + for image in imgs.values(): + niimgs.append(check_niimg(image)) + _check_same_fov(*niimgs, raise_error=True) + except Exception as exc: + exc.args = (("Input images cannot be compared, you provided '{0}'," + .format(imgs.values()),) + exc.args) + raise + + # Computing input data as a dictionary of numpy arrays. Keep a reference + # niimg for building the result as a new niimg. + niimg = None + data_dict = {} + for key, img in imgs.items(): + niimg = check_niimg(img) + data_dict[key] = _safe_get_data(niimg) + + # Add a reference to numpy in the kwargs of eval so that numpy functions + # can be called from there. + data_dict['np'] = np + try: + result = eval(formula, data_dict) + except Exception as exc: + exc.args = (("Input formula couldn't be processed, you provided '{0}'," + .format(formula),) + exc.args) + raise + + return new_img_like(niimg, result, niimg.affine) + + +def clean_img(imgs, sessions=None, detrend=True, standardize=True, + confounds=None, low_pass=None, high_pass=None, t_r=2.5, + ensure_finite=False): + """Improve SNR on masked fMRI signals. + + This function can do several things on the input signals, in + the following order: + + - detrend + - standardize + - remove confounds + - low- and high-pass filter + + Low-pass filtering improves specificity. + + High-pass filtering should be kept small, to keep some + sensitivity. + + Filtering is only meaningful on evenly-sampled signals. + + .. versionadded:: 0.2.5 + + Parameters + ---------- + imgs: Niimg-like object + See http://nilearn.github.io/manipulating_images/input_output.html + 4D image. The signals in the last dimension are filtered. + + sessions : numpy array, optional + Add a session level to the cleaning process. Each session will be + cleaned independently. Must be a 1D array of n_samples elements. + + detrend: bool + If detrending should be applied on timeseries (before + confound removal) + + standardize: bool + If True, returned signals are set to unit variance. + + confounds: numpy.ndarray, str or list of + Confounds timeseries. Shape must be + (instant number, confound number), or just (instant number,) + The number of time instants in signals and confounds must be + identical (i.e. signals.shape[0] == confounds.shape[0]). + If a string is provided, it is assumed to be the name of a csv file + containing signals as columns, with an optional one-line header. + If a list is provided, all confounds are removed from the input + signal, as if all were in the same array. + + low_pass, high_pass: float + Respectively low and high cutoff frequencies, in Hertz. + + t_r: float, optional + Repetition time, in second (sampling period). 
+ + ensure_finite: bool, optional + If True, the non-finite values (NaNs and infs) found in the images + will be replaced by zeros. + + Returns + ------- + cleaned_img: Niimg-like object + Input images, cleaned. Same shape as `imgs`. + + Notes + ----- + Confounds removal is based on a projection on the orthogonal + of the signal space. See `Friston, K. J., A. P. Holmes, + K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak. + "Statistical Parametric Maps in Functional Imaging: A General + Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210. + `_ + + See Also + -------- + nilearn.signal.clean + """ + # Avoid circular import + from .image import new_img_like + + imgs_ = check_niimg_4d(imgs) + data = signal.clean( + imgs_.get_data().reshape(-1, imgs_.shape[-1]).T, sessions=sessions, + detrend=detrend, standardize=standardize, confounds=confounds, + low_pass=low_pass, high_pass=high_pass, t_r=t_r, + ensure_finite=ensure_finite).T.reshape(imgs_.shape) + return new_img_like(imgs, data, copy_header=True) + + +def load_img(img, wildcards=True, dtype=None): + """Load a Niimg-like object from filenames or list of filenames. + + .. versionadded:: 0.2.5 + + Parameters + ---------- + img: Niimg-like object + See http://nilearn.github.io/manipulating_images/input_output.html + If niimg is a string, consider it as a path to Nifti image and + call nibabel.load on it. The '~' symbol is expanded to the user home + folder. + If it is an object, check if get_data() + and affine attributes are present, raise TypeError otherwise. + + wildcards: bool, optional + Use niimg as a regular expression to get a list of matching input + filenames. + If multiple files match, the returned list is sorted using an ascending + order. + If no file matches the regular expression, a ValueError exception is + raised. + + dtype: {dtype, "auto"} + Data type toward which the data should be converted. If "auto", the + data will be converted to int32 if dtype is discrete and float32 if it + is continuous. + + Returns + ------- + result: 3D/4D Niimg-like object + Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed + that the returned object has get_data() and affine attributes. + """ + return check_niimg(img, wildcards=wildcards, dtype=dtype) + + +def largest_connected_component_img(imgs): + """ Return the largest connected component of an image or list of images. + + .. versionadded:: 0.3.1 + + Parameters + ---------- + imgs: Niimg-like object or iterable of Niimg-like objects (3D) + See http://nilearn.github.io/manipulating_images/input_output.html + Image(s) to extract the largest connected component from. + + Returns + ------- + img or list of img containing the largest connected component + + Notes + ----- + + **Handling big-endian in given Nifti image** + This function changes the existing byte-ordering information to new byte + order, if the dtype in given Nifti image has non-native data type. + This operation is done internally to avoid big-endian issues with + scipy ndimage module. 
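A short sketch of the byte-order note above: a big-endian input yields the same largest component as a native one (shapes and values here are illustrative)::

    import numpy as np
    import nibabel
    from nilearn.image import largest_connected_component_img

    data = np.zeros((8, 8, 8))
    data[1:3, 1:3, 1:3] = 1   # small component, 8 voxels
    data[4:8, 4:8, 4:8] = 1   # largest component, 64 voxels
    native = nibabel.Nifti1Image(data, np.eye(4))
    big_endian = nibabel.Nifti1Image(data.astype('>f8'), np.eye(4))
    out_native = largest_connected_component_img(native)
    out_big = largest_connected_component_img(big_endian)
    # Only the 64-voxel component survives, whatever the byte order
    np.testing.assert_array_equal(out_native.get_data(), out_big.get_data())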
+ """ + from .._utils.ndimage import largest_connected_component + + if hasattr(imgs, "__iter__") and not isinstance(imgs, _basestring): + single_img = False + else: + single_img = True + imgs = [imgs] + + ret = [] + for img in imgs: + img = check_niimg_3d(img) + affine = img.affine + largest_component = largest_connected_component(_safe_get_data(img)) + ret.append(new_img_like(img, largest_component, affine, + copy_header=True)) + + if single_img: + return ret[0] + else: + return ret diff --git a/nilearn/image/resampling.py b/nilearn/image/resampling.py index c5730072ad..cada507d74 100644 --- a/nilearn/image/resampling.py +++ b/nilearn/image/resampling.py @@ -1,12 +1,13 @@ """ Utilities to resample a Niimg-like object -See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. +See http://nilearn.github.io/manipulating_images/input_output.html """ # Author: Gael Varoquaux, Alexandre Abraham, Michael Eickenberg # License: simplified BSD import warnings from distutils.version import LooseVersion +import numbers import numpy as np import scipy @@ -90,35 +91,56 @@ def coord_transform(x, y, z, affine): """ Convert the x, y, z coordinates from one image space to another space. - Parameters - ---------- - x : number or ndarray - The x coordinates in the input space - y : number or ndarray - The y coordinates in the input space - z : number or ndarray - The z coordinates in the input space - affine : 2D 4x4 ndarray - affine that maps from input to output space. + Parameters + ---------- + x : number or ndarray (any shape) + The x coordinates in the input space + y : number or ndarray (same shape as x) + The y coordinates in the input space + z : number or ndarray + The z coordinates in the input space + affine : 2D 4x4 ndarray + affine that maps from input to output space. + + Returns + ------- + x : number or ndarray (same shape as input) + The x coordinates in the output space + y : number or ndarray (same shape as input) + The y coordinates in the output space + z : number or ndarray (same shape as input) + The z coordinates in the output space + + Warning: The x, y and z have their output space (e.g. MNI) coordinate + ordering, not 3D numpy image ordering. + + Examples + -------- + Transform data from coordinates to brain space. The "affine" matrix + can be found as the ".affine" attribute of a nifti image, or using + the "get_affine()" method for older nibabel installations:: + + >>> from nilearn import datasets, image + >>> niimg = datasets.load_mni152_template() + >>> # Find the MNI coordinates of the voxel (50, 50, 50) + >>> image.coord_transform(50, 50, 50, niimg.affine) + (-10.0, -26.0, 28.0) - Returns - ------- - x : number or ndarray - The x coordinates in the output space - y : number or ndarray - The y coordinates in the output space - z : number or ndarray - The z coordinates in the output space - - Warning: The x, y and z have their Talairach ordering, not 3D - numy image ordering.
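The example above goes from voxel indices to world space; the inverse affine goes back, as in this sketch with a hypothetical MNI-like affine::

    import numpy as np
    from nilearn.image import coord_transform

    affine = np.array([[2., 0., 0., -90.],
                       [0., 2., 0., -126.],
                       [0., 0., 2., -72.],
                       [0., 0., 0., 1.]])
    # Voxel indices (45, 63, 36) map to world coordinates (0, 0, 0)
    x, y, z = coord_transform(45, 63, 36, affine)
    # The inverse affine maps world coordinates back to voxel indices
    i, j, k = coord_transform(x, y, z, np.linalg.inv(affine))
    print((i, j, k))  # (45.0, 63.0, 36.0)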
""" + squeeze = (not hasattr(x, '__iter__')) + return_number = isinstance(x, numbers.Number) + x = np.asanyarray(x) + shape = x.shape coords = np.c_[np.atleast_1d(x).flat, np.atleast_1d(y).flat, np.atleast_1d(z).flat, np.ones_like(np.atleast_1d(z).flat)].T x, y, z, _ = np.dot(affine, coords) - return x.squeeze(), y.squeeze(), z.squeeze() + if return_number: + return x.item(), y.item(), z.item() + if squeeze: + return x.squeeze(), y.squeeze(), z.squeeze() + return np.reshape(x, shape), np.reshape(y, shape), np.reshape(z, shape) def get_bounds(shape, affine): @@ -127,7 +149,7 @@ def get_bounds(shape, affine): The coordinates returned correspond to the **center** of the corner voxels. Parameters - ========== + ---------- shape: tuple shape of the array. Must have 3 integer values. @@ -136,7 +158,7 @@ def get_bounds(shape, affine): and world-space coordinates. Returns - ======= + ------- coord: list of tuples coord[i] is a 2-tuple giving minimal and maximal coordinates along i-th axis. @@ -164,7 +186,7 @@ def get_mask_bounds(img): Parameters ---------- img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html The image to inspect. Zero values are considered as background. @@ -185,7 +207,7 @@ def get_mask_bounds(img): """ img = _utils.check_niimg_3d(img) mask = _utils.numpy_conversions._asarray(img.get_data(), dtype=np.bool) - affine = img.get_affine() + affine = img.affine (xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds(mask.shape, affine) slices = ndimage.find_objects(mask) if len(slices) == 0: @@ -215,7 +237,7 @@ class BoundingBoxError(ValueError): ############################################################################### # Resampling -def _resample_one_img(data, A, A_inv, b, target_shape, +def _resample_one_img(data, A, b, target_shape, interpolation_order, out, copy=True): "Internal function for resample_img, do not use" if data.dtype.kind in ('i', 'u'): @@ -238,44 +260,40 @@ def _resample_one_img(data, A, A_inv, b, target_shape, data = _extrapolate_out_mask(data, np.logical_not(not_finite), iterations=2)[0] - # See https://github.com/nilearn/nilearn/issues/346 Copying the - # array makes it C continuous and as such the int32 index in the C - # code is a lot less likely to overflow - if (LooseVersion(scipy.__version__) < LooseVersion('0.14.1')): - data = data.copy() - - # The resampling itself - ndimage.affine_transform(data, A, - offset=np.dot(A_inv, b), - output_shape=target_shape, - output=out, - order=interpolation_order) - - # Bug in ndimage.affine_transform when out does not have native endianness - # see https://github.com/nilearn/nilearn/issues/275 - # Bug was fixed in scipy 0.15 - if (LooseVersion(scipy.__version__) < LooseVersion('0.15') and - not out.dtype.isnative): - out.byteswap(True) + # Suppresses warnings in https://github.com/nilearn/nilearn/issues/1363 + with warnings.catch_warnings(): + if LooseVersion(scipy.__version__) >= LooseVersion('0.18'): + warnings.simplefilter("ignore", UserWarning) + # The resampling itself + ndimage.affine_transform(data, A, + offset=b, + output_shape=target_shape, + output=out, + order=interpolation_order) if has_not_finite: - # We need to resample the mask of not_finite values - not_finite = ndimage.affine_transform(not_finite, A, - offset=np.dot(A_inv, b), - output_shape=target_shape, - order=0) + # Suppresses warnings in https://github.com/nilearn/nilearn/issues/1363 + with warnings.catch_warnings(): + if 
LooseVersion(scipy.__version__) >= LooseVersion('0.18'): + warnings.simplefilter("ignore", UserWarning) + # We need to resample the mask of not_finite values + not_finite = ndimage.affine_transform(not_finite, A, + offset=b, + output_shape=target_shape, + order=0) out[not_finite] = np.nan return out def resample_img(img, target_affine=None, target_shape=None, - interpolation='continuous', copy=True, order="F"): + interpolation='continuous', copy=True, order="F", + clip=True): """Resample a Niimg-like object Parameters ---------- img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Image(s) to resample. target_affine: numpy.ndarray, optional @@ -289,7 +307,8 @@ must also be given. (See notes) interpolation: str, optional - Can be 'continuous' (default) or 'nearest'. Indicate the resample method + Can be 'continuous' (default), 'linear', or 'nearest'. Indicates the resample + method. copy: bool, optional If True, guarantees that output array has no memory in common with @@ -300,14 +319,25 @@ Data ordering in output array. This function is slightly faster with Fortran ordering. + clip: bool, optional + If True (default) all resampled image values above max(img) and + under min(img) are clipped to min(img) and max(img). Note that + 0 is added as an image value for clipping, and it is the padding + value when extrapolating out of field of view. + If False no clipping is performed. + Returns - ======= + ------- resampled: nibabel.Nifti1Image The input image, resampled to the given target_shape and + target_affine. + See Also + -------- + nilearn.image.resample_to_img + Notes - ===== + ----- **BoundingBoxError** If a 4x4 transformation matrix (target_affine) is given and all of the @@ -338,6 +368,15 @@ **NaNs and infinite values** This function handles gracefully NaNs and infinite values in the input data, however they make the execution of the function much slower. + + **Handling non-native endian in given Nifti images** + This function automatically changes the byte-ordering information + in the image dtype to the native byte order, which implies that if + the given image has a non-native endianness then the data in the + output Nifti image will have a native dtype. This is only the case + if the given target_affine (transformation matrix) is diagonal and + homogeneous.
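A minimal usage sketch for the options documented above (diagonal 3x3 target affine, output shape inferred from the bounding box; 'linear' is the interpolation mode added by this patch)::

    import numpy as np
    import nibabel
    from nilearn.image import resample_img

    img = nibabel.Nifti1Image(np.random.RandomState(0).rand(8, 8, 8), np.eye(4))
    # Double the voxel size; no target_shape, so the field of view is preserved
    resampled = resample_img(img, target_affine=2 * np.eye(3),
                             interpolation='linear')
    print(resampled.header.get_zooms())  # (2.0, 2.0, 2.0)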
+ """ + from .image import new_img_like # avoid circular imports @@ -358,10 +397,12 @@ if interpolation == 'continuous': interpolation_order = 3 + elif interpolation == 'linear': + interpolation_order = 1 elif interpolation == 'nearest': interpolation_order = 0 else: - message = ("interpolation must be either 'continuous' " + message = ("interpolation must be either 'continuous', 'linear' " "or 'nearest' but it was set to '{0}'").format(interpolation) raise ValueError(message) @@ -382,7 +423,7 @@ target_affine = np.asarray(target_affine) shape = img.shape - affine = img.get_affine() + affine = img.affine if (np.all(np.array(target_shape) == shape[:3]) and np.allclose(target_affine, affine)): @@ -440,13 +481,14 @@ else: transform_affine = np.dot(linalg.inv(affine), target_affine) A, b = to_matrix_vector(transform_affine) - A_inv = linalg.inv(A) # If A is diagonal, ndimage.affine_transform is clever enough to use a # better algorithm. if np.all(np.diag(np.diag(A)) == A): + if LooseVersion(scipy.__version__) < LooseVersion('0.18'): + # Before scipy 0.18, ndimage.affine_transform was applying a + # different logic to the offset for diagonal affine + b = np.dot(linalg.inv(A), b) A = np.diag(A) - else: - b = np.dot(A, b) data_shape = list(data.shape) # Make sure that we have a list here @@ -465,6 +507,19 @@ else: resampled_data_dtype = data.dtype + # Since the release of scipy 0.17, resampling nifti images has had some + # issues when the affine is passed as a 1D array and the data has a + # non-native endianness. + # See issue https://github.com/nilearn/nilearn/issues/1445. + # If affine is passed as 1D, scipy uses _nd_image.zoom_shift rather + # than _geometric_transform (2D) where _geometric_transform is able + # to swap the byte order in scipy later than 0.15 for non-native + # endianness. + + # We convert to 'native' order to not have any issues either with + # 'little' or 'big' endian data dtypes (non-native endians). + if len(A.shape) == 1 and not resampled_data_dtype.isnative: + resampled_data_dtype = resampled_data_dtype.newbyteorder('N') + # Code is generic enough to work for both 3D and 4D images other_shape = data_shape[3:] resampled_data = np.empty(list(target_shape) + other_shape, @@ -472,18 +527,87 @@ all_img = (slice(None), ) * 3 - # Iter overr a set of 3D volumes, as the interpolation problem is + # Iterate over a set of 3D volumes, as the interpolation problem is # separable in the extra dimensions. This reduces the # computational cost for ind in np.ndindex(*other_shape): - _resample_one_img(data[all_img + ind], A, A_inv, b, target_shape, + _resample_one_img(data[all_img + ind], A, b, target_shape, interpolation_order, out=resampled_data[all_img + ind], copy=not input_img_is_string) + if clip: + # force resampled data to have a range contained in the original data + # preventing ringing artefact + # We need to add zero as a value considered for clipping, as it + # appears in padding images.
+ vmin = min(data.min(), 0) + vmax = max(data.max(), 0) + resampled_data.clip(vmin, vmax, out=resampled_data) + return new_img_like(img, resampled_data, target_affine) +def resample_to_img(source_img, target_img, + interpolation='continuous', copy=True, order='F', clip=False): + """Resample a Niimg-like source image on a target Niimg-like image + (no registration is performed: the images should already be aligned). + + .. versionadded:: 0.2.4 + + Parameters + ---------- + source_img: Niimg-like object + See http://nilearn.github.io/manipulating_images/input_output.html + Image(s) to resample. + + target_img: Niimg-like object + See http://nilearn.github.io/manipulating_images/input_output.html + Reference image taken for resampling. + + interpolation: str, optional + Can be 'continuous' (default), 'linear', or 'nearest'. Indicates the resample + method. + + copy: bool, optional + If True, guarantees that output array has no memory in common with + input array. + In all cases, input images are never modified by this function. + + order: "F" or "C" + Data ordering in output array. This function is slightly faster with + Fortran ordering. + + clip: bool, optional + If False (default) no clipping is performed. + If True, all resampled image values above max(img) and under min(img) + are clipped to min(img) and max(img). + + Returns + ------- + resampled: nibabel.Nifti1Image + The source image, resampled to the shape and affine of the target + image. + + See Also + -------- + nilearn.image.resample_img + """ + + target = _utils.check_niimg(target_img) + target_shape = target.shape + + # When the target image has more than 3 dimensions, keep only the first + # three, to be compatible with the underlying call to resample_img + if len(target_shape) > 3: + target_shape = target.shape[:3] + + return resample_img(source_img, + target_affine=target.affine, + target_shape=target_shape, + interpolation=interpolation, copy=copy, order=order, clip=clip) + + def reorder_img(img, resample=None): """Returns an image with the affine diagonal (by permuting axes). The orientation of the new image will be RAS (Right, Anterior, Superior). @@ -493,10 +617,10 @@ Parameters ----------- img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Image to reorder. - resample: None or string in {'continuous', 'nearest'}, optional + resample: None or string in {'continuous', 'linear', 'nearest'}, optional If resample is None (default), no resampling is performed, the axes are only permuted.
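As the forwarding above shows, resample_to_img is a thin convenience wrapper around resample_img; a short sketch::

    import numpy as np
    import nibabel
    from nilearn.image import resample_to_img

    rng = np.random.RandomState(0)
    source = nibabel.Nifti1Image(rng.rand(10, 10, 10), np.eye(4))
    target = nibabel.Nifti1Image(rng.rand(5, 5, 5), np.diag((2, 2, 2, 1)))
    # The output takes its shape and affine from the target image
    resampled = resample_to_img(source, target, interpolation='nearest')
    print(resampled.shape)  # (5, 5, 5)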
Otherwise resampling is performed and 'resample' will @@ -509,7 +633,7 @@ def reorder_img(img, resample=None): img = _utils.check_niimg(img) # The copy is needed in order not to modify the input img affine # see https://github.com/nilearn/nilearn/issues/325 for a concrete bug - affine = img.get_affine().copy() + affine = img.affine.copy() A, b = to_matrix_vector(affine) if not np.all((np.abs(A) > 0.001).sum(axis=0) == 1): @@ -549,13 +673,13 @@ def reorder_img(img, resample=None): else: slice1 = slice(None, None, None) if pixdim[1] < 0: - b[1] = b[1] + 1 + pixdim[1]*(data.shape[1] - 1) + b[1] = b[1] + pixdim[1]*(data.shape[1] - 1) pixdim[1] = -pixdim[1] slice2 = slice(None, None, -1) else: slice2 = slice(None, None, None) if pixdim[2] < 0: - b[2] = b[2] + 1 + pixdim[2]*(data.shape[2] - 1) + b[2] = b[2] + pixdim[2]*(data.shape[2] - 1) pixdim[2] = -pixdim[2] slice3 = slice(None, None, -1) else: diff --git a/nilearn/image/tests/test_image.py b/nilearn/image/tests/test_image.py index 6055e54c55..caaf95287a 100644 --- a/nilearn/image/tests/test_image.py +++ b/nilearn/image/tests/test_image.py @@ -1,16 +1,20 @@ """ Test image pre-processing functions """ -from nose.tools import assert_true, assert_false -from distutils.version import LooseVersion +from nose.tools import assert_true, assert_false, assert_equal, assert_raises from nose import SkipTest import platform import os +import sys import nibabel +from nibabel import Nifti1Image import numpy as np from numpy.testing import assert_array_equal, assert_allclose +from nilearn._utils.testing import assert_raises_regex, assert_warns +from nilearn._utils.exceptions import DimensionError +from nilearn import signal from nilearn.image import image from nilearn.image import resampling from nilearn.image import concat_imgs @@ -18,6 +22,13 @@ from nilearn.image import new_img_like from nilearn.image import threshold_img from nilearn.image import iter_img +from nilearn.image import math_img +from nilearn.image import largest_connected_component_img + +try: + import pandas as pd +except Exception: + pass X64 = (platform.architecture()[0] == '64bit') @@ -122,6 +133,15 @@ def test__smooth_array(): np.testing.assert_equal(image._smooth_array(data, affine, fwhm='fast'), image._fast_smooth_array(data)) + # Check corner case when fwhm=0. See #1537 + # Test whether function _smooth_array raises a warning when fwhm=0. + assert_warns(UserWarning, image._smooth_array, data, affine, fwhm=0.) + + # Test output equal when fwhm=None and fwhm=0 + out_fwhm_none = image._smooth_array(data, affine, fwhm=None) + out_fwhm_zero = image._smooth_array(data, affine, fwhm=0.) + assert_array_equal(out_fwhm_none, out_fwhm_zero) + def test_smooth_img(): # This function only checks added functionalities compared @@ -150,6 +170,23 @@ def test_smooth_img(): assert_true(isinstance(out, nibabel.Nifti1Image)) assert_true(out.shape == (shapes[0] + (lengths[0],))) + # Check corner case situations when fwhm=0, See issue #1537 + # Test whether function smooth_img raises a warning when fwhm=0. + assert_warns(UserWarning, image.smooth_img, img1, fwhm=0.) + + # Test output equal when fwhm=None and fwhm=0 + out_fwhm_none = image.smooth_img(img1, fwhm=None) + out_fwhm_zero = image.smooth_img(img1, fwhm=0.) 
+ assert_array_equal(out_fwhm_none.get_data(), out_fwhm_zero.get_data()) + + data1 = np.zeros((10, 11, 12)) + data1[2:4, 1:5, 3:6] = 1 + data2 = np.zeros((13, 14, 15)) + data2[2:4, 1:5, 3:6] = 9 + img1_nifti2 = nibabel.Nifti2Image(data1, affine=np.eye(4)) + img2_nifti2 = nibabel.Nifti2Image(data2, affine=np.eye(4)) + out = image.smooth_img([img1_nifti2, img2_nifti2], fwhm=1.) + def test__crop_img_to(): data = np.zeros((5, 6, 7)) @@ -167,7 +204,7 @@ def test__crop_img_to(): assert_true(cropped_img.shape == (2, 4, 3)) # check that affine was adjusted correctly - assert_true((cropped_img.get_affine()[:3, 3] == new_origin).all()) + assert_true((cropped_img.affine[:3, 3] == new_origin).all()) # check that data was really not copied data[2:4, 1:5, 3:6] = 2 @@ -238,13 +275,13 @@ def test_mean_img(): truth = np.mean(arrays, axis=0) mean_img = image.mean_img(imgs) - assert_array_equal(mean_img.get_affine(), affine) + assert_array_equal(mean_img.affine, affine) assert_array_equal(mean_img.get_data(), truth) # Test with files with testing.write_tmp_imgs(*imgs) as imgs: mean_img = image.mean_img(imgs) - assert_array_equal(mean_img.get_affine(), affine) + assert_array_equal(mean_img.affine, affine) if X64: assert_array_equal(mean_img.get_data(), truth) else: @@ -257,7 +294,6 @@ def test_mean_img(): atol=0) - def test_mean_img_resample(): # Test resampling in mean_img with a permutation of the axes rng = np.random.RandomState(42) @@ -273,9 +309,9 @@ def test_mean_img_resample(): target_affine=target_affine) assert_array_equal(resampled_mean_image.get_data(), mean_img_with_resampling.get_data()) - assert_array_equal(resampled_mean_image.get_affine(), - mean_img_with_resampling.get_affine()) - assert_array_equal(mean_img_with_resampling.get_affine(), target_affine) + assert_array_equal(resampled_mean_image.affine, + mean_img_with_resampling.affine) + assert_array_equal(mean_img_with_resampling.affine, target_affine) def test_swap_img_hemispheres(): @@ -301,7 +337,10 @@ def test_concat_imgs(): def test_index_img(): img_3d = nibabel.Nifti1Image(np.ones((3, 4, 5)), np.eye(4)) - testing.assert_raises_regex(TypeError, '4D Niimg-like', + testing.assert_raises_regex(TypeError, + "Input data has incompatible dimensionality: " + "Expected dimension is 4D and you provided " + "a 3D image.", image.index_img, img_3d, 0) affine = np.array([[1., 2., 3., 4.], @@ -319,21 +358,47 @@ def test_index_img(): expected_data_3d = img_4d.get_data()[..., i] assert_array_equal(this_img.get_data(), expected_data_3d) - assert_array_equal(this_img.get_affine(), - img_4d.get_affine()) + assert_array_equal(this_img.affine, img_4d.affine) for i in [fourth_dim_size, - fourth_dim_size - 1, [0, fourth_dim_size], np.repeat(True, fourth_dim_size + 1)]: testing.assert_raises_regex( IndexError, - 'out of bounds|invalid index|out of range', + 'out of bounds|invalid index|out of range|boolean index', image.index_img, img_4d, i) +def test_pd_index_img(): + # confirm indices from pandas dataframes are handled correctly + if 'pandas' not in sys.modules: + raise SkipTest + + affine = np.array([[1., 2., 3., 4.], + [5., 6., 7., 8.], + [9., 10., 11., 12.], + [0., 0., 0., 1.]]) + img_4d, _ = testing.generate_fake_fmri(affine=affine) + + fourth_dim_size = img_4d.shape[3] + + rng = np.random.RandomState(0) + + arr = rng.rand(fourth_dim_size) > 0.5 + df = pd.DataFrame({"arr": arr}) + + np_index_img = image.index_img(img_4d, arr) + pd_index_img = image.index_img(img_4d, df) + assert_array_equal(np_index_img.get_data(), + pd_index_img.get_data()) + + def 
test_iter_img(): img_3d = nibabel.Nifti1Image(np.ones((3, 4, 5)), np.eye(4)) - testing.assert_raises_regex(TypeError, '4D Niimg-like', + testing.assert_raises_regex(TypeError, + "Input data has incompatible dimensionality: " + "Expected dimension is 4D and you provided " + "a 3D image.", image.iter_img, img_3d) affine = np.array([[1., 2., 3., 4.], @@ -346,16 +411,14 @@ def test_iter_img(): expected_data_3d = img_4d.get_data()[..., i] assert_array_equal(img.get_data(), expected_data_3d) - assert_array_equal(img.get_affine(), - img_4d.get_affine()) + assert_array_equal(img.affine, img_4d.affine) with testing.write_tmp_imgs(img_4d) as img_4d_filename: for i, img in enumerate(image.iter_img(img_4d_filename)): expected_data_3d = img_4d.get_data()[..., i] assert_array_equal(img.get_data(), expected_data_3d) - assert_array_equal(img.get_affine(), - img_4d.get_affine()) + assert_array_equal(img.affine, img_4d.affine) # enables to delete "img_4d_filename" on windows del img @@ -364,16 +427,14 @@ def test_iter_img(): expected_data_3d = img_4d.get_data()[..., i] assert_array_equal(img.get_data(), expected_data_3d) - assert_array_equal(img.get_affine(), - img_4d.get_affine()) + assert_array_equal(img.affine, img_4d.affine) with testing.write_tmp_imgs(*img_3d_list) as img_3d_filenames: for i, img in enumerate(image.iter_img(img_3d_filenames)): expected_data_3d = img_4d.get_data()[..., i] assert_array_equal(img.get_data(), expected_data_3d) - assert_array_equal(img.get_affine(), - img_4d.get_affine()) + assert_array_equal(img.affine, img_4d.affine) # enables to delete "img_3d_filename" on windows del img @@ -384,13 +445,9 @@ def test_new_img_like_mgz(): when using plot_stap_map """ - if not LooseVersion(nibabel.__version__) >= LooseVersion('1.2.0'): - # Old nibabel do not support MGZ files - raise SkipTest - ref_img = nibabel.load(os.path.join(datadir, 'test.mgz')) data = np.ones(ref_img.get_data().shape, dtype=np.bool) - affine = ref_img.get_affine() + affine = ref_img.affine new_img_like(ref_img, data, affine, copy_header=False) @@ -403,16 +460,20 @@ def test_new_img_like(): img2 = new_img_like([img, ], data) np.testing.assert_array_equal(img.get_data(), img2.get_data()) + # test_new_img_like_with_nifti2image_copy_header + img_nifti2 = nibabel.Nifti2Image(data, affine=affine) + img2_nifti2 = new_img_like([img_nifti2, ], data, copy_header=True) + np.testing.assert_array_equal(img_nifti2.get_data(), img2_nifti2.get_data()) + def test_validity_threshold_value_in_threshold_img(): shape = (6, 8, 10) - maps = testing.generate_maps(shape, n_regions=2) - map_0 = maps[0] + maps, _ = testing.generate_maps(shape, n_regions=2) # testing to raise same error when threshold=None case testing.assert_raises_regex(ValueError, "The input parameter 'threshold' is empty. 
", - threshold_img, map_0, threshold=None) + threshold_img, maps, threshold=None) invalid_threshold_values = ['90t%', 's%', 't', '0.1'] name = 'threshold' @@ -420,21 +481,163 @@ def test_validity_threshold_value_in_threshold_img(): testing.assert_raises_regex(ValueError, '{0}.+should be a number followed by ' 'the percent sign'.format(name), - threshold_img, map_0, threshold=thr) + threshold_img, maps, threshold=thr) def test_threshold_img(): # to check whether passes with valid threshold inputs shape = (10, 20, 30) - maps = testing.generate_maps(shape, n_regions=4) - map_0 = maps[0] + maps, _ = testing.generate_maps(shape, n_regions=4) affine = np.eye(4) mask_img = nibabel.Nifti1Image(np.ones((shape), dtype=np.int8), affine) - for img in iter_img(map_0): + for img in iter_img(maps): # when threshold is a float value thr_maps_img = threshold_img(img, threshold=0.8) # when we provide mask image thr_maps_percent = threshold_img(img, threshold=1, mask_img=mask_img) # when threshold is a percentile thr_maps_percent2 = threshold_img(img, threshold='2%') + + +def test_isnan_threshold_img_data(): + shape = (10, 10, 10) + maps, _ = testing.generate_maps(shape, n_regions=2) + data = maps.get_data() + data[:, :, 0] = np.nan + + maps_img = nibabel.Nifti1Image(data, np.eye(4)) + # test threshold_img to converge properly when input image has nans. + threshold_img(maps_img, threshold=0.8) + + +def test_math_img_exceptions(): + img1 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) + img2 = Nifti1Image(np.zeros((10, 20, 10, 10)), np.eye(4)) + img3 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) + img4 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4) * 2) + + formula = "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)" + # Images with different shapes should raise a ValueError exception. + assert_raises_regex(ValueError, + "Input images cannot be compared", + math_img, formula, img1=img1, img2=img2) + + # Images with different affines should raise a ValueError exception. 
+ assert_raises_regex(ValueError, + "Input images cannot be compared", + math_img, formula, img1=img1, img2=img4) + + bad_formula = "np.toto(img1, axis=-1) - np.mean(img3, axis=-1)" + assert_raises_regex(AttributeError, + "Input formula couldn't be processed", + math_img, bad_formula, img1=img1, img3=img3) + + +def test_math_img(): + img1 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) + img2 = Nifti1Image(np.zeros((10, 10, 10, 10)), np.eye(4)) + expected_result = Nifti1Image(np.ones((10, 10, 10)), np.eye(4)) + + formula = "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)" + for create_files in (True, False): + with testing.write_tmp_imgs(img1, img2, + create_files=create_files) as imgs: + result = math_img(formula, img1=imgs[0], img2=imgs[1]) + assert_array_equal(result.get_data(), + expected_result.get_data()) + assert_array_equal(result.affine, expected_result.affine) + assert_equal(result.shape, expected_result.shape) + + +def test_clean_img(): + + rng = np.random.RandomState(0) + + data = rng.randn(10, 10, 10, 100) + .5 + data_flat = data.T.reshape(100, -1) + data_img = nibabel.Nifti1Image(data, np.eye(4)) + + data_img_ = image.clean_img( + data_img, detrend=True, standardize=False, low_pass=0.1) + data_flat_ = signal.clean( + data_flat, detrend=True, standardize=False, low_pass=0.1) + + np.testing.assert_almost_equal(data_img_.get_data().T.reshape(100, -1), + data_flat_) + # if NANs + data[:, 9, 9] = np.nan + # if infinity + data[:, 5, 5] = np.inf + nan_img = nibabel.Nifti1Image(data, np.eye(4)) + clean_im = image.clean_img(nan_img, ensure_finite=True) + assert_true(np.all(np.isfinite(clean_im.get_data()))) + + # test_clean_img_passing_nifti2image + data_img_nifti2 = nibabel.Nifti2Image(data, np.eye(4)) + + data_img_nifti2_ = image.clean_img( + data_img_nifti2, detrend=True, standardize=False, low_pass=0.1) + + +def test_largest_cc_img(): + """ Check the extraction of the largest connected component, for niftis + + Similar to the smooth_img tests for largest_connected_component_img, here + also only the added features of largest_connected_component are tested. + """ + + # Test whether dimension of 3Dimg and list of 3Dimgs are kept. + shapes = ((10, 11, 12), (13, 14, 15)) + regions = [1, 3] + + img1 = testing.generate_labeled_regions(shape=shapes[0], + n_regions=regions[0]) + img2 = testing.generate_labeled_regions(shape=shapes[1], + n_regions=regions[1]) + + for create_files in (False, True): + with testing.write_tmp_imgs(img1, img2, + create_files=create_files) as imgs: + # List of images as input + out = largest_connected_component_img(imgs) + assert_true(isinstance(out, list)) + assert_true(len(out) == 2) + for o, s in zip(out, shapes): + assert_true(o.shape == (s)) + + # Single image as input + out = largest_connected_component_img(imgs[0]) + assert_true(isinstance(out, Nifti1Image)) + assert_true(out.shape == (shapes[0])) + + # Test whether 4D Nifti throws the right error.
+ img_4D = testing.generate_fake_fmri(shapes[0], length=17) + assert_raises(DimensionError, largest_connected_component_img, img_4D) + + # tests adapted to non-native endian data dtype + img1_change_dtype = nibabel.Nifti1Image(img1.get_data().astype('>f8'), + affine=img1.affine) + img2_change_dtype = nibabel.Nifti1Image(img2.get_data().astype('>f8'), + affine=img2.affine) + + for create_files in (False, True): + with testing.write_tmp_imgs(img1_change_dtype, img2_change_dtype, + create_files=create_files) as imgs: + # List of images as input + out = largest_connected_component_img(imgs) + assert_true(isinstance(out, list)) + assert_true(len(out) == 2) + for o, s in zip(out, shapes): + assert_true(o.shape == (s)) + + # Single image as input + out = largest_connected_component_img(imgs[0]) + assert_true(isinstance(out, Nifti1Image)) + assert_true(out.shape == (shapes[0])) + + # Test the output with native and without native + out_native = largest_connected_component_img(img1) + + out_non_native = largest_connected_component_img(img1_change_dtype) + np.testing.assert_equal(out_native.get_data(), out_non_native.get_data()) diff --git a/nilearn/image/tests/test_resampling.py b/nilearn/image/tests/test_resampling.py index 8a4364c80a..4e81b79819 100644 --- a/nilearn/image/tests/test_resampling.py +++ b/nilearn/image/tests/test_resampling.py @@ -14,8 +14,10 @@ from nibabel import Nifti1Image -from nilearn.image.resampling import resample_img, BoundingBoxError, \ - reorder_img, from_matrix_vector, coord_transform +from nilearn.image.resampling import resample_img, resample_to_img, reorder_img +from nilearn.image.resampling import from_matrix_vector, coord_transform +from nilearn.image.resampling import get_bounds +from nilearn.image.resampling import BoundingBoxError from nilearn._utils import testing @@ -88,6 +90,22 @@ def test_identity_resample(): interpolation='nearest') np.testing.assert_almost_equal(data, rot_img.get_data()) + # Test with non native endian data + + # Test with big endian data ('>f8') + for interpolation in ['nearest', 'linear', 'continuous']: + rot_img = resample_img(Nifti1Image(data.astype('>f8'), affine), + target_affine=affine.tolist(), + interpolation=interpolation) + np.testing.assert_almost_equal(data, rot_img.get_data()) + + # Test with little endian data ('<f8') + for interpolation in ['nearest', 'linear', 'continuous']: + rot_img = resample_img(Nifti1Image(data.astype('<f8'), affine), + target_affine=affine.tolist(), + interpolation=interpolation) + np.testing.assert_almost_equal(data, rot_img.get_data()) @@ ... @@ def test_downsample(): + # Test with non native endian data + + # Big endian data ('>f8') + for copy in [True, False]: + rot_img = resample_img(Nifti1Image(data.astype('>f8'), affine), + target_affine=2 * affine, + interpolation='nearest', + copy=copy) + np.testing.assert_almost_equal(downsampled, + rot_img.get_data()[:x, :y, :z, ...]) + + # Little endian data ('<f8') + for copy in [True, False]: + rot_img = resample_img(Nifti1Image(data.astype('<f8'), affine), + target_affine=2 * affine, + interpolation='nearest', + copy=copy) + np.testing.assert_almost_equal(downsampled, + rot_img.get_data()[:x, :y, :z, ...]) @@ ... @@ def test_resampling_with_affine(): + # Test with non native endian data ('>f8') + img = Nifti1Image(data.astype('>f8'), np.eye(4)) + for angle in (0, np.pi, np.pi / 2., np.pi / 4., np.pi / 3.): + rot = rotation(0, angle) + rot_img = resample_img(img, target_affine=rot, + interpolation='nearest') + assert_equal(np.max(data), + np.max(rot_img.get_data())) + def test_resampling_continuous_with_affine(): prng = np.random.RandomState(10) @@ -192,7 +244,7 @@ def test_resampling_error_checks(): assert_false(np.may_share_memory(img_r.get_data(), img.get_data()))
np.testing.assert_almost_equal(img_r.get_data(), img.get_data()) - np.testing.assert_almost_equal(img_r.get_affine(), img.get_affine()) + np.testing.assert_almost_equal(img_r.affine, img.affine) def test_4d_affine_bounding_box_error(): @@ -230,16 +282,16 @@ def l2_norm(arr): # resample using 4D affine and specified target shape small_to_big_with_shape = resample_img( small_img, - target_affine=bigger_img.get_affine(), + target_affine=bigger_img.affine, target_shape=bigger_img.shape) # resample using 3D affine and no target shape small_to_big_without_shape_3D_affine = resample_img( small_img, - target_affine=bigger_img.get_affine()[:3, :3]) + target_affine=bigger_img.affine[:3, :3]) # resample using 4D affine and no target shape small_to_big_without_shape = resample_img( small_img, - target_affine=bigger_img.get_affine()) + target_affine=bigger_img.affine) # The first 2 should pass assert_almost_equal(l2_norm(small_data), @@ -270,6 +322,31 @@ def test_raises_upon_3x3_affine_and_no_shape(): target_shape=(10, 10, 10)) +def test_3x3_affine_bbox(): + # Test that the bounding-box is properly computed when + # transforming with a negative affine component + # This is specifically to test for a change in behavior between + # scipy < 0.18 and scipy >= 0.18, which is an interaction between + # offset and a diagonal affine + image = np.ones((20, 30)) + source_affine = np.eye(4) + # Give the affine an offset + source_affine[:2, 3] = np.array([96, 64]) + + # We need to turn this data into a nibabel image + img = Nifti1Image(image[:, :, np.newaxis], affine=source_affine) + + target_affine_3x3 = np.eye(3) * 2 + # One negative axes + target_affine_3x3[1] *= -1 + + img_3d_affine = resample_img(img, target_affine=target_affine_3x3) + + # If the bounding box is computed wrong, the image will be only + # zeros + np.testing.assert_allclose(img_3d_affine.get_data().max(), image.max()) + + def test_raises_bbox_error_if_data_outside_box(): # Make some cases which should raise exceptions @@ -421,8 +498,54 @@ def test_resampling_nan(): target_affine=np.eye(4)) resampled_data = resampled_img.get_data() - np.testing.assert_allclose(10, - resampled_data[np.isfinite(resampled_data)]) + np.testing.assert_allclose(10, resampled_data[np.isfinite(resampled_data)]) + + +def test_resample_to_img(): + # Testing resample to img function + rand_gen = np.random.RandomState(0) + shape = (6, 3, 6, 3) + data = rand_gen.random_sample(shape) + + source_affine = np.eye(4) + source_img = Nifti1Image(data, source_affine) + + target_affine = 2 * source_affine + target_img = Nifti1Image(data, target_affine) + + + result_img = resample_to_img(source_img, target_img, + interpolation='nearest') + + downsampled = data[::2, ::2, ::2, ...] + x, y, z = downsampled.shape[:3] + np.testing.assert_almost_equal(downsampled, + result_img.get_data()[:x, :y, :z, ...]) + +def test_resample_clip(): + # Resample and image and get larger and smaller + # value than in the original. 
Use clip to get rid of these images + + shape = (6, 3, 6) + data = np.zeros(shape=shape) + data[1:-2, 1:-1, 1:-2] = 1 + + source_affine = np.diag((2, 2, 2, 1)) + source_img = Nifti1Image(data, source_affine) + + target_affine = np.eye(4) + no_clip_data = resample_img(source_img, target_affine, + clip=False).get_data() + clip_data = resample_img(source_img, + target_affine, clip=True).get_data() + + not_clip = np.where((no_clip_data > data.min()) & (no_clip_data < data.max())) + + assert_true(np.any(no_clip_data > data.max())) + assert_true(np.any(no_clip_data < data.min())) + assert_true(np.all(clip_data <= data.max())) + assert_true(np.all(clip_data >= data.min())) + assert_array_equal(no_clip_data[not_clip], clip_data[not_clip]) def test_reorder_img(): @@ -443,10 +566,10 @@ def test_reorder_img(): b = 0.5 * np.array(shape[:3]) new_affine = from_matrix_vector(rot, b) rot_img = resample_img(ref_img, target_affine=new_affine) - np.testing.assert_array_equal(rot_img.get_affine(), new_affine) + np.testing.assert_array_equal(rot_img.affine, new_affine) np.testing.assert_array_equal(rot_img.get_data().shape, shape) reordered_img = reorder_img(rot_img) - np.testing.assert_array_equal(reordered_img.get_affine()[:3, :3], + np.testing.assert_array_equal(reordered_img.affine[:3, :3], np.eye(3)) np.testing.assert_almost_equal(reordered_img.get_data(), data) @@ -465,7 +588,7 @@ def test_reorder_img(): interpolation = 'nearest' reordered_img = reorder_img(ref_img, resample=interpolation) resampled_img = resample_img(ref_img, - target_affine=reordered_img.get_affine(), + target_affine=reordered_img.affine, interpolation=interpolation) np.testing.assert_array_equal(reordered_img.get_data(), resampled_img.get_data()) @@ -491,15 +614,13 @@ def test_reorder_img(): #sample = img.values_in_world(x, y, z) img2 = reorder_img(img) # Check that img has not been changed - np.testing.assert_array_equal(img.get_affine(), - orig_img.get_affine()) + np.testing.assert_array_equal(img.affine, orig_img.affine) np.testing.assert_array_equal(img.get_data(), orig_img.get_data()) # Test that the affine is indeed diagonal: - np.testing.assert_array_equal(img2.get_affine()[:3, :3], - np.diag(np.diag( - img2.get_affine()[:3, :3]))) - assert_true(np.all(np.diag(img2.get_affine()) >= 0)) + np.testing.assert_array_equal(img2.affine[:3, :3], + np.diag(np.diag(img2.affine[:3, :3]))) + assert_true(np.all(np.diag(img2.affine) >= 0)) def test_reorder_img_non_native_endianness(): @@ -527,6 +648,21 @@ def _get_resampled_img(dtype): np.testing.assert_equal(img_1.get_data(), img_2.get_data()) +def test_reorder_img_mirror(): + affine = np.array([ + [-1.1, -0., 0., 0.], + [-0., -1.2, 0., 0.], + [-0., -0., 1.3, 0.], + [0., 0., 0., 1.] 
+ ]) + img = Nifti1Image(np.zeros((4, 6, 8)), affine=affine) + reordered = reorder_img(img) + np.testing.assert_allclose( + get_bounds(reordered.shape, reordered.affine), + get_bounds(img.shape, img.affine), + ) + + def test_coord_transform_trivial(): sform = np.eye(4) x = np.random.random((10,)) @@ -544,6 +680,27 @@ np.testing.assert_array_equal(y + 1, y_) np.testing.assert_array_equal(z + 1, z_) + # Test the output in case of one item array + x, y, z = x[:1], y[:1], z[:1] + x_, y_, z_ = coord_transform(x, y, z, sform) + np.testing.assert_array_equal(x + 1, x_) + np.testing.assert_array_equal(y + 1, y_) + np.testing.assert_array_equal(z + 1, z_) + + # Test the output in case of simple items + x, y, z = x[0], y[0], z[0] + x_, y_, z_ = coord_transform(x, y, z, sform) + np.testing.assert_array_equal(x + 1, x_) + np.testing.assert_array_equal(y + 1, y_) + np.testing.assert_array_equal(z + 1, z_) + + # Test the outputs have the same shape as the inputs + x = np.ones((3, 2, 4)) + y = np.ones((3, 2, 4)) + z = np.ones((3, 2, 4)) + x_, y_, z_ = coord_transform(x, y, z, sform) + assert_equal(x.shape, x_.shape) + def test_resample_img_segmentation_fault(): if os.environ.get('APPVEYOR') == 'True': @@ -576,6 +733,6 @@ def test_resampling_with_int_types_no_crash(): for dtype in [np.int, np.int8, np.int16, np.int32, np.int64, np.uint, np.uint8, np.uint16, np.uint32, np.uint64, - np.float32, np.float64, np.float]: + np.float32, np.float64, np.float, '>i8', '<i8']: diff --git a/nilearn/input_data/base_masker.py b/nilearn/input_data/base_masker.py --- a/nilearn/input_data/base_masker.py +++ b/nilearn/input_data/base_masker.py @@ ... @@ if verbose > 0: print("[%s] Cleaning extracted signals" % class_name) sessions = parameters.get('sessions') @@ -133,7 +134,7 @@ def transform_single_imgs(self, imgs, confounds=None, copy=True): Parameters ---------- imgs: 3D/4D Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Images to process. It must boil down to a 4D image with scans number as last dimension. @@ -156,7 +157,7 @@ def transform(self, imgs, confounds=None): Parameters ---------- imgs: 3D/4D Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Images to process. It must boil down to a 4D image with scans number as last dimension. @@ -181,7 +182,7 @@ def fit_transform(self, X, y=None, confounds=None, **fit_params): Parameters ---------- X : Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html y : numpy array of shape [n_samples] Target values. @@ -219,6 +220,7 @@ def fit_transform(self, X, y=None, confounds=None, **fit_params): def inverse_transform(self, X): """ Transform the 2D data matrix back to an image in brain space.
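The _check_fitted() guard added just below makes inverse_transform fail fast when the masker has not been fitted; a sketch assuming the public NiftiMasker class::

    import numpy as np
    from nilearn.input_data import NiftiMasker

    masker = NiftiMasker()
    try:
        masker.inverse_transform(np.zeros((1, 10)))
    except ValueError as err:
        # Raised by _check_fitted before any unmasking is attempted
        print("not fitted yet:", err)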
""" + self._check_fitted() img = self._cache(masking.unmask)(X, self.mask_img_) # Be robust again memmapping that will create read-only arrays in # internal structures of the header: remove the memmaped array diff --git a/nilearn/input_data/masker_validation.py b/nilearn/input_data/masker_validation.py index 3888ed439a..6ccda67ee7 100644 --- a/nilearn/input_data/masker_validation.py +++ b/nilearn/input_data/masker_validation.py @@ -1,6 +1,9 @@ import warnings +import numpy as np + from .._utils.class_inspect import get_params +from .._utils.cache_mixin import _check_memory from .multi_nifti_masker import MultiNiftiMasker from .nifti_masker import NiftiMasker @@ -49,14 +52,14 @@ def check_embedded_nifti_masker(estimator, multi_subject=True): if multi_subject and hasattr(estimator, 'n_jobs'): # For MultiNiftiMasker only new_masker_params['n_jobs'] = estimator.n_jobs - new_masker_params['memory'] = estimator.memory + new_masker_params['memory'] = _check_memory(estimator.memory) new_masker_params['memory_level'] = max(0, estimator.memory_level - 1) new_masker_params['verbose'] = estimator.verbose # Raising warning if masker override parameters conflict_string = "" for param_key in sorted(estimator_params): - if new_masker_params[param_key] != estimator_params[param_key]: + if np.any(new_masker_params[param_key] != estimator_params[param_key]): conflict_string += ("Parameter {0} :\n" " Masker parameter {1}" " - overriding estimator parameter {2}\n" diff --git a/nilearn/input_data/multi_nifti_masker.py b/nilearn/input_data/multi_nifti_masker.py index 12da4b96f7..f0f5737684 100644 --- a/nilearn/input_data/multi_nifti_masker.py +++ b/nilearn/input_data/multi_nifti_masker.py @@ -4,20 +4,20 @@ # Author: Gael Varoquaux, Alexandre Abraham # License: simplified BSD -import warnings import collections import itertools +import warnings from sklearn.externals.joblib import Memory, Parallel, delayed -from .. import masking -from .. import image from .. import _utils +from .. import image +from .. import masking from .._utils import CacheMixin -from .nifti_masker import NiftiMasker, filter_and_mask +from .._utils.class_inspect import get_params from .._utils.compat import _basestring, izip from .._utils.niimg_conversions import _iter_check_niimg -from .._utils.class_inspect import get_params +from .nifti_masker import NiftiMasker, filter_and_mask class MultiNiftiMasker(NiftiMasker, CacheMixin): @@ -30,7 +30,7 @@ class MultiNiftiMasker(NiftiMasker, CacheMixin): Parameters ---------- mask_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Mask of the data. If not given, a mask is computed in the fit step. Optional parameters can be set using mask_args and mask_strategy to fine tune the mask extraction. @@ -47,11 +47,11 @@ class MultiNiftiMasker(NiftiMasker, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details - low_pass: False or float, optional + low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details - high_pass: False or float, optional + high_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details @@ -67,12 +67,15 @@ class MultiNiftiMasker(NiftiMasker, CacheMixin): This parameter is passed to image.resample_img. Please see the related documentation for details. 
- mask_strategy: {'background' or 'epi'}, optional + mask_strategy: {'background', 'epi' or 'template'}, optional The strategy used to compute the mask: use 'background' if your - images present a clear homogeneous background, and 'epi' if they - are raw EPI images. Depending on this value, the mask will be - computed from masking.compute_background_mask or - masking.compute_epi_mask. Default is 'background'. + images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'background'. mask_args : dict, optional If mask is None, these are additional parameters passed to @@ -80,6 +83,11 @@ class MultiNiftiMasker(NiftiMasker, CacheMixin): to fine-tune mask computation. Please see the related documentation for details. + dtype: {dtype, "auto"} + Data type toward which the data should be converted. If "auto", the + data will be converted to int32 if dtype is discrete and float32 if it + is continuous. + memory: instance of joblib.Memory or string Used to cache the masking process. By default, no caching is done. If a string is given, it is the @@ -116,7 +124,7 @@ def __init__(self, mask_img=None, smoothing_fwhm=None, standardize=False, detrend=False, low_pass=None, high_pass=None, t_r=None, target_affine=None, target_shape=None, - mask_strategy='background', mask_args=None, + mask_strategy='background', mask_args=None, dtype=None, memory=Memory(cachedir=None), memory_level=0, n_jobs=1, verbose=0 ): @@ -133,19 +141,23 @@ def __init__(self, mask_img=None, smoothing_fwhm=None, self.target_shape = target_shape self.mask_strategy = mask_strategy self.mask_args = mask_args + self.dtype = dtype self.memory = memory self.memory_level = memory_level self.n_jobs = n_jobs + self.verbose = verbose + self._shelving = False + def fit(self, imgs=None, y=None): """Compute the mask corresponding to the data Parameters ---------- imgs: list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Data on which the mask must be calculated. If this is a list, the affine is considered the same for all. """ @@ -173,25 +185,28 @@ def fit(self, imgs=None, y=None): compute_mask = masking.compute_multi_background_mask elif self.mask_strategy == 'epi': compute_mask = masking.compute_multi_epi_mask + elif self.mask_strategy == 'template': + compute_mask = masking.compute_multi_gray_matter_mask else: raise ValueError("Unknown value of mask_strategy '%s'. 
" - "Acceptable values are 'background' and 'epi'.") - - self.mask_img_ = self._cache(compute_mask, - ignore=['n_jobs', 'verbose', 'memory'])( - imgs, - target_affine=self.target_affine, - target_shape=self.target_shape, - n_jobs=self.n_jobs, - memory=self.memory, - verbose=max(0, self.verbose - 1), - **mask_args) + "Acceptable values are 'background', 'epi' " + "and 'template'.") + + self.mask_img_ = self._cache( + compute_mask, ignore=['n_jobs', 'verbose', 'memory'])( + imgs, + target_affine=self.target_affine, + target_shape=self.target_shape, + n_jobs=self.n_jobs, + memory=self.memory, + verbose=max(0, self.verbose - 1), + **mask_args) else: if imgs is not None: warnings.warn('[%s.fit] Generation of a mask has been' - ' requested (imgs != None) while a mask has' - ' been provided at masker creation. Given mask' - ' will be used.' % self.__class__.__name__) + ' requested (imgs != None) while a mask has' + ' been provided at masker creation. Given mask' + ' will be used.' % self.__class__.__name__) self.mask_img_ = _utils.check_niimg_3d(self.mask_img) # If resampling is requested, resample the mask as well. @@ -206,7 +221,7 @@ def fit(self, imgs=None, y=None): if self.target_affine is not None: self.affine_ = self.target_affine else: - self.affine_ = self.mask_img_.get_affine() + self.affine_ = self.mask_img_.affine # Load data in memory self.mask_img_.get_data() return self @@ -218,7 +233,7 @@ def transform_imgs(self, imgs_list, confounds=None, copy=True, n_jobs=1): ---------- imgs_list: list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html List of imgs file to prepare. One item per subject. confounds: list of confounds, optional @@ -232,7 +247,7 @@ def transform_imgs(self, imgs_list, confounds=None, copy=True, n_jobs=1): n_jobs: integer, optional The number of cpus to use to do the computation. -1 means 'all cpus'. - + Returns ------- region_signals: list of 2D numpy.ndarray @@ -267,16 +282,20 @@ def transform_imgs(self, imgs_list, confounds=None, copy=True, n_jobs=1): 'copy']) func = self._cache(filter_and_mask, - ignore=['verbose', 'memory', 'memory_level', 'copy']) + ignore=['verbose', 'memory', 'memory_level', + 'copy'], + shelve=self._shelving) data = Parallel(n_jobs=n_jobs)( delayed(func)(imgs, self.mask_img_, params, - memory_level=self.memory_level, - memory=self.memory, - verbose=self.verbose, - confounds=cfs, - copy=copy) + memory_level=self.memory_level, + memory=self.memory, + verbose=self.verbose, + confounds=cfs, + copy=copy, + dtype=self.dtype + ) for imgs, cfs in izip(niimg_iter, confounds)) - return [d[0] for d in data] + return data def transform(self, imgs, confounds=None): """ Apply mask, spatial and temporal preprocessing @@ -284,7 +303,7 @@ def transform(self, imgs, confounds=None): Parameters ---------- imgs: list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. 
+ See http://nilearn.github.io/manipulating_images/input_output.html Data to be preprocessed confounds: CSV file path or 2D matrix @@ -297,7 +316,7 @@ def transform(self, imgs, confounds=None): preprocessed images """ self._check_fitted() - if not hasattr(imgs, '__iter__')\ - or isinstance(imgs, _basestring): - return self.transform_single_imgs(imgs) + if not hasattr(imgs, '__iter__') \ + or isinstance(imgs, _basestring): + return self.transform_single_imgs(imgs) return self.transform_imgs(imgs, confounds, n_jobs=self.n_jobs) diff --git a/nilearn/input_data/nifti_labels_masker.py b/nilearn/input_data/nifti_labels_masker.py index 925424b4ef..77e104c841 100644 --- a/nilearn/input_data/nifti_labels_masker.py +++ b/nilearn/input_data/nifti_labels_masker.py @@ -40,16 +40,16 @@ class NiftiLabelsMasker(BaseMasker, CacheMixin): clustering. Parameters - ========== + ---------- labels_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Region definitions, as one image of labels. background_label: number, optional Label used in labels_img to represent background. mask_img: Niimg-like object, optional - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Mask to apply to regions before extracting signals. smoothing_fwhm: float, optional @@ -64,11 +64,11 @@ class NiftiLabelsMasker(BaseMasker, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details - low_pass: False or float, optional + low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details - high_pass: False or float, optional + high_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details @@ -76,6 +76,11 @@ class NiftiLabelsMasker(BaseMasker, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details + dtype: {dtype, "auto"} + Data type toward which the data should be converted. If "auto", the + data will be converted to int32 if dtype is discrete and float32 if it + is continuous. + resampling_target: {"data", "labels", None}, optional. Gives which image gives the final shape/size. For example, if `resampling_target` is "data", the atlas is resampled to the @@ -97,14 +102,14 @@ class NiftiLabelsMasker(BaseMasker, CacheMixin): Indicate the level of verbosity. By default, nothing is printed See also - ======== + -------- nilearn.input_data.NiftiMasker """ # memory and memory_level are used by CacheMixin. 
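# A minimal usage sketch of the NiftiLabelsMasker documented above; the file
# names are hypothetical placeholders for an atlas and a 4D functional image.
from nilearn.input_data import NiftiLabelsMasker

labels_masker = NiftiLabelsMasker(labels_img='atlas_labels.nii',
                                  standardize=True, dtype='auto',
                                  resampling_target='data')
# One averaged time course per label: shape (n_scans, n_regions)
region_signals = labels_masker.fit_transform('functional.nii')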
def __init__(self, labels_img, background_label=0, mask_img=None, smoothing_fwhm=None, standardize=False, detrend=False, - low_pass=None, high_pass=None, t_r=None, + low_pass=None, high_pass=None, t_r=None, dtype=None, resampling_target="data", memory=Memory(cachedir=None, verbose=0), memory_level=1, verbose=0): @@ -121,6 +126,7 @@ def __init__(self, labels_img, background_label=0, mask_img=None, self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r + self.dtype = dtype # Parameters for resampling self.resampling_target = resampling_target @@ -163,8 +169,8 @@ def fit(self, X=None, y=None): "Regions and mask do not have the same shape", mask_img=self.mask_img, labels_img=self.labels_img)) - if not np.allclose(self.mask_img_.get_affine(), - self.labels_img_.get_affine()): + if not np.allclose(self.mask_img_.affine, + self.labels_img_.affine): raise ValueError(_compose_err_msg( "Regions and mask do not have the same affine.", mask_img=self.mask_img, labels_img=self.labels_img)) @@ -173,7 +179,7 @@ def fit(self, X=None, y=None): logger.log("resampling the mask", verbose=self.verbose) self.mask_img_ = image.resample_img( self.mask_img_, - target_affine=self.labels_img_.get_affine(), + target_affine=self.labels_img_.affine, target_shape=self.labels_img_.shape[:3], interpolation="nearest", copy=True) @@ -202,7 +208,7 @@ def transform_single_imgs(self, imgs, confounds=None): Parameters ---------- imgs: 3D/4D Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Images to process. It must boil down to a 4D image with scans number as last dimension. @@ -231,13 +237,13 @@ def transform_single_imgs(self, imgs, confounds=None): image.resample_img, func_memory_level=2)( self.labels_img_, interpolation="nearest", target_shape=imgs_.shape[:3], - target_affine=imgs_.get_affine()) + target_affine=imgs_.affine) target_shape = None target_affine = None if self.resampling_target == 'labels': target_shape = self._resampled_labels_img_.shape[:3] - target_affine = self._resampled_labels_img_.get_affine() + target_affine = self._resampled_labels_img_.affine params = get_params(NiftiLabelsMasker, self, ignore=['resampling_target']) @@ -253,6 +259,7 @@ def transform_single_imgs(self, imgs, confounds=None): # Pre-processing params, confounds=confounds, + dtype=self.dtype, # Caching memory=self.memory, memory_level=self.memory_level, @@ -268,13 +275,13 @@ def inverse_transform(self, signals): Any mask given at initialization is taken into account. Parameters - ========== + ---------- signals (2D numpy.ndarray) Signal for each region. shape: (number of scans, number of regions) Returns - ======= + ------- voxel_signals (Nifti1Image) Signal for each voxel shape: (number of scans, number of voxels) @@ -285,5 +292,5 @@ def inverse_transform(self, signals): logger.log("computing image from signals", verbose=self.verbose) return signal_extraction.signals_to_img_labels( - signals, self.labels_img_, self.mask_img_, + signals, self._resampled_labels_img_, self.mask_img_, background_label=self.background_label) diff --git a/nilearn/input_data/nifti_maps_masker.py b/nilearn/input_data/nifti_maps_masker.py index 5acf3bbd52..0cb57567f9 100644 --- a/nilearn/input_data/nifti_maps_masker.py +++ b/nilearn/input_data/nifti_maps_masker.py @@ -7,7 +7,6 @@ from .. 
import _utils from .._utils import logger, CacheMixin -from .._utils.niimg import _get_data_dtype from .._utils.class_inspect import get_params from .._utils.niimg_conversions import _check_same_fov from .. import image @@ -37,15 +36,18 @@ class NiftiMapsMasker(BaseMasker, CacheMixin): extracted (contrarily to NiftiLabelsMasker). Use case: Summarize brain signals from large-scale networks obtained by prior PCA or ICA. + Note that Inf or NaN values present in the given input images are + automatically set to zero rather than treated as missing data. + Parameters - ========== + ---------- maps_img: 4D niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Set of continuous maps. One representative time course per map is extracted using least square regression. mask_img: 3D niimg-like object, optional - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Mask to apply to regions before extracting signals. allow_overlap: boolean, optional @@ -64,11 +66,11 @@ class NiftiMapsMasker(BaseMasker, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details - low_pass: False or float, optional + low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details - high_pass: False or float, optional + high_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details @@ -76,12 +78,17 @@ class NiftiMapsMasker(BaseMasker, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details - resampling_target: {"mask", "maps", None} optional. + dtype: {dtype, "auto"} + Data type toward which the data should be converted. If "auto", the + data will be converted to int32 if dtype is discrete and float32 if it + is continuous. + + resampling_target: {"mask", "maps", "data", None} optional. Gives which image gives the final shape/size. For example, if `resampling_target` is "mask" then maps_img and images provided to fit() are resampled to the shape and affine of mask_img. "None" means no resampling: if shapes and affines do not match, a ValueError is - raised. Default value: "maps". + raised. Default value: "data". memory: joblib.Memory or str, optional Used to cache the region extraction process. @@ -96,13 +103,13 @@ class NiftiMapsMasker(BaseMasker, CacheMixin): Indicate the level of verbosity. By default, nothing is printed Notes - ===== - With the default value for resampling_target, every 3D image processed by + ----- + If resampling_target is set to "maps", every 3D image processed by transform() will be resampled to the shape of maps_img. It may lead to a - very large memory consumption if the voxel number in labels_img is large. + very large memory consumption if the voxel number in maps_img is large.
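# A minimal usage sketch of the NiftiMapsMasker documented above, with the
# new default resampling_target="data"; the file names are hypothetical.
from nilearn.input_data import NiftiMapsMasker

maps_masker = NiftiMapsMasker(maps_img='ica_maps.nii', dtype='auto',
                              resampling_target='data')
# One regressed time course per map: shape (n_scans, n_maps)
time_series = maps_masker.fit_transform('functional.nii')
maps_back = maps_masker.inverse_transform(time_series)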
See also - ======== + -------- nilearn.input_data.NiftiMasker nilearn.input_data.NiftiLabelsMasker """ @@ -111,7 +118,7 @@ class NiftiMapsMasker(BaseMasker, CacheMixin): def __init__(self, maps_img, mask_img=None, allow_overlap=True, smoothing_fwhm=None, standardize=False, detrend=False, - low_pass=None, high_pass=None, t_r=None, + low_pass=None, high_pass=None, t_r=None, dtype=None, resampling_target="data", memory=Memory(cachedir=None, verbose=0), memory_level=0, verbose=0): @@ -130,6 +137,7 @@ def __init__(self, maps_img, mask_img=None, self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r + self.dtype = dtype # Parameters for resampling self.resampling_target = resampling_target @@ -159,7 +167,10 @@ def fit(self, X=None, y=None): _utils._repr_niimgs(self.maps_img)[:200], verbose=self.verbose) - self.maps_img_ = _utils.check_niimg_4d(self.maps_img) + self.maps_img_ = _utils.check_niimg_4d(self.maps_img, dtype=self.dtype) + self.maps_img_ = image.clean_img(self.maps_img_, detrend=False, + standardize=False, + ensure_finite=True) if self.mask_img is not None: logger.log("loading mask from %s" % @@ -179,7 +190,7 @@ def fit(self, X=None, y=None): print("Resampling maps") self.maps_img_ = image.resample_img( self.maps_img_, - target_affine=self.mask_img_.get_affine(), + target_affine=self.mask_img_.affine, target_shape=self.mask_img_.shape, interpolation="continuous", copy=True) @@ -189,7 +200,7 @@ def fit(self, X=None, y=None): print("Resampling mask") self.mask_img_ = image.resample_img( self.mask_img_, - target_affine=self.maps_img_.get_affine(), + target_affine=self.maps_img_.affine, target_shape=self.maps_img_.shape[:3], interpolation="nearest", copy=True) @@ -213,7 +224,7 @@ def transform_single_imgs(self, imgs, confounds=None): Parameters ---------- imgs: 3D/4D Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Images to process. It must boil down to a 4D image with scans number as last dimension. @@ -260,7 +271,7 @@ def transform_single_imgs(self, imgs, confounds=None): self._resampled_maps_img_ = self._cache(image.resample_img)( self.maps_img_, interpolation="continuous", target_shape=ref_img.shape[:3], - target_affine=ref_img.get_affine()) + target_affine=ref_img.affine) if (self.mask_img_ is not None and not _check_same_fov(ref_img, self.mask_img_)): @@ -269,14 +280,14 @@ def transform_single_imgs(self, imgs, confounds=None): self._resampled_mask_img_ = self._cache(image.resample_img)( self.mask_img_, interpolation="nearest", target_shape=ref_img.shape[:3], - target_affine=ref_img.get_affine()) + target_affine=ref_img.affine) if not self.allow_overlap: # Check if there is an overlap. # If float, we set low values to 0 - dtype = _get_data_dtype(self._resampled_maps_img_) data = self._resampled_maps_img_.get_data() + dtype = data.dtype if dtype.kind == 'f': data[data < np.finfo(dtype).eps] = 0. 
@@ -292,7 +303,7 @@ def transform_single_imgs(self, imgs, confounds=None): target_affine = None if self.resampling_target != 'data': target_shape = self._resampled_maps_img_.shape[:3] - target_affine = self._resampled_maps_img_.get_affine() + target_affine = self._resampled_maps_img_.affine params = get_params(NiftiMapsMasker, self, ignore=['resampling_target']) @@ -307,6 +318,7 @@ def transform_single_imgs(self, imgs, confounds=None): # Pre-treatments params, confounds=confounds, + dtype=self.dtype, # Caching memory=self.memory, memory_level=self.memory_level, @@ -322,13 +334,13 @@ def inverse_transform(self, region_signals): Any mask given at initialization is taken into account. Parameters - ========== + ---------- region_signals: 2D numpy.ndarray Signal for each region. shape: (number of scans, number of regions) Returns - ======= + ------- voxel_signals: nibabel.Nifti1Image Signal for each voxel. shape: that of maps. """ diff --git a/nilearn/input_data/nifti_masker.py b/nilearn/input_data/nifti_masker.py index a566b24e6b..aa579e972a 100644 --- a/nilearn/input_data/nifti_masker.py +++ b/nilearn/input_data/nifti_masker.py @@ -5,34 +5,35 @@ # License: simplified BSD from copy import copy as copy_object + from sklearn.externals.joblib import Memory -from .. import masking -from .. import image +from .base_masker import BaseMasker, filter_and_extract from .. import _utils +from .. import image +from .. import masking from .._utils import CacheMixin from .._utils.class_inspect import get_params -from .base_masker import BaseMasker, filter_and_extract -from nilearn._utils.niimg_conversions import _check_same_fov +from .._utils.niimg_conversions import _check_same_fov class _ExtractionFunctor(object): - func_name = 'nifti_masker_extractor' def __init__(self, mask_img_): self.mask_img_ = mask_img_ def __call__(self, imgs): - return masking.apply_mask(imgs, self.mask_img_), imgs.get_affine() + return(masking.apply_mask(imgs, self.mask_img_, + dtype=imgs.get_data_dtype()), imgs.affine) def filter_and_mask(imgs, mask_img_, parameters, memory_level=0, memory=Memory(cachedir=None), verbose=0, confounds=None, - copy=True): - + copy=True, + dtype=None): imgs = _utils.check_niimg(imgs, atleast_4d=True, ensure_ndim=4) # Check whether resampling is truly necessary. If so, crop mask @@ -43,26 +44,26 @@ def filter_and_mask(imgs, mask_img_, parameters, # now we can crop mask_img_ = image.crop_img(mask_img_, copy=False) parameters['target_shape'] = mask_img_.shape - parameters['target_affine'] = mask_img_.get_affine() + parameters['target_affine'] = mask_img_.affine data, affine = filter_and_extract(imgs, _ExtractionFunctor(mask_img_), parameters, memory_level=memory_level, memory=memory, verbose=verbose, - confounds=confounds, copy=copy) + confounds=confounds, copy=copy, + dtype=dtype) # For _later_: missing value removal or imputing of missing data # (i.e. we want to get rid of NaNs, if smoothing must be done # earlier) # Optionally: 'doctor_nan', remove voxels with NaNs, other option # for later: some form of imputation - - return data, affine + return data class NiftiMasker(BaseMasker, CacheMixin): - """Class for masking of Niimg-like objects. + """Applying a mask to extract time-series from Niimg-like objects. NiftiMasker is useful when preprocessing (detrending, standardization, resampling, etc.) of in-mask voxels is necessary. 
Use case: working with @@ -71,7 +72,7 @@ class NiftiMasker(BaseMasker, CacheMixin): Parameters ---------- mask_img : Niimg-like object, optional - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Mask for the data. If not given, a mask is computed in the fit step. Optional parameters (mask_args and mask_strategy) can be set to fine tune the mask extraction. @@ -92,11 +93,11 @@ class NiftiMasker(BaseMasker, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details - low_pass : False or float, optional + low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details - high_pass : False or float, optional + high_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details @@ -112,12 +113,15 @@ class NiftiMasker(BaseMasker, CacheMixin): This parameter is passed to image.resample_img. Please see the related documentation for details. - mask_strategy: {'background' or 'epi'}, optional + mask_strategy: {'background', 'epi' or 'template'}, optional The strategy used to compute the mask: use 'background' if your - images present a clear homogeneous background, and 'epi' if they - are raw EPI images. Depending on this value, the mask will be - computed from masking.compute_background_mask or - masking.compute_epi_mask. Default is 'background'. + images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'background'. mask_args : dict, optional If mask is None, these are additional parameters passed to @@ -132,6 +136,11 @@ class NiftiMasker(BaseMasker, CacheMixin): This is useful to perform data subselection as part of a scikit-learn pipeline. + `dtype: {dtype, "auto"} + Data type toward which the data should be converted. If "auto", the + data will be converted to int32 if dtype is discrete and float32 if it + is continuous. + memory : instance of joblib.Memory or string Used to cache the masking process. By default, no caching is done. If a string is given, it is the @@ -160,12 +169,13 @@ class NiftiMasker(BaseMasker, CacheMixin): nilearn.masking.apply_mask nilearn.signal.clean """ + def __init__(self, mask_img=None, sessions=None, smoothing_fwhm=None, standardize=False, detrend=False, low_pass=None, high_pass=None, t_r=None, target_affine=None, target_shape=None, mask_strategy='background', - mask_args=None, sample_mask=None, + mask_args=None, sample_mask=None, dtype=None, memory_level=1, memory=Memory(cachedir=None), verbose=0 ): @@ -184,11 +194,14 @@ def __init__(self, mask_img=None, sessions=None, smoothing_fwhm=None, self.mask_strategy = mask_strategy self.mask_args = mask_args self.sample_mask = sample_mask + self.dtype = dtype self.memory = memory self.memory_level = memory_level self.verbose = verbose + self._shelving = False + def _check_fitted(self): if not hasattr(self, 'mask_img_'): raise ValueError('It seems that %s has not been fitted. 
' @@ -201,7 +214,7 @@ def fit(self, imgs=None, y=None): Parameters ---------- imgs: list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Data on which the mask must be calculated. If this is a list, the affine is considered the same for all. """ @@ -210,8 +223,8 @@ def fit(self, imgs=None, y=None): # Load data (if filenames are given, load them) if self.verbose > 0: print("[%s.fit] Loading data from %s" % ( - self.__class__.__name__, - _utils._repr_niimgs(imgs)[:200])) + self.__class__.__name__, + _utils._repr_niimgs(imgs)[:200])) # Compute the mask if not given by the user if self.mask_img is None: @@ -221,10 +234,12 @@ def fit(self, imgs=None, y=None): compute_mask = masking.compute_background_mask elif self.mask_strategy == 'epi': compute_mask = masking.compute_epi_mask + elif self.mask_strategy == 'template': + compute_mask = masking.compute_gray_matter_mask else: raise ValueError("Unknown value of mask_strategy '%s'. " - "Acceptable values are 'background' and " - "'epi'." % self.mask_strategy) + "Acceptable values are 'background', " + "'epi' and 'template'." % self.mask_strategy) if self.verbose > 0: print("[%s.fit] Computing the mask" % self.__class__.__name__) self.mask_img_ = self._cache(compute_mask, ignore=['verbose'])( @@ -244,7 +259,7 @@ def fit(self, imgs=None, y=None): if self.target_affine is not None: self.affine_ = self.target_affine else: - self.affine_ = self.mask_img_.get_affine() + self.affine_ = self.mask_img_.affine # Load data in memory self.mask_img_.get_data() if self.verbose > 10: @@ -257,7 +272,7 @@ def transform_single_imgs(self, imgs, confounds=None, copy=True): Parameters ---------- imgs: 3D/4D Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Images to process. It must boil down to a 4D image with scans number as last dimension. @@ -279,13 +294,17 @@ def transform_single_imgs(self, imgs, confounds=None, copy=True): params = get_params(self.__class__, self, ignore=['mask_img', 'mask_args', 'mask_strategy']) - data, _ = self._cache(filter_and_mask, - ignore=['verbose', 'memory', 'memory_level', 'copy'])( - imgs, self.mask_img_, params, - memory_level=self.memory_level, - memory=self.memory, - verbose=self.verbose, - confounds=confounds, - copy=copy + data = self._cache(filter_and_mask, + ignore=['verbose', 'memory', 'memory_level', + 'copy'], + shelve=self._shelving)( + imgs, self.mask_img_, params, + memory_level=self.memory_level, + memory=self.memory, + verbose=self.verbose, + confounds=confounds, + copy=copy, + dtype=self.dtype ) + return data diff --git a/nilearn/input_data/nifti_spheres_masker.py b/nilearn/input_data/nifti_spheres_masker.py index 239b95930f..96d163fa60 100644 --- a/nilearn/input_data/nifti_spheres_masker.py +++ b/nilearn/input_data/nifti_spheres_masker.py @@ -1,6 +1,7 @@ """ -Transformer for computing seeds signals. -======= +Transformer for computing seeds signals +---------------------------------------- + Mask nifti images by spherical volumes for seed-region analyses """ import numpy as np @@ -21,7 +22,7 @@ def _apply_mask_and_get_affinity(seeds, niimg, radius, allow_overlap, mask_img=None): seeds = list(seeds) - affine = niimg.get_affine() + affine = niimg.affine # Compute world coordinates of all in-mask voxels. 
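# A minimal sketch of the 'template' strategy and the dtype handling wired
# in above, mirroring the tests added later in this diff:
import numpy as np
from nibabel import Nifti1Image
from nilearn.input_data import NiftiMasker

data = np.zeros((9, 9, 9), dtype=np.float64)
data[2:-2, 2:-2, 2:-2] = 10
img = Nifti1Image(data, np.eye(4))

# dtype='auto' converts continuous data to float32 on transform
assert NiftiMasker(dtype='auto').fit_transform(img).dtype == np.float32
# mask_strategy='template' resamples the MNI152 brain mask to the data FOV
mni_based_mask = NiftiMasker(mask_strategy='template').fit(img).mask_img_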
@@ -31,27 +32,38 @@ def _apply_mask_and_get_affinity(seeds, niimg, radius, allow_overlap, target_shape=niimg.shape[:3], interpolation='nearest') mask, _ = masking._load_mask_img(mask_img) - mask_coords = list(np.where(mask != 0)) + mask_coords = list(zip(*np.where(mask != 0))) X = masking._apply_mask_fmri(niimg, mask_img) else: - mask_coords = list(zip(*np.ndindex(niimg.shape[:3]))) + mask_coords = list(np.ndindex(niimg.shape[:3])) X = niimg.get_data().reshape([-1, niimg.shape[3]]).T - mask_coords = np.asarray(mask_coords) + + # For each seed, get coordinates of nearest voxel + nearests = [] + for sx, sy, sz in seeds: + nearest = np.round(coord_transform(sx, sy, sz, np.linalg.inv(affine))) + nearest = nearest.astype(int) + nearest = (nearest[0], nearest[1], nearest[2]) + try: + nearests.append(mask_coords.index(nearest)) + except ValueError: + nearests.append(None) + + mask_coords = np.asarray(list(zip(*mask_coords))) mask_coords = coord_transform(mask_coords[0], mask_coords[1], mask_coords[2], affine) mask_coords = np.asarray(mask_coords).T - if (radius is not None and - LooseVersion(sklearn.__version__) < LooseVersion('0.16')): - # Fix for scikit learn versions below 0.16. See - # https://github.com/scikit-learn/scikit-learn/issues/4072 - radius += 1e-6 - clf = neighbors.NearestNeighbors(radius=radius) A = clf.fit(mask_coords).radius_neighbors_graph(seeds) A = A.tolil() - # Include selfs + for i, nearest in enumerate(nearests): + if nearest is None: + continue + A[i, nearest] = True + + # Include the voxel containing the seed itself if not masked mask_coords = mask_coords.astype(int).tolist() for i, seed in enumerate(seeds): try: @@ -71,22 +83,21 @@ def _iter_signals_from_spheres(seeds, niimg, radius, allow_overlap, mask_img=None): """Utility function to iterate over spheres. Parameters - ========== + ---------- seeds: List of triplets of coordinates in native space Seed definitions. List of coordinates of the seeds in the same space as the images (typically MNI or TAL). imgs: 3D/4D Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Images to process. It must boil down to a 4D image with scans number as last dimension. - radius: float, optional + radius: float Indicates, in millimeters, the radius for the sphere around the seed. - Default is None (signal is extracted on a single voxel). allow_overlap: boolean If False, an error is raised if the maps overlaps (ie at least two - maps have a non-zero value for the same voxel). Default is False. + maps have a non-zero value for the same voxel). mask_img: Niimg-like object, optional - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Mask to apply to regions before extracting signals. 
""" X, A = _apply_mask_and_get_affinity(seeds, niimg, radius, @@ -102,22 +113,23 @@ class _ExtractionFunctor(object): func_name = 'nifti_spheres_masker_extractor' - def __init__(self, seeds_, radius, mask_img, allow_overlap): + def __init__(self, seeds_, radius, mask_img, allow_overlap, dtype): self.seeds_ = seeds_ self.radius = radius self.mask_img = mask_img self.allow_overlap = allow_overlap + self.dtype = dtype def __call__(self, imgs): n_seeds = len(self.seeds_) - imgs = check_niimg_4d(imgs) + imgs = check_niimg_4d(imgs, dtype=self.dtype) - signals = np.empty((imgs.shape[3], n_seeds)) + signals = np.empty((imgs.shape[3], n_seeds), + dtype=imgs.get_data_dtype()) for i, sphere in enumerate(_iter_signals_from_spheres( self.seeds_, imgs, self.radius, self.allow_overlap, mask_img=self.mask_img)): signals[:, i] = np.mean(sphere, axis=1) - return signals, None @@ -129,7 +141,7 @@ class NiftiSpheresMasker(BaseMasker, CacheMixin): obtained from prior knowledge. Parameters - ========== + ---------- seeds: List of triplet of coordinates in native space Seed definitions. List of coordinates of the seeds in the same space as the images (typically MNI or TAL). @@ -139,7 +151,7 @@ class NiftiSpheresMasker(BaseMasker, CacheMixin): Default is None (signal is extracted on a single voxel). mask_img: Niimg-like object, optional - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Mask to apply to regions before extracting signals. allow_overlap: boolean, optional @@ -158,11 +170,11 @@ class NiftiSpheresMasker(BaseMasker, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details. - low_pass: False or float, optional + low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details. - high_pass: False or float, optional + high_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details. @@ -170,6 +182,11 @@ class NiftiSpheresMasker(BaseMasker, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details. + dtype: {dtype, "auto"} + Data type toward which the data should be converted. If "auto", the + data will be converted to int32 if dtype is discrete and float32 if it + is continuous. + memory: joblib.Memory or str, optional Used to cache the region extraction process. By default, no caching is done. If a string is given, it is the @@ -183,14 +200,14 @@ class NiftiSpheresMasker(BaseMasker, CacheMixin): Indicate the level of verbosity. By default, nothing is printed. See also - ======== + -------- nilearn.input_data.NiftiMasker """ # memory and memory_level are used by CacheMixin. 
def __init__(self, seeds, radius=None, mask_img=None, allow_overlap=False, smoothing_fwhm=None, standardize=False, detrend=False, - low_pass=None, high_pass=None, t_r=None, + low_pass=None, high_pass=None, t_r=None, dtype=None, memory=Memory(cachedir=None, verbose=0), memory_level=1, verbose=0): self.seeds = seeds @@ -207,6 +224,7 @@ def __init__(self, seeds, radius=None, mask_img=None, allow_overlap=False, self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r + self.dtype = dtype # Parameters for joblib self.memory = memory @@ -269,7 +287,7 @@ def transform_single_imgs(self, imgs, confounds=None): Parameters ---------- imgs: 3D/4D Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Images to process. It must boil down to a 4D image with scans number as last dimension. @@ -293,10 +311,11 @@ def transform_single_imgs(self, imgs, confounds=None): ignore=['verbose', 'memory', 'memory_level'])( # Images imgs, _ExtractionFunctor(self.seeds_, self.radius, self.mask_img, - self.allow_overlap), + self.allow_overlap, self.dtype), # Pre-processing params, confounds=confounds, + dtype=self.dtype, # Caching memory=self.memory, memory_level=self.memory_level, diff --git a/nilearn/input_data/tests/test_base_masker.py b/nilearn/input_data/tests/test_base_masker.py index 1d6feae2c1..3903e9935b 100644 --- a/nilearn/input_data/tests/test_base_masker.py +++ b/nilearn/input_data/tests/test_base_masker.py @@ -41,11 +41,9 @@ def test_cropping_code_paths(): } # Now do the two maskings - out_data_uncropped, affine_uncropped = filter_and_mask(img, - mask_img, - parameters) - out_data_cropped, affine_cropped = filter_and_mask(img, - cropped_mask_img, - parameters) + out_data_uncropped = filter_and_mask( + img, mask_img, parameters) + out_data_cropped = filter_and_mask( + img, cropped_mask_img, parameters) assert_array_almost_equal(out_data_cropped, out_data_uncropped) diff --git a/nilearn/input_data/tests/test_multi_nifti_masker.py b/nilearn/input_data/tests/test_multi_nifti_masker.py index a8bf9bcacb..620864d10c 100644 --- a/nilearn/input_data/tests/test_multi_nifti_masker.py +++ b/nilearn/input_data/tests/test_multi_nifti_masker.py @@ -3,19 +3,22 @@ """ # Author: Gael Varoquaux # License: simplified BSD +import shutil +from distutils.version import LooseVersion +from tempfile import mkdtemp -from nose.tools import assert_true, assert_false, assert_raises, assert_equal -from nose import SkipTest +import nibabel import numpy as np -from numpy.testing import assert_array_equal - +import sklearn from nibabel import Nifti1Image -import nibabel -from distutils.version import LooseVersion +from nose import SkipTest +from nose.tools import assert_true, assert_false, assert_raises, assert_equal +from numpy.testing import assert_array_equal +from sklearn.externals.joblib import Memory -from nilearn.input_data.multi_nifti_masker import MultiNiftiMasker -from nilearn._utils.testing import assert_raises_regex, write_tmp_imgs from nilearn._utils.exceptions import DimensionError +from nilearn._utils.testing import assert_raises_regex, write_tmp_imgs +from nilearn.input_data.multi_nifti_masker import MultiNiftiMasker def test_auto_mask(): @@ -105,14 +108,14 @@ def test_3d_images(): mask_img_4d = Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8), affine=np.diag((4, 4, 4, 1))) masker2 = MultiNiftiMasker(mask_img=mask_img_4d) - assert_raises_regex(DimensionError, "Data must be a 3D", + 
assert_raises_regex(DimensionError, + "Input data has incompatible dimensionality: " + "Expected dimension is 3D and you provided " + "a 4D image.", masker2.fit) def test_joblib_cache(): - if not LooseVersion(nibabel.__version__) > LooseVersion('1.1.0'): - # Old nibabel do not pickle - raise SkipTest from sklearn.externals.joblib import hash # Dummy mask mask = np.zeros((40, 40, 40)) mask[20, 20, 20] = 1 mask_img = Nifti1Image(mask, affine=np.eye(4)) @@ -127,3 +130,69 @@ def test_joblib_cache(): assert_true(mask_hash == hash(masker.mask_img_)) # enables to delete "filename" on windows del masker + + +def test_shelving(): + + mask_img = Nifti1Image(np.ones((2, 2, 2), dtype=np.int8), + affine=np.diag((4, 4, 4, 1))) + epi_img1 = Nifti1Image(np.ones((2, 2, 2)), + affine=np.diag((4, 4, 4, 1))) + epi_img2 = Nifti1Image(np.ones((2, 2, 2)), + affine=np.diag((2, 2, 2, 1))) + cachedir = mkdtemp() + try: + masker_shelved = MultiNiftiMasker(mask_img=mask_img, + memory=Memory(cachedir=cachedir, + mmap_mode='r', + verbose=0)) + masker_shelved._shelving = True + masker = MultiNiftiMasker(mask_img=mask_img) + epis_shelved = masker_shelved.fit_transform([epi_img1, epi_img2]) + epis = masker.fit_transform([epi_img1, epi_img2]) + for epi_shelved, epi in zip(epis_shelved, epis): + epi_shelved = epi_shelved.get() + assert_array_equal(epi_shelved, epi) + + epi = masker.fit_transform(epi_img1) + epi_shelved = masker_shelved.fit_transform(epi_img1) + epi_shelved = epi_shelved.get() + assert_array_equal(epi_shelved, epi) + finally: + # enables to delete "filename" on windows + del masker + shutil.rmtree(cachedir, ignore_errors=True) + + +def test_compute_multi_gray_matter_mask(): + # Check that the mask is correctly calculated + imgs = [Nifti1Image(np.random.rand(9, 9, 5), np.eye(4)), + Nifti1Image(np.random.rand(9, 9, 5), np.eye(4))] + + masker = MultiNiftiMasker(mask_strategy='template') + masker.fit(imgs) + + # Check that the order of the images does not change the output + masker2 = MultiNiftiMasker(mask_strategy='template') + masker2.fit(imgs[::-1]) + + mask = masker.mask_img_ + mask2 = masker2.mask_img_ + + mask_ref = np.zeros((9, 9, 5)) + mask_ref[2:7, 2:7, 2] = 1 + + np.testing.assert_array_equal(mask.get_data(), mask_ref) + np.testing.assert_array_equal(mask2.get_data(), mask_ref) + + +def test_dtype(): + data = np.zeros((9, 9, 9), dtype=np.float64) + data[2:-2, 2:-2, 2:-2] = 10 + img = Nifti1Image(data, np.eye(4)) + + masker = MultiNiftiMasker(dtype='auto') + masker.fit([[img]]) + + masked_img = masker.transform([[img]]) + assert(masked_img[0].dtype == np.float32) diff --git a/nilearn/input_data/tests/test_nifti_labels_masker.py b/nilearn/input_data/tests/test_nifti_labels_masker.py index 2f02937fe2..7cdfb759a8 100644 --- a/nilearn/input_data/tests/test_nifti_labels_masker.py +++ b/nilearn/input_data/tests/test_nifti_labels_masker.py @@ -5,7 +5,7 @@ test_masking.py and test_signal.py for details.
""" -from nose.tools import assert_raises, assert_equal +from nose.tools import assert_raises, assert_equal, assert_true import numpy as np import nibabel @@ -49,7 +49,10 @@ def test_nifti_labels_masker(): # verify that 4D mask arguments are refused masker = NiftiLabelsMasker(labels11_img, mask_img=mask_img_4d) - testing.assert_raises_regex(DimensionError, "Data must be a 3D", + testing.assert_raises_regex(DimensionError, + "Input data has incompatible dimensionality: " + "Expected dimension is 3D and you provided " + "a 4D image.", masker.fit) # check exception when transform() called without prior fit() @@ -98,8 +101,27 @@ def test_nifti_labels_masker(): # Call inverse transform (smoke test) fmri11_img_r = masker11.inverse_transform(signals11) assert_equal(fmri11_img_r.shape, fmri11_img.shape) - np.testing.assert_almost_equal(fmri11_img_r.get_affine(), - fmri11_img.get_affine()) + np.testing.assert_almost_equal(fmri11_img_r.affine, fmri11_img.affine) + + +def test_nifti_labels_masker_with_nans_and_infs(): + length = 3 + n_regions = 9 + fmri_img, mask_img = generate_random_img((13, 11, 12), + affine=np.eye(4), length=length) + labels_img = testing.generate_labeled_regions((13, 11, 12), + affine=np.eye(4), + n_regions=n_regions) + # nans + mask_data = mask_img.get_data() + mask_data[:, :, 7] = np.nan + mask_data[:, :, 4] = np.inf + mask_img = nibabel.Nifti1Image(mask_data, np.eye(4)) + + masker = NiftiLabelsMasker(labels_img, mask_img=mask_img) + sig = masker.fit_transform(fmri_img) + assert_equal(sig.shape, (length, n_regions)) + assert_true(np.all(np.isfinite(sig))) def test_nifti_labels_masker_resampling(): @@ -136,20 +158,20 @@ def test_nifti_labels_masker_resampling(): resampling_target="labels") masker.fit() - np.testing.assert_almost_equal(masker.labels_img_.get_affine(), - labels33_img.get_affine()) + np.testing.assert_almost_equal(masker.labels_img_.affine, + labels33_img.affine) assert_equal(masker.labels_img_.shape, labels33_img.shape) - np.testing.assert_almost_equal(masker.mask_img_.get_affine(), - masker.labels_img_.get_affine()) + np.testing.assert_almost_equal(masker.mask_img_.affine, + masker.labels_img_.affine) assert_equal(masker.mask_img_.shape, masker.labels_img_.shape[:3]) transformed = masker.transform(fmri11_img) assert_equal(transformed.shape, (length, n_regions)) fmri11_img_r = masker.inverse_transform(transformed) - np.testing.assert_almost_equal(fmri11_img_r.get_affine(), - masker.labels_img_.get_affine()) + np.testing.assert_almost_equal(fmri11_img_r.affine, + masker.labels_img_.affine) assert_equal(fmri11_img_r.shape, (masker.labels_img_.shape[:3] + (length,))) @@ -176,12 +198,12 @@ def test_nifti_labels_masker_resampling(): resampling_target="labels") masker.fit() - np.testing.assert_almost_equal(masker.labels_img_.get_affine(), - labels33_img.get_affine()) + np.testing.assert_almost_equal(masker.labels_img_.affine, + labels33_img.affine) assert_equal(masker.labels_img_.shape, labels33_img.shape) - np.testing.assert_almost_equal(masker.mask_img_.get_affine(), - masker.labels_img_.get_affine()) + np.testing.assert_almost_equal(masker.mask_img_.affine, + masker.labels_img_.affine) assert_equal(masker.mask_img_.shape, masker.labels_img_.shape[:3]) uniq_labels = np.unique(masker.labels_img_.get_data()) @@ -194,8 +216,8 @@ def test_nifti_labels_masker_resampling(): assert_less((transformed.var(axis=0) == 0).sum(), n_regions) fmri11_img_r = masker.inverse_transform(transformed) - np.testing.assert_almost_equal(fmri11_img_r.get_affine(), - 
masker.labels_img_.get_affine()) + np.testing.assert_almost_equal(fmri11_img_r.affine, + masker.labels_img_.affine) assert_equal(fmri11_img_r.shape, (masker.labels_img_.shape[:3] + (length,))) @@ -210,11 +232,37 @@ masker = NiftiLabelsMasker(labels33_img, mask_img=mask22_img) masker.fit_transform(fmri22_img) - np.testing.assert_array_equal( - masker._resampled_labels_img_.get_affine(), - affine2) + np.testing.assert_array_equal(masker._resampled_labels_img_.affine, + affine2) # Test with filenames with testing.write_tmp_imgs(fmri22_img) as filename: masker = NiftiLabelsMasker(labels33_img, resampling_target='data') masker.fit_transform(filename) + + # Test the labels masker with resampling_target in ('data', 'labels'): the + # resampled labels image should have a number of labels equal to the + # second dimension of the transformed signals. These tests were added + # based on issue #1673 in Nilearn + shape = (13, 11, 12) + affine = np.eye(4) * 2 + + fmri_img, _ = generate_random_img(shape, affine=affine, length=21) + labels_img = testing.generate_labeled_regions((9, 8, 6), affine=np.eye(4), + n_regions=10) + for resampling_target in ['data', 'labels']: + masker = NiftiLabelsMasker(labels_img=labels_img, + resampling_target=resampling_target) + transformed = masker.fit_transform(fmri_img) + resampled_labels_img = masker._resampled_labels_img_ + n_resampled_labels = len(np.unique(resampled_labels_img.get_data())) + assert_equal(n_resampled_labels - 1, transformed.shape[1]) + # inverse transform + compressed_img = masker.inverse_transform(transformed) + + # Test that compressing the image a second time yields an image + # with the same data as compressed_img. + transformed2 = masker.fit_transform(fmri_img) + # inverse transform again + compressed_img2 = masker.inverse_transform(transformed2) + np.testing.assert_array_equal(compressed_img.get_data(), + compressed_img2.get_data()) diff --git a/nilearn/input_data/tests/test_nifti_maps_masker.py b/nilearn/input_data/tests/test_nifti_maps_masker.py index 4dee53cc41..aae44feeee 100644 --- a/nilearn/input_data/tests/test_nifti_maps_masker.py +++ b/nilearn/input_data/tests/test_nifti_maps_masker.py @@ -5,7 +5,7 @@ test_masking.py and test_signal.py for details.
""" -from nose.tools import assert_raises, assert_equal +from nose.tools import assert_raises, assert_equal, assert_true import numpy as np import nibabel @@ -78,6 +78,7 @@ def test_nifti_maps_masker(): masker11 = NiftiMapsMasker(labels11, mask_img=mask12, resampling_target=None) assert_raises(ValueError, masker11.fit) + del masker11 masker11 = NiftiMapsMasker(labels11_img, mask_img=mask21_img, resampling_target=None) @@ -101,8 +102,7 @@ def test_nifti_maps_masker(): # Call inverse transform (smoke test) fmri11_img_r = masker11.inverse_transform(signals11) assert_equal(fmri11_img_r.shape, fmri11_img.shape) - np.testing.assert_almost_equal(fmri11_img_r.get_affine(), - fmri11_img.get_affine()) + np.testing.assert_almost_equal(fmri11_img_r.affine, fmri11_img.affine) # Test with data and atlas of different shape: the atlas should be # resampled to the data @@ -115,9 +115,34 @@ def test_nifti_maps_masker(): masker = NiftiMapsMasker(labels11_img, mask_img=mask21_img) masker.fit_transform(fmri22_img) - np.testing.assert_array_equal( - masker._resampled_maps_img_.get_affine(), - affine2) + np.testing.assert_array_equal(masker._resampled_maps_img_.affine, + affine2) + + +def test_nifti_maps_masker_with_nans(): + length = 3 + n_regions = 8 + fmri_img, mask_img = generate_random_img((13, 11, 12), + affine=np.eye(4), length=length) + maps_img, maps_mask_img = testing.generate_maps((13, 11, 12), n_regions, + affine=np.eye(4)) + + # nans + maps_data = maps_img.get_data() + mask_data = mask_img.get_data() + + maps_data[:, 9, 9] = np.nan + maps_data[:, 5, 5] = np.inf + mask_data[:, :, 7] = np.nan + mask_data[:, :, 5] = np.inf + + maps_img = nibabel.Nifti1Image(maps_data, np.eye(4)) + mask_img = nibabel.Nifti1Image(mask_data, np.eye(4)) + + masker = NiftiMapsMasker(maps_img, mask_img=mask_img) + sig = masker.fit_transform(fmri_img) + assert_equal(sig.shape, (length, n_regions)) + assert_true(np.all(np.isfinite(sig))) def test_nifti_maps_masker_2(): @@ -144,7 +169,10 @@ def test_nifti_maps_masker_2(): # verify that 4D mask arguments are refused masker = NiftiMapsMasker(maps33_img, mask_img=mask_img_4d) - testing.assert_raises_regex(DimensionError, "Data must be a 3D", + testing.assert_raises_regex(DimensionError, + "Input data has incompatible dimensionality: " + "Expected dimension is 3D and you provided " + "a 4D image.", masker.fit) # Test error checking @@ -158,20 +186,20 @@ def test_nifti_maps_masker_2(): resampling_target="mask") masker.fit() - np.testing.assert_almost_equal(masker.mask_img_.get_affine(), - mask22_img.get_affine()) + np.testing.assert_almost_equal(masker.mask_img_.affine, + mask22_img.affine) assert_equal(masker.mask_img_.shape, mask22_img.shape) - np.testing.assert_almost_equal(masker.mask_img_.get_affine(), - masker.maps_img_.get_affine()) + np.testing.assert_almost_equal(masker.mask_img_.affine, + masker.maps_img_.affine) assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3]) transformed = masker.transform(fmri11_img) assert_equal(transformed.shape, (length, n_regions)) fmri11_img_r = masker.inverse_transform(transformed) - np.testing.assert_almost_equal(fmri11_img_r.get_affine(), - masker.maps_img_.get_affine()) + np.testing.assert_almost_equal(fmri11_img_r.affine, + masker.maps_img_.affine) assert_equal(fmri11_img_r.shape, (masker.maps_img_.shape[:3] + (length,))) # Target: maps @@ -179,20 +207,20 @@ def test_nifti_maps_masker_2(): resampling_target="maps") masker.fit() - np.testing.assert_almost_equal(masker.maps_img_.get_affine(), - maps33_img.get_affine()) + 
np.testing.assert_almost_equal(masker.maps_img_.affine, + maps33_img.affine) assert_equal(masker.maps_img_.shape, maps33_img.shape) - np.testing.assert_almost_equal(masker.mask_img_.get_affine(), - masker.maps_img_.get_affine()) + np.testing.assert_almost_equal(masker.mask_img_.affine, + masker.maps_img_.affine) assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3]) transformed = masker.transform(fmri11_img) assert_equal(transformed.shape, (length, n_regions)) fmri11_img_r = masker.inverse_transform(transformed) - np.testing.assert_almost_equal(fmri11_img_r.get_affine(), - masker.maps_img_.get_affine()) + np.testing.assert_almost_equal(fmri11_img_r.affine, + masker.maps_img_.affine) assert_equal(fmri11_img_r.shape, (masker.maps_img_.shape[:3] + (length,))) # Test with clipped maps: mask does not contain all maps. @@ -217,12 +245,12 @@ def test_nifti_maps_masker_2(): resampling_target="maps") masker.fit() - np.testing.assert_almost_equal(masker.maps_img_.get_affine(), - maps33_img.get_affine()) + np.testing.assert_almost_equal(masker.maps_img_.affine, + maps33_img.affine) assert_equal(masker.maps_img_.shape, maps33_img.shape) - np.testing.assert_almost_equal(masker.mask_img_.get_affine(), - masker.maps_img_.get_affine()) + np.testing.assert_almost_equal(masker.mask_img_.affine, + masker.maps_img_.affine) assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3]) transformed = masker.transform(fmri11_img) @@ -231,8 +259,8 @@ def test_nifti_maps_masker_2(): assert_less((transformed.var(axis=0) == 0).sum(), n_regions) fmri11_img_r = masker.inverse_transform(transformed) - np.testing.assert_almost_equal(fmri11_img_r.get_affine(), - masker.maps_img_.get_affine()) + np.testing.assert_almost_equal(fmri11_img_r.affine, + masker.maps_img_.affine) assert_equal(fmri11_img_r.shape, (masker.maps_img_.shape[:3] + (length,))) diff --git a/nilearn/input_data/tests/test_nifti_masker.py b/nilearn/input_data/tests/test_nifti_masker.py index b7abc5f145..317c11a120 100644 --- a/nilearn/input_data/tests/test_nifti_masker.py +++ b/nilearn/input_data/tests/test_nifti_masker.py @@ -7,25 +7,23 @@ """ # Author: Gael Varoquaux, Philippe Gervais # License: simplified BSD -from tempfile import mkdtemp -import shutil import os -from distutils.version import LooseVersion +import shutil +from tempfile import mkdtemp -from nose.tools import assert_true, assert_false, assert_raises -from nose import SkipTest +import nibabel import numpy as np -from numpy.testing import assert_array_equal - from nibabel import Nifti1Image -import nibabel +from nose import SkipTest +from nose.tools import assert_true, assert_false, assert_raises +from numpy.testing import assert_array_equal, assert_equal -from nilearn.input_data.nifti_masker import NiftiMasker, filter_and_mask from nilearn._utils import testing +from nilearn._utils.class_inspect import get_params from nilearn._utils.exceptions import DimensionError -from nilearn.image import index_img from nilearn._utils.testing import assert_raises_regex -from nilearn._utils.class_inspect import get_params +from nilearn.image import index_img +from nilearn.input_data.nifti_masker import NiftiMasker, filter_and_mask def test_auto_mask(): @@ -198,8 +196,11 @@ def test_5d(): masker = NiftiMasker(mask_img=mask_img) masker.fit() testing.assert_raises_regex( - DimensionError, 'Data must be a 4D Niimg-like object but you provided' - ' a list of 4D images.', masker.transform, data_5d) + DimensionError, + "Input data has incompatible dimensionality: " + "Expected dimension is 4D and 
you provided " + "a list of 4D images \(5D\).", + masker.transform, data_5d) def test_sessions(): @@ -219,9 +220,6 @@ def test_sessions(): def test_joblib_cache(): - if not LooseVersion(nibabel.__version__) > LooseVersion('1.1.0'): - # Old nibabel do not pickle - raise SkipTest from sklearn.externals.joblib import hash, Memory mask = np.zeros((40, 40, 40)) mask[20, 20, 20] = 1 @@ -302,7 +300,31 @@ def test_compute_epi_mask(): mask4.get_data()[3:12, 3:12])) -def test_filter_and_mask(): +def test_compute_gray_matter_mask(): + # Check masker for template masking strategy + + img = np.random.rand(9, 9, 5) + img = Nifti1Image(img, np.eye(4)) + + masker = NiftiMasker(mask_strategy='template') + + masker.fit(img) + mask1 = masker.mask_img_ + + masker2 = NiftiMasker(mask_strategy='template', + mask_args=dict(threshold=0.)) + + masker2.fit(img) + mask2 = masker2.mask_img_ + + mask_ref = np.zeros((9, 9, 5)) + mask_ref[2:7, 2:7, 2] = 1 + + np.testing.assert_array_equal(mask1.get_data(), mask_ref) + np.testing.assert_array_equal(mask2.get_data(), mask_ref) + + +def test_filter_and_mask_error(): data = np.zeros([20, 30, 40, 5]) mask = np.zeros([20, 30, 40, 2]) mask[10, 15, 20, :] = 1 @@ -313,5 +335,45 @@ def test_filter_and_mask(): masker = NiftiMasker() params = get_params(NiftiMasker, masker) - assert_raises_regex(DimensionError, "Data must be a 3D", filter_and_mask, - data_img, mask_img, params) + assert_raises_regex(DimensionError, + "Input data has incompatible dimensionality: " + "Expected dimension is 3D and you provided " + "a 4D image.", + filter_and_mask, + data_img, mask_img, params) + + +def test_filter_and_mask(): + data = np.zeros([20, 30, 40, 5]) + mask = np.ones([20, 30, 40]) + + data_img = nibabel.Nifti1Image(data, np.eye(4)) + mask_img = nibabel.Nifti1Image(mask, np.eye(4)) + + masker = NiftiMasker() + params = get_params(NiftiMasker, masker) + + # Test return_affine = False + data = filter_and_mask(data_img, mask_img, params) + assert_equal(data.shape, (5, 24000)) + + +def test_dtype(): + data_32 = np.zeros((9, 9, 9), dtype=np.float32) + data_64 = np.zeros((9, 9, 9), dtype=np.float64) + data_32[2:-2, 2:-2, 2:-2] = 10 + data_64[2:-2, 2:-2, 2:-2] = 10 + + affine_32 = np.eye(4, dtype=np.float32) + affine_64 = np.eye(4, dtype=np.float64) + + img_32 = Nifti1Image(data_32, affine_32) + img_64 = Nifti1Image(data_64, affine_64) + + masker_1 = NiftiMasker(dtype='auto') + assert(masker_1.fit_transform(img_32).dtype == np.float32) + assert(masker_1.fit_transform(img_64).dtype == np.float32) + + masker_2 = NiftiMasker(dtype='float64') + assert(masker_2.fit_transform(img_32).dtype == np.float64) + assert(masker_2.fit_transform(img_64).dtype == np.float64) diff --git a/nilearn/input_data/tests/test_nifti_spheres_masker.py b/nilearn/input_data/tests/test_nifti_spheres_masker.py index b2568a4205..5ada0e8731 100644 --- a/nilearn/input_data/tests/test_nifti_spheres_masker.py +++ b/nilearn/input_data/tests/test_nifti_spheres_masker.py @@ -82,13 +82,47 @@ def test_nifti_spheres_masker_overlap(): seeds = [(0, 0, 0), (2, 2, 2)] - overlapping_masker = NiftiSpheresMasker(seeds, radius=1, allow_overlap=True) + overlapping_masker = NiftiSpheresMasker(seeds, radius=1, + allow_overlap=True) overlapping_masker.fit_transform(fmri_img) - overlapping_masker = NiftiSpheresMasker(seeds, radius=2, allow_overlap=True) + overlapping_masker = NiftiSpheresMasker(seeds, radius=2, + allow_overlap=True) overlapping_masker.fit_transform(fmri_img) - noverlapping_masker = NiftiSpheresMasker(seeds, radius=1, allow_overlap=False) 
+ noverlapping_masker = NiftiSpheresMasker(seeds, radius=1, + allow_overlap=False) noverlapping_masker.fit_transform(fmri_img) - noverlapping_masker = NiftiSpheresMasker(seeds, radius=2, allow_overlap=False) + noverlapping_masker = NiftiSpheresMasker(seeds, radius=2, + allow_overlap=False) assert_raises_regex(ValueError, 'Overlap detected', noverlapping_masker.fit_transform, fmri_img) + + +def test_small_radius(): + affine = np.eye(4) + shape = (3, 3, 3) + + data = np.random.random(shape) + mask = np.zeros(shape) + mask[1, 1, 1] = 1 + mask[2, 2, 2] = 1 + affine = np.eye(4) * 1.2 + seed = (1.4, 1.4, 1.4) + + masker = NiftiSpheresMasker([seed], radius=0.1, + mask_img=nibabel.Nifti1Image(mask, affine)) + masker.fit_transform(nibabel.Nifti1Image(data, affine)) + + # Test if masking is taken into account + mask[1, 1, 1] = 0 + mask[1, 1, 0] = 1 + + masker = NiftiSpheresMasker([seed], radius=0.1, + mask_img=nibabel.Nifti1Image(mask, affine)) + assert_raises_regex(ValueError, 'Sphere around seed #0 is empty', + masker.fit_transform, + nibabel.Nifti1Image(data, affine)) + + masker = NiftiSpheresMasker([seed], radius=1.6, + mask_img=nibabel.Nifti1Image(mask, affine)) + masker.fit_transform(nibabel.Nifti1Image(data, affine)) diff --git a/nilearn/masking.py b/nilearn/masking.py index 472e1efc11..3a31557c57 100644 --- a/nilearn/masking.py +++ b/nilearn/masking.py @@ -4,6 +4,7 @@ # Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais # License: simplified BSD import warnings +import numbers import numpy as np from scipy import ndimage @@ -29,7 +30,7 @@ def _load_mask_img(mask_img, allow_empty=False): Parameters ---------- mask_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html The mask to check allow_empty: boolean, optional @@ -61,7 +62,7 @@ def _load_mask_img(mask_img, allow_empty=False): % values) mask = _utils.as_ndarray(mask, dtype=bool) - return mask, mask_img.get_affine() + return mask, mask_img.affine def _extrapolate_out_mask(data, mask, iterations=1): @@ -78,11 +79,12 @@ def _extrapolate_out_mask(data, mask, iterations=1): masked_data[1:-1, 1:-1, 1:-1] = data.copy() masked_data[np.logical_not(larger_mask)] = np.nan outer_shell = larger_mask.copy() - outer_shell[1:-1, 1:-1, 1:-1] = new_mask - mask + outer_shell[1:-1, 1:-1, 1:-1] = np.logical_xor(new_mask, mask) outer_shell_x, outer_shell_y, outer_shell_z = np.where(outer_shell) extrapolation = list() - for i, j, k in [(0, 1, 0), (0, -1, 0), (1, 0, 0), (-1, 0, 0), - (1, 0, 0), (-1, 0, 0)]: + for i, j, k in [(1, 0, 0), (-1, 0, 0), + (0, 1, 0), (0, -1, 0), + (0, 0, 1), (0, 0, -1)]: this_x = outer_shell_x + i this_y = outer_shell_y + j this_z = outer_shell_z + k @@ -106,12 +108,12 @@ def intersect_masks(mask_imgs, threshold=0.5, connected=True): """ Compute intersection of several masks Given a list of input mask images, generate the output image which - is the the threshold-level intersection of the inputs + is the threshold-level intersection of the inputs Parameters ---------- mask_imgs: list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html 3D individual masks with same shape and affine. 
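# A small sketch of why _extrapolate_out_mask above switched to
# np.logical_xor: on recent numpy (>= 1.13) the binary `-` operator is no
# longer supported for boolean arrays, so `new_mask - mask` raises TypeError.
import numpy as np

new_mask = np.array([True, True, False])
mask = np.array([True, False, False])
outer_shell = np.logical_xor(new_mask, mask)  # -> array([False, True, False])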
threshold: float, optional @@ -196,7 +198,7 @@ def compute_epi_mask(epi_img, lower_cutoff=0.2, upper_cutoff=0.85, Parameters ---------- epi_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html EPI image, used to compute the mask. 3D and 4D images are accepted. If a 3D image is given, we suggest to use the mean image @@ -301,7 +303,7 @@ def compute_multi_epi_mask(epi_imgs, lower_cutoff=0.2, upper_cutoff=0.85, Parameters ---------- epi_imgs: list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html A list of arrays, each item being a subject or a session. 3D and 4D images are accepted. If 3D images is given, we suggest to use the mean image of each @@ -320,7 +322,7 @@ def compute_multi_epi_mask(epi_imgs, lower_cutoff=0.2, upper_cutoff=0.85, upper_cutoff: float, optional upper fraction of the histogram to be discarded. - connected: boolean, optional + connected: bool, optional if connected is True, only the largest connect component is kept. exclude_zeros: boolean, optional @@ -377,7 +379,7 @@ def compute_background_mask(data_imgs, border_size=2, Parameters ---------- data_imgs: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Images used to compute the mask. 3D and 4D images are accepted. If a 3D image is given, we suggest to use the mean image @@ -455,7 +457,7 @@ def compute_multi_background_mask(data_imgs, border_size=2, upper_cutoff=0.85, Parameters ---------- data_imgs: list of Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html A list of arrays, each item being a subject or a session. 3D and 4D images are accepted. If 3D images is given, we suggest to use the mean image of each @@ -472,7 +474,7 @@ def compute_multi_background_mask(data_imgs, border_size=2, upper_cutoff=0.85, The size, in voxel of the border used on the side of the image to determine the value of the background. - connected: boolean, optional + connected: bool, optional if connected is True, only the largest connect component is kept. target_affine: 3x3 or 4x4 matrix, optional @@ -512,6 +514,147 @@ def compute_multi_background_mask(data_imgs, border_size=2, upper_cutoff=0.85, return mask +def compute_gray_matter_mask(target_img, threshold=.5, + connected=True, opening=2, memory=None, + verbose=0): + """ Compute a mask corresponding to the gray matter part of the brain. + The gray matter part is calculated through the resampling of MNI152 + template gray matter mask onto the target image + + Parameters + ---------- + target_img: Niimg-like object + See http://nilearn.github.io/manipulating_images/input_output.html + Images used to compute the mask. 3D and 4D images are accepted. + Only the shape and affine of target_img will be used here. + + threshold: float, optional + The value under which the MNI template is cut off. + Default value is 0.5 + + connected: bool, optional + if connected is True, only the largest connected component is kept. + Default is True + + opening: bool or int, optional + if opening is True, a morphological opening is performed, to keep + only large structures. 
+ If opening is an integer `n`, it is performed via `n` erosions. + After estimation of the largest connected component, `2n` closing + operations are performed followed by `n` erosions. This corresponds + to one opening operation of order `n` followed by a closing operation + of order `n`. + + memory: instance of joblib.Memory or str + Used to cache the function call. + + verbose: int, optional + Controls the amount of verbosity: higher numbers give + more messages + + Returns + ------- + mask: nibabel.Nifti1Image + The brain mask (3D image) + """ + if verbose > 0: + print("Template mask computation") + + target_img = _utils.check_niimg(target_img) + + from .datasets import load_mni152_brain_mask + template = load_mni152_brain_mask() + dtype = target_img.get_data_dtype() + template = new_img_like(template, + template.get_data().astype(dtype)) + + from .image.resampling import resample_to_img + resampled_template = cache(resample_to_img, memory)(template, target_img) + + mask = resampled_template.get_data() >= threshold + + mask, affine = _post_process_mask(mask, target_img.affine, opening=opening, + connected=connected, + warning_msg="Gray matter mask is empty, " + "lower the threshold or " + "check your input FOV") + + return new_img_like(target_img, mask, affine) + + +def compute_multi_gray_matter_mask(target_imgs, threshold=.5, + connected=True, opening=2, + memory=None, verbose=0, n_jobs=1, **kwargs): + """ Compute a mask corresponding to the gray matter part of the brain for + a list of images. + The gray matter part is calculated through the resampling of MNI152 + template gray matter mask onto the target image + + Parameters + ---------- + target_imgs: list of Niimg-like objects + See http://nilearn.github.io/manipulating_images/input_output.html + Images used to compute the mask. 3D and 4D images are accepted. + The images in this list must be of same shape and affine. The mask is + calculated with the first element of the list, since only the + shape/affine of the image is used for this masking strategy + + threshold: float, optional + The value under which the MNI template is cut off. + Default value is 0.5 + + connected: bool, optional + if connected is True, only the largest connected component is kept. + Default is True + + opening: bool or int, optional + if opening is True, a morphological opening is performed, to keep + only large structures. + If opening is an integer `n`, it is performed via `n` erosions. + After estimation of the largest connected component, `2n` closing + operations are performed followed by `n` erosions. This corresponds + to one opening operation of order `n` followed by a closing operation + of order `n`. + + memory: instance of joblib.Memory or str + Used to cache the function call. + + n_jobs: integer, optional + Argument not used but kept to fit the API + + **kwargs: optional arguments + arguments such as 'target_affine' are used in the call of other + masking strategies, which would then raise an error for this function, + which does not need such arguments.
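The new strategy can also be called directly; a minimal sketch mirroring the test_compute_gray_matter_mask unit test earlier in this diff (synthetic image, identity affine):

    import numpy as np
    from nibabel import Nifti1Image
    from nilearn.masking import compute_gray_matter_mask

    # Only the shape/affine of the target image are used here.
    img = Nifti1Image(np.random.rand(9, 9, 5), np.eye(4))
    gm_mask = compute_gray_matter_mask(img, threshold=0.5)
    print(gm_mask.shape)  # (9, 9, 5): a binary mask on the target grid
    # NiftiMasker(mask_strategy='template') reaches the same code path.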
+ + verbose: int, optional + Controls the amount of verbosity: higher numbers give + more messages + + Returns + ------- + mask: nibabel.Nifti1Image + The brain mask (3D image) + + See also + -------- + nilearn.masking.compute_gray_matter_mask + """ + if len(target_imgs) == 0: + raise TypeError('An empty object - %r - was passed instead of an ' + 'image or a list of images' % target_imgs) + + # Check images in the list have the same FOV without loading them in memory + imgs_generator = _utils.check_niimg(target_imgs, return_iterator=True) + for _ in imgs_generator: + pass + + mask = compute_gray_matter_mask(target_imgs[0], threshold=threshold, + connected=connected, opening=opening, + memory=memory, verbose=verbose) + return mask + + # # Time series extraction # @@ -525,11 +668,11 @@ def apply_mask(imgs, mask_img, dtype='f', Parameters ----------- imgs: list of 4D Niimg-like objects - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Images to be masked. list of lists of 3D images are also accepted. mask_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html 3D mask array: True where a voxel should be used. dtype: numpy dtype or 'f' @@ -573,7 +716,7 @@ def _apply_mask_fmri(imgs, mask_img, dtype='f', """ mask_img = _utils.check_niimg_3d(mask_img) - mask_affine = mask_img.get_affine() + mask_affine = mask_img.affine mask_data = _utils.as_ndarray(mask_img.get_data(), dtype=np.bool) @@ -581,12 +724,12 @@ def _apply_mask_fmri(imgs, mask_img, dtype='f', ensure_finite = True imgs_img = _utils.check_niimg(imgs) - affine = imgs_img.get_affine()[:3, :3] + affine = imgs_img.affine[:3, :3] - if not np.allclose(mask_affine, imgs_img.get_affine()): + if not np.allclose(mask_affine, imgs_img.affine): raise ValueError('Mask affine: \n%s\n is different from img affine:' '\n%s' % (str(mask_affine), - str(imgs_img.get_affine()))) + str(imgs_img.affine))) if not mask_data.shape == imgs_img.shape[:3]: raise ValueError('Mask shape: %s is different from img shape:%s' @@ -617,12 +760,12 @@ def _unmask_3d(X, mask, order="C"): """Take masked data and bring them back to 3D (space only). Parameters - ========== + ---------- X: numpy.ndarray Masked data. shape: (features,) mask: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Mask. mask.ndim must be equal to 3, and dtype *must* be bool. """ @@ -645,7 +788,7 @@ def _unmask_4d(X, mask, order="C"): """Take masked data and bring them back to 4D. Parameters - ========== + ---------- X: numpy.ndarray Masked data. shape: (samples, features) @@ -653,7 +796,7 @@ def _unmask_4d(X, mask, order="C"): Mask. mask.ndim must be equal to 4, and dtype *must* be bool. Returns - ======= + ------- data: numpy.ndarray Unmasked data. Shape: (mask.shape[0], mask.shape[1], mask.shape[2], X.shape[0]) @@ -678,16 +821,16 @@ def unmask(X, mask_img, order="F"): This function can be applied to a list of masked data. Parameters - ========== + ---------- X: numpy.ndarray (or list of) Masked data. shape: (samples #, features #). If X is one-dimensional, it is assumed that samples# == 1. mask_img: niimg: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. 
+ See http://nilearn.github.io/manipulating_images/input_output.html Must be 3-dimensional. Returns - ======= + ------- data: nibabel.Nift1Image object Unmasked data. Depending on the shape of X, data can have different shapes: - X.ndim == 2: Shape: (mask.shape[0], mask.shape[1], mask.shape[2], X.shape[0]) - X.ndim == 1: Shape: (mask.shape[0], mask.shape[1], mask.shape[2]) """ - - if isinstance(X, list): + # Handle lists. This can be a list of other lists / arrays, or a list of + # numbers. In the latter case skip. + if isinstance(X, list) and not isinstance(X[0], numbers.Number): ret = [] for x in X: ret.append(unmask(x, mask_img, order=order)) # 1-level recursion return ret + # The code after this block assumes that X is an ndarray; ensure this + X = np.asanyarray(X) + mask_img = _utils.check_niimg_3d(mask_img) mask, affine = _load_mask_img(mask_img) - if X.ndim == 2: + if np.ndim(X) == 2: unmasked = _unmask_4d(X, mask, order=order) - elif X.ndim == 1: + elif np.ndim(X) == 1: unmasked = _unmask_3d(X, mask, order=order) else: raise TypeError("Masked data X must be 2D or 1D array; " diff --git a/nilearn/mass_univariate/__init__.py b/nilearn/mass_univariate/__init__.py index 903cdaa668..201dc23f54 100644 --- a/nilearn/mass_univariate/__init__.py +++ b/nilearn/mass_univariate/__init__.py @@ -3,3 +3,5 @@ """ from .permuted_least_squares import permuted_ols + +__all__ = ['permuted_ols'] diff --git a/nilearn/mass_univariate/tests/test_permuted_least_squares.py b/nilearn/mass_univariate/tests/test_permuted_least_squares.py index b20269c13b..e873dc0a17 100644 --- a/nilearn/mass_univariate/tests/test_permuted_least_squares.py +++ b/nilearn/mass_univariate/tests/test_permuted_least_squares.py @@ -3,7 +3,6 @@ """ # Author: Virgile Fritsch, , Feb. 2014 -import nose import numpy as np from scipy import stats from sklearn.utils import check_random_state diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py index e7cafda82f..bb238b47aa 100644 --- a/nilearn/plotting/__init__.py +++ b/nilearn/plotting/__init__.py @@ -11,8 +11,12 @@ def _set_mpl_backend(): # We are doing local imports here to avoid poluting our namespace import matplotlib import os + import sys # Set the backend to a non-interactive one for unices without X - if os.name == 'posix' and 'DISPLAY' not in os.environ: + if (os.name == 'posix' and 'DISPLAY' not in os.environ + and not (sys.platform == 'darwin' + and matplotlib.get_backend() == 'MacOSX' + )): matplotlib.use('Agg') except ImportError: from .._utils.testing import skip_if_running_nose @@ -30,15 +34,24 @@ def _set_mpl_backend(): _set_mpl_backend() ############################################################################### - from . 
import cm from .img_plotting import plot_img, plot_anat, plot_epi, \ plot_roi, plot_stat_map, plot_glass_brain, plot_connectome, \ plot_prob_atlas, show -from .find_cuts import find_xyz_cut_coords, find_cut_slices +from .find_cuts import find_xyz_cut_coords, find_cut_slices, \ + find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords +from .matrix_plotting import plot_matrix +from .html_surface import view_surf, view_img_on_surf +from .html_stat_map import view_stat_map +from .html_connectome import view_connectome, view_markers +from .surf_plotting import plot_surf, plot_surf_stat_map, plot_surf_roi __all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi', 'plot_roi', 'plot_stat_map', 'plot_glass_brain', 'plot_connectome', 'plot_prob_atlas', 'find_xyz_cut_coords', 'find_cut_slices', - 'show'] + 'show', 'plot_matrix', 'view_surf', 'view_img_on_surf', + 'view_stat_map', 'view_connectome', 'view_markers', + 'find_parcellation_cut_coords', 'find_probabilistic_atlas_cut_coords', + 'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi', + ] diff --git a/nilearn/plotting/cm.py b/nilearn/plotting/cm.py index d11805ed77..1c989f4caa 100644 --- a/nilearn/plotting/cm.py +++ b/nilearn/plotting/cm.py @@ -166,16 +166,43 @@ def alpha_cmap(color, name='', alpha_min=0.5, alpha_max=1.): ################################################################################ # A few transparent colormaps for color, name in (((1, 0, 0), 'red'), - ((0, 1, 0), 'blue'), - ((0, 0, 1), 'green'), + ((0, 1, 0), 'green'), + ((0, 0, 1), 'blue'), ): _cmap_d['%s_transparent' % name] = alpha_cmap(color, name=name) _cmap_d['%s_transparent_full_alpha_range' % name] = alpha_cmap( color, alpha_min=0, alpha_max=1, name=name) - +############################################################################### +# HCP Connectome Workbench colormaps +# As seen in https://github.com/Washington-University/workbench src/Pallete +roy_big_bl = _np.array([(255, 255, 0), (255, 200, 0), + (255, 120, 0), (255, 0, 0), + (200, 0, 0), (150, 0, 0), + (100, 0, 0), (60, 0, 0), + (0, 0, 0), (0, 0, 80), + (0, 0, 170), (75, 0, 125), + (125, 0, 160), (75, 125, 0), + (0, 200, 0), (0, 255, 0), + (0, 255, 255), (0, 255, 255)][::-1]) / 255 + +videen_style = ['#000000', '#bbbbbb', '#dddddd', '#ffffff', + '#ff388d', '#e251e2', '#10b010', '#00ff00', + '#00ffff', '#000000', '#660033', '#33334c', + '#4c4c7f', '#7f7fcc', '#00ff00', '#10b010', + '#ffff00', '#ff9900', '#ff6900', '#ff0000'] + +_cmap_d['roy_big_bl'] = _colors.LinearSegmentedColormap.from_list( + 'roy_big_bl', roy_big_bl.tolist()) +_cmap_d['videen_style'] = _colors.LinearSegmentedColormap.from_list( + 'videen_style', videen_style) + +# Save colormaps in the scope of the module locals().update(_cmap_d) +# Register cmaps in matplotlib too +for k, v in _cmap_d.items(): + _cm.register_cmap(name=k, cmap=v) ################################################################################ diff --git a/nilearn/plotting/data/README.txt b/nilearn/plotting/data/README.txt new file mode 100644 index 0000000000..ddad159b27 --- /dev/null +++ b/nilearn/plotting/data/README.txt @@ -0,0 +1,8 @@ +This directory contains files required for javascript plots. 
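Since the loop above registers every entry of _cmap_d with matplotlib at import time, the new maps become usable anywhere matplotlib accepts a colormap name; a minimal sketch:

    import numpy as np
    import matplotlib.pyplot as plt
    import nilearn.plotting  # noqa: F401 -- registers 'roy_big_bl', 'videen_style'

    plt.imshow(np.random.rand(8, 8), cmap='roy_big_bl')
    plt.colorbar()
    plt.show()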
+ +html/ : templates for HTML files + +js/ + surface-plot-utils.js : helpers for nilearn plots + plotly-gl3d-latest.min.js : plotly library (https://plot.ly/javascript/getting-started/) + jquery.min.js : jquery library (https://jquery.com/) diff --git a/nilearn/plotting/data/__init__.py b/nilearn/plotting/data/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nilearn/plotting/data/html/__init__.py b/nilearn/plotting/data/html/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nilearn/plotting/data/html/connectome_plot_template.html b/nilearn/plotting/data/html/connectome_plot_template.html new file mode 100644 index 0000000000..9b346219fe --- /dev/null +++ b/nilearn/plotting/data/html/connectome_plot_template.html @@ -0,0 +1,162 @@ + + + + + connectome plot + INSERT_JS_LIBRARIES_HERE + + + + + + +
    + + + + + + diff --git a/nilearn/plotting/data/html/stat_map_template.html b/nilearn/plotting/data/html/stat_map_template.html new file mode 100644 index 0000000000..efa437951a --- /dev/null +++ b/nilearn/plotting/data/html/stat_map_template.html @@ -0,0 +1,123 @@ + + + + + + + + + + + + + + + + + Brain image + + + + + + + +
    + + + diff --git a/nilearn/plotting/data/html/surface_plot_template.html b/nilearn/plotting/data/html/surface_plot_template.html new file mode 100644 index 0000000000..2544fd6e4f --- /dev/null +++ b/nilearn/plotting/data/html/surface_plot_template.html @@ -0,0 +1,94 @@ + + + + + surface plot + + INSERT_JS_LIBRARIES_HERE + + + + + + +
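Each of these templates carries an INSERT_JS_LIBRARIES_HERE placeholder; a hedged sketch of the substitution it implies (the function name and loading details are illustrative, not nilearn's actual API):

    def render_template(template_path, js_libraries_html):
        # Read an HTML template and inline the JS libraries in place of
        # the INSERT_JS_LIBRARIES_HERE placeholder.
        with open(template_path) as f:
            html = f.read()
        return html.replace('INSERT_JS_LIBRARIES_HERE', js_libraries_html)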
    + + + + + + + + + + diff --git a/nilearn/plotting/data/js/__init__.py b/nilearn/plotting/data/js/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nilearn/plotting/data/js/jquery.min.js b/nilearn/plotting/data/js/jquery.min.js new file mode 100644 index 0000000000..4d9b3a2587 --- /dev/null +++ b/nilearn/plotting/data/js/jquery.min.js @@ -0,0 +1,2 @@ +/*! jQuery v3.3.1 | (c) JS Foundation and other contributors | jquery.org/license */ +!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(e,t){"use strict";var n=[],r=e.document,i=Object.getPrototypeOf,o=n.slice,a=n.concat,s=n.push,u=n.indexOf,l={},c=l.toString,f=l.hasOwnProperty,p=f.toString,d=p.call(Object),h={},g=function e(t){return"function"==typeof t&&"number"!=typeof t.nodeType},y=function e(t){return null!=t&&t===t.window},v={type:!0,src:!0,noModule:!0};function m(e,t,n){var i,o=(t=t||r).createElement("script");if(o.text=e,n)for(i in v)n[i]&&(o[i]=n[i]);t.head.appendChild(o).parentNode.removeChild(o)}function x(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?l[c.call(e)]||"object":typeof e}var b="3.3.1",w=function(e,t){return new w.fn.init(e,t)},T=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;w.fn=w.prototype={jquery:"3.3.1",constructor:w,length:0,toArray:function(){return o.call(this)},get:function(e){return null==e?o.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=w.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return w.each(this,e)},map:function(e){return this.pushStack(w.map(this,function(t,n){return e.call(t,n,t)}))},slice:function(){return this.pushStack(o.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(n>=0&&n0&&t-1 in e)}var E=function(e){var t,n,r,i,o,a,s,u,l,c,f,p,d,h,g,y,v,m,x,b="sizzle"+1*new Date,w=e.document,T=0,C=0,E=ae(),k=ae(),S=ae(),D=function(e,t){return e===t&&(f=!0),0},N={}.hasOwnProperty,A=[],j=A.pop,q=A.push,L=A.push,H=A.slice,O=function(e,t){for(var n=0,r=e.length;n+~]|"+M+")"+M+"*"),z=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),X=new RegExp(W),U=new RegExp("^"+R+"$"),V={ID:new RegExp("^#("+R+")"),CLASS:new RegExp("^\\.("+R+")"),TAG:new RegExp("^("+R+"|[*])"),ATTR:new RegExp("^"+I),PSEUDO:new RegExp("^"+W),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+P+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},G=/^(?:input|select|textarea|button)$/i,Y=/^h\d$/i,Q=/^[^{]+\{\s*\[native \w/,J=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,K=/[+~]/,Z=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ee=function(e,t,n){var r="0x"+t-65536;return r!==r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},te=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ne=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},re=function(){p()},ie=me(function(e){return!0===e.disabled&&("form"in e||"label"in 
e)},{dir:"parentNode",next:"legend"});try{L.apply(A=H.call(w.childNodes),w.childNodes),A[w.childNodes.length].nodeType}catch(e){L={apply:A.length?function(e,t){q.apply(e,H.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function oe(e,t,r,i){var o,s,l,c,f,h,v,m=t&&t.ownerDocument,T=t?t.nodeType:9;if(r=r||[],"string"!=typeof e||!e||1!==T&&9!==T&&11!==T)return r;if(!i&&((t?t.ownerDocument||t:w)!==d&&p(t),t=t||d,g)){if(11!==T&&(f=J.exec(e)))if(o=f[1]){if(9===T){if(!(l=t.getElementById(o)))return r;if(l.id===o)return r.push(l),r}else if(m&&(l=m.getElementById(o))&&x(t,l)&&l.id===o)return r.push(l),r}else{if(f[2])return L.apply(r,t.getElementsByTagName(e)),r;if((o=f[3])&&n.getElementsByClassName&&t.getElementsByClassName)return L.apply(r,t.getElementsByClassName(o)),r}if(n.qsa&&!S[e+" "]&&(!y||!y.test(e))){if(1!==T)m=t,v=e;else if("object"!==t.nodeName.toLowerCase()){(c=t.getAttribute("id"))?c=c.replace(te,ne):t.setAttribute("id",c=b),s=(h=a(e)).length;while(s--)h[s]="#"+c+" "+ve(h[s]);v=h.join(","),m=K.test(e)&&ge(t.parentNode)||t}if(v)try{return L.apply(r,m.querySelectorAll(v)),r}catch(e){}finally{c===b&&t.removeAttribute("id")}}}return u(e.replace(B,"$1"),t,r,i)}function ae(){var e=[];function t(n,i){return e.push(n+" ")>r.cacheLength&&delete t[e.shift()],t[n+" "]=i}return t}function se(e){return e[b]=!0,e}function ue(e){var t=d.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function le(e,t){var n=e.split("|"),i=n.length;while(i--)r.attrHandle[n[i]]=t}function ce(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function fe(e){return function(t){return"input"===t.nodeName.toLowerCase()&&t.type===e}}function pe(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function de(e){return function(t){return"form"in t?t.parentNode&&!1===t.disabled?"label"in t?"label"in t.parentNode?t.parentNode.disabled===e:t.disabled===e:t.isDisabled===e||t.isDisabled!==!e&&ie(t)===e:t.disabled===e:"label"in t&&t.disabled===e}}function he(e){return se(function(t){return t=+t,se(function(n,r){var i,o=e([],n.length,t),a=o.length;while(a--)n[i=o[a]]&&(n[i]=!(r[i]=n[i]))})})}function ge(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}n=oe.support={},o=oe.isXML=function(e){var t=e&&(e.ownerDocument||e).documentElement;return!!t&&"HTML"!==t.nodeName},p=oe.setDocument=function(e){var t,i,a=e?e.ownerDocument||e:w;return a!==d&&9===a.nodeType&&a.documentElement?(d=a,h=d.documentElement,g=!o(d),w!==d&&(i=d.defaultView)&&i.top!==i&&(i.addEventListener?i.addEventListener("unload",re,!1):i.attachEvent&&i.attachEvent("onunload",re)),n.attributes=ue(function(e){return e.className="i",!e.getAttribute("className")}),n.getElementsByTagName=ue(function(e){return e.appendChild(d.createComment("")),!e.getElementsByTagName("*").length}),n.getElementsByClassName=Q.test(d.getElementsByClassName),n.getById=ue(function(e){return h.appendChild(e).id=b,!d.getElementsByName||!d.getElementsByName(b).length}),n.getById?(r.filter.ID=function(e){var t=e.replace(Z,ee);return function(e){return e.getAttribute("id")===t}},r.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&g){var n=t.getElementById(e);return n?[n]:[]}}):(r.filter.ID=function(e){var t=e.replace(Z,ee);return function(e){var n="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return 
n&&n.value===t}},r.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&g){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),r.find.TAG=n.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):n.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},r.find.CLASS=n.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&g)return t.getElementsByClassName(e)},v=[],y=[],(n.qsa=Q.test(d.querySelectorAll))&&(ue(function(e){h.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&y.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||y.push("\\["+M+"*(?:value|"+P+")"),e.querySelectorAll("[id~="+b+"-]").length||y.push("~="),e.querySelectorAll(":checked").length||y.push(":checked"),e.querySelectorAll("a#"+b+"+*").length||y.push(".#.+[+~]")}),ue(function(e){e.innerHTML="";var t=d.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&y.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&y.push(":enabled",":disabled"),h.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&y.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),y.push(",.*:")})),(n.matchesSelector=Q.test(m=h.matches||h.webkitMatchesSelector||h.mozMatchesSelector||h.oMatchesSelector||h.msMatchesSelector))&&ue(function(e){n.disconnectedMatch=m.call(e,"*"),m.call(e,"[s!='']:x"),v.push("!=",W)}),y=y.length&&new RegExp(y.join("|")),v=v.length&&new RegExp(v.join("|")),t=Q.test(h.compareDocumentPosition),x=t||Q.test(h.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return f=!0,0;var r=!e.compareDocumentPosition-!t.compareDocumentPosition;return r||(1&(r=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!n.sortDetached&&t.compareDocumentPosition(e)===r?e===d||e.ownerDocument===w&&x(w,e)?-1:t===d||t.ownerDocument===w&&x(w,t)?1:c?O(c,e)-O(c,t):0:4&r?-1:1)}:function(e,t){if(e===t)return f=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===d?-1:t===d?1:i?-1:o?1:c?O(c,e)-O(c,t):0;if(i===o)return ce(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?ce(a[r],s[r]):a[r]===w?-1:s[r]===w?1:0},d):d},oe.matches=function(e,t){return oe(e,null,null,t)},oe.matchesSelector=function(e,t){if((e.ownerDocument||e)!==d&&p(e),t=t.replace(z,"='$1']"),n.matchesSelector&&g&&!S[t+" "]&&(!v||!v.test(t))&&(!y||!y.test(t)))try{var r=m.call(e,t);if(r||n.disconnectedMatch||e.document&&11!==e.document.nodeType)return r}catch(e){}return oe(t,d,null,[e]).length>0},oe.contains=function(e,t){return(e.ownerDocument||e)!==d&&p(e),x(e,t)},oe.attr=function(e,t){(e.ownerDocument||e)!==d&&p(e);var i=r.attrHandle[t.toLowerCase()],o=i&&N.call(r.attrHandle,t.toLowerCase())?i(e,t,!g):void 0;return void 
0!==o?o:n.attributes||!g?e.getAttribute(t):(o=e.getAttributeNode(t))&&o.specified?o.value:null},oe.escape=function(e){return(e+"").replace(te,ne)},oe.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},oe.uniqueSort=function(e){var t,r=[],i=0,o=0;if(f=!n.detectDuplicates,c=!n.sortStable&&e.slice(0),e.sort(D),f){while(t=e[o++])t===e[o]&&(i=r.push(o));while(i--)e.splice(r[i],1)}return c=null,e},i=oe.getText=function(e){var t,n="",r=0,o=e.nodeType;if(o){if(1===o||9===o||11===o){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=i(e)}else if(3===o||4===o)return e.nodeValue}else while(t=e[r++])n+=i(t);return n},(r=oe.selectors={cacheLength:50,createPseudo:se,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(Z,ee),e[3]=(e[3]||e[4]||e[5]||"").replace(Z,ee),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||oe.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&oe.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return V.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=a(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(Z,ee).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=E[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&E(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(e,t,n){return function(r){var i=oe.attr(r,e);return null==i?"!="===t:!t||(i+="","="===t?i===n:"!="===t?i!==n:"^="===t?n&&0===i.indexOf(n):"*="===t?n&&i.indexOf(n)>-1:"$="===t?n&&i.slice(-n.length)===n:"~="===t?(" "+i.replace($," ")+" ").indexOf(n)>-1:"|="===t&&(i===n||i.slice(0,n.length+1)===n+"-"))}},CHILD:function(e,t,n,r,i){var o="nth"!==e.slice(0,3),a="last"!==e.slice(-4),s="of-type"===t;return 1===r&&0===i?function(e){return!!e.parentNode}:function(t,n,u){var l,c,f,p,d,h,g=o!==a?"nextSibling":"previousSibling",y=t.parentNode,v=s&&t.nodeName.toLowerCase(),m=!u&&!s,x=!1;if(y){if(o){while(g){p=t;while(p=p[g])if(s?p.nodeName.toLowerCase()===v:1===p.nodeType)return!1;h=g="only"===e&&!h&&"nextSibling"}return!0}if(h=[a?y.firstChild:y.lastChild],a&&m){x=(d=(l=(c=(f=(p=y)[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]||[])[0]===T&&l[1])&&l[2],p=d&&y.childNodes[d];while(p=++d&&p&&p[g]||(x=d=0)||h.pop())if(1===p.nodeType&&++x&&p===t){c[e]=[T,d,x];break}}else if(m&&(x=d=(l=(c=(f=(p=t)[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]||[])[0]===T&&l[1]),!1===x)while(p=++d&&p&&p[g]||(x=d=0)||h.pop())if((s?p.nodeName.toLowerCase()===v:1===p.nodeType)&&++x&&(m&&((c=(f=p[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]=[T,x]),p===t))break;return(x-=i)===r||x%r==0&&x/r>=0}}},PSEUDO:function(e,t){var n,i=r.pseudos[e]||r.setFilters[e.toLowerCase()]||oe.error("unsupported pseudo: "+e);return i[b]?i(t):i.length>1?(n=[e,e,"",t],r.setFilters.hasOwnProperty(e.toLowerCase())?se(function(e,n){var r,o=i(e,t),a=o.length;while(a--)e[r=O(e,o[a])]=!(n[r]=o[a])}):function(e){return i(e,0,n)}):i}},pseudos:{not:se(function(e){var t=[],n=[],r=s(e.replace(B,"$1"));return 
r[b]?se(function(e,t,n,i){var o,a=r(e,null,i,[]),s=e.length;while(s--)(o=a[s])&&(e[s]=!(t[s]=o))}):function(e,i,o){return t[0]=e,r(t,null,o,n),t[0]=null,!n.pop()}}),has:se(function(e){return function(t){return oe(e,t).length>0}}),contains:se(function(e){return e=e.replace(Z,ee),function(t){return(t.textContent||t.innerText||i(t)).indexOf(e)>-1}}),lang:se(function(e){return U.test(e||"")||oe.error("unsupported lang: "+e),e=e.replace(Z,ee).toLowerCase(),function(t){var n;do{if(n=g?t.lang:t.getAttribute("xml:lang")||t.getAttribute("lang"))return(n=n.toLowerCase())===e||0===n.indexOf(e+"-")}while((t=t.parentNode)&&1===t.nodeType);return!1}}),target:function(t){var n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===h},focus:function(e){return e===d.activeElement&&(!d.hasFocus||d.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:de(!1),disabled:de(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!r.pseudos.empty(e)},header:function(e){return Y.test(e.nodeName)},input:function(e){return G.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:he(function(){return[0]}),last:he(function(e,t){return[t-1]}),eq:he(function(e,t,n){return[n<0?n+t:n]}),even:he(function(e,t){for(var n=0;n=0;)e.push(r);return e}),gt:he(function(e,t,n){for(var r=n<0?n+t:n;++r1?function(t,n,r){var i=e.length;while(i--)if(!e[i](t,n,r))return!1;return!0}:e[0]}function be(e,t,n){for(var r=0,i=t.length;r-1&&(o[l]=!(a[l]=f))}}else v=we(v===a?v.splice(h,v.length):v),i?i(null,a,v,u):L.apply(a,v)})}function Ce(e){for(var t,n,i,o=e.length,a=r.relative[e[0].type],s=a||r.relative[" "],u=a?1:0,c=me(function(e){return e===t},s,!0),f=me(function(e){return O(t,e)>-1},s,!0),p=[function(e,n,r){var i=!a&&(r||n!==l)||((t=n).nodeType?c(e,n,r):f(e,n,r));return t=null,i}];u1&&xe(p),u>1&&ve(e.slice(0,u-1).concat({value:" "===e[u-2].type?"*":""})).replace(B,"$1"),n,u0,i=e.length>0,o=function(o,a,s,u,c){var f,h,y,v=0,m="0",x=o&&[],b=[],w=l,C=o||i&&r.find.TAG("*",c),E=T+=null==w?1:Math.random()||.1,k=C.length;for(c&&(l=a===d||a||c);m!==k&&null!=(f=C[m]);m++){if(i&&f){h=0,a||f.ownerDocument===d||(p(f),s=!g);while(y=e[h++])if(y(f,a||d,s)){u.push(f);break}c&&(T=E)}n&&((f=!y&&f)&&v--,o&&x.push(f))}if(v+=m,n&&m!==v){h=0;while(y=t[h++])y(x,b,a,s);if(o){if(v>0)while(m--)x[m]||b[m]||(b[m]=j.call(u));b=we(b)}L.apply(u,b),c&&!o&&b.length>0&&v+t.length>1&&oe.uniqueSort(u)}return c&&(T=E,l=w),x};return n?se(o):o}return s=oe.compile=function(e,t){var n,r=[],i=[],o=S[e+" "];if(!o){t||(t=a(e)),n=t.length;while(n--)(o=Ce(t[n]))[b]?r.push(o):i.push(o);(o=S(e,Ee(i,r))).selector=e}return o},u=oe.select=function(e,t,n,i){var o,u,l,c,f,p="function"==typeof e&&e,d=!i&&a(e=p.selector||e);if(n=n||[],1===d.length){if((u=d[0]=d[0].slice(0)).length>2&&"ID"===(l=u[0]).type&&9===t.nodeType&&g&&r.relative[u[1].type]){if(!(t=(r.find.ID(l.matches[0].replace(Z,ee),t)||[])[0]))return 
n;p&&(t=t.parentNode),e=e.slice(u.shift().value.length)}o=V.needsContext.test(e)?0:u.length;while(o--){if(l=u[o],r.relative[c=l.type])break;if((f=r.find[c])&&(i=f(l.matches[0].replace(Z,ee),K.test(u[0].type)&&ge(t.parentNode)||t))){if(u.splice(o,1),!(e=i.length&&ve(u)))return L.apply(n,i),n;break}}}return(p||s(e,d))(i,t,!g,n,!t||K.test(e)&&ge(t.parentNode)||t),n},n.sortStable=b.split("").sort(D).join("")===b,n.detectDuplicates=!!f,p(),n.sortDetached=ue(function(e){return 1&e.compareDocumentPosition(d.createElement("fieldset"))}),ue(function(e){return e.innerHTML="","#"===e.firstChild.getAttribute("href")})||le("type|href|height|width",function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)}),n.attributes&&ue(function(e){return e.innerHTML="",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")})||le("value",function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return e.defaultValue}),ue(function(e){return null==e.getAttribute("disabled")})||le(P,function(e,t,n){var r;if(!n)return!0===e[t]?t.toLowerCase():(r=e.getAttributeNode(t))&&r.specified?r.value:null}),oe}(e);w.find=E,w.expr=E.selectors,w.expr[":"]=w.expr.pseudos,w.uniqueSort=w.unique=E.uniqueSort,w.text=E.getText,w.isXMLDoc=E.isXML,w.contains=E.contains,w.escapeSelector=E.escape;var k=function(e,t,n){var r=[],i=void 0!==n;while((e=e[t])&&9!==e.nodeType)if(1===e.nodeType){if(i&&w(e).is(n))break;r.push(e)}return r},S=function(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n},D=w.expr.match.needsContext;function N(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var A=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,t,n){return g(t)?w.grep(e,function(e,r){return!!t.call(e,r,e)!==n}):t.nodeType?w.grep(e,function(e){return e===t!==n}):"string"!=typeof t?w.grep(e,function(e){return u.call(t,e)>-1!==n}):w.filter(t,e,n)}w.filter=function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?w.find.matchesSelector(r,e)?[r]:[]:w.find.matches(e,w.grep(t,function(e){return 1===e.nodeType}))},w.fn.extend({find:function(e){var t,n,r=this.length,i=this;if("string"!=typeof e)return this.pushStack(w(e).filter(function(){for(t=0;t1?w.uniqueSort(n):n},filter:function(e){return this.pushStack(j(this,e||[],!1))},not:function(e){return this.pushStack(j(this,e||[],!0))},is:function(e){return!!j(this,"string"==typeof e&&D.test(e)?w(e):e||[],!1).length}});var q,L=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/;(w.fn.init=function(e,t,n){var i,o;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(i="<"===e[0]&&">"===e[e.length-1]&&e.length>=3?[null,e,null]:L.exec(e))||!i[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(i[1]){if(t=t instanceof w?t[0]:t,w.merge(this,w.parseHTML(i[1],t&&t.nodeType?t.ownerDocument||t:r,!0)),A.test(i[1])&&w.isPlainObject(t))for(i in t)g(this[i])?this[i](t[i]):this.attr(i,t[i]);return this}return(o=r.getElementById(i[2]))&&(this[0]=o,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):g(e)?void 0!==n.ready?n.ready(e):e(w):w.makeArray(e,this)}).prototype=w.fn,q=w(r);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};w.fn.extend({has:function(e){var t=w(e,this),n=t.length;return this.filter(function(){for(var e=0;e-1:1===n.nodeType&&w.find.matchesSelector(n,e))){o.push(n);break}return this.pushStack(o.length>1?w.uniqueSort(o):o)},index:function(e){return e?"string"==typeof 
e?u.call(w(e),this[0]):u.call(this,e.jquery?e[0]:e):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){return this.pushStack(w.uniqueSort(w.merge(this.get(),w(e,t))))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}});function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}w.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return k(e,"parentNode")},parentsUntil:function(e,t,n){return k(e,"parentNode",n)},next:function(e){return P(e,"nextSibling")},prev:function(e){return P(e,"previousSibling")},nextAll:function(e){return k(e,"nextSibling")},prevAll:function(e){return k(e,"previousSibling")},nextUntil:function(e,t,n){return k(e,"nextSibling",n)},prevUntil:function(e,t,n){return k(e,"previousSibling",n)},siblings:function(e){return S((e.parentNode||{}).firstChild,e)},children:function(e){return S(e.firstChild)},contents:function(e){return N(e,"iframe")?e.contentDocument:(N(e,"template")&&(e=e.content||e),w.merge([],e.childNodes))}},function(e,t){w.fn[e]=function(n,r){var i=w.map(this,t,n);return"Until"!==e.slice(-5)&&(r=n),r&&"string"==typeof r&&(i=w.filter(r,i)),this.length>1&&(O[e]||w.uniqueSort(i),H.test(e)&&i.reverse()),this.pushStack(i)}});var M=/[^\x20\t\r\n\f]+/g;function R(e){var t={};return w.each(e.match(M)||[],function(e,n){t[n]=!0}),t}w.Callbacks=function(e){e="string"==typeof e?R(e):w.extend({},e);var t,n,r,i,o=[],a=[],s=-1,u=function(){for(i=i||e.once,r=t=!0;a.length;s=-1){n=a.shift();while(++s-1)o.splice(n,1),n<=s&&s--}),this},has:function(e){return e?w.inArray(e,o)>-1:o.length>0},empty:function(){return o&&(o=[]),this},disable:function(){return i=a=[],o=n="",this},disabled:function(){return!o},lock:function(){return i=a=[],n||t||(o=n=""),this},locked:function(){return!!i},fireWith:function(e,n){return i||(n=[e,(n=n||[]).slice?n.slice():n],a.push(n),t||u()),this},fire:function(){return l.fireWith(this,arguments),this},fired:function(){return!!r}};return l};function I(e){return e}function W(e){throw e}function $(e,t,n,r){var i;try{e&&g(i=e.promise)?i.call(e).done(t).fail(n):e&&g(i=e.then)?i.call(e,t,n):t.apply(void 0,[e].slice(r))}catch(e){n.apply(void 0,[e])}}w.extend({Deferred:function(t){var n=[["notify","progress",w.Callbacks("memory"),w.Callbacks("memory"),2],["resolve","done",w.Callbacks("once memory"),w.Callbacks("once memory"),0,"resolved"],["reject","fail",w.Callbacks("once memory"),w.Callbacks("once memory"),1,"rejected"]],r="pending",i={state:function(){return r},always:function(){return o.done(arguments).fail(arguments),this},"catch":function(e){return i.then(null,e)},pipe:function(){var e=arguments;return w.Deferred(function(t){w.each(n,function(n,r){var i=g(e[r[4]])&&e[r[4]];o[r[1]](function(){var e=i&&i.apply(this,arguments);e&&g(e.promise)?e.promise().progress(t.notify).done(t.resolve).fail(t.reject):t[r[0]+"With"](this,i?[e]:arguments)})}),e=null}).promise()},then:function(t,r,i){var o=0;function a(t,n,r,i){return function(){var s=this,u=arguments,l=function(){var e,l;if(!(t=o&&(r!==W&&(s=void 0,u=[e]),n.rejectWith(s,u))}};t?c():(w.Deferred.getStackHook&&(c.stackTrace=w.Deferred.getStackHook()),e.setTimeout(c))}}return w.Deferred(function(e){n[0][3].add(a(0,e,g(i)?i:I,e.notifyWith)),n[1][3].add(a(0,e,g(t)?t:I)),n[2][3].add(a(0,e,g(r)?r:W))}).promise()},promise:function(e){return null!=e?w.extend(e,i):i}},o={};return w.each(n,function(e,t){var 
a=t[2],s=t[5];i[t[1]]=a.add,s&&a.add(function(){r=s},n[3-e][2].disable,n[3-e][3].disable,n[0][2].lock,n[0][3].lock),a.add(t[3].fire),o[t[0]]=function(){return o[t[0]+"With"](this===o?void 0:this,arguments),this},o[t[0]+"With"]=a.fireWith}),i.promise(o),t&&t.call(o,o),o},when:function(e){var t=arguments.length,n=t,r=Array(n),i=o.call(arguments),a=w.Deferred(),s=function(e){return function(n){r[e]=this,i[e]=arguments.length>1?o.call(arguments):n,--t||a.resolveWith(r,i)}};if(t<=1&&($(e,a.done(s(n)).resolve,a.reject,!t),"pending"===a.state()||g(i[n]&&i[n].then)))return a.then();while(n--)$(i[n],s(n),a.reject);return a.promise()}});var B=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;w.Deferred.exceptionHook=function(t,n){e.console&&e.console.warn&&t&&B.test(t.name)&&e.console.warn("jQuery.Deferred exception: "+t.message,t.stack,n)},w.readyException=function(t){e.setTimeout(function(){throw t})};var F=w.Deferred();w.fn.ready=function(e){return F.then(e)["catch"](function(e){w.readyException(e)}),this},w.extend({isReady:!1,readyWait:1,ready:function(e){(!0===e?--w.readyWait:w.isReady)||(w.isReady=!0,!0!==e&&--w.readyWait>0||F.resolveWith(r,[w]))}}),w.ready.then=F.then;function _(){r.removeEventListener("DOMContentLoaded",_),e.removeEventListener("load",_),w.ready()}"complete"===r.readyState||"loading"!==r.readyState&&!r.documentElement.doScroll?e.setTimeout(w.ready):(r.addEventListener("DOMContentLoaded",_),e.addEventListener("load",_));var z=function(e,t,n,r,i,o,a){var s=0,u=e.length,l=null==n;if("object"===x(n)){i=!0;for(s in n)z(e,t,s,n[s],!0,o,a)}else if(void 0!==r&&(i=!0,g(r)||(a=!0),l&&(a?(t.call(e,r),t=null):(l=t,t=function(e,t,n){return l.call(w(e),n)})),t))for(;s1,null,!0)},removeData:function(e){return this.each(function(){K.remove(this,e)})}}),w.extend({queue:function(e,t,n){var r;if(e)return t=(t||"fx")+"queue",r=J.get(e,t),n&&(!r||Array.isArray(n)?r=J.access(e,t,w.makeArray(n)):r.push(n)),r||[]},dequeue:function(e,t){t=t||"fx";var n=w.queue(e,t),r=n.length,i=n.shift(),o=w._queueHooks(e,t),a=function(){w.dequeue(e,t)};"inprogress"===i&&(i=n.shift(),r--),i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,a,o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return J.get(e,n)||J.access(e,n,{empty:w.Callbacks("once memory").add(function(){J.remove(e,[t+"queue",n])})})}}),w.fn.extend({queue:function(e,t){var n=2;return"string"!=typeof e&&(t=e,e="fx",n--),arguments.length\x20\t\r\n\f]+)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,""],thead:[1,"
    ","
    "],col:[2,"","
    "],tr:[2,"","
    "],td:[3,"","
    "],_default:[0,"",""]};ge.optgroup=ge.option,ge.tbody=ge.tfoot=ge.colgroup=ge.caption=ge.thead,ge.th=ge.td;function ye(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&N(e,t)?w.merge([e],n):n}function ve(e,t){for(var n=0,r=e.length;n-1)i&&i.push(o);else if(l=w.contains(o.ownerDocument,o),a=ye(f.appendChild(o),"script"),l&&ve(a),n){c=0;while(o=a[c++])he.test(o.type||"")&&n.push(o)}return f}!function(){var e=r.createDocumentFragment().appendChild(r.createElement("div")),t=r.createElement("input");t.setAttribute("type","radio"),t.setAttribute("checked","checked"),t.setAttribute("name","t"),e.appendChild(t),h.checkClone=e.cloneNode(!0).cloneNode(!0).lastChild.checked,e.innerHTML="",h.noCloneChecked=!!e.cloneNode(!0).lastChild.defaultValue}();var be=r.documentElement,we=/^key/,Te=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ce=/^([^.]*)(?:\.(.+)|)/;function Ee(){return!0}function ke(){return!1}function Se(){try{return r.activeElement}catch(e){}}function De(e,t,n,r,i,o){var a,s;if("object"==typeof t){"string"!=typeof n&&(r=r||n,n=void 0);for(s in t)De(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=ke;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return w().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=w.guid++)),e.each(function(){w.event.add(this,t,i,r,n)})}w.event={global:{},add:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,y=J.get(e);if(y){n.handler&&(n=(o=n).handler,i=o.selector),i&&w.find.matchesSelector(be,i),n.guid||(n.guid=w.guid++),(u=y.events)||(u=y.events={}),(a=y.handle)||(a=y.handle=function(t){return"undefined"!=typeof w&&w.event.triggered!==t.type?w.event.dispatch.apply(e,arguments):void 0}),l=(t=(t||"").match(M)||[""]).length;while(l--)d=g=(s=Ce.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=w.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=w.event.special[d]||{},c=w.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&w.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(e,r,h,a)||e.addEventListener&&e.addEventListener(d,a)),f.add&&(f.add.call(e,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),w.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,y=J.hasData(e)&&J.get(e);if(y&&(u=y.events)){l=(t=(t||"").match(M)||[""]).length;while(l--)if(s=Ce.exec(t[l])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){f=w.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,y.handle)||w.removeEvent(e,d,y.handle),delete u[d])}else for(d in u)w.event.remove(e,d+t[l],n,r,!0);w.isEmptyObject(u)&&J.remove(e,"handle events")}},dispatch:function(e){var t=w.event.fix(e),n,r,i,o,a,s,u=new 
Array(arguments.length),l=(J.get(this,"events")||{})[t.type]||[],c=w.event.special[t.type]||{};for(u[0]=t,n=1;n=1))for(;l!==this;l=l.parentNode||this)if(1===l.nodeType&&("click"!==e.type||!0!==l.disabled)){for(o=[],a={},n=0;n-1:w.find(i,this,null,[l]).length),a[i]&&o.push(r);o.length&&s.push({elem:l,handlers:o})}return l=this,u\x20\t\r\n\f]*)[^>]*)\/>/gi,Ae=/\s*$/g;function Le(e,t){return N(e,"table")&&N(11!==t.nodeType?t:t.firstChild,"tr")?w(e).children("tbody")[0]||e:e}function He(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Oe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Pe(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(J.hasData(e)&&(o=J.access(e),a=J.set(t,o),l=o.events)){delete a.handle,a.events={};for(i in l)for(n=0,r=l[i].length;n1&&"string"==typeof y&&!h.checkClone&&je.test(y))return e.each(function(i){var o=e.eq(i);v&&(t[0]=y.call(this,i,o.html())),Re(o,t,n,r)});if(p&&(i=xe(t,e[0].ownerDocument,!1,e,r),o=i.firstChild,1===i.childNodes.length&&(i=o),o||r)){for(u=(s=w.map(ye(i,"script"),He)).length;f")},clone:function(e,t,n){var r,i,o,a,s=e.cloneNode(!0),u=w.contains(e.ownerDocument,e);if(!(h.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||w.isXMLDoc(e)))for(a=ye(s),r=0,i=(o=ye(e)).length;r0&&ve(a,!u&&ye(e,"script")),s},cleanData:function(e){for(var t,n,r,i=w.event.special,o=0;void 0!==(n=e[o]);o++)if(Y(n)){if(t=n[J.expando]){if(t.events)for(r in t.events)i[r]?w.event.remove(n,r):w.removeEvent(n,r,t.handle);n[J.expando]=void 0}n[K.expando]&&(n[K.expando]=void 0)}}}),w.fn.extend({detach:function(e){return Ie(this,e,!0)},remove:function(e){return Ie(this,e)},text:function(e){return z(this,function(e){return void 0===e?w.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=e)})},null,e,arguments.length)},append:function(){return Re(this,arguments,function(e){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||Le(this,e).appendChild(e)})},prepend:function(){return Re(this,arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Le(this,e);t.insertBefore(e,t.firstChild)}})},before:function(){return Re(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return Re(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},empty:function(){for(var e,t=0;null!=(e=this[t]);t++)1===e.nodeType&&(w.cleanData(ye(e,!1)),e.textContent="");return this},clone:function(e,t){return e=null!=e&&e,t=null==t?e:t,this.map(function(){return w.clone(this,e,t)})},html:function(e){return z(this,function(e){var t=this[0]||{},n=0,r=this.length;if(void 0===e&&1===t.nodeType)return t.innerHTML;if("string"==typeof e&&!Ae.test(e)&&!ge[(de.exec(e)||["",""])[1].toLowerCase()]){e=w.htmlPrefilter(e);try{for(;n=0&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))),u}function et(e,t,n){var r=$e(e),i=Fe(e,t,r),o="border-box"===w.css(e,"boxSizing",!1,r),a=o;if(We.test(i)){if(!n)return i;i="auto"}return a=a&&(h.boxSizingReliable()||i===e.style[t]),("auto"===i||!parseFloat(i)&&"inline"===w.css(e,"display",!1,r))&&(i=e["offset"+t[0].toUpperCase()+t.slice(1)],a=!0),(i=parseFloat(i)||0)+Ze(e,t,n||(o?"border":"content"),a,r,i)+"px"}w.extend({cssHooks:{opacity:{get:function(e,t){if(t){var 
n=Fe(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=G(t),u=Xe.test(t),l=e.style;if(u||(t=Je(s)),a=w.cssHooks[t]||w.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"==(o=typeof n)&&(i=ie.exec(n))&&i[1]&&(n=ue(e,t,i),o="number"),null!=n&&n===n&&("number"===o&&(n+=i&&i[3]||(w.cssNumber[s]?"":"px")),h.clearCloneStyle||""!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=G(t);return Xe.test(t)||(t=Je(s)),(a=w.cssHooks[t]||w.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=Fe(e,t,r)),"normal"===i&&t in Ve&&(i=Ve[t]),""===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),w.each(["height","width"],function(e,t){w.cssHooks[t]={get:function(e,n,r){if(n)return!ze.test(w.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?et(e,t,r):se(e,Ue,function(){return et(e,t,r)})},set:function(e,n,r){var i,o=$e(e),a="border-box"===w.css(e,"boxSizing",!1,o),s=r&&Ze(e,t,r,a,o);return a&&h.scrollboxSize()===o.position&&(s-=Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-parseFloat(o[t])-Ze(e,t,"border",!1,o)-.5)),s&&(i=ie.exec(n))&&"px"!==(i[3]||"px")&&(e.style[t]=n,n=w.css(e,t)),Ke(e,n,s)}}}),w.cssHooks.marginLeft=_e(h.reliableMarginLeft,function(e,t){if(t)return(parseFloat(Fe(e,"marginLeft"))||e.getBoundingClientRect().left-se(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),w.each({margin:"",padding:"",border:"Width"},function(e,t){w.cssHooks[e+t]={expand:function(n){for(var r=0,i={},o="string"==typeof n?n.split(" "):[n];r<4;r++)i[e+oe[r]+t]=o[r]||o[r-2]||o[0];return i}},"margin"!==e&&(w.cssHooks[e+t].set=Ke)}),w.fn.extend({css:function(e,t){return z(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=$e(e),i=t.length;a1)}});function tt(e,t,n,r,i){return new tt.prototype.init(e,t,n,r,i)}w.Tween=tt,tt.prototype={constructor:tt,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||w.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(w.cssNumber[n]?"":"px")},cur:function(){var e=tt.propHooks[this.prop];return e&&e.get?e.get(this):tt.propHooks._default.get(this)},run:function(e){var t,n=tt.propHooks[this.prop];return this.options.duration?this.pos=t=w.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):tt.propHooks._default.set(this),this}},tt.prototype.init.prototype=tt.prototype,tt.propHooks={_default:{get:function(e){var t;return 1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=w.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){w.fx.step[e.prop]?w.fx.step[e.prop](e):1!==e.elem.nodeType||null==e.elem.style[w.cssProps[e.prop]]&&!w.cssHooks[e.prop]?e.elem[e.prop]=e.now:w.style(e.elem,e.prop,e.now+e.unit)}}},tt.propHooks.scrollTop=tt.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},w.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},w.fx=tt.prototype.init,w.fx.step={};var 
nt,rt,it=/^(?:toggle|show|hide)$/,ot=/queueHooks$/;function at(){rt&&(!1===r.hidden&&e.requestAnimationFrame?e.requestAnimationFrame(at):e.setTimeout(at,w.fx.interval),w.fx.tick())}function st(){return e.setTimeout(function(){nt=void 0}),nt=Date.now()}function ut(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=oe[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function lt(e,t,n){for(var r,i=(pt.tweeners[t]||[]).concat(pt.tweeners["*"]),o=0,a=i.length;o1)},removeAttr:function(e){return this.each(function(){w.removeAttr(this,e)})}}),w.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return"undefined"==typeof e.getAttribute?w.prop(e,t,n):(1===o&&w.isXMLDoc(e)||(i=w.attrHooks[t.toLowerCase()]||(w.expr.match.bool.test(t)?dt:void 0)),void 0!==n?null===n?void w.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+""),n):i&&"get"in i&&null!==(r=i.get(e,t))?r:null==(r=w.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!h.radioValue&&"radio"===t&&N(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(M);if(i&&1===e.nodeType)while(n=i[r++])e.removeAttribute(n)}}),dt={set:function(e,t,n){return!1===t?w.removeAttr(e,n):e.setAttribute(n,n),n}},w.each(w.expr.match.bool.source.match(/\w+/g),function(e,t){var n=ht[t]||w.find.attr;ht[t]=function(e,t,r){var i,o,a=t.toLowerCase();return r||(o=ht[a],ht[a]=i,i=null!=n(e,t,r)?a:null,ht[a]=o),i}});var gt=/^(?:input|select|textarea|button)$/i,yt=/^(?:a|area)$/i;w.fn.extend({prop:function(e,t){return z(this,w.prop,e,t,arguments.length>1)},removeProp:function(e){return this.each(function(){delete this[w.propFix[e]||e]})}}),w.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&w.isXMLDoc(e)||(t=w.propFix[t]||t,i=w.propHooks[t]),void 0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=w.find.attr(e,"tabindex");return t?parseInt(t,10):gt.test(e.nodeName)||yt.test(e.nodeName)&&e.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),h.optSelected||(w.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),w.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){w.propFix[this.toLowerCase()]=this});function vt(e){return(e.match(M)||[]).join(" ")}function mt(e){return e.getAttribute&&e.getAttribute("class")||""}function xt(e){return Array.isArray(e)?e:"string"==typeof e?e.match(M)||[]:[]}w.fn.extend({addClass:function(e){var t,n,r,i,o,a,s,u=0;if(g(e))return this.each(function(t){w(this).addClass(e.call(this,t,mt(this)))});if((t=xt(e)).length)while(n=this[u++])if(i=mt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=t[a++])r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},removeClass:function(e){var t,n,r,i,o,a,s,u=0;if(g(e))return this.each(function(t){w(this).removeClass(e.call(this,t,mt(this)))});if(!arguments.length)return this.attr("class","");if((t=xt(e)).length)while(n=this[u++])if(i=mt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=t[a++])while(r.indexOf(" "+o+" ")>-1)r=r.replace(" "+o+" "," ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(e,t){var n=typeof 
e,r="string"===n||Array.isArray(e);return"boolean"==typeof t&&r?t?this.addClass(e):this.removeClass(e):g(e)?this.each(function(n){w(this).toggleClass(e.call(this,n,mt(this),t),t)}):this.each(function(){var t,i,o,a;if(r){i=0,o=w(this),a=xt(e);while(t=a[i++])o.hasClass(t)?o.removeClass(t):o.addClass(t)}else void 0!==e&&"boolean"!==n||((t=mt(this))&&J.set(this,"__className__",t),this.setAttribute&&this.setAttribute("class",t||!1===e?"":J.get(this,"__className__")||""))})},hasClass:function(e){var t,n,r=0;t=" "+e+" ";while(n=this[r++])if(1===n.nodeType&&(" "+vt(mt(n))+" ").indexOf(t)>-1)return!0;return!1}});var bt=/\r/g;w.fn.extend({val:function(e){var t,n,r,i=this[0];{if(arguments.length)return r=g(e),this.each(function(n){var i;1===this.nodeType&&(null==(i=r?e.call(this,n,w(this).val()):e)?i="":"number"==typeof i?i+="":Array.isArray(i)&&(i=w.map(i,function(e){return null==e?"":e+""})),(t=w.valHooks[this.type]||w.valHooks[this.nodeName.toLowerCase()])&&"set"in t&&void 0!==t.set(this,i,"value")||(this.value=i))});if(i)return(t=w.valHooks[i.type]||w.valHooks[i.nodeName.toLowerCase()])&&"get"in t&&void 0!==(n=t.get(i,"value"))?n:"string"==typeof(n=i.value)?n.replace(bt,""):null==n?"":n}}}),w.extend({valHooks:{option:{get:function(e){var t=w.find.attr(e,"value");return null!=t?t:vt(w.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r-1)&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),w.each(["radio","checkbox"],function(){w.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=w.inArray(w(e).val(),t)>-1}},h.checkOn||(w.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),h.focusin="onfocusin"in e;var wt=/^(?:focusinfocus|focusoutblur)$/,Tt=function(e){e.stopPropagation()};w.extend(w.event,{trigger:function(t,n,i,o){var a,s,u,l,c,p,d,h,v=[i||r],m=f.call(t,"type")?t.type:t,x=f.call(t,"namespace")?t.namespace.split("."):[];if(s=h=u=i=i||r,3!==i.nodeType&&8!==i.nodeType&&!wt.test(m+w.event.triggered)&&(m.indexOf(".")>-1&&(m=(x=m.split(".")).shift(),x.sort()),c=m.indexOf(":")<0&&"on"+m,t=t[w.expando]?t:new w.Event(m,"object"==typeof t&&t),t.isTrigger=o?2:3,t.namespace=x.join("."),t.rnamespace=t.namespace?new RegExp("(^|\\.)"+x.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,t.result=void 0,t.target||(t.target=i),n=null==n?[t]:w.makeArray(n,[t]),d=w.event.special[m]||{},o||!d.trigger||!1!==d.trigger.apply(i,n))){if(!o&&!d.noBubble&&!y(i)){for(l=d.delegateType||m,wt.test(l+m)||(s=s.parentNode);s;s=s.parentNode)v.push(s),u=s;u===(i.ownerDocument||r)&&v.push(u.defaultView||u.parentWindow||e)}a=0;while((s=v[a++])&&!t.isPropagationStopped())h=s,t.type=a>1?l:d.bindType||m,(p=(J.get(s,"events")||{})[t.type]&&J.get(s,"handle"))&&p.apply(s,n),(p=c&&s[c])&&p.apply&&Y(s)&&(t.result=p.apply(s,n),!1===t.result&&t.preventDefault());return t.type=m,o||t.isDefaultPrevented()||d._default&&!1!==d._default.apply(v.pop(),n)||!Y(i)||c&&g(i[m])&&!y(i)&&((u=i[c])&&(i[c]=null),w.event.triggered=m,t.isPropagationStopped()&&h.addEventListener(m,Tt),i[m](),t.isPropagationStopped()&&h.removeEventListener(m,Tt),w.event.triggered=void 0,u&&(i[c]=u)),t.result}},simulate:function(e,t,n){var r=w.extend(new w.Event,n,{type:e,isSimulated:!0});w.event.trigger(r,null,t)}}),w.fn.extend({trigger:function(e,t){return this.each(function(){w.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return 
w.event.trigger(e,t,n,!0)}}),h.focusin||w.each({focus:"focusin",blur:"focusout"},function(e,t){var n=function(e){w.event.simulate(t,e.target,w.event.fix(e))};w.event.special[t]={setup:function(){var r=this.ownerDocument||this,i=J.access(r,t);i||r.addEventListener(e,n,!0),J.access(r,t,(i||0)+1)},teardown:function(){var r=this.ownerDocument||this,i=J.access(r,t)-1;i?J.access(r,t,i):(r.removeEventListener(e,n,!0),J.remove(r,t))}}});var Ct=e.location,Et=Date.now(),kt=/\?/;w.parseXML=function(t){var n;if(!t||"string"!=typeof t)return null;try{n=(new e.DOMParser).parseFromString(t,"text/xml")}catch(e){n=void 0}return n&&!n.getElementsByTagName("parsererror").length||w.error("Invalid XML: "+t),n};var St=/\[\]$/,Dt=/\r?\n/g,Nt=/^(?:submit|button|image|reset|file)$/i,At=/^(?:input|select|textarea|keygen)/i;function jt(e,t,n,r){var i;if(Array.isArray(t))w.each(t,function(t,i){n||St.test(e)?r(e,i):jt(e+"["+("object"==typeof i&&null!=i?t:"")+"]",i,n,r)});else if(n||"object"!==x(t))r(e,t);else for(i in t)jt(e+"["+i+"]",t[i],n,r)}w.param=function(e,t){var n,r=[],i=function(e,t){var n=g(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(Array.isArray(e)||e.jquery&&!w.isPlainObject(e))w.each(e,function(){i(this.name,this.value)});else for(n in e)jt(n,e[n],t,i);return r.join("&")},w.fn.extend({serialize:function(){return w.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=w.prop(this,"elements");return e?w.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!w(this).is(":disabled")&&At.test(this.nodeName)&&!Nt.test(e)&&(this.checked||!pe.test(e))}).map(function(e,t){var n=w(this).val();return null==n?null:Array.isArray(n)?w.map(n,function(e){return{name:t.name,value:e.replace(Dt,"\r\n")}}):{name:t.name,value:n.replace(Dt,"\r\n")}}).get()}});var qt=/%20/g,Lt=/#.*$/,Ht=/([?&])_=[^&]*/,Ot=/^(.*?):[ \t]*([^\r\n]*)$/gm,Pt=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Mt=/^(?:GET|HEAD)$/,Rt=/^\/\//,It={},Wt={},$t="*/".concat("*"),Bt=r.createElement("a");Bt.href=Ct.href;function Ft(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var r,i=0,o=t.toLowerCase().match(M)||[];if(g(n))while(r=o[i++])"+"===r[0]?(r=r.slice(1)||"*",(e[r]=e[r]||[]).unshift(n)):(e[r]=e[r]||[]).push(n)}}function _t(e,t,n,r){var i={},o=e===Wt;function a(s){var u;return i[s]=!0,w.each(e[s]||[],function(e,s){var l=s(t,n,r);return"string"!=typeof l||o||i[l]?o?!(u=l):void 0:(t.dataTypes.unshift(l),a(l),!1)}),u}return a(t.dataTypes[0])||!i["*"]&&a("*")}function zt(e,t){var n,r,i=w.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r||(r={}))[n]=t[n]);return r&&w.extend(!0,e,r),e}function Xt(e,t,n){var r,i,o,a,s=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in n){if(!u[0]||e.converters[i+" "+u[0]]){o=i;break}a||(a=i)}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}function Ut(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e["throws"])t=a(t);else 
try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}w.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Ct.href,type:"GET",isLocal:Pt.test(Ct.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":$t,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":w.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?zt(zt(e,w.ajaxSettings),t):zt(w.ajaxSettings,e)},ajaxPrefilter:Ft(It),ajaxTransport:Ft(Wt),ajax:function(t,n){"object"==typeof t&&(n=t,t=void 0),n=n||{};var i,o,a,s,u,l,c,f,p,d,h=w.ajaxSetup({},n),g=h.context||h,y=h.context&&(g.nodeType||g.jquery)?w(g):w.event,v=w.Deferred(),m=w.Callbacks("once memory"),x=h.statusCode||{},b={},T={},C="canceled",E={readyState:0,getResponseHeader:function(e){var t;if(c){if(!s){s={};while(t=Ot.exec(a))s[t[1].toLowerCase()]=t[2]}t=s[e.toLowerCase()]}return null==t?null:t},getAllResponseHeaders:function(){return c?a:null},setRequestHeader:function(e,t){return null==c&&(e=T[e.toLowerCase()]=T[e.toLowerCase()]||e,b[e]=t),this},overrideMimeType:function(e){return null==c&&(h.mimeType=e),this},statusCode:function(e){var t;if(e)if(c)E.always(e[E.status]);else for(t in e)x[t]=[x[t],e[t]];return this},abort:function(e){var t=e||C;return i&&i.abort(t),k(0,t),this}};if(v.promise(E),h.url=((t||h.url||Ct.href)+"").replace(Rt,Ct.protocol+"//"),h.type=n.method||n.type||h.method||h.type,h.dataTypes=(h.dataType||"*").toLowerCase().match(M)||[""],null==h.crossDomain){l=r.createElement("a");try{l.href=h.url,l.href=l.href,h.crossDomain=Bt.protocol+"//"+Bt.host!=l.protocol+"//"+l.host}catch(e){h.crossDomain=!0}}if(h.data&&h.processData&&"string"!=typeof h.data&&(h.data=w.param(h.data,h.traditional)),_t(It,h,n,E),c)return E;(f=w.event&&h.global)&&0==w.active++&&w.event.trigger("ajaxStart"),h.type=h.type.toUpperCase(),h.hasContent=!Mt.test(h.type),o=h.url.replace(Lt,""),h.hasContent?h.data&&h.processData&&0===(h.contentType||"").indexOf("application/x-www-form-urlencoded")&&(h.data=h.data.replace(qt,"+")):(d=h.url.slice(o.length),h.data&&(h.processData||"string"==typeof h.data)&&(o+=(kt.test(o)?"&":"?")+h.data,delete h.data),!1===h.cache&&(o=o.replace(Ht,"$1"),d=(kt.test(o)?"&":"?")+"_="+Et+++d),h.url=o+d),h.ifModified&&(w.lastModified[o]&&E.setRequestHeader("If-Modified-Since",w.lastModified[o]),w.etag[o]&&E.setRequestHeader("If-None-Match",w.etag[o])),(h.data&&h.hasContent&&!1!==h.contentType||n.contentType)&&E.setRequestHeader("Content-Type",h.contentType),E.setRequestHeader("Accept",h.dataTypes[0]&&h.accepts[h.dataTypes[0]]?h.accepts[h.dataTypes[0]]+("*"!==h.dataTypes[0]?", "+$t+"; q=0.01":""):h.accepts["*"]);for(p in h.headers)E.setRequestHeader(p,h.headers[p]);if(h.beforeSend&&(!1===h.beforeSend.call(g,E,h)||c))return E.abort();if(C="abort",m.add(h.complete),E.done(h.success),E.fail(h.error),i=_t(Wt,h,n,E)){if(E.readyState=1,f&&y.trigger("ajaxSend",[E,h]),c)return E;h.async&&h.timeout>0&&(u=e.setTimeout(function(){E.abort("timeout")},h.timeout));try{c=!1,i.send(b,k)}catch(e){if(c)throw e;k(-1,e)}}else k(-1,"No Transport");function k(t,n,r,s){var l,p,d,b,T,C=n;c||(c=!0,u&&e.clearTimeout(u),i=void 
0,a=s||"",E.readyState=t>0?4:0,l=t>=200&&t<300||304===t,r&&(b=Xt(h,E,r)),b=Ut(h,b,E,l),l?(h.ifModified&&((T=E.getResponseHeader("Last-Modified"))&&(w.lastModified[o]=T),(T=E.getResponseHeader("etag"))&&(w.etag[o]=T)),204===t||"HEAD"===h.type?C="nocontent":304===t?C="notmodified":(C=b.state,p=b.data,l=!(d=b.error))):(d=C,!t&&C||(C="error",t<0&&(t=0))),E.status=t,E.statusText=(n||C)+"",l?v.resolveWith(g,[p,C,E]):v.rejectWith(g,[E,C,d]),E.statusCode(x),x=void 0,f&&y.trigger(l?"ajaxSuccess":"ajaxError",[E,h,l?p:d]),m.fireWith(g,[E,C]),f&&(y.trigger("ajaxComplete",[E,h]),--w.active||w.event.trigger("ajaxStop")))}return E},getJSON:function(e,t,n){return w.get(e,t,n,"json")},getScript:function(e,t){return w.get(e,void 0,t,"script")}}),w.each(["get","post"],function(e,t){w[t]=function(e,n,r,i){return g(n)&&(i=i||r,r=n,n=void 0),w.ajax(w.extend({url:e,type:t,dataType:i,data:n,success:r},w.isPlainObject(e)&&e))}}),w._evalUrl=function(e){return w.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,"throws":!0})},w.fn.extend({wrapAll:function(e){var t;return this[0]&&(g(e)&&(e=e.call(this[0])),t=w(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstElementChild)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(e){return g(e)?this.each(function(t){w(this).wrapInner(e.call(this,t))}):this.each(function(){var t=w(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)})},wrap:function(e){var t=g(e);return this.each(function(n){w(this).wrapAll(t?e.call(this,n):e)})},unwrap:function(e){return this.parent(e).not("body").each(function(){w(this).replaceWith(this.childNodes)}),this}}),w.expr.pseudos.hidden=function(e){return!w.expr.pseudos.visible(e)},w.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},w.ajaxSettings.xhr=function(){try{return new e.XMLHttpRequest}catch(e){}};var Vt={0:200,1223:204},Gt=w.ajaxSettings.xhr();h.cors=!!Gt&&"withCredentials"in Gt,h.ajax=Gt=!!Gt,w.ajaxTransport(function(t){var n,r;if(h.cors||Gt&&!t.crossDomain)return{send:function(i,o){var a,s=t.xhr();if(s.open(t.type,t.url,t.async,t.username,t.password),t.xhrFields)for(a in t.xhrFields)s[a]=t.xhrFields[a];t.mimeType&&s.overrideMimeType&&s.overrideMimeType(t.mimeType),t.crossDomain||i["X-Requested-With"]||(i["X-Requested-With"]="XMLHttpRequest");for(a in i)s.setRequestHeader(a,i[a]);n=function(e){return function(){n&&(n=r=s.onload=s.onerror=s.onabort=s.ontimeout=s.onreadystatechange=null,"abort"===e?s.abort():"error"===e?"number"!=typeof s.status?o(0,"error"):o(s.status,s.statusText):o(Vt[s.status]||s.status,s.statusText,"text"!==(s.responseType||"text")||"string"!=typeof s.responseText?{binary:s.response}:{text:s.responseText},s.getAllResponseHeaders()))}},s.onload=n(),r=s.onerror=s.ontimeout=n("error"),void 0!==s.onabort?s.onabort=r:s.onreadystatechange=function(){4===s.readyState&&e.setTimeout(function(){n&&r()})},n=n("abort");try{s.send(t.hasContent&&t.data||null)}catch(e){if(n)throw e}},abort:function(){n&&n()}}}),w.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),w.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return w.globalEval(e),e}}}),w.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),w.ajaxTransport("script",function(e){if(e.crossDomain){var 
t,n;return{send:function(i,o){t=w("
+        <script>
+        {}
+        </script>
+        """.format(js_utils)
+    else:
+        with open(os.path.join(js_dir, 'jquery.min.js')) as f:
+            jquery = f.read()
+        with open(os.path.join(js_dir, 'plotly-gl3d-latest.min.js')) as f:
+            plotly = f.read()
+        js_lib = """
+        <script>{}</script>
+        <script>{}</script>
+        <script>
+        {}
+        </script>
+        """.format(jquery, plotly, js_utils)
+    return html.replace('INSERT_JS_LIBRARIES_HERE', js_lib)
+
+
+def get_html_template(template_name):
+    """Get an HTML file from package data"""
+    template_path = os.path.join(
+        os.path.dirname(__file__), 'data', 'html', template_name)
+    with open(template_path, 'rb') as f:
+        return f.read().decode('utf-8')
+
+
+def _remove_after_n_seconds(file_name, n_seconds):
+    script = os.path.join(os.path.dirname(__file__), 'rm_file.py')
+    subprocess.Popen(['python', script, file_name, str(n_seconds)])
+
+
+class HTMLDocument(object):
+    """
+    Embeds a plot in a web page.
+
+    If you are running a Jupyter notebook, the plot will be displayed
+    inline if this object is the output of a cell.
+    Otherwise, use open_in_browser() to open it in a web browser (or
+    save_as_html("filename.html") to save it as an html file).
+
+    Use str(document) or document.html to get the content of the web page,
+    and document.get_iframe() to have it wrapped in an iframe.
+
+    """
+    _all_open_html_repr = weakref.WeakSet()
+
+    def __init__(self, html, width=600, height=400):
+        self.html = html
+        self.width = width
+        self.height = height
+        self._temp_file = None
+        self._check_n_open()
+
+    def _check_n_open(self):
+        HTMLDocument._all_open_html_repr.add(self)
+        if len(HTMLDocument._all_open_html_repr) > 9:
+            warnings.warn('It seems you have created more than 10 '
+                          'nilearn views. As each view uses dozens '
+                          'of megabytes of RAM, you might want to '
+                          'delete some of them.')
+
+    def resize(self, width, height):
+        """Resize the plot displayed in a Jupyter notebook."""
+        self.width, self.height = width, height
+        return self
+
+    def get_iframe(self, width=None, height=None):
+        """
+        Get the document wrapped in an inline frame.
+
+        For inserting in another HTML page or for display in a Jupyter
+        notebook.
+
+        """
+        if width is None:
+            width = self.width
+        if height is None:
+            height = self.height
+        escaped = cgi.escape(self.html, quote=True)
+        wrapped = ('<iframe srcdoc="{}" width="{}" height="{}" '
+                   'frameBorder="0"></iframe>').format(
+            escaped, width, height)
+        return wrapped
+
+    def get_standalone(self):
+        """ Get the plot in an HTML page."""
+        return self.html
+
+    def _repr_html_(self):
+        """
+        Used by the Jupyter notebook.
+
+        Users normally won't call this method explicitly.
+        """
+        return self.get_iframe()
+
+    def __str__(self):
+        return self.html
+
+    def save_as_html(self, file_name):
+        """
+        Save the plot in an HTML file, that can later be opened in a browser.
+        """
+        with open(file_name, 'wb') as f:
+            f.write(self.html.encode('utf-8'))
+
+    def open_in_browser(self, file_name=None, temp_file_lifetime=30):
+        """
+        Save the plot to a temporary HTML file and open it in a browser.
+
+        Parameters
+        ----------
+
+        file_name : str, optional
+            .html file to use as temporary file
+
+        temp_file_lifetime : float, optional (default=30.)
+            Time, in seconds, after which the temporary file is removed.
+            If None, it is never removed.
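+
+        Examples
+        --------
+        A minimal sketch (hypothetical document built from an HTML string;
+        the short lifetime is just for illustration):
+
+        >>> view = HTMLDocument('<p>hello</p>')
+        >>> view.open_in_browser(temp_file_lifetime=10)  # doctest: +SKIP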
+ + """ + if file_name is None: + fd, file_name = tempfile.mkstemp('.html', 'nilearn_surface_plot_') + os.close(fd) + self.save_as_html(file_name) + self._temp_file = file_name + file_size = os.path.getsize(file_name) / 1e6 + if temp_file_lifetime is None: + print(("Saved HTML in temporary file: {}\n" + "file size is {:.1f}M, delete it when you're done, " + "for example by calling this.remove_temp_file").format( + file_name, file_size)) + else: + _remove_after_n_seconds(self._temp_file, temp_file_lifetime) + webbrowser.open('file://{}'.format(file_name)) + + def remove_temp_file(self): + """ + Remove the temporary file created by `open_in_browser`, if necessary. + """ + if self._temp_file is None: + return + if not os.path.isfile(self._temp_file): + return + os.remove(self._temp_file) + print('removed {}'.format(self._temp_file)) + self._temp_file = None + + +def colorscale(cmap, values, threshold=None, symmetric_cmap=True, vmax=None): + """Normalize a cmap, put it in plotly format, get threshold and range""" + cmap = mpl_cm.get_cmap(cmap) + abs_values = np.abs(values) + if not symmetric_cmap and (values.min() < 0): + warnings.warn('you have specified symmetric_cmap=False' + 'but the map contains negative values; ' + 'setting symmetric_cmap to True') + symmetric_cmap = True + if vmax is None: + if symmetric_cmap: + vmax = abs_values.max() + vmin = - vmax + else: + vmin, vmax = values.min(), values.max() + else: + vmin = -vmax if symmetric_cmap else 0 + norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) + cmaplist = [cmap(i) for i in range(cmap.N)] + abs_threshold = None + if threshold is not None: + abs_threshold = check_threshold(threshold, values, fast_abs_percentile) + istart = int(norm(-abs_threshold, clip=True) * (cmap.N - 1)) + istop = int(norm(abs_threshold, clip=True) * (cmap.N - 1)) + for i in range(istart, istop): + cmaplist[i] = (0.5, 0.5, 0.5, 1.) # just an average gray color + our_cmap = mpl.colors.LinearSegmentedColormap.from_list( + 'Custom cmap', cmaplist, cmap.N) + x = np.linspace(0, 1, 100) + rgb = our_cmap(x, bytes=True)[:, :3] + rgb = np.array(rgb, dtype=int) + colors = [] + for i, col in zip(x, rgb): + colors.append([np.round(i, 3), "rgb({}, {}, {})".format(*col)]) + return { + 'colors': colors, 'vmin': vmin, 'vmax': vmax, 'cmap': our_cmap, + 'norm': norm, 'abs_threshold': abs_threshold, + 'symmetric_cmap': symmetric_cmap + } + + +def encode(a): + """Base64 encode a numpy array""" + try: + data = a.tobytes() + except AttributeError: + # np < 1.9 + data = a.tostring() + return base64.b64encode(data).decode('utf-8') + + +def decode(b, dtype): + """Decode a numpy array encoded as Base64""" + return np.frombuffer(base64.b64decode(b.encode('utf-8')), dtype) + + +def mesh_to_plotly(mesh): + mesh = surface.load_surf_mesh(mesh) + x, y, z = map(encode, np.asarray(mesh[0].T, dtype='= 1.0.0. + + .. versionadded:: 0.4.1 + + kwargs : extra keyword arguments + Extra keyword arguments are sent to pylab.imshow + + Returns + ------- + display : instance of matplotlib + Axes image. 
+ """ + if reorder: + if labels is None or labels is False: + raise ValueError("Labels are needed to show the reordering.") + try: + from scipy.cluster.hierarchy import (linkage, optimal_leaf_ordering, + leaves_list) + except ImportError: + raise ImportError("A scipy version of at least 1.0 is needed " + "for ordering the matrix with " + "optimal_leaf_ordering.") + valid_reorder_args = [True, 'single', 'complete', 'average'] + if reorder not in valid_reorder_args: + raise ValueError("Parameter reorder needs to be " + "one of {}.".format(valid_reorder_args)) + if reorder is True: + reorder = 'average' + linkage_matrix = linkage(mat, method=reorder) + ordered_linkage = optimal_leaf_ordering(linkage_matrix, mat) + index = leaves_list(ordered_linkage) + # make sure labels is an ndarray and copy it + labels = np.array(labels).copy() + mat = mat.copy() + # and reorder labels and matrix + labels = labels[index] + mat = mat[index, :][:, index] + + if tri == 'lower': + mask = np.tri(mat.shape[0], k=-1, dtype=np.bool) ^ True + mat = np.ma.masked_array(mat, mask) + elif tri == 'diag': + mask = np.tri(mat.shape[0], dtype=np.bool) ^ True + mat = np.ma.masked_array(mat, mask) + if axes is not None and figure is not None: + raise ValueError("Parameters figure and axes cannot be specified " + "together. You gave 'figure=%s, axes=%s'" + % (figure, axes)) + if figure is not None: + if isinstance(figure, plt.Figure): + fig = figure + else: + fig = plt.figure(figsize=figure) + axes = plt.gca() + own_fig = True + else: + if axes is None: + fig, axes = plt.subplots(1, 1, figsize=(7, 5)) + own_fig = True + else: + fig = axes.figure + own_fig = False + display = axes.imshow(mat, aspect='equal', interpolation='nearest', + cmap=cmap, **kwargs) + axes.set_autoscale_on(False) + ymin, ymax = axes.get_ylim() + if labels is False: + axes.xaxis.set_major_formatter(plt.NullFormatter()) + axes.yaxis.set_major_formatter(plt.NullFormatter()) + elif labels is not None: + axes.set_xticks(np.arange(len(labels))) + axes.set_xticklabels(labels, size='x-small') + for label in axes.get_xticklabels(): + label.set_ha('right') + label.set_rotation(50) + axes.set_yticks(np.arange(len(labels))) + axes.set_yticklabels(labels, size='x-small') + for label in axes.get_yticklabels(): + label.set_ha('right') + label.set_va('top') + label.set_rotation(10) + + if grid is not False: + size = len(mat) + # Different grids for different layouts + if tri == 'lower': + for i in range(size): + # Correct for weird mis-sizing + i = 1.001 * i + axes.plot([i + 0.5, i + 0.5], [size - 0.5, i + 0.5], + color='grey') + axes.plot([i + 0.5, -0.5], [i + 0.5, i + 0.5], + color='grey') + elif tri == 'diag': + for i in range(size): + # Correct for weird mis-sizing + i = 1.001 * i + axes.plot([i + 0.5, i + 0.5], [size - 0.5, i - 0.5], + color='grey') + axes.plot([i + 0.5, -0.5], [i - 0.5, i - 0.5], color='grey') + else: + for i in range(size): + # Correct for weird mis-sizing + i = 1.001 * i + axes.plot([i + 0.5, i + 0.5], [size - 0.5, -0.5], color='grey') + axes.plot([size - 0.5, -0.5], [i + 0.5, i + 0.5], color='grey') + + axes.set_ylim(ymin, ymax) + + if auto_fit: + if labels is not None and labels is not False: + fit_axes(axes) + elif own_fig: + plt.tight_layout(pad=.1, + rect=((0, 0, .95, 1) if colorbar + else (0, 0, 1, 1))) + + if colorbar: + cax, kw = make_axes(axes, location='right', fraction=0.05, shrink=0.8, + pad=.0) + fig.colorbar(mappable=display, cax=cax) + # make some room + fig.subplots_adjust(right=0.8) + # change current axis back to matrix + 
plt.sca(axes) + + if title is not None: + # Adjust the size + text_len = np.max([len(t) for t in title.split('\n')]) + size = axes.bbox.size[0] / text_len + axes.text(0.95, 0.95, title, + horizontalalignment='right', + verticalalignment='top', + transform=axes.transAxes, + size=size) + + return display diff --git a/nilearn/plotting/rm_file.py b/nilearn/plotting/rm_file.py new file mode 100644 index 0000000000..08d04894a2 --- /dev/null +++ b/nilearn/plotting/rm_file.py @@ -0,0 +1,23 @@ +""" +Remove a file after a certain time. This is run in a subprocess +by nilearn.plotting.html_surface.SurfaceView to remove the temporary +file it uses to open a plot in a web browser. + +""" +import os +import time +import warnings +import argparse + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('file_name', type=str) + parser.add_argument('n_seconds', type=float) + args = parser.parse_args() + + time.sleep(args.n_seconds) + if os.path.isfile(args.file_name): + try: + os.remove(args.file_name) + except Exception as e: + warnings.warn('failed to remove {}:\n{}'.format(args.file_name, e)) diff --git a/nilearn/plotting/surf_plotting.py b/nilearn/plotting/surf_plotting.py new file mode 100644 index 0000000000..befac5c6e4 --- /dev/null +++ b/nilearn/plotting/surf_plotting.py @@ -0,0 +1,541 @@ +""" +Functions for surface visualization. +Only matplotlib is required. +""" +import numpy as np + +import matplotlib.pyplot as plt + +from mpl_toolkits.mplot3d import Axes3D + +from matplotlib.colorbar import make_axes +from matplotlib.cm import ScalarMappable, get_cmap +from matplotlib.colors import Normalize, LinearSegmentedColormap + +from ..surface import load_surf_data, load_surf_mesh +from .._utils.compat import _basestring +from .img_plotting import _get_colorbar_and_data_ranges, _crop_colorbar + + +def plot_surf(surf_mesh, surf_map=None, bg_map=None, + hemi='left', view='lateral', cmap=None, colorbar=False, + avg_method='mean', threshold=None, alpha='auto', + bg_on_data=False, darkness=1, vmin=None, vmax=None, + cbar_vmin=None, cbar_vmax=None, + title=None, output_file=None, axes=None, figure=None, **kwargs): + """ Plotting of surfaces with optional background and data + + .. versionadded:: 0.3 + + Parameters + ---------- + surf_mesh: str or list of two numpy.ndarray + Surface mesh geometry, can be a file (valid formats are + .gii or Freesurfer specific files such as .orig, .pial, + .sphere, .white, .inflated) or + a list of two Numpy arrays, the first containing the x-y-z coordinates + of the mesh vertices, the second containing the indices + (into coords) of the mesh faces. + + surf_map: str or numpy.ndarray, optional. + Data to be displayed on the surface mesh. Can be a file (valid formats + are .gii, .mgz, .nii, .nii.gz, or Freesurfer specific files such as + .thickness, .curv, .sulc, .annot, .label) or + a Numpy array + + bg_map: Surface data object (to be defined), optional, + Background image to be plotted on the mesh underneath the + surf_data in greyscale, most likely a sulcal depth map for + realistic shading. + + hemi : {'left', 'right'}, default is 'left' + Hemisphere to display. + + view: {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'}, default is 'lateral' + View of the surface that is rendered. + + cmap: matplotlib colormap, str or colormap object, default is None + To use for plotting of the stat_map. Either a string + which is a name of a matplotlib colormap, or a matplotlib + colormap object. 
If None, matplotlib default will be chosen
+
+    colorbar : bool, optional, default is False
+        If True, a colorbar of surf_map is displayed.
+
+    avg_method: {'mean', 'median'}, default is 'mean'
+        How to average vertex values to derive the face value, mean results
+        in smooth boundaries, median in sharp boundaries.
+
+    threshold : a number, None, or 'auto', default is None.
+        If None is given, the image is not thresholded.
+        If a number is given, it is used to threshold the image, values
+        below the threshold (in absolute value) are plotted as transparent.
+
+    alpha: float, alpha level of the mesh (not surf_data), default 'auto'
+        If 'auto' is chosen, alpha will default to .5 when no bg_map
+        is passed and to 1 if a bg_map is passed.
+
+    bg_on_data: bool, default is False
+        If True, and a bg_map is specified, the surf_data data is multiplied
+        by the background image, so that e.g. sulcal depth is visible beneath
+        the surf_data.
+        NOTE: this non-uniformly changes the surf_data values according
+        to e.g the sulcal depth.
+
+    darkness: float, between 0 and 1, default is 1
+        Specifying the darkness of the background image.
+        1 indicates that the original values of the background are used.
+        .5 indicates the background values are reduced by half before being
+        applied.
+
+    vmin, vmax: lower / upper bound to plot surf_data values
+        If None, the values will be set to min/max of the data
+
+    title : str, optional
+        Figure title.
+
+    output_file: str, or None, optional
+        The name of an image file to export plot to. Valid extensions
+        are .png, .pdf, .svg. If output_file is not None, the plot
+        is saved to a file, and the display is closed.
+
+    axes: instance of matplotlib axes, None, optional
+        The axes instance to plot to. The projection must be '3d' (e.g.,
+        `figure, axes = plt.subplots(subplot_kw={'projection': '3d'})`,
+        where axes should be passed.).
+        If None, a new axes is created.
+
+    figure: instance of matplotlib figure, None, optional
+        The figure instance to plot to. If None, a new figure is created.
+
+    See Also
+    --------
+    nilearn.datasets.fetch_surf_fsaverage : For surface data object to be
+        used as background map for this plotting function.
+
+    nilearn.plotting.plot_surf_roi : For plotting statistical maps on brain
+        surfaces.
+
+    nilearn.plotting.plot_surf_stat_map : For plotting statistical maps on
+        brain surfaces.
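+
+    Examples
+    --------
+    A minimal sketch (the per-vertex values below are random data used
+    purely for illustration; fsaverage5 meshes have 10242 vertices per
+    hemisphere):
+
+    >>> import numpy as np
+    >>> from nilearn import datasets
+    >>> from nilearn.plotting import plot_surf
+    >>> fsaverage = datasets.fetch_surf_fsaverage()        # doctest: +SKIP
+    >>> surf_map = np.random.RandomState(42).randn(10242)  # toy data
+    >>> plot_surf(fsaverage['infl_left'], surf_map=surf_map,
+    ...           bg_map=fsaverage['sulc_left'])            # doctest: +SKIP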
+ """ + + # load mesh and derive axes limits + mesh = load_surf_mesh(surf_mesh) + coords, faces = mesh[0], mesh[1] + limits = [coords.min(), coords.max()] + + # set view + if hemi == 'right': + if view == 'lateral': + elev, azim = 0, 0 + elif view == 'medial': + elev, azim = 0, 180 + elif view == 'dorsal': + elev, azim = 90, 0 + elif view == 'ventral': + elev, azim = 270, 0 + elif view == 'anterior': + elev, azim = 0, 90 + elif view == 'posterior': + elev, azim = 0, 270 + else: + raise ValueError('view must be one of lateral, medial, ' + 'dorsal, ventral, anterior, or posterior') + elif hemi == 'left': + if view == 'medial': + elev, azim = 0, 0 + elif view == 'lateral': + elev, azim = 0, 180 + elif view == 'dorsal': + elev, azim = 90, 0 + elif view == 'ventral': + elev, azim = 270, 0 + elif view == 'anterior': + elev, azim = 0, 90 + elif view == 'posterior': + elev, azim = 0, 270 + else: + raise ValueError('view must be one of lateral, medial, ' + 'dorsal, ventral, anterior, or posterior') + else: + raise ValueError('hemi must be one of right or left') + + # set alpha if in auto mode + if alpha == 'auto': + if bg_map is None: + alpha = .5 + else: + alpha = 1 + + # if no cmap is given, set to matplotlib default + if cmap is None: + cmap = plt.cm.get_cmap(plt.rcParamsDefault['image.cmap']) + else: + # if cmap is given as string, translate to matplotlib cmap + if isinstance(cmap, _basestring): + cmap = plt.cm.get_cmap(cmap) + + # initiate figure and 3d axes + if axes is None: + if figure is None: + figure = plt.figure() + axes = Axes3D(figure, rect=[0, 0, 1, 1], + xlim=limits, ylim=limits) + else: + if figure is None: + figure = axes.get_figure() + axes.set_xlim(*limits) + axes.set_ylim(*limits) + axes.set_aspect(.74) + axes.view_init(elev=elev, azim=azim) + axes.set_axis_off() + + # plot mesh without data + p3dcollec = axes.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], + triangles=faces, linewidth=0., + antialiased=False, + color='white') + + # reduce viewing distance to remove space around mesh + axes.dist = 8 + + # If depth_map and/or surf_map are provided, map these onto the surface + # set_facecolors function of Poly3DCollection is used as passing the + # facecolors argument to plot_trisurf does not seem to work + if bg_map is not None or surf_map is not None: + + face_colors = np.ones((faces.shape[0], 4)) + # face_colors[:, :3] = .5*face_colors[:, :3] # why this? + + if bg_map is not None: + bg_data = load_surf_data(bg_map) + if bg_data.shape[0] != coords.shape[0]: + raise ValueError('The bg_map does not have the same number ' + 'of vertices as the mesh.') + bg_faces = np.mean(bg_data[faces], axis=1) + bg_faces = bg_faces - bg_faces.min() + bg_faces = bg_faces / bg_faces.max() + # control background darkness + bg_faces *= darkness + face_colors = plt.cm.gray_r(bg_faces) + + # modify alpha values of background + face_colors[:, 3] = alpha * face_colors[:, 3] + # should it be possible to modify alpha of surf data as well? 
+
+        if surf_map is not None:
+            surf_map_data = load_surf_data(surf_map)
+            if len(surf_map_data.shape) != 1:
+                raise ValueError('surf_map can only have one dimension '
+                                 'but has %i dimensions'
+                                 % len(surf_map_data.shape))
+            if surf_map_data.shape[0] != coords.shape[0]:
+                raise ValueError('The surf_map does not have the same number '
+                                 'of vertices as the mesh.')
+
+            # create face values from vertex values by selected avg methods
+            if avg_method == 'mean':
+                surf_map_faces = np.mean(surf_map_data[faces], axis=1)
+            elif avg_method == 'median':
+                surf_map_faces = np.median(surf_map_data[faces], axis=1)
+
+            # if no vmin/vmax are passed figure them out from data
+            if vmin is None:
+                vmin = np.nanmin(surf_map_faces)
+            if vmax is None:
+                vmax = np.nanmax(surf_map_faces)
+
+            # threshold if indicated
+            if threshold is None:
+                kept_indices = np.where(surf_map_faces)[0]
+            else:
+                kept_indices = np.where(np.abs(surf_map_faces) >= threshold)[0]
+
+            surf_map_faces = surf_map_faces - vmin
+            surf_map_faces = surf_map_faces / (vmax - vmin)
+
+            # multiply data with background if indicated
+            if bg_on_data:
+                face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])\
+                    * face_colors[kept_indices]
+            else:
+                face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])
+
+            if colorbar:
+                our_cmap = get_cmap(cmap)
+                norm = Normalize(vmin=vmin, vmax=vmax)
+
+                nb_ticks = 5
+                ticks = np.linspace(vmin, vmax, nb_ticks)
+                bounds = np.linspace(vmin, vmax, our_cmap.N)
+
+                if threshold is not None:
+                    cmaplist = [our_cmap(i) for i in range(our_cmap.N)]
+                    # set colors to grey for absolute values < threshold
+                    istart = int(norm(-threshold, clip=True) *
+                                 (our_cmap.N - 1))
+                    istop = int(norm(threshold, clip=True) *
+                                (our_cmap.N - 1))
+                    for i in range(istart, istop):
+                        cmaplist[i] = (0.5, 0.5, 0.5, 1.)
+                    our_cmap = LinearSegmentedColormap.from_list(
+                        'Custom cmap', cmaplist, our_cmap.N)
+
+                # we need to create a proxy mappable
+                proxy_mappable = ScalarMappable(cmap=our_cmap, norm=norm)
+                proxy_mappable.set_array(surf_map_faces)
+                cax, kw = make_axes(axes, location='right', fraction=.1,
+                                    shrink=.6, pad=.0)
+                cbar = figure.colorbar(
+                    proxy_mappable, cax=cax, ticks=ticks,
+                    boundaries=bounds, spacing='proportional',
+                    format='%.2g', orientation='vertical')
+                _crop_colorbar(cbar, cbar_vmin, cbar_vmax)
+
+        p3dcollec.set_facecolors(face_colors)
+
+    if title is not None:
+        axes.set_title(title, position=(.5, .95))
+
+    # save figure if output file is given
+    if output_file is not None:
+        figure.savefig(output_file)
+        plt.close(figure)
+    else:
+        return figure
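+
+# Note on the colorbar above: Poly3DCollection face colors bypass the usual
+# scalar-mappable machinery, so a stand-in ScalarMappable carrying the same
+# cmap and norm is handed to figure.colorbar. Schematically, in plain
+# matplotlib (toy values):
+#
+#     mappable = ScalarMappable(cmap=our_cmap, norm=Normalize(vmin, vmax))
+#     mappable.set_array(np.linspace(vmin, vmax, 10))
+#     figure.colorbar(mappable, cax=cax)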
+
+
+def plot_surf_stat_map(surf_mesh, stat_map, bg_map=None,
+                       hemi='left', view='lateral', threshold=None,
+                       alpha='auto', vmax=None, cmap='cold_hot',
+                       colorbar=True, symmetric_cbar="auto", bg_on_data=False,
+                       darkness=1, title=None, output_file=None, axes=None,
+                       figure=None, **kwargs):
+    """ Plotting a stats map on a surface mesh with optional background
+
+    .. versionadded:: 0.3
+
+    Parameters
+    ----------
+    surf_mesh : str or list of two numpy.ndarray
+        Surface mesh geometry, can be a file (valid formats are
+        .gii or Freesurfer specific files such as .orig, .pial,
+        .sphere, .white, .inflated) or
+        a list of two Numpy arrays, the first containing the x-y-z
+        coordinates of the mesh vertices, the second containing the
+        indices (into coords) of the mesh faces
+
+    stat_map : str or numpy.ndarray
+        Statistical map to be displayed on the surface mesh, can
+        be a file (valid formats are .gii, .mgz, .nii, .nii.gz, or
+        Freesurfer specific files such as .thickness, .curv, .sulc, .annot,
+        .label) or
+        a Numpy array
+
+    bg_map : Surface data object (to be defined), optional,
+        Background image to be plotted on the mesh underneath the
+        stat_map in greyscale, most likely a sulcal depth map for
+        realistic shading.
+
+    hemi : {'left', 'right'}, default is 'left'
+        Hemisphere to display.
+
+    view: {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'}, default is 'lateral'
+        View of the surface that is rendered.
+
+    threshold : a number, None, or 'auto', default is None
+        If None is given, the image is not thresholded.
+        If a number is given, it is used to threshold the image,
+        values below the threshold (in absolute value) are plotted
+        as transparent.
+
+    cmap : matplotlib colormap str or colormap object, default 'cold_hot'
+        To use for plotting of the stat_map. Either a string
+        which is a name of a matplotlib colormap, or a matplotlib
+        colormap object.
+
+    colorbar : bool, optional, default is True
+        If True, a symmetric colorbar of the statistical map is displayed.
+
+    alpha : float, alpha level of the mesh (not the stat_map), default 'auto'
+        If 'auto' is chosen, alpha will default to .5 when no bg_map is
+        passed and to 1 if a bg_map is passed.
+
+    vmax : upper bound for plotting of stat_map values.
+
+    symmetric_cbar : bool or 'auto', optional, default 'auto'
+        Specifies whether the colorbar should range from -vmax to vmax
+        or from vmin to vmax. Setting to 'auto' will select the latter
+        if the range of the whole image is either positive or negative.
+        Note: The colormap will always range from -vmax to vmax.
+
+    bg_on_data : bool, default is False
+        If True, and a bg_map is specified, the stat_map data is multiplied
+        by the background image, so that e.g. sulcal depth is visible beneath
+        the stat_map.
+        NOTE: this non-uniformly changes the stat_map values according
+        to e.g the sulcal depth.
+
+    darkness: float, between 0 and 1, default 1
+        Specifying the darkness of the background image. 1 indicates that the
+        original values of the background are used. .5 indicates the
+        background values are reduced by half before being applied.
+
+    title : str, optional
+        Figure title.
+
+    output_file: str, or None, optional
+        The name of an image file to export plot to. Valid extensions
+        are .png, .pdf, .svg. If output_file is not None, the plot
+        is saved to a file, and the display is closed.
+
+    axes: instance of matplotlib axes, None, optional
+        The axes instance to plot to. The projection must be '3d' (e.g.,
+        `figure, axes = plt.subplots(subplot_kw={'projection': '3d'})`,
+        where axes should be passed.).
+        If None, a new axes is created.
+
+    figure: instance of matplotlib figure, None, optional
+        The figure instance to plot to. If None, a new figure is created.
+
+    See Also
+    --------
+    nilearn.datasets.fetch_surf_fsaverage : For surface data object to be
+        used as background map for this plotting function.
+
+    nilearn.plotting.plot_surf : For brain surface visualization.
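+
+    Examples
+    --------
+    A minimal sketch (the statistical map below is random data used purely
+    for illustration):
+
+    >>> import numpy as np
+    >>> from nilearn import datasets
+    >>> from nilearn.plotting import plot_surf_stat_map
+    >>> fsaverage = datasets.fetch_surf_fsaverage()       # doctest: +SKIP
+    >>> stat_map = np.random.RandomState(0).randn(10242)  # toy stat map
+    >>> plot_surf_stat_map(fsaverage['infl_left'], stat_map,
+    ...                    bg_map=fsaverage['sulc_left'],
+    ...                    threshold=1.)                   # doctest: +SKIP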
+    """
+
+    # Call _get_colorbar_and_data_ranges to derive symmetric vmin, vmax
+    # and colorbar limits depending on symmetric_cbar settings
+    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
+        stat_map, vmax, symmetric_cbar, kwargs)
+
+    display = plot_surf(
+        surf_mesh, surf_map=stat_map, bg_map=bg_map, hemi=hemi, view=view,
+        avg_method='mean', threshold=threshold, cmap=cmap, colorbar=colorbar,
+        alpha=alpha, bg_on_data=bg_on_data, darkness=darkness, vmax=vmax,
+        vmin=vmin, title=title, output_file=output_file, axes=axes,
+        figure=figure, cbar_vmin=cbar_vmin, cbar_vmax=cbar_vmax, **kwargs)
+
+    return display
+
+
+def plot_surf_roi(surf_mesh, roi_map, bg_map=None,
+                  hemi='left', view='lateral', alpha='auto',
+                  vmin=None, vmax=None, cmap='gist_ncar',
+                  bg_on_data=False, darkness=1, title=None,
+                  output_file=None, axes=None, figure=None, **kwargs):
+    """ Plotting ROI on a surface mesh with optional background
+
+    .. versionadded:: 0.3
+
+    Parameters
+    ----------
+    surf_mesh : str or list of two numpy.ndarray
+        Surface mesh geometry, can be a file (valid formats are
+        .gii or Freesurfer specific files such as .orig, .pial,
+        .sphere, .white, .inflated) or
+        a list of two Numpy arrays, the first containing the x-y-z
+        coordinates of the mesh vertices, the second containing the indices
+        (into coords) of the mesh faces
+
+    roi_map : str or numpy.ndarray or list of numpy.ndarray
+        ROI map to be displayed on the surface mesh, can be a file
+        (valid formats are .gii, .mgz, .nii, .nii.gz, or Freesurfer specific
+        files such as .annot or .label), or
+        a Numpy array containing a value for each vertex, or
+        a list of Numpy arrays, one array per ROI which contains indices
+        of all vertices included in that ROI.
+
+    hemi : {'left', 'right'}, default is 'left'
+        Hemisphere to display.
+
+    bg_map : Surface data object (to be defined), optional,
+        Background image to be plotted on the mesh underneath the
+        stat_map in greyscale, most likely a sulcal depth map for
+        realistic shading.
+
+    view: {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'}, default is 'lateral'
+        View of the surface that is rendered.
+
+    cmap : matplotlib colormap str or colormap object, default 'gist_ncar'
+        To use for plotting of the rois. Either a string which is a name
+        of a matplotlib colormap, or a matplotlib colormap object.
+
+    alpha : float, default is 'auto'
+        Alpha level of the mesh (not the stat_map). If default,
+        alpha will default to .5 when no bg_map is passed
+        and to 1 if a bg_map is passed.
+
+    bg_on_data : bool, default is False
+        If True, and a bg_map is specified, the stat_map data is multiplied
+        by the background image, so that e.g. sulcal depth is visible beneath
+        the stat_map. Beware that this non-uniformly changes the stat_map
+        values according to e.g the sulcal depth.
+
+    darkness : float, between 0 and 1, default is 1
+        Specifying the darkness of the background image. 1 indicates that the
+        original values of the background are used. .5 indicates the
+        background values are reduced by half before being applied.
+
+    title : str, optional
+        Figure title.
+
+    output_file: str, or None, optional
+        The name of an image file to export plot to. Valid extensions
+        are .png, .pdf, .svg. If output_file is not None, the plot
+        is saved to a file, and the display is closed.
+
+    axes: Axes instance | None
+        The axes instance to plot to. The projection must be '3d' (e.g.,
+        `plt.subplots(subplot_kw={'projection': '3d'})`).
+        If None, a new axes is created.
+
+    figure: Figure instance | None
+        The figure to plot to. If None, a new figure is created.
+
+    See Also
+    --------
+    nilearn.datasets.fetch_surf_fsaverage: For surface data object to be
+        used as background map for this plotting function.
+
+    nilearn.plotting.plot_surf: For brain surface visualization.
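+
+    Examples
+    --------
+    A minimal sketch (hypothetical ROI given as a list with one array of
+    vertex indices):
+
+    >>> import numpy as np
+    >>> from nilearn import datasets
+    >>> from nilearn.plotting import plot_surf_roi
+    >>> fsaverage = datasets.fetch_surf_fsaverage()   # doctest: +SKIP
+    >>> roi_idx = np.arange(0, 500)                   # toy ROI
+    >>> plot_surf_roi(fsaverage['infl_left'], roi_map=[roi_idx],
+    ...               bg_map=fsaverage['sulc_left'])  # doctest: +SKIP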
+    """
+
+    v, _ = load_surf_mesh(surf_mesh)
+
+    # if roi_map is a list of arrays with indices for different rois
+    if isinstance(roi_map, list):
+        roi_list = roi_map[:]
+        roi_map = np.zeros(v.shape[0])
+        idx = 1
+        for arr in roi_list:
+            roi_map[arr] = idx
+            idx += 1
+
+    elif isinstance(roi_map, np.ndarray):
+        # if roi_map is an array with values for all surface nodes
+        roi_data = load_surf_data(roi_map)
+        # or a single array with indices for a single roi
+        if roi_data.shape[0] != v.shape[0]:
+            roi_map = np.zeros(v.shape[0], dtype=int)
+            roi_map[roi_data] = 1
+
+    else:
+        raise ValueError('Invalid input for roi_map. Input can be a file '
+                         '(valid formats are .gii, .mgz, .nii, '
+                         '.nii.gz, or Freesurfer specific files such as '
+                         '.annot or .label), or a Numpy array containing a '
+                         'value for each vertex, or a list of Numpy arrays, '
+                         'one array per ROI which contains indices of all '
+                         'vertices included in that ROI')
+    vmin, vmax = np.min(roi_map), 1 + np.max(roi_map)
+    display = plot_surf(surf_mesh, surf_map=roi_map, bg_map=bg_map,
+                        hemi=hemi, view=view, avg_method='median',
+                        cmap=cmap, alpha=alpha, bg_on_data=bg_on_data,
+                        darkness=darkness, vmin=vmin, vmax=vmax,
+                        title=title, output_file=output_file,
+                        axes=axes, figure=figure, **kwargs)
+
+    return display
diff --git a/nilearn/plotting/tests/test_cm.py b/nilearn/plotting/tests/test_cm.py
index 92554bbd06..36e44058eb 100644
--- a/nilearn/plotting/tests/test_cm.py
+++ b/nilearn/plotting/tests/test_cm.py
@@ -19,3 +19,7 @@ def test_replace_inside():
     if hasattr(plt.cm, 'gnuplot'):
         # gnuplot is only in recent version of MPL
         replace_inside(plt.cm.gnuplot, plt.cm.gnuplot2, .2, .8)
+
+
+def test_cm_preload():
+    plt.imshow([list(range(10))], cmap="cold_hot")
diff --git a/nilearn/plotting/tests/test_displays.py b/nilearn/plotting/tests/test_displays.py
index e19fbf0c77..7d376b2da4 100644
--- a/nilearn/plotting/tests/test_displays.py
+++ b/nilearn/plotting/tests/test_displays.py
@@ -2,17 +2,18 @@
 # vi: set ft=python sts=4 ts=4 sw=4 et:
 import tempfile
 
-import numpy as np
-
 import matplotlib.pyplot as plt
+import nibabel
+import numpy as np
 
 from nilearn.plotting.displays import OrthoSlicer, XSlicer, OrthoProjector
+from nilearn.plotting.displays import LZRYProjector
 from nilearn.datasets import load_mni152_template
-
 ##############################################################################
 # Some smoke testing for graphics-related code
 
+
 def test_demo_ortho_slicer():
     # This is only a smoke test
     oslicer = OrthoSlicer(cut_coords=(0, 0, 0))
@@ -40,3 +41,74 @@ def test_demo_ortho_projector():
     with tempfile.TemporaryFile() as fp:
         oprojector.savefig(fp)
     oprojector.close()
+
+
+def test_contour_fillings_levels_in_add_contours():
+    oslicer = OrthoSlicer(cut_coords=(0, 0, 0))
+    img = load_mni152_template()
+    # levels should be at least 2
+    # If single levels are passed then we force upper level to be inf
+    oslicer.add_contours(img, filled=True, colors='r',
+                         alpha=0.2, levels=[0.])
+
+    # If two levels are passed, it should be increasing from zero index
+    # In this case, we simply omit appending inf
+    oslicer.add_contours(img, filled=True, colors='b',
+                         alpha=0.1, levels=[0., 0.2])
+
+    # without passing colors and
alpha. In this case, default values are + # chosen from matplotlib + oslicer.add_contours(img, filled=True, levels=[0., 0.2]) + + # levels with only one value + oslicer.add_contours(img, filled=True, levels=[0.]) + + # without passing levels, should work with default levels from + # matplotlib + oslicer.add_contours(img, filled=True) + + +def test_user_given_cmap_with_colorbar(): + img = load_mni152_template() + oslicer = OrthoSlicer(cut_coords=(0, 0, 0)) + + # Test with cmap given as a string + oslicer.add_overlay(img, cmap='Paired', colorbar=True) + oslicer.close() + + +def test_data_complete_mask(): + """This special case test is due to matplotlib 2.1.0. + + When the data is completely masked, then we have plotting issues + See similar issue #9280 reported in matplotlib. This function + tests the patch added for this particular issue. + """ + # data is completely masked + data = np.zeros((10, 20, 30)) + affine = np.eye(4) + + img = nibabel.Nifti1Image(data, affine) + oslicer = OrthoSlicer(cut_coords=(0, 0, 0)) + oslicer.add_overlay(img) + oslicer.close() + + +def test_add_markers_cut_coords_is_none(): + # A special case test for add_markers when cut_coords are None. This + # case is used when coords are placed on glass brain + orthoslicer = OrthoSlicer(cut_coords=(None, None, None)) + orthoslicer.add_markers([(0, 0, 2)]) + orthoslicer.close() + + +def test_add_graph_with_node_color_as_string(): + lzry_projector = LZRYProjector(cut_coords=(0, 0, 0, 0)) + matrix = np.array([[0, 3], [3, 0]]) + node_coords = [[-53.60, -62.80, 36.64], [23.87, 0.31, 69.42]] + # node_color as string + lzry_projector.add_graph(matrix, node_coords, node_color='red') + lzry_projector.close() + # node_color as sequence of string + lzry_projector.add_graph(matrix, node_coords, node_color=['red', 'blue']) + lzry_projector.close() diff --git a/nilearn/plotting/tests/test_edge_detect.py b/nilearn/plotting/tests/test_edge_detect.py index 9da180678f..ad9f00e476 100644 --- a/nilearn/plotting/tests/test_edge_detect.py +++ b/nilearn/plotting/tests/test_edge_detect.py @@ -1,10 +1,19 @@ import numpy as np from nilearn.plotting.edge_detect import _edge_detect - +from nose.tools import assert_true def test_edge_detect(): img = np.zeros((10, 10)) img[:5] = 1 _, edge_mask = _edge_detect(img) np.testing.assert_almost_equal(img[4], 1) + + +def test_edge_nan(): + img = np.zeros((10, 10)) + img[:5] = 1 + img[0] = np.NaN + grad_mag, edge_mask = _edge_detect(img) + np.testing.assert_almost_equal(img[4], 1) + assert_true((grad_mag[0] > 2).all()) diff --git a/nilearn/plotting/tests/test_find_cuts.py b/nilearn/plotting/tests/test_find_cuts.py index a587fef832..394a4f7d93 100644 --- a/nilearn/plotting/tests/test_find_cuts.py +++ b/nilearn/plotting/tests/test_find_cuts.py @@ -1,10 +1,12 @@ import numpy as np -from nose.tools import assert_equal, assert_true +from nose.tools import assert_equal, assert_true, assert_not_equal import nibabel from nilearn.plotting.find_cuts import (find_xyz_cut_coords, find_cut_slices, - _transform_cut_coords) + _transform_cut_coords, + find_parcellation_cut_coords, + find_probabilistic_atlas_cut_coords) from nilearn._utils.testing import assert_raises_regex, assert_warns -from nilearn.plotting.find_cuts import find_xyz_cut_coords +from nilearn.masking import compute_epi_mask def test_find_cut_coords(): @@ -15,7 +17,10 @@ def test_find_cut_coords(): # identity affine affine = np.eye(4) img = nibabel.Nifti1Image(data, affine) - x, y, z = find_xyz_cut_coords(img, mask=np.ones(data.shape, np.bool)) + mask_img = 
compute_epi_mask(img) + x, y, z = find_xyz_cut_coords(img, + mask_img=mask_img) + np.testing.assert_allclose((x, y, z), (x_map, y_map, z_map), # Need such a high tolerance for the test to @@ -25,7 +30,8 @@ def test_find_cut_coords(): # non-trivial affine affine = np.diag([1. / 2, 1 / 3., 1 / 4., 1.]) img = nibabel.Nifti1Image(data, affine) - x, y, z = find_xyz_cut_coords(img, mask=np.ones(data.shape, np.bool)) + mask_img = compute_epi_mask(img) + x, y, z = find_xyz_cut_coords(img, mask_img=mask_img) np.testing.assert_allclose((x, y, z), (x_map / 2., y_map / 3., z_map / 4.), # Need such a high tolerance for the test to @@ -42,6 +48,25 @@ def test_find_cut_coords(): np.array([x, y, z]), 0.5 * np.array(data.shape).astype(np.float)) + # regression test (cf. #922) + # pseudo-4D images as input (i.e., X, Y, Z, 1) + # previously raised "ValueError: too many values to unpack" + rng = np.random.RandomState(42) + data_3d = rng.randn(10, 10, 10) + data_4d = data_3d[..., np.newaxis] + affine = np.eye(4) + img_3d = nibabel.Nifti1Image(data_3d, affine) + img_4d = nibabel.Nifti1Image(data_4d, affine) + assert_equal(find_xyz_cut_coords(img_3d), find_xyz_cut_coords(img_4d)) + + # test passing empty image returns coordinates pointing to AC-PC line + data = np.zeros((20, 30, 40)) + affine = np.eye(4) + img = nibabel.Nifti1Image(data, affine) + cut_coords = find_xyz_cut_coords(img) + assert_equal(cut_coords, [0.0, 0.0, 0.0]) + cut_coords = assert_warns(UserWarning, find_xyz_cut_coords, img) + def test_find_cut_slices(): data = np.zeros((50, 50, 50)) @@ -61,9 +86,9 @@ def test_find_cut_slices(): # of the data for cut in cuts: if direction == 'x': - cut_value = data[cut] + cut_value = data[int(cut)] elif direction == 'z': - cut_value = data[..., cut] + cut_value = data[..., int(cut)] assert_equal(cut_value.max(), 1) # Now ask more cuts than it is possible to have with a given spacing @@ -73,6 +98,30 @@ def test_find_cut_slices(): cuts = find_cut_slices(img, direction=direction, n_cuts=n_cuts, spacing=2) + # non-diagonal affines + affine = np.array([[-1., 0., 0., 123.46980286], + [0., 0., 1., -94.11079407], + [0., -1., 0., 160.694], + [0., 0., 0., 1.]]) + img = nibabel.Nifti1Image(data, affine) + cuts = find_cut_slices(img, direction='z') + assert_not_equal(np.diff(cuts).min(), 0.) + affine = np.array([[-2., 0., 0., 123.46980286], + [0., 0., 2., -94.11079407], + [0., -2., 0., 160.694], + [0., 0., 0., 1.]]) + img = nibabel.Nifti1Image(data, affine) + cuts = find_cut_slices(img, direction='z') + assert_not_equal(np.diff(cuts).min(), 0.) + # Rotate it slightly + angle = np.pi / 180 * 15 + rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], + [np.sin(angle), np.cos(angle)]]) + affine[:2, :2] = rotation_matrix * 2.0 + img = nibabel.Nifti1Image(data, affine) + cuts = find_cut_slices(img, direction='z') + assert_not_equal(np.diff(cuts).min(), 0.) 
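+
+    # Illustration of the property checked above (toy image, rotated affine):
+    # find_cut_slices returns cut positions in world (mm) coordinates, so
+    # the cuts must stay distinct even when the voxel grid is rotated:
+    #
+    #     img = nibabel.Nifti1Image(data, affine)  # affine rotated by 15 deg
+    #     cuts = find_cut_slices(img, direction='z', n_cuts=7)
+    #     assert np.diff(cuts).min() > 0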
+ def test_validity_of_ncuts_error_in_find_cut_slices(): data = np.zeros((50, 50, 50)) @@ -130,9 +179,9 @@ def test_tranform_cut_coords(): def test_find_cuts_empty_mask_no_crash(): img = nibabel.Nifti1Image(np.ones((2, 2, 2)), np.eye(4)) - mask = np.zeros((2, 2, 2)).astype(np.bool) + mask_img = compute_epi_mask(img) cut_coords = assert_warns(UserWarning, find_xyz_cut_coords, img, - mask=mask) + mask_img=mask_img) np.testing.assert_array_equal(cut_coords, [.5, .5, .5]) @@ -141,3 +190,108 @@ def test_fast_abs_percentile_no_index_error_find_cuts(): data = np.array([[[1., 2.], [3., 4.]], [[0., 0.], [0., 0.]]]) img = nibabel.Nifti1Image(data, np.eye(4)) assert_equal(len(find_xyz_cut_coords(img)), 3) + + +def test_find_parcellation_cut_coords(): + data = np.zeros((100, 100, 100)) + x_map_a, y_map_a, z_map_a = (10, 10, 10) + x_map_b, y_map_b, z_map_b = (30, 30, 30) + x_map_c, y_map_c, z_map_c = (50, 50, 50) + # Defining 3 parcellations + data[x_map_a - 10:x_map_a + 10, y_map_a - 10:y_map_a + 10, z_map_a - 10: z_map_a + 10] = 1 + data[x_map_b - 10:x_map_b + 10, y_map_b - 10:y_map_b + 10, z_map_b - 10: z_map_b + 10] = 2 + data[x_map_c - 10:x_map_c + 10, y_map_c - 10:y_map_c + 10, z_map_c - 10: z_map_c + 10] = 3 + + # Number of labels + labels = np.unique(data) + labels = labels[labels != 0] + n_labels = len(labels) + + # identity affine + affine = np.eye(4) + img = nibabel.Nifti1Image(data, affine) + # find coordinates with return label names is True + coords, labels_list = find_parcellation_cut_coords(img, + return_label_names=True) + # Check outputs + assert_equal((n_labels, 3), coords.shape) + # number of labels in data should equal number of labels list returned + assert_equal(n_labels, len(labels_list)) + # Labels numbered should match the numbers in returned labels list + assert_equal(list(labels), labels_list) + + # Match with the number of non-overlapping labels + np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]), + (x_map_a, y_map_a, z_map_a), rtol=6e-2) + np.testing.assert_allclose((coords[1][0], coords[1][1], coords[1][2]), + (x_map_b, y_map_b, z_map_b), rtol=6e-2) + np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]), + (x_map_c, y_map_c, z_map_c), rtol=6e-2) + + # non-trivial affine + affine = np.diag([1 / 2., 1 / 3., 1 / 4., 1.]) + img = nibabel.Nifti1Image(data, affine) + coords = find_parcellation_cut_coords(img) + assert_equal((n_labels, 3), coords.shape) + np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]), + (x_map_a / 2., y_map_a / 3., z_map_a / 4.), + rtol=6e-2) + np.testing.assert_allclose((coords[1][0], coords[1][1], coords[1][2]), + (x_map_b / 2., y_map_b / 3., z_map_b / 4.), + rtol=6e-2) + np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]), + (x_map_c / 2., y_map_c / 3., z_map_c / 4.), + rtol=6e-2) + # test raises an error with wrong label_hemisphere name with 'lft' + error_msg = ("Invalid label_hemisphere name:lft. 
Should be one of " + "these 'left' or 'right'.") + assert_raises_regex(ValueError, error_msg, find_parcellation_cut_coords, + labels_img=img, label_hemisphere='lft') + + +def test_find_probabilistic_atlas_cut_coords(): + # make data + arr1 = np.zeros((100, 100, 100)) + x_map_a, y_map_a, z_map_a = 30, 40, 50 + arr1[x_map_a - 10:x_map_a + 10, y_map_a - 20:y_map_a + 20, z_map_a - 30: z_map_a + 30] = 1 + + arr2 = np.zeros((100, 100, 100)) + x_map_b, y_map_b, z_map_b = 40, 50, 60 + arr2[x_map_b - 10:x_map_b + 10, y_map_b - 20:y_map_b + 20, z_map_b - 30: z_map_b + 30] = 1 + + # make data with empty in between non-empty maps to make sure that + # code does not crash + arr3 = np.zeros((100, 100, 100)) + + data = np.concatenate((arr1[..., np.newaxis], arr3[..., np.newaxis], + arr2[..., np.newaxis]), axis=3) + + # Number of maps in time dimension + n_maps = data.shape[-1] + + # run test on img with identity affine + affine = np.eye(4) + img = nibabel.Nifti1Image(data, affine) + coords = find_probabilistic_atlas_cut_coords(img) + + # Check outputs + assert_equal((n_maps, 3), coords.shape) + + np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]), + (x_map_a, y_map_a, z_map_a), rtol=6e-2) + np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]), + (x_map_b - 0.5, y_map_b - 0.5, z_map_b - 0.5), + rtol=6e-2) + + # non-trivial affine + affine = np.diag([1 / 2., 1 / 3., 1 / 4., 1.]) + img = nibabel.Nifti1Image(data, affine) + coords = find_probabilistic_atlas_cut_coords(img) + # Check outputs + assert_equal((n_maps, 3), coords.shape) + np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]), + (x_map_a / 2., y_map_a / 3., z_map_a / 4.), + rtol=6e-2) + np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]), + (x_map_b / 2., y_map_b / 3., z_map_b / 4.), + rtol=6e-2) diff --git a/nilearn/plotting/tests/test_html_connectome.py b/nilearn/plotting/tests/test_html_connectome.py new file mode 100644 index 0000000000..43c47a1d16 --- /dev/null +++ b/nilearn/plotting/tests/test_html_connectome.py @@ -0,0 +1,79 @@ +import numpy as np + +from nilearn.plotting.js_plotting_utils import decode +from nilearn.plotting import html_connectome + +from .test_js_plotting_utils import check_html + + +def test_prepare_line(): + e = np.asarray([0, 1, 2, 3], dtype=int) + n = np.asarray([[0, 1], [0, 2], [2, 3], [8, 9]], dtype=int) + pe, pn = html_connectome._prepare_line(e, n) + assert (pn == [0, 1, 0, 0, 2, 0, 2, 3, 0, 8, 9, 0]).all() + assert(pe == [0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 3, 0]).all() + + +def _make_connectome(): + adj = np.diag([1.5, .3, 2.5], 2) + adj += adj.T + adj += np.eye(5) + + coord = np.arange(5) + coord = np.asarray([coord * 10, -coord, coord[::-1]]).T + return adj, coord + + +def test_get_connectome(): + adj, coord = _make_connectome() + connectome = html_connectome._get_connectome(adj, coord) + con_x = decode(connectome['_con_x'], ' (-1, 0) assert_true(plotted_array.mask[-1, 0]) + # Save execution time and memory + plt.close() + def test_plot_glass_brain_threshold_for_uint8(): # mask was applied in [-threshold, threshold] which is problematic @@ -213,6 +239,9 @@ def test_plot_glass_brain_threshold_for_uint8(): # axis orientation seem to be flipped, hence (0, 0) -> (-1, 0) assert_true(plotted_array.mask[-1, 0]) + # Save execution time and memory + plt.close() + def test_save_plot(): img = _generate_img() @@ -234,6 +263,9 @@ def test_save_plot(): finally: os.remove(filename) + # Save execution time and memory + plt.close() + def test_display_methods(): 
img = _generate_img() @@ -253,6 +285,9 @@ def test_plot_with_axes_or_figure(): ax = plt.subplot(111) plot_img(img, axes=ax) + # Save execution time and memory + plt.close() + def test_plot_stat_map_colorbar_variations(): # This is only a smoke test @@ -283,6 +318,9 @@ def test_plot_empty_slice(): img = nibabel.Nifti1Image(data, mni_affine) plot_img(img, display_mode='y', threshold=1) + # Save execution time and memory + plt.close() + def test_plot_img_invalid(): # Check that we get a meaningful error message when we give a wrong @@ -299,6 +337,9 @@ def test_plot_img_with_auto_cut_coords(): plot_img(img, cut_coords=None, display_mode=display_mode, black_bg=True) + # Save execution time and memory + plt.close() + def test_plot_img_with_resampling(): data = _generate_img().get_data() @@ -313,6 +354,9 @@ def test_plot_img_with_resampling(): colors=['limegreen', 'yellow']) display.add_edges(img, color='c') + # Save execution time and memory + plt.close() + def test_plot_noncurrent_axes(): """Regression test for Issue #450""" @@ -331,6 +375,9 @@ def test_plot_noncurrent_axes(): ax_fh = niax.ax.get_figure() assert_equal(ax_fh, fh1, 'New axis %s should be in fh1.' % ax_name) + # Save execution time and memory + plt.close() + def test_plot_connectome(): node_color = ['green', 'blue', 'k', 'cyan'] @@ -346,6 +393,7 @@ def test_plot_connectome(): title='threshold=0.38', node_size=10, node_color=node_color) plot_connectome(*args, **kwargs) + plt.close() # used to speed-up tests for the next plots kwargs['display_mode'] = 'x' @@ -363,6 +411,7 @@ def test_plot_connectome(): os.path.getsize(filename) > 0) finally: os.remove(filename) + plt.close() # with node_kwargs, edge_kwargs and edge_cmap arguments plot_connectome(*args, @@ -370,21 +419,25 @@ def test_plot_connectome(): node_size=[10, 20, 30, 40], node_color=np.zeros((4, 3)), edge_cmap='RdBu', + colorbar=True, node_kwargs={ 'marker': 'v'}, edge_kwargs={ 'linewidth': 4}) + plt.close() # masked array support masked_adjacency_matrix = np.ma.masked_array( adjacency_matrix, np.abs(adjacency_matrix) < 0.5) plot_connectome(masked_adjacency_matrix, node_coords, **kwargs) + plt.close() # sparse matrix support sparse_adjacency_matrix = sparse.coo_matrix(adjacency_matrix) plot_connectome(sparse_adjacency_matrix, node_coords, **kwargs) + plt.close() # NaN matrix support nan_adjacency_matrix = np.array([[1., np.nan, 0.], @@ -392,10 +445,26 @@ def test_plot_connectome(): [np.nan, 2., 1.]]) nan_node_coords = np.arange(3 * 3).reshape(3, 3) plot_connectome(nan_adjacency_matrix, nan_node_coords, **kwargs) + plt.close() # smoke-test where there is no edge to draw, e.g. 
when # edge_threshold is too high plot_connectome(*args, edge_threshold=1e12) + plt.close() + + # with colorbar=True + plot_connectome(*args, colorbar=True) + plt.close() + + # smoke-test with hemispheric sagittal cuts + plot_connectome(*args, display_mode='lzry') + plt.close() + + # test node_color as a string with display_mode='lzry' + plot_connectome(*args, node_color='red', display_mode='lzry') + plt.close() + plot_connectome(*args, node_color=['red'], display_mode='lzry') + plt.close() def test_plot_connectome_exceptions(): @@ -482,6 +551,7 @@ def test_singleton_ax_dim(): shape[axis] = 1 img = nibabel.Nifti1Image(np.ones(shape), np.eye(4)) plot_stat_map(img, None, display_mode=direction) + plt.close() def test_plot_prob_atlas(): @@ -492,34 +562,39 @@ def test_plot_prob_atlas(): img = nibabel.Nifti1Image(data_rng, affine) # Testing the 4D plot prob atlas with contours plot_prob_atlas(img, view_type='contours') + plt.close() # Testing the 4D plot prob atlas with contours plot_prob_atlas(img, view_type='filled_contours', threshold=0.2) + plt.close() # Testing the 4D plot prob atlas with contours plot_prob_atlas(img, view_type='continuous') + plt.close() + # Testing the 4D plot prob atlas with colormap + plot_prob_atlas(img, view_type='filled_contours', colorbar=True) + plt.close() + # threshold=None + plot_prob_atlas(img, threshold=None) + plt.close() def test_get_colorbar_and_data_ranges_with_vmin(): - affine = np.eye(4) data = np.array([[-.5, 1., np.nan], [0., np.nan, -.2], [1.5, 2.5, 3.]]) - img = nibabel.Nifti1Image(data, affine) assert_raises_regex(ValueError, 'does not accept a "vmin" argument', _get_colorbar_and_data_ranges, - img, vmax=None, + data, vmax=None, symmetric_cbar=True, kwargs={'vmin': 1.}) def test_get_colorbar_and_data_ranges_pos_neg(): # data with positive and negative range - affine = np.eye(4) data = np.array([[-.5, 1., np.nan], [0., np.nan, -.2], [1.5, 2.5, 3.]]) - img = nibabel.Nifti1Image(data, affine) # Reasonable additional arguments that would end up being passed # to imshow in a real plotting use case @@ -527,7 +602,7 @@ # symmetric_cbar set to True cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=None, + data, vmax=None, symmetric_cbar=True, kwargs=kwargs) assert_equal(vmin, -np.nanmax(data)) @@ -536,7 +611,7 @@ assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=2, + data, vmax=2, symmetric_cbar=True, kwargs=kwargs) assert_equal(vmin, -2) @@ -546,7 +621,7 @@ # symmetric_cbar is set to False cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=None, + data, vmax=None, symmetric_cbar=False, kwargs=kwargs) assert_equal(vmin, -np.nanmax(data)) @@ -555,7 +630,7 @@ assert_equal(cbar_vmax, np.nanmax(data)) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=2, + data, vmax=2, symmetric_cbar=False, kwargs=kwargs) assert_equal(vmin, -2) @@ -565,7 +640,7 @@ # symmetric_cbar is set to 'auto', same behaviours as True for this case cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=None, + data, vmax=None, symmetric_cbar='auto', kwargs=kwargs) assert_equal(vmin, -np.nanmax(data)) @@ -574,7 +649,7 @@ def 
test_get_colorbar_and_data_ranges_pos_neg(): assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=2, + data, vmax=2, symmetric_cbar='auto', kwargs=kwargs) assert_equal(vmin, -2) @@ -585,15 +660,13 @@ def test_get_colorbar_and_data_ranges_pos_neg(): def test_get_colorbar_and_data_ranges_pos(): # data with positive range - affine = np.eye(4) data_pos = np.array([[0, 1., np.nan], [0., np.nan, 0], [1.5, 2.5, 3.]]) - img_pos = nibabel.Nifti1Image(data_pos, affine) # symmetric_cbar set to True cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_pos, vmax=None, + data_pos, vmax=None, symmetric_cbar=True, kwargs={}) assert_equal(vmin, -np.nanmax(data_pos)) @@ -602,7 +675,7 @@ def test_get_colorbar_and_data_ranges_pos(): assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_pos, vmax=2, + data_pos, vmax=2, symmetric_cbar=True, kwargs={}) assert_equal(vmin, -2) @@ -612,7 +685,7 @@ def test_get_colorbar_and_data_ranges_pos(): # symmetric_cbar is set to False cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_pos, vmax=None, + data_pos, vmax=None, symmetric_cbar=False, kwargs={}) assert_equal(vmin, -np.nanmax(data_pos)) @@ -621,7 +694,7 @@ def test_get_colorbar_and_data_ranges_pos(): assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_pos, vmax=2, + data_pos, vmax=2, symmetric_cbar=False, kwargs={}) assert_equal(vmin, -2) @@ -631,7 +704,7 @@ def test_get_colorbar_and_data_ranges_pos(): # symmetric_cbar is set to 'auto', same behaviour as false in this case cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_pos, vmax=None, + data_pos, vmax=None, symmetric_cbar='auto', kwargs={}) assert_equal(vmin, -np.nanmax(data_pos)) @@ -640,7 +713,7 @@ def test_get_colorbar_and_data_ranges_pos(): assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_pos, vmax=2, + data_pos, vmax=2, symmetric_cbar='auto', kwargs={}) assert_equal(vmin, -2) @@ -651,15 +724,13 @@ def test_get_colorbar_and_data_ranges_pos(): def test_get_colorbar_and_data_ranges_neg(): # data with negative range - affine = np.eye(4) data_neg = np.array([[-.5, 0, np.nan], [0., np.nan, -.2], [0, 0, 0]]) - img_neg = nibabel.Nifti1Image(data_neg, affine) # symmetric_cbar set to True cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_neg, vmax=None, + data_neg, vmax=None, symmetric_cbar=True, kwargs={}) assert_equal(vmin, np.nanmin(data_neg)) @@ -668,7 +739,7 @@ def test_get_colorbar_and_data_ranges_neg(): assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_neg, vmax=2, + data_neg, vmax=2, symmetric_cbar=True, kwargs={}) assert_equal(vmin, -2) @@ -678,7 +749,7 @@ def test_get_colorbar_and_data_ranges_neg(): # symmetric_cbar is set to False cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_neg, vmax=None, + data_neg, vmax=None, symmetric_cbar=False, kwargs={}) assert_equal(vmin, np.nanmin(data_neg)) @@ -687,7 +758,7 @@ def test_get_colorbar_and_data_ranges_neg(): assert_equal(cbar_vmax, 0) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_neg, vmax=2, + data_neg, vmax=2, symmetric_cbar=False, 
kwargs={}) assert_equal(vmin, -2) @@ -697,7 +768,7 @@ def test_get_colorbar_and_data_ranges_neg(): # symmetric_cbar is set to 'auto', same behaviour as False in this case cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_neg, vmax=None, + data_neg, vmax=None, symmetric_cbar='auto', kwargs={}) assert_equal(vmin, np.nanmin(data_neg)) @@ -706,7 +777,7 @@ def test_get_colorbar_and_data_ranges_neg(): assert_equal(cbar_vmax, 0) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img_neg, vmax=2, + data_neg, vmax=2, symmetric_cbar='auto', kwargs={}) assert_equal(vmin, -2) @@ -717,7 +788,6 @@ def test_get_colorbar_and_data_ranges_neg(): def test_get_colorbar_and_data_ranges_masked_array(): # data with positive and negative range - affine = np.eye(4) data = np.array([[-.5, 1., np.nan], [0., np.nan, -.2], [1.5, 2.5, 3.]]) @@ -725,15 +795,13 @@ def test_get_colorbar_and_data_ranges_masked_array(): # Easier to fill masked values with NaN to test against later on filled_data = masked_data.filled(np.nan) - img = nibabel.Nifti1Image(masked_data, affine) - # Reasonable additional arguments that would end up being passed # to imshow in a real plotting use case kwargs = {'aspect': 'auto', 'alpha': 0.9} # symmetric_cbar set to True cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=None, + masked_data, vmax=None, symmetric_cbar=True, kwargs=kwargs) assert_equal(vmin, -np.nanmax(filled_data)) @@ -742,7 +810,7 @@ def test_get_colorbar_and_data_ranges_masked_array(): assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=2, + masked_data, vmax=2, symmetric_cbar=True, kwargs=kwargs) assert_equal(vmin, -2) @@ -752,7 +820,7 @@ def test_get_colorbar_and_data_ranges_masked_array(): # symmetric_cbar is set to False cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=None, + masked_data, vmax=None, symmetric_cbar=False, kwargs=kwargs) assert_equal(vmin, -np.nanmax(filled_data)) @@ -761,7 +829,7 @@ def test_get_colorbar_and_data_ranges_masked_array(): assert_equal(cbar_vmax, np.nanmax(filled_data)) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=2, + masked_data, vmax=2, symmetric_cbar=False, kwargs=kwargs) assert_equal(vmin, -2) @@ -771,7 +839,7 @@ def test_get_colorbar_and_data_ranges_masked_array(): # symmetric_cbar is set to 'auto', same behaviours as True for this case cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=None, + masked_data, vmax=None, symmetric_cbar='auto', kwargs=kwargs) assert_equal(vmin, -np.nanmax(filled_data)) @@ -780,10 +848,110 @@ def test_get_colorbar_and_data_ranges_masked_array(): assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( - img, vmax=2, + masked_data, vmax=2, symmetric_cbar='auto', kwargs=kwargs) assert_equal(vmin, -2) assert_equal(vmax, 2) assert_equal(cbar_vmin, None) assert_equal(cbar_vmax, None) + + +def test_invalid_in_display_mode_cut_coords_all_plots(): + img = _generate_img() + + for plot_func in [plot_img, plot_anat, plot_roi, plot_epi, + plot_stat_map, plot_prob_atlas, plot_glass_brain]: + assert_raises_regex(ValueError, + "The input given for display_mode='ortho' needs to " + "be a list of 3d world coordinates.", + plot_func, + img, display_mode='ortho', cut_coords=2) + + +def 
test_outlier_cut_coords(): + """ Test to plot a subset of a large set of cuts found for a small area.""" + bg_img = load_mni152_template() + + data = np.zeros((79, 95, 79)) + affine = np.array([[ -2., 0., 0., 78.], + [ 0., 2., 0., -112.], + [ 0., 0., 2., -70.], + [ 0., 0., 0., 1.]]) + + # Color a cube around a corner area: + x, y, z = 20, 22, 60 + x_map, y_map, z_map = coord_transform(x, y, z, + np.linalg.inv(affine)) + + data[int(x_map) - 1:int(x_map) + 1, + int(y_map) - 1:int(y_map) + 1, + int(z_map) - 1:int(z_map) + 1] = 1 + img = nibabel.Nifti1Image(data, affine) + cuts = find_cut_slices(img, n_cuts=20, direction='z') + + p = plot_stat_map(img, display_mode='z', cut_coords=cuts[-4:], + bg_img=bg_img) + + +def test_plot_stat_map_with_nans(): + img = _generate_img() + data = img.get_data() + + data[6, 5, 1] = np.nan + data[1, 5, 2] = np.nan + data[1, 3, 2] = np.nan + data[6, 5, 2] = np.inf + + img = nibabel.Nifti1Image(data, mni_affine) + plot_epi(img) + plot_stat_map(img) + plot_glass_brain(img) + + +def test_plotting_functions_with_cmaps(): + img = load_mni152_template() + cmaps = ['Paired', 'Set1', 'Set2', 'Set3'] + for cmap in cmaps: + plot_roi(img, cmap=cmap, colorbar=True) + plot_stat_map(img, cmap=cmap, colorbar=True) + plot_glass_brain(img, cmap=cmap, colorbar=True) + + if LooseVersion(matplotlib.__version__) >= LooseVersion('2.0.0'): + plot_stat_map(img, cmap='viridis', colorbar=True) + + plt.close() + + +def test_plotting_functions_with_nans_in_bg_img(): + bg_img = _generate_img() + bg_data = bg_img.get_data() + + bg_data[6, 5, 1] = np.nan + bg_data[1, 5, 2] = np.nan + bg_data[1, 3, 2] = np.nan + bg_data[6, 5, 2] = np.inf + + bg_img = nibabel.Nifti1Image(bg_data, mni_affine) + plot_anat(bg_img) + # test plot_roi with a background image which contains NaN values + roi_img = _generate_img() + plot_roi(roi_img=roi_img, bg_img=bg_img) + stat_map_img = _generate_img() + plot_stat_map(stat_map_img=stat_map_img, bg_img=bg_img) + + plt.close() + + +def test_plotting_functions_with_dim_invalid_input(): + # Test that a ValueError is raised for invalid input to 'dim' + img = _generate_img() + assert_raises(ValueError, plot_stat_map, img, dim='-10') + + +def test_add_markers_using_plot_glass_brain(): + fig = plot_glass_brain(None) + coords = [(-34, -39, -9)] + fig.add_markers(coords) + fig.close() diff --git a/nilearn/plotting/tests/test_js_plotting_utils.py b/nilearn/plotting/tests/test_js_plotting_utils.py new file mode 100644 index 0000000000..41fed4a598 --- /dev/null +++ b/nilearn/plotting/tests/test_js_plotting_utils.py @@ -0,0 +1,331 @@ +import os +import re +import base64 +import webbrowser +import time +import tempfile + +import numpy as np +import matplotlib +from numpy.testing import assert_warns, assert_no_warnings +try: + from lxml import etree + LXML_INSTALLED = True +except ImportError: + LXML_INSTALLED = False + +from nilearn.plotting import js_plotting_utils +from nilearn import surface +from nilearn.datasets import fetch_surf_fsaverage + + +# Note: html output by nilearn view_* functions +# should validate as html5 using https://validator.w3.org/nu/ with no +# warnings + + +def _normalize_ws(text): + return re.sub(r'\s+', ' ', text) + + +def test_add_js_lib(): + html = js_plotting_utils.get_html_template('surface_plot_template.html') + cdn = js_plotting_utils.add_js_lib(html, embed_js=False) + assert "decodeBase64" in cdn + assert _normalize_ws(""" + + """) in _normalize_ws(cdn) + inline = _normalize_ws(js_plotting_utils.add_js_lib(html, embed_js=True)) + assert 
_normalize_ws("""/*! jQuery v3.3.1 | (c) JS Foundation and other + contributors | jquery.org/license */""") in inline + assert _normalize_ws("""** + * plotly.js (gl3d - minified) v1.38.3 + * Copyright 2012-2018, Plotly, Inc. + * All rights reserved. + * Licensed under the MIT license + */ """) in inline + assert "decodeBase64" in inline + + +def check_colors(colors): + assert len(colors) == 100 + val, cstring = zip(*colors) + assert np.allclose(np.linspace(0, 1, 100), val, atol=1e-3) + assert val[0] == 0 + assert val[-1] == 1 + for cs in cstring: + assert re.match(r'rgb\(\d+, \d+, \d+\)', cs) + return val, cstring + + +def test_colorscale_no_threshold(): + cmap = 'jet' + values = np.linspace(-13, -1.5, 20) + threshold = None + colors = js_plotting_utils.colorscale(cmap, values, threshold) + check_colors(colors['colors']) + assert (colors['vmin'], colors['vmax']) == (-13, 13) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13) + assert colors['abs_threshold'] is None + + +def test_colorscale_threshold_0(): + cmap = 'jet' + values = np.linspace(-13, -1.5, 20) + threshold = '0%' + colors = js_plotting_utils.colorscale(cmap, values, threshold) + check_colors(colors['colors']) + assert (colors['vmin'], colors['vmax']) == (-13, 13) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13) + assert colors['abs_threshold'] == 1.5 + assert colors['symmetric_cmap'] + + +def test_colorscale_threshold_99(): + cmap = 'jet' + values = np.linspace(-13, -1.5, 20) + threshold = '99%' + colors = js_plotting_utils.colorscale(cmap, values, threshold) + check_colors(colors['colors']) + assert (colors['vmin'], colors['vmax']) == (-13, 13) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13) + assert colors['abs_threshold'] == 13 + assert colors['symmetric_cmap'] + + +def test_colorscale_threshold_50(): + cmap = 'jet' + values = np.linspace(-13, -1.5, 20) + threshold = '50%' + colors = js_plotting_utils.colorscale(cmap, values, threshold) + val, cstring = check_colors(colors['colors']) + assert cstring[50] == 'rgb(127, 127, 127)' + assert (colors['vmin'], colors['vmax']) == (-13, 13) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13) + assert np.allclose(colors['abs_threshold'], 7.55, 2) + assert colors['symmetric_cmap'] + + +def test_colorscale_absolute_threshold(): + cmap = 'jet' + values = np.linspace(-13, -1.5, 20) + threshold = 7.25 + colors = js_plotting_utils.colorscale(cmap, values, threshold) + val, cstring = check_colors(colors['colors']) + assert cstring[50] == 'rgb(127, 127, 127)' + assert (colors['vmin'], colors['vmax']) == (-13, 13) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13) + assert np.allclose(colors['abs_threshold'], 7.25) + assert colors['symmetric_cmap'] + + +def test_colorscale_asymmetric_cmap(): + cmap = 'jet' + values = np.arange(15) + colors = js_plotting_utils.colorscale(cmap, values, symmetric_cmap=False) + assert (colors['vmin'], colors['vmax']) == (0, 14) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (14, 0) + assert not colors['symmetric_cmap'] + + +def test_colorscale_vmax(): + cmap = 'jet' + values = np.arange(15) + colors = js_plotting_utils.colorscale(cmap, values, vmax=7) + assert (colors['vmin'], colors['vmax']) == (-7, 7) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == 
(7, -7) + assert colors['symmetric_cmap'] + + +def test_colorscale_asymmetric_cmap_vmax(): + cmap = 'jet' + values = np.arange(15) + colors = js_plotting_utils.colorscale(cmap, values, vmax=7, + symmetric_cmap=False) + assert (colors['vmin'], colors['vmax']) == (0, 7) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (7, 0) + assert not colors['symmetric_cmap'] + + +def test_colorscale_asymmetric_cmap_negative_values(): + cmap = 'jet' + values = np.linspace(-15, 4) + assert_warns(UserWarning, js_plotting_utils.colorscale, cmap, + values, symmetric_cmap=False) + + colors = js_plotting_utils.colorscale(cmap, values, vmax=7, + symmetric_cmap=False) + assert (colors['vmin'], colors['vmax']) == (-7, 7) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (7, -7) + assert colors['symmetric_cmap'] + + +def test_encode(): + for dtype in ['<f4', '<i4', '>f4', '>i4']: + a = np.arange(10, dtype=dtype) + encoded = js_plotting_utils.encode(a) + decoded = base64.b64decode(encoded.encode('utf-8')) + b = np.frombuffer(decoded, dtype=dtype) + assert np.allclose(js_plotting_utils.decode(encoded, dtype=dtype), b) + assert np.allclose(a, b) + + +def test_mesh_to_plotly(): + fsaverage = fetch_surf_fsaverage() + coord, triangles = surface.load_surf_mesh(fsaverage['pial_left']) + plotly = js_plotting_utils.mesh_to_plotly(fsaverage['pial_left']) + for i, key in enumerate(['_x', '_y', '_z']): + assert np.allclose( + js_plotting_utils.decode(plotly[key], '<f4'), coord[:, i]) [...] +def check_html(html, check_selects=True, plot_div_id='surface-plot'): [...] + assert '<meta charset="UTF-8" />' in str(html) + _check_open_in_browser(html) + resized = html.resize(3, 17) + assert resized is html + assert (html.width, html.height) == (3, 17) + assert "width=3 height=17" in html.get_iframe() + assert "width=33 height=37" in html.get_iframe(33, 37) + if not LXML_INSTALLED: + return + root = etree.HTML(html.html.encode('utf-8'), + parser=etree.HTMLParser(huge_tree=True)) + head = root.find('head') + assert len(head.findall('script')) == 5 + body = root.find('body') + div = body.find('div') + assert ('id', plot_div_id) in div.items() + if not check_selects: + return + selects = body.findall('select') + assert len(selects) == 3 + hemi = selects[0] + assert ('id', 'select-hemisphere') in hemi.items() + assert len(hemi.findall('option')) == 2 + kind = selects[1] + assert ('id', 'select-kind') in kind.items() + assert len(kind.findall('option')) == 2 + view = selects[2] + assert ('id', 'select-view') in view.items() + assert len(view.findall('option')) == 7 + + +def _open_mock(f): + print('opened {}'.format(f)) + + +def _check_open_in_browser(html): + wb_open = webbrowser.open + webbrowser.open = _open_mock + try: + html.open_in_browser(temp_file_lifetime=None) + temp_file = html._temp_file + assert html._temp_file is not None + assert os.path.isfile(temp_file) + html.remove_temp_file() + assert html._temp_file is None + assert not os.path.isfile(temp_file) + html.remove_temp_file() + html._temp_file = 'aaaaaaaaaaaaaaaaaaaaaa' + html.remove_temp_file() + finally: + webbrowser.open = wb_open + try: + os.remove(temp_file) + except Exception: + pass + + +def test_temp_file_removing(): + html = js_plotting_utils.HTMLDocument('hello') + wb_open = webbrowser.open + webbrowser.open = _open_mock + try: + html.open_in_browser(temp_file_lifetime=.5) + assert os.path.isfile(html._temp_file) + time.sleep(1.5) + assert not os.path.isfile(html._temp_file) + html.open_in_browser(temp_file_lifetime=None) + assert os.path.isfile(html._temp_file) + time.sleep(1.5) + assert os.path.isfile(html._temp_file) + finally: 
webbrowser.open = wb_open + try: + os.remove(html._temp_file) + except Exception: + pass + + +def _open_views(): + return [js_plotting_utils.HTMLDocument('') for i in range(12)] + + +def _open_one_view(): + for i in range(12): + v = js_plotting_utils.HTMLDocument('') + return v + + +def test_open_view_warning(): + # opening many views (without deleting the SurfaceView objects) + # should raise a warning about memory usage + assert_warns(UserWarning, _open_views) + assert_no_warnings(_open_one_view) + + +def test_to_color_strings(): + colors = [[0, 0, 1], [1, 0, 0], [.5, .5, .5]] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#0000ff', '#ff0000', '#7f7f7f'] + + colors = [[0, 0, 1, 1], [1, 0, 0, 1], [.5, .5, .5, 0]] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#0000ff', '#ff0000', '#7f7f7f'] + + colors = ['#0000ff', '#ff0000', '#7f7f7f'] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#0000ff', '#ff0000', '#7f7f7f'] + + colors = [[0, 0, 1, 1], [1, 0, 0, 1], [.5, .5, .5, 0]] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#0000ff', '#ff0000', '#7f7f7f'] + + colors = ['r', 'green', 'black', 'white'] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#ff0000', '#008000', '#000000', '#ffffff'] + + if matplotlib.__version__ < '2': + return + + colors = ['#0000ffff', '#ff0000ab', '#7f7f7f00'] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#0000ff', '#ff0000', '#7f7f7f'] diff --git a/nilearn/plotting/tests/test_matrix_plotting.py b/nilearn/plotting/tests/test_matrix_plotting.py new file mode 100644 index 0000000000..848089c295 --- /dev/null +++ b/nilearn/plotting/tests/test_matrix_plotting.py @@ -0,0 +1,46 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import matplotlib.pyplot as plt +from nose.tools import assert_true, assert_equal, assert_raises +from nilearn.plotting.matrix_plotting import plot_matrix + +############################################################################## +# Some smoke testing for graphics-related code + + +def test_matrix_plotting(): + from numpy import zeros + from distutils.version import LooseVersion + mat = zeros((10, 10)) + labels = [str(i) for i in range(10)] + ax = plot_matrix(mat, labels=labels, title='foo') + plt.close() + # test if plotting lower triangle works + ax = plot_matrix(mat, labels=labels, tri='lower') + # test if it returns an AxesImage + ax.axes.set_title('Title') + plt.close() + import scipy + if LooseVersion(scipy.__version__) >= LooseVersion('1.0.0'): + # test if a ValueError is raised when reorder=True without labels + assert_raises(ValueError, plot_matrix, mat, labels=None, reorder=True) + # test if a ValueError is raised when reorder argument is wrong + assert_raises(ValueError, plot_matrix, mat, labels=labels, reorder=' ') + # test if reordering with default linkage works + idx = [2, 3, 5] + from itertools import permutations + # make symmetric matrix of similarities so we can get a block + for perm in permutations(idx, 2): + mat[perm] = 1 + ax = plot_matrix(mat, labels=labels, reorder=True) + assert_equal(len(labels), len(ax.axes.get_xticklabels())) + reordered_labels = [int(lbl.get_text()) + for lbl in ax.axes.get_xticklabels()] + # block order does not matter + assert_true(reordered_labels[:3] == idx or reordered_labels[-3:] == idx, + 'Clustering does not find block structure.') + plt.close() + # test 
if reordering with specific linkage works + ax = plot_matrix(mat, labels=labels, reorder='complete') + plt.close() diff --git a/nilearn/plotting/tests/test_surf_plotting.py b/nilearn/plotting/tests/test_surf_plotting.py new file mode 100644 index 0000000000..55d10725a3 --- /dev/null +++ b/nilearn/plotting/tests/test_surf_plotting.py @@ -0,0 +1,198 @@ +# Tests for functions in surf_plotting.py + +import tempfile + +from distutils.version import LooseVersion +from nose import SkipTest +from nilearn._utils.testing import assert_raises_regex + +import numpy as np +import matplotlib +import matplotlib.pyplot as plt + +from nilearn.plotting.surf_plotting import (plot_surf, plot_surf_stat_map, + plot_surf_roi) +from nilearn.surface.tests.test_surface import _generate_surf + + +def test_plot_surf(): + mesh = _generate_surf() + rng = np.random.RandomState(0) + bg = rng.randn(mesh[0].shape[0], ) + + # Plot mesh only + plot_surf(mesh) + + # Plot mesh with background + plot_surf(mesh, bg_map=bg) + plot_surf(mesh, bg_map=bg, darkness=0.5) + plot_surf(mesh, bg_map=bg, alpha=0.5) + + # Plot different views + plot_surf(mesh, bg_map=bg, hemi='right') + plot_surf(mesh, bg_map=bg, view='medial') + plot_surf(mesh, bg_map=bg, hemi='right', view='medial') + + # Plot with colorbar + plot_surf(mesh, bg_map=bg, colorbar=True) + + # Save execution time and memory + plt.close() + + +def test_plot_surf_error(): + mesh = _generate_surf() + rng = np.random.RandomState(0) + + # Wrong inputs for view or hemi + assert_raises_regex(ValueError, 'view must be one of', + plot_surf, mesh, view='middle') + assert_raises_regex(ValueError, 'hemi must be one of', + plot_surf, mesh, hemi='lft') + + # Wrong size of background image + assert_raises_regex(ValueError, + 'bg_map does not have the same number of vertices', + plot_surf, mesh, + bg_map=rng.randn(mesh[0].shape[0] - 1, )) + + # Wrong size of surface data + assert_raises_regex(ValueError, + 'surf_map does not have the same number of vertices', + plot_surf, mesh, + surf_map=rng.randn(mesh[0].shape[0] + 1, )) + + assert_raises_regex(ValueError, + 'surf_map can only have one dimension', plot_surf, + mesh, surf_map=rng.randn(mesh[0].shape[0], 2)) + + +def test_plot_surf_stat_map(): + mesh = _generate_surf() + rng = np.random.RandomState(0) + bg = rng.randn(mesh[0].shape[0], ) + data = 10 * rng.randn(mesh[0].shape[0], ) + + # Plot mesh with stat map + plot_surf_stat_map(mesh, stat_map=data) + plot_surf_stat_map(mesh, stat_map=data, colorbar=True) + plot_surf_stat_map(mesh, stat_map=data, alpha=1) + + # Plot mesh with background and stat map + plot_surf_stat_map(mesh, stat_map=data, bg_map=bg) + plot_surf_stat_map(mesh, stat_map=data, bg_map=bg, + bg_on_data=True, darkness=0.5) + plot_surf_stat_map(mesh, stat_map=data, bg_map=bg, colorbar=True, + bg_on_data=True, darkness=0.5) + + # Apply threshold + plot_surf_stat_map(mesh, stat_map=data, bg_map=bg, + bg_on_data=True, darkness=0.5, + threshold=0.3) + plot_surf_stat_map(mesh, stat_map=data, bg_map=bg, colorbar=True, + bg_on_data=True, darkness=0.5, + threshold=0.3) + + # Change vmax + plot_surf_stat_map(mesh, stat_map=data, vmax=5) + plot_surf_stat_map(mesh, stat_map=data, vmax=5, colorbar=True) + + # Change colormap + plot_surf_stat_map(mesh, stat_map=data, cmap='cubehelix') + plot_surf_stat_map(mesh, stat_map=data, cmap='cubehelix', colorbar=True) + + # Plot to axes + axes = plt.subplots(ncols=2, subplot_kw={'projection': '3d'})[1] + for ax in axes.flatten(): + plot_surf_stat_map(mesh, stat_map=data, ax=ax) + axes = 
plt.subplots(ncols=2, subplot_kw={'projection': '3d'})[1] + for ax in axes.flatten(): + plot_surf_stat_map(mesh, stat_map=data, ax=ax, colorbar=True) + + fig = plot_surf_stat_map(mesh, stat_map=data, colorbar=False) + assert len(fig.axes) == 1 + # symmetric_cbar + fig = plot_surf_stat_map( + mesh, stat_map=data, colorbar=True, symmetric_cbar=True) + assert len(fig.axes) == 2 + yticklabels = fig.axes[1].get_yticklabels() + first, last = yticklabels[0].get_text(), yticklabels[-1].get_text() + assert float(first) == - float(last) + # no symmetric_cbar + fig = plot_surf_stat_map( + mesh, stat_map=data, colorbar=True, symmetric_cbar=False) + assert len(fig.axes) == 2 + yticklabels = fig.axes[1].get_yticklabels() + first, last = yticklabels[0].get_text(), yticklabels[-1].get_text() + assert float(first) != - float(last) + # Save execution time and memory + plt.close() + + +def test_plot_surf_stat_map_error(): + mesh = _generate_surf() + rng = np.random.RandomState(0) + data = 10 * rng.randn(mesh[0].shape[0], ) + + # Try to input vmin + assert_raises_regex(ValueError, + 'this function does not accept a "vmin" argument', + plot_surf_stat_map, mesh, stat_map=data, vmin=0) + + # Wrong size of stat map data + assert_raises_regex(ValueError, + 'surf_map does not have the same number of vertices', + plot_surf_stat_map, mesh, + stat_map=np.hstack((data, data))) + + assert_raises_regex(ValueError, + 'surf_map can only have one dimension', + plot_surf_stat_map, mesh, + stat_map=np.vstack((data, data)).T) + + +def test_plot_surf_roi(): + mesh = _generate_surf() + rng = np.random.RandomState(0) + roi1 = rng.randint(0, mesh[0].shape[0], size=5) + roi2 = rng.randint(0, mesh[0].shape[0], size=10) + parcellation = rng.rand(mesh[0].shape[0]) + + # plot roi + plot_surf_roi(mesh, roi_map=roi1) + plot_surf_roi(mesh, roi_map=roi1, colorbar=True) + + # plot parcellation + plot_surf_roi(mesh, roi_map=parcellation) + plot_surf_roi(mesh, roi_map=parcellation, colorbar=True) + + # plot roi list + plot_surf_roi(mesh, roi_map=[roi1, roi2]) + plot_surf_roi(mesh, roi_map=[roi1, roi2], colorbar=True) + + # plot to axes + plot_surf_roi(mesh, roi_map=roi1, ax=None, figure=plt.gcf()) + + # plot to axes + with tempfile.NamedTemporaryFile() as tmp_file: + plot_surf_roi(mesh, roi_map=roi1, ax=plt.gca(), figure=None, + output_file=tmp_file.name) + with tempfile.NamedTemporaryFile() as tmp_file: + plot_surf_roi(mesh, roi_map=roi1, ax=plt.gca(), figure=None, + output_file=tmp_file.name, colorbar=True) + + # Save execution time and memory + plt.close() + + +def test_plot_surf_roi_error(): + mesh = _generate_surf() + rng = np.random.RandomState(0) + roi1 = rng.randint(0, mesh[0].shape[0], size=5) + roi2 = rng.randint(0, mesh[0].shape[0], size=10) + + # Wrong input + assert_raises_regex(ValueError, + 'Invalid input for roi_map', + plot_surf_roi, mesh, + roi_map={'roi1': roi1, 'roi2': roi2}) diff --git a/nilearn/regions/__init__.py b/nilearn/regions/__init__.py index dafd963de7..520d825dc1 100644 --- a/nilearn/regions/__init__.py +++ b/nilearn/regions/__init__.py @@ -2,14 +2,17 @@ The :mod:`nilearn.regions` class module includes region extraction procedure on a 4D statistical/atlas maps and its function. 
""" -from .region_extractor import connected_regions, RegionExtractor +from .region_extractor import (connected_regions, RegionExtractor, + connected_label_regions) from .signal_extraction import ( img_to_signals_labels, signals_to_img_labels, img_to_signals_maps, signals_to_img_maps, ) +from .parcellations import Parcellations __all__ = [ 'connected_regions', 'RegionExtractor', + 'connected_label_regions', 'img_to_signals_labels', 'signals_to_img_labels', 'img_to_signals_maps', 'signals_to_img_maps', -] + 'Parcellations'] diff --git a/nilearn/regions/parcellations.py b/nilearn/regions/parcellations.py new file mode 100644 index 0000000000..5f208e8916 --- /dev/null +++ b/nilearn/regions/parcellations.py @@ -0,0 +1,430 @@ +"""Parcellation tools such as KMeans or Ward for fMRI images +""" + +import numpy as np + +from sklearn.base import clone +from sklearn.feature_extraction import image +from sklearn.externals.joblib import Memory, delayed, Parallel + +from ..decomposition.multi_pca import MultiPCA +from ..input_data import NiftiLabelsMasker +from .._utils.compat import _basestring +from .._utils.niimg import _safe_get_data +from .._utils.niimg_conversions import _iter_check_niimg + + +def _estimator_fit(data, estimator): + """ Estimator to fit on the data matrix + + Parameters + ---------- + data : numpy array + Data matrix + + estimator : instance of estimator from sklearn + MiniBatchKMeans or AgglomerativeClustering + + Returns + ------- + labels_ : numpy.ndarray + labels_ estimated from estimator + """ + estimator = clone(estimator) + estimator.fit(data.T) + + return estimator.labels_ + + +def _check_parameters_transform(imgs, confounds): + """A helper function to check the parameters and prepare for processing + as a list. + """ + if not isinstance(imgs, (list, tuple)) or \ + isinstance(imgs, _basestring): + imgs = [imgs, ] + single_subject = True + elif isinstance(imgs, (list, tuple)) and len(imgs) == 1: + single_subject = True + else: + single_subject = False + + if confounds is None and isinstance(imgs, (list, tuple)): + confounds = [None] * len(imgs) + + if confounds is not None: + if not isinstance(confounds, (list, tuple)) or \ + isinstance(confounds, _basestring): + confounds = [confounds, ] + + if len(confounds) != len(imgs): + raise ValueError("Number of confounds given does not match with " + "the given number of images.") + return imgs, confounds, single_subject + + +def _labels_masker_extraction(img, masker, confound): + """ Helper function for parallelizing NiftiLabelsMasker extractor + on list of Nifti images. + + Parameters + ---------- + img : 4D Nifti image like object + Image to process. + + masker : instance of NiftiLabelsMasker + Used for extracting signals with fit_transform + + confound : csv file or numpy array + Confound used for signal cleaning while extraction. + Passed to signal.clean + + Returns + ------- + signals : numpy array + Signals extracted on given img + """ + masker = clone(masker) + signals = masker.fit_transform(img, confounds=confound) + return signals + + +class Parcellations(MultiPCA): + """Learn parcellations on fMRI images. + + Four different types of clustering methods can be used such as kmeans, + ward, complete, average. Kmeans will call MiniBatchKMeans whereas + ward, complete, average are used within in Agglomerative Clustering. + All methods are leveraged from scikit-learn. + + .. 
versionadded:: 0.4.1 + + Parameters + ---------- + method : str, {'kmeans', 'ward', 'complete', 'average'} + The clustering method to use for brain parcellation. + + n_parcels : int, default=50 + Number of parcels to divide the brain data into. + + random_state : int or RandomState + Pseudo-random number generator state used for random sampling. + + mask : Niimg-like object or NiftiMasker, MultiNiftiMasker instance + Mask/Masker used for masking the data. + If a mask image is provided, it will be used in the MultiNiftiMasker. + If an instance of MultiNiftiMasker is provided, its parameters + will be used in masking the data, overriding the default + masker parameters. + If None, the mask will be automatically computed by a MultiNiftiMasker + with default parameters. + + smoothing_fwhm : float, optional (default=4.) + If smoothing_fwhm is not None, it gives the full-width half maximum in + millimeters of the spatial smoothing to apply to the signal. + + standardize : boolean, optional + If standardize is True, the time-series are centered and normed: + their mean is put to 0 and their variance to 1 in the time dimension. + + detrend : boolean, optional + Whether to detrend signals or not. + This parameter is passed to signal.clean. Please see the related + documentation for details. + + low_pass: None or float, optional + This parameter is passed to signal.clean. Please see the related + documentation for details. + + high_pass: None or float, optional + This parameter is passed to signal.clean. Please see the related + documentation for details. + + t_r : float, optional + This parameter is passed to signal.clean. Please see the related + documentation for details. + + target_affine : 3x3 or 4x4 matrix, optional + This parameter is passed to image.resample_img. Please see the + related documentation for details. The given affine is considered + the same for all images in the given list. + + target_shape : 3-tuple of integers, optional + This parameter is passed to image.resample_img. Please see the + related documentation for details. + + memory : instance of joblib.Memory or str + Used to cache the masking process. + By default, no caching is done. If a string is given, it is the + path to the caching directory. + + memory_level : integer, optional + Rough estimator of the amount of memory used by caching. Higher value + means more memory for caching. + + n_jobs : integer, optional + The number of CPUs to use to do the computation. -1 means + 'all CPUs', -2 'all CPUs but one', and so on. + + verbose : integer, optional + Indicate the level of verbosity. By default, nothing is printed. + + Attributes + ---------- + labels_img_ : Nifti1Image + Labels image of the parcellation learned on the fMRI images. + + masker_ : instance of NiftiMasker or MultiNiftiMasker + The masker used to mask the data. + + connectivity_ : numpy.ndarray + voxel-to-voxel connectivity matrix computed from a mask. + Note that this attribute is only present if the selected method is + an agglomerative type: 'ward', 'complete' or 'average'. + + Notes + ----- + * Transforming a list of Nifti images to a data matrix takes a few + steps: the data dimensionality is first reduced using a randomized + SVD, then brain parcellations are built using KMeans or one of the + agglomerative methods. + + * This object uses spatially-constrained AgglomerativeClustering for + method='ward', 'complete' or 'average'. The spatial (voxel-to-voxel) + connectivity matrix is built internally, so there is no need to + supply it explicitly.
+ + """ + VALID_METHODS = ['kmeans', 'ward', 'complete', 'average'] + + def __init__(self, method, n_parcels=50, + random_state=0, mask=None, smoothing_fwhm=4., + standardize=False, detrend=False, + low_pass=None, high_pass=None, t_r=None, + target_affine=None, target_shape=None, + mask_strategy='epi', mask_args=None, + memory=Memory(cachedir=None), + memory_level=0, n_jobs=1, verbose=1): + self.method = method + self.n_parcels = n_parcels + + MultiPCA.__init__(self, n_components=200, + random_state=random_state, + mask=mask, memory=memory, + smoothing_fwhm=smoothing_fwhm, + standardize=standardize, detrend=detrend, + low_pass=low_pass, high_pass=high_pass, + t_r=t_r, target_affine=target_affine, + target_shape=target_shape, + mask_strategy=mask_strategy, + mask_args=mask_args, + memory_level=memory_level, + n_jobs=n_jobs, + verbose=verbose) + + def _raw_fit(self, data): + """ Fits the parcellation method on this reduced data. + + The data come from a base decomposition estimator, which computes + the mask and reduces the dimensionality of the images using + randomized_svd. + + Parameters + ---------- + data : ndarray + Shape (n_samples, n_features) + + Returns + ------- + labels_ : numpy.ndarray + Labels of each cluster in the brain. + + connectivity_ : numpy.ndarray + voxel-to-voxel connectivity matrix computed from a mask. + Note that this attribute is returned only for agglomerative + methods: 'ward', 'complete', 'average'. + """ + valid_methods = self.VALID_METHODS + if self.method is None: + raise ValueError("Parcellation method is specified as None. " + "Please select one of the methods in " + "{0}".format(valid_methods)) + if self.method is not None and self.method not in valid_methods: + raise ValueError("The method you have selected, '{0}', is not " + "implemented. Valid methods are in {1}" + .format(self.method, valid_methods)) + + # We delay importing MiniBatchKMeans/AgglomerativeClustering and + # import the plotting module before that. + + # sklearn.cluster imports scipy's hierarchy module, and hierarchy + # imports matplotlib. So, we force importing matplotlib first, via + # our plotting module, to avoid matplotlib backend display errors + # happening in Travis + try: + from nilearn import plotting + except Exception: + pass + + components = MultiPCA._raw_fit(self, data) + + mask_img_ = self.masker_.mask_img_ + if self.verbose: + print("[{0}] computing {1}".format(self.__class__.__name__, + self.method)) + + if self.method == 'kmeans': + from sklearn.cluster import MiniBatchKMeans + kmeans = MiniBatchKMeans(n_clusters=self.n_parcels, + init='k-means++', + random_state=self.random_state, + verbose=self.verbose) + labels = self._cache(_estimator_fit, + func_memory_level=1)(components.T, kmeans) + else: + mask_ = _safe_get_data(mask_img_).astype(np.bool) + shape = mask_.shape + connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1], + n_z=shape[2], mask=mask_) + + from sklearn.cluster import AgglomerativeClustering + + agglomerative = AgglomerativeClustering( + n_clusters=self.n_parcels, connectivity=connectivity, + linkage=self.method, memory=self.memory) + + labels = self._cache(_estimator_fit, + func_memory_level=1)(components.T, + agglomerative) + + self.connectivity_ = connectivity + # Avoid 0 label + labels = labels + 1 + self.labels_img_ = self.masker_.inverse_transform(labels) + + return self + + def _check_fitted(self): + """Helper function to check whether fit is called or not. + """ + if not hasattr(self, 'labels_img_'): + raise ValueError("Object has no labels_img_ attribute. 
" + "Ensure that fit() is called before transform.") + + def transform(self, imgs, confounds=None): + """Extract signals from parcellations learned on fmri images. + + Parameters + ---------- + imgs : List of Nifti-like images + See http://nilearn.github.io/manipulating_images/input_output.html. + Images to process. + + confounds: List of CSV files or arrays-like, optional + Each file or numpy array in a list should have shape + (number of scans, number of confounds) + This parameter is passed to signal.clean. Please see the related + documentation for details. Must be of same length of imgs. + + Returns + ------- + region_signals: List of or 2D numpy.ndarray + Signals extracted for each label for each image. + Example, for single image shape will be + (number of scans, number of labels) + """ + self._check_fitted() + imgs, confounds, single_subject = _check_parameters_transform( + imgs, confounds) + # Requires for special cases like extracting signals on list of + # 3D images + imgs_list = _iter_check_niimg(imgs, atleast_4d=True) + + masker = NiftiLabelsMasker(self.labels_img_, + mask_img=self.masker_.mask_img_, + smoothing_fwhm=self.smoothing_fwhm, + standardize=self.standardize, + detrend=self.detrend, + low_pass=self.low_pass, + high_pass=self.high_pass, t_r=self.t_r, + resampling_target='data', + memory=self.memory, + memory_level=self.memory_level, + verbose=self.verbose) + + region_signals = Parallel(n_jobs=self.n_jobs)( + delayed(self._cache(_labels_masker_extraction, + func_memory_level=2)) + (img, masker, confound) + for img, confound in zip(imgs_list, confounds)) + + if single_subject: + return region_signals[0] + else: + return region_signals + + def fit_transform(self, imgs, confounds=None): + """Fit the images to parcellations and then transform them. + + Parameters + ---------- + imgs : List of Nifti-like images + See http://nilearn.github.io/manipulating_images/input_output.html. + Images for process for fit as well for transform to signals. + + confounds : List of CSV files or arrays-like, optional + Each file or numpy array in a list should have shape + (number of scans, number of confounds). + This parameter is passed to signal.clean. Given confounds + should have same length as images if given as a list. + + Note: same confounds will used for cleaning signals before + learning parcellations. + + Returns + ------- + region_signals: List of or 2D numpy.ndarray + Signals extracted for each label for each image. + Example, for single image shape will be + (number of scans, number of labels) + """ + return self.fit(imgs, confounds=confounds).transform(imgs, + confounds=confounds) + + def inverse_transform(self, signals): + """Transform signals extracted from parcellations back to brain + images. + + Uses `labels_img_` (parcellations) built at fit() level. 
+ + Parameters + ---------- + signals : List of 2D numpy.ndarray + Each 2D array with shape (number of scans, number of regions) + + Returns + ------- + imgs : List of or Nifti-like image + Brain image(s) + """ + from .signal_extraction import signals_to_img_labels + + self._check_fitted() + + if not isinstance(signals, (list, tuple)) or\ + isinstance(signals, np.ndarray): + signals = [signals, ] + single_subject = True + elif isinstance(signals, (list, tuple)) and len(signals) == 1: + single_subject = True + else: + single_subject = False + + imgs = Parallel(n_jobs=self.n_jobs)( + delayed(self._cache(signals_to_img_labels, func_memory_level=2)) + (each_signal, self.labels_img_, self.mask_img_) + for each_signal in signals) + + if single_subject: + return imgs[0] + else: + return imgs diff --git a/nilearn/regions/region_extractor.py b/nilearn/regions/region_extractor.py index bc6ea73e09..c84bfc9c22 100644 --- a/nilearn/regions/region_extractor.py +++ b/nilearn/regions/region_extractor.py @@ -3,19 +3,21 @@ """ import numbers +import collections import numpy as np -from scipy.ndimage import label +from scipy import ndimage from scipy.stats import scoreatpercentile from sklearn.externals.joblib import Memory from .. import masking from ..input_data import NiftiMapsMasker -from .._utils import check_niimg, check_niimg_4d +from .._utils import check_niimg, check_niimg_3d, check_niimg_4d from ..image import new_img_like, resample_img from ..image.image import _smooth_array, threshold_img from .._utils.niimg_conversions import concat_niimgs, _check_same_fov +from .._utils.niimg import _safe_get_data from .._utils.compat import _basestring from .._utils.ndimage import _peak_local_max from .._utils.segmentation import _random_walker @@ -53,7 +55,8 @@ def _threshold_maps_ratio(maps_img, threshold): else: ratio = threshold - maps_data = maps.get_data() + maps_data = _safe_get_data(maps, ensure_finite=True) + abs_maps = np.abs(maps_data) # thresholding cutoff_threshold = scoreatpercentile( @@ -65,6 +68,61 @@ return threshold_maps_img +def _remove_small_regions(input_data, index, affine, min_size): + """Remove regions smaller than min_size (in volume) from input_data. + + min_size should be specified in mm^3 (region size in volume). + + Parameters + ---------- + input_data : numpy.ndarray + Labeled array, e.g. the output of scipy.ndimage.label; voxels with + the same label are counted together to get each region's size, + which is compared with the given min_size. + + index : numpy.ndarray + A sequence of label numbers of the regions to be measured, + corresponding to input_data. Such a sequence can be generated with + np.arange(n_labels + 1) + + affine : numpy.ndarray + Affine of input_data, used to convert region sizes from voxels to + volume in mm^3. + + min_size : float in mm^3 + Regions in input_data whose volume falls below min_size (in mm^3) + are discarded. + + Returns + ------- + out : numpy.ndarray + Data with the regions smaller than min_size removed. If no region + falls below min_size, the input data is returned unchanged. + """ + # The return_counts argument of np.unique was introduced in numpy 1.9.0: + # _, region_sizes = np.unique(input_data, return_counts=True) + + # For now, to count the region sizes, we use return_inverse from + # np.unique and then use np.bincount to count the region sizes.
+ + _, region_indices = np.unique(input_data, return_inverse=True) + region_sizes = np.bincount(region_indices) + size_in_vox = min_size / np.abs(np.linalg.det(affine[:3, :3])) + labels_kept = region_sizes > size_in_vox + if not np.all(labels_kept): + # Put to zero the indices not kept + rejected_labels_mask = np.in1d(input_data, + np.where(np.logical_not(labels_kept))[0] + ).reshape(input_data.shape) + # Avoid modifying the input: + input_data = input_data.copy() + input_data[rejected_labels_mask] = 0 + # Reorder the indices to avoid gaps + input_data = np.searchsorted(np.unique(input_data), input_data) + return input_data + + def connected_regions(maps_img, min_region_size=1350, extract_type='local_regions', smoothing_fwhm=6, mask_img=None): @@ -112,13 +170,22 @@ index_of_each_map: numpy array an array of list of indices where each index denotes the identity of each extracted region to their family of brain maps. + + See Also + -------- + nilearn.regions.connected_label_regions : A function that can be used + to extract regions from label-based atlas images. + + nilearn.regions.RegionExtractor : A class that can be used both to + extract regions from continuous-valued atlas images and to extract + time-series signals from the extracted regions. """ all_regions_imgs = [] index_of_each_map = [] maps_img = check_niimg(maps_img, atleast_4d=True) - maps = maps_img.get_data() - affine = maps_img.get_affine() - min_region_size = min_region_size / np.prod(np.diag(abs(affine[:3]))) + maps = _safe_get_data(maps_img).copy() + affine = maps_img.affine + min_region_size = min_region_size / np.abs(np.linalg.det(affine[:3, :3])) allowed_extract_types = ['connected_components', 'local_regions'] if extract_type not in allowed_extract_types: @@ -129,7 +196,7 @@ if mask_img is not None: if not _check_same_fov(maps_img, mask_img): mask_img = resample_img(mask_img, - target_affine=maps_img.get_affine(), + target_affine=maps_img.affine, target_shape=maps_img.shape[:3], interpolation="nearest") mask_data, _ = masking._load_mask_img(mask_img) @@ -143,7 +210,7 @@ if extract_type == 'local_regions': smooth_map = _smooth_array(map_3d, affine=affine, fwhm=smoothing_fwhm) seeds = _peak_local_max(smooth_map) - seeds_label, seeds_id = label(seeds) + seeds_label, seeds_id = ndimage.label(seeds) # Assign -1 to values which are 0. to indicate to ignore seeds_label[map_3d == 0.] = -1 rw_maps = _random_walker(map_3d, seeds_label) @@ -152,7 +219,7 @@ label_maps = rw_maps else: # Connected component extraction - label_maps, n_labels = label(map_3d) + label_maps, n_labels = ndimage.label(map_3d) # Takes the size of each labeled region labels_size = np.bincount(label_maps.ravel())
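The mm^3-to-voxel conversion used in both _remove_small_regions and connected_regions above is a division by the voxel volume, i.e. the absolute determinant of the affine's 3x3 block. A worked example, assuming 3 mm isotropic voxels (values illustrative):

    import numpy as np

    affine = np.diag([3., 3., 3., 1.])                    # 3 mm isotropic voxels
    voxel_volume = np.abs(np.linalg.det(affine[:3, :3]))  # 27 mm^3 per voxel
    min_region_size = 1350.                               # default, in mm^3
    size_in_vox = min_region_size / voxel_volume          # 50 voxels
    # A region is kept only if it spans more than 50 voxels at this resolution.

@@ -193,7 +260,7 @@ class RegionExtractor(NiftiMapsMasker): Mask to be applied to input data, passed to NiftiMapsMasker. If None, no masking is applied. - min_region_size: int, default 1350 mm^3, optional + min_region_size: float, default 1350 mm^3, optional Minimum volume in mm3 for a region to be kept. For example, if the voxel size is 3x3x3 mm then the volume of the voxel is 27mm^3. By default, it is 1350mm^3 which means we take minimum @@ -227,6 +294,12 @@ class RegionExtractor(NiftiMapsMasker): random walker segmentation algorithm on these markers for region separation.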
+ smoothing_fwhm: scalar, default 6mm, optional + Smoothing (FWHM, in mm) applied to the image before extracting the + sparser regions. This parameter is passed to `connected_regions` and + is used only for the 'local_regions' extractor. Please set this + parameter according to the resolution of the maps, otherwise + extraction may fail. + standardize: bool, True or False, default False, optional + If True, the time series signals are centered and normalized by + putting their mean to 0 and variance to 1. Recommended to @@ -283,14 +356,21 @@ class RegionExtractor(NiftiMapsMasker): better brain parcellations from rest fMRI", Sparsity Techniques in Medical Imaging, Sep 2014, Boston, United States. pp.8 + See Also + -------- + nilearn.regions.connected_label_regions : A function that can readily + be used to extract regions from label-based atlas images. + """ def __init__(self, maps_img, mask_img=None, min_region_size=1350, threshold=1., thresholding_strategy='ratio_n_voxels', - extractor='local_regions', standardize=False, detrend=False, + extractor='local_regions', smoothing_fwhm=6, + standardize=False, detrend=False, low_pass=None, high_pass=None, t_r=None, memory=Memory(cachedir=None), memory_level=0, verbose=0): super(RegionExtractor, self).__init__( maps_img=maps_img, mask_img=mask_img, + smoothing_fwhm=smoothing_fwhm, standardize=standardize, detrend=detrend, low_pass=low_pass, high_pass=high_pass, t_r=t_r, memory=memory, memory_level=memory_level, verbose=verbose) @@ -299,6 +379,7 @@ def __init__(self, maps_img, mask_img=None, min_region_size=1350, self.thresholding_strategy = thresholding_strategy self.threshold = threshold self.extractor = extractor + self.smoothing_fwhm = smoothing_fwhm def fit(self, X=None, y=None): """ Prepare the data and setup for the region extraction @@ -328,9 +409,141 @@ def fit(self, X=None, y=None): # connected component extraction self.regions_img_, self.index_ = connected_regions(threshold_maps, self.min_region_size, - self.extractor) + self.extractor, + self.smoothing_fwhm) self.maps_img = self.regions_img_ super(RegionExtractor, self).fit() return self + + +def connected_label_regions(labels_img, min_size=None, connect_diag=True, + labels=None): + """ Extract connected regions from a brain atlas image defined by labels (integers). + + For each label in a parcellation, separates out connected + components and assigns each separated region a unique label. + + Parameters + ---------- + + labels_img : Nifti-like image + A 3D image which contains regions denoted by labels. Each region + is assigned an integer. + + min_size : float, in mm^3, optional (default None) + Minimum region volume required for a region to be kept after + extraction. Removes small or spurious regions. + + connect_diag : bool (default True) + If 'connect_diag' is True, two voxels are considered in the same region + if they are connected along the diagonal (26-connectivity). If it is + False, two voxels are considered connected only if they are adjacent + along the x, y, or z axis. + + labels : 1D numpy array or list of str, (default None), optional + Each string in the list or array denotes the name of a brain atlas + region given in the labels_img input. If provided, the same names are + re-assigned to the corresponding connected components after + relabelling. The total number of names must match the number of labels + assigned in the image.
+ + NOTE: The order of the names given in labels should be appropriately + matched with the unique labels (integers) assigned to each region + given in labels_img (also excluding 'Background' label). + + Returns + ------- + new_labels_img : Nifti-like image + A new image comprising of regions extracted on an input labels_img. + + new_labels : list, optional + If labels are provided, new labels assigned to region extracted will + be returned. Otherwise, only new labels image will be returned. + + See Also + -------- + nilearn.datasets.fetch_atlas_harvard_oxford : For an example of atlas with + labels. + + nilearn.regions.RegionExtractor : A class can be used for region extraction + on continuous type atlas images. + + nilearn.regions.connected_regions : A function used for region extraction + on continuous type atlas images. + + """ + labels_img = check_niimg_3d(labels_img) + labels_data = _safe_get_data(labels_img, ensure_finite=True) + affine = labels_img.affine + + check_unique_labels = np.unique(labels_data) + + if min_size is not None and not isinstance(min_size, numbers.Number): + raise ValueError("Expected 'min_size' to be specified as integer. " + "You provided {0}".format(min_size)) + if not isinstance(connect_diag, bool): + raise ValueError("'connect_diag' must be specified as True or False. " + "You provided {0}".format(connect_diag)) + if np.any(check_unique_labels < 0): + raise ValueError("The 'labels_img' you provided has unknown/negative " + "integers as labels {0} assigned to regions. " + "All regions in an image should have positive " + "integers assigned as labels." + .format(check_unique_labels)) + + unique_labels = set(check_unique_labels) + # check for background label indicated as 0 + if np.any(check_unique_labels == 0): + unique_labels.remove(0) + + if labels is not None: + if (not isinstance(labels, collections.Iterable) or + isinstance(labels, _basestring)): + labels = [labels, ] + if len(unique_labels) != len(labels): + raise ValueError("The number of labels: {0} provided as input " + "in labels={1} does not match with the number " + "of unique labels in labels_img: {2}. " + "Please provide appropriate match with unique " + "number of labels in labels_img." 
+ .format(len(labels), labels, len(unique_labels))) + new_names = [] + + if labels is None: + this_labels = [None] * len(unique_labels) + else: + this_labels = labels + + new_labels_data = np.zeros(labels_data.shape, dtype=np.int) + current_max_label = 0 + for label_id, name in zip(unique_labels, this_labels): + this_label_mask = (labels_data == label_id) + # Extract regions assigned to each label id + if connect_diag: + structure = np.ones((3, 3, 3), dtype=np.int) + regions, this_n_labels = ndimage.label( + this_label_mask.astype(np.int), structure=structure) + else: + regions, this_n_labels = ndimage.label(this_label_mask.astype(np.int)) + + if min_size is not None: + index = np.arange(this_n_labels + 1) + regions = _remove_small_regions(regions, index, affine, + min_size=min_size) + this_n_labels = regions.max() + + cur_regions = regions[regions != 0] + current_max_label + new_labels_data[regions != 0] = cur_regions + current_max_label += this_n_labels + if name is not None: + new_names.extend([name] * this_n_labels) + + new_labels_img = new_img_like(labels_img, new_labels_data, affine=affine) + if labels is not None: + new_labels = new_names + return new_labels_img, new_labels + + return new_labels_img diff --git a/nilearn/regions/signal_extraction.py b/nilearn/regions/signal_extraction.py index 74422076bf..d17ff79729 100644 --- a/nilearn/regions/signal_extraction.py +++ b/nilearn/regions/signal_extraction.py @@ -11,6 +11,7 @@ from scipy import linalg, ndimage from .. import _utils +from .._utils.niimg import _safe_get_data from .. import masking from ..image import new_img_like @@ -26,18 +27,18 @@ def img_to_signals_labels(imgs, labels_img, mask_img=None, performs no resampling. Parameters - ========== + ---------- imgs: 4D Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html input images. labels_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html regions definition as labels. By default, the label zero is used to denote an absence of region. Use background_label to change it. mask_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Mask to apply to labels before extracting signals. Every point outside the mask is considered as background (i.e. no region). @@ -48,7 +49,7 @@ def img_to_signals_labels(imgs, labels_img, mask_img=None, ordering of output array ("C" or "F"). Defaults to "F". Returns - ======= + ------- signals: numpy.ndarray Signals extracted from each region. One output signal is the mean of all input signals in a given region. If some regions are entirely @@ -60,7 +61,7 @@ def img_to_signals_labels(imgs, labels_img, mask_img=None, the region with label labels[n]. See also - ======== + -------- nilearn.regions.signals_to_img_labels nilearn.regions.img_to_signals_maps """ @@ -70,35 +71,36 @@ def img_to_signals_labels(imgs, labels_img, mask_img=None, # TODO: Make a special case for list of strings (load one image at a # time). imgs = _utils.check_niimg_4d(imgs) - target_affine = imgs.get_affine() + target_affine = imgs.affine target_shape = imgs.shape[:3] # Check shapes and affines. 
if labels_img.shape != target_shape: raise ValueError("labels_img and imgs shapes must be identical.") - if abs(labels_img.get_affine() - target_affine).max() > 1e-9: + if abs(labels_img.affine - target_affine).max() > 1e-9: raise ValueError("labels_img and imgs affines must be identical") if mask_img is not None: mask_img = _utils.check_niimg_3d(mask_img) if mask_img.shape != target_shape: raise ValueError("mask_img and imgs shapes must be identical.") - if abs(mask_img.get_affine() - target_affine).max() > 1e-9: + if abs(mask_img.affine - target_affine).max() > 1e-9: raise ValueError("mask_img and imgs affines must be identical") # Perform computation - labels_data = labels_img.get_data() + labels_data = _safe_get_data(labels_img, ensure_finite=True) labels = list(np.unique(labels_data)) if background_label in labels: labels.remove(background_label) if mask_img is not None: - mask_data = mask_img.get_data() + mask_data = _safe_get_data(mask_img, ensure_finite=True) labels_data = labels_data.copy() labels_data[np.logical_not(mask_data)] = background_label - data = imgs.get_data() - signals = np.ndarray((data.shape[-1], len(labels)), order=order) + data = _safe_get_data(imgs) + signals = np.ndarray((data.shape[-1], len(labels)), order=order, + dtype=data.dtype) for n, img in enumerate(np.rollaxis(data, -1)): signals[n] = np.asarray(ndimage.measurements.mean(img, labels=labels_data, @@ -112,7 +114,7 @@ def img_to_signals_labels(imgs, labels_img, mask_img=None, def signals_to_img_labels(signals, labels_img, mask_img=None, - background_label=0, order="F"): + background_label=0, order="F"): """Create image from region signals defined as labels. The same region signal is used for each voxel of the corresponding 3D @@ -121,12 +123,12 @@ def signals_to_img_labels(signals, labels_img, mask_img=None, labels_img, mask_img must have the same shapes and affines. Parameters - ========== + ---------- signals: numpy.ndarray 2D array with shape: (scan number, number of regions in labels_img) labels_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Region definitions using labels. mask_img: Niimg-like object, optional @@ -140,13 +142,13 @@ def signals_to_img_labels(signals, labels_img, mask_img=None, ordering of output array ("C" or "F"). Defaults to "F". Returns - ======= + ------- img: nibabel.Nifti1Image Reconstructed image. dtype is that of "signals", affine and shape are those of labels_img. 
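
The per-label averaging performed by `img_to_signals_labels` reduces to one `ndimage.measurements.mean` call per scan; a toy 2D illustration (the real code runs on 3D volumes), where 0 is the background label and is skipped:

    import numpy as np
    from scipy import ndimage

    labels_data = np.array([[1, 1, 2],
                            [2, 0, 2]])
    scan = np.array([[1., 3., 10.],
                     [20., 99., 30.]])
    means = ndimage.mean(scan, labels=labels_data, index=[1, 2])
    print(means)    # [2.0, 20.0]
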
See also - ======== + -------- nilearn.regions.img_to_signals_labels nilearn.regions.signals_to_img_maps """ @@ -154,7 +156,7 @@ def signals_to_img_labels(signals, labels_img, mask_img=None, labels_img = _utils.check_niimg_3d(labels_img) signals = np.asarray(signals) - target_affine = labels_img.get_affine() + target_affine = labels_img.affine target_shape = labels_img.shape[:3] if mask_img is not None: @@ -162,17 +164,17 @@ def signals_to_img_labels(signals, labels_img, mask_img=None, if mask_img.shape != target_shape: raise ValueError("mask_img and labels_img shapes " "must be identical.") - if abs(mask_img.get_affine() - target_affine).max() > 1e-9: + if abs(mask_img.affine - target_affine).max() > 1e-9: raise ValueError("mask_img and labels_img affines " "must be identical") - labels_data = labels_img.get_data() + labels_data = _safe_get_data(labels_img, ensure_finite=True) labels = list(np.unique(labels_data)) if background_label in labels: labels.remove(background_label) if mask_img is not None: - mask_data = mask_img.get_data() + mask_data = _safe_get_data(mask_img, ensure_finite=True) labels_data = labels_data.copy() labels_data[np.logical_not(mask_data)] = background_label @@ -201,18 +203,18 @@ def img_to_signals_maps(imgs, maps_img, mask_img=None): This function is applicable to regions defined by maps. Parameters - ========== + ---------- imgs: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Input images. maps_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html regions definition as maps (array of weights). shape: imgs.shape + (region number, ) mask_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html mask to apply to regions before extracting signals. Every point outside the mask is considered as background (i.e. outside of any region). @@ -221,7 +223,7 @@ def img_to_signals_maps(imgs, maps_img, mask_img=None): ordering of output array ("C" or "F"). Defaults to "F". Returns - ======= + ------- region_signals: numpy.ndarray Signals extracted from each region. Shape is: (scans number, number of regions intersecting mask) @@ -231,38 +233,40 @@ def img_to_signals_maps(imgs, maps_img, mask_img=None): signal region_signals[:, n]. See also - ======== + -------- nilearn.regions.img_to_signals_labels nilearn.regions.signals_to_img_maps """ maps_img = _utils.check_niimg_4d(maps_img) imgs = _utils.check_niimg_4d(imgs) - affine = imgs.get_affine() + affine = imgs.affine shape = imgs.shape[:3] # Check shapes and affines. 
if maps_img.shape[:3] != shape: raise ValueError("maps_img and imgs shapes must be identical.") - if abs(maps_img.get_affine() - affine).max() > 1e-9: + if abs(maps_img.affine - affine).max() > 1e-9: raise ValueError("maps_img and imgs affines must be identical") - maps_data = maps_img.get_data() + maps_data = _safe_get_data(maps_img, ensure_finite=True) if mask_img is not None: mask_img = _utils.check_niimg_3d(mask_img) if mask_img.shape != shape: raise ValueError("mask_img and imgs shapes must be identical.") - if abs(mask_img.get_affine() - affine).max() > 1e-9: + if abs(mask_img.affine - affine).max() > 1e-9: raise ValueError("mask_img and imgs affines must be identical") maps_data, maps_mask, labels = \ - _trim_maps(maps_data, mask_img.get_data(), keep_empty=True) + _trim_maps(maps_data, + _safe_get_data(mask_img, ensure_finite=True), + keep_empty=True) maps_mask = _utils.as_ndarray(maps_mask, dtype=np.bool) else: maps_mask = np.ones(maps_data.shape[:3], dtype=np.bool) labels = np.arange(maps_data.shape[-1], dtype=np.int) - data = imgs.get_data() + data = _safe_get_data(imgs, ensure_finite=True) region_signals = linalg.lstsq(maps_data[maps_mask, :], data[maps_mask, :])[0].T @@ -275,46 +279,47 @@ def signals_to_img_maps(region_signals, maps_img, mask_img=None): region_signals, mask_img must have the same shapes and affines. Parameters - ========== + ---------- region_signals: numpy.ndarray signals to process, as a 2D array. A signal is a column. There must be as many signals as maps. In pseudo-code: region_signals.shape[1] == maps_img.shape[-1] maps_img: Niimg-like object - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Region definitions using maps. mask_img: Niimg-like object, optional - See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg. + See http://nilearn.github.io/manipulating_images/input_output.html Boolean array giving voxels to process. integer arrays also accepted, zero meaning False. Returns - ======= + ------- img: nibabel.Nifti1Image Reconstructed image. affine and shape are those of maps_img. See also - ======== + -------- nilearn.regions.signals_to_img_labels nilearn.regions.img_to_signals_maps """ maps_img = _utils.check_niimg_4d(maps_img) - maps_data = maps_img.get_data() + maps_data = _safe_get_data(maps_img, ensure_finite=True) shape = maps_img.shape[:3] - affine = maps_img.get_affine() + affine = maps_img.affine if mask_img is not None: mask_img = _utils.check_niimg_3d(mask_img) if mask_img.shape != shape: raise ValueError("mask_img and maps_img shapes must be identical.") - if abs(mask_img.get_affine() - affine).max() > 1e-9: + if abs(mask_img.affine - affine).max() > 1e-9: raise ValueError("mask_img and maps_img affines must be " "identical.") - maps_data, maps_mask, _ = _trim_maps(maps_data, mask_img.get_data(), - keep_empty=True) + maps_data, maps_mask, _ = _trim_maps( + maps_data, _safe_get_data(mask_img, ensure_finite=True), + keep_empty=True) maps_mask = _utils.as_ndarray(maps_mask, dtype=np.bool) else: maps_mask = np.ones(maps_data.shape[:3], dtype=np.bool) @@ -332,7 +337,7 @@ def _trim_maps(maps, mask, keep_empty=False, order="F"): must be performed before calling this function. Parameters - ========== + ---------- maps: numpy.ndarray Set of maps, defining some regions. @@ -348,7 +353,7 @@ def _trim_maps(maps, mask, keep_empty=False, order="F"): Ordering of the output maps array (trimmed_maps). 
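
Because maps may overlap, `img_to_signals_maps` cannot average voxels independently; instead it solves a single least-squares problem over all in-mask voxels, exactly the `linalg.lstsq` line above. A self-contained toy version with made-up sizes:

    import numpy as np
    from scipy import linalg

    rng = np.random.RandomState(0)
    maps = rng.rand(500, 4)              # in-mask voxels x regions
    true_signals = rng.randn(20, 4)      # scans x regions
    data = maps.dot(true_signals.T)      # voxels x scans

    recovered = linalg.lstsq(maps, data)[0].T    # scans x regions
    print(np.allclose(recovered, true_signals))  # True
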
Returns - ======= + ------- trimmed_maps: numpy.ndarray New set of maps, computed as intersection of each input map and mask. Empty maps are discarded if keep_empty is False, thus the number of diff --git a/nilearn/regions/tests/__init__.py b/nilearn/regions/tests/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/nilearn/regions/tests/__init__.py @@ -0,0 +1 @@ + diff --git a/nilearn/regions/tests/test_parcellations.py b/nilearn/regions/tests/test_parcellations.py new file mode 100644 index 0000000000..df77cd68eb --- /dev/null +++ b/nilearn/regions/tests/test_parcellations.py @@ -0,0 +1,276 @@ +""" +Test the parcellations tools module +""" +import numpy as np +import nibabel + +from nose.tools import assert_true, assert_equal +from nilearn.regions.parcellations import (Parcellations, + _check_parameters_transform) +from nilearn._utils.testing import assert_raises_regex + + +def test_errors_raised_in_check_parameters_fit(): + # Test whether an error is raised or not given + # a false method type + # valid_methods = ['kmeans', 'ward', 'complete', 'average'] + data = np.zeros((6, 7, 8, 5)) + + img = nibabel.Nifti1Image(data, affine=np.eye(4)) + + method_raise1 = Parcellations(method=None) + assert_raises_regex(ValueError, + "Parcellation method is specified as None. ", + method_raise1.fit, img) + + for invalid_method in ['kmens', 'avg', 'complte']: + method_raise2 = Parcellations(method=invalid_method) + msg = ("The method you have selected is not implemented " + "'{0}'".format(invalid_method)) + assert_raises_regex(ValueError, msg, method_raise2.fit, img) + + +def test_parcellations_fit_on_single_nifti_image(): + # Test return attributes for each method + data = np.zeros((10, 11, 12, 5)) + data[9, 10, 2] = 1 + data[4, 9, 3] = 2 + fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4)) + + methods = ['kmeans', 'ward', 'complete', 'average'] + n_parcels = [5, 10, 15] + for n_parcel, method in zip(n_parcels, methods): + parcellator = Parcellations(method=method, n_parcels=n_parcel) + parcellator.fit(fmri_img) + # Test that object returns attribute labels_img_ + assert_true(parcellator.labels_img_ is not None) + # Test object returns attribute masker_ + assert_true(parcellator.masker_ is not None) + assert_true(parcellator.mask_img_ is not None) + if method != 'kmeans': + # Test that object returns attribute connectivity_ + # only for AgglomerativeClustering methods + assert_true(parcellator.connectivity_ is not None) + labels_img = parcellator.labels_img_ + assert_true(parcellator.labels_img_ is not None) + # After inverse_transform, shape must match with original input + # data + assert_true(labels_img.shape, (data.shape[0], + data.shape[1], + data.shape[2])) + + +def test_parcellations_fit_on_multi_nifti_images(): + data = np.zeros((10, 11, 12, 5)) + data[9, 10, 2] = 1 + data[4, 9, 3] = 2 + fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4)) + # List of fmri images + fmri_imgs = [fmri_img, fmri_img, fmri_img] + + parcellator = Parcellations(method='kmeans', n_parcels=5) + parcellator.fit(fmri_imgs) + assert_true(parcellator.labels_img_ is not None) + + parcellator = Parcellations(method='ward', n_parcels=5) + parcellator.fit(fmri_imgs) + assert_true(parcellator.labels_img_ is not None) + + # Smoke test with explicit mask image + mask_img = np.ones((10, 11, 12)) + mask_img = nibabel.Nifti1Image(mask_img, np.eye(4)) + + parcellator = Parcellations(method='kmeans', n_parcels=5, + mask=mask_img) + parcellator.fit(fmri_imgs) + + parcellator = 
Parcellations(method='ward', n_parcels=5, + mask=mask_img) + parcellator.fit(fmri_imgs) + + +def test_parcellations_transform_single_nifti_image(): + # Test with NiftiLabelsMasker extraction of timeseries data + # after building a parcellations image + + # Here, data has ones. zeros will be considered as background labels + # not foreground labels + data = np.ones((10, 11, 12, 8)) + data[6, 7, 8] = 2 + data[9, 10, 11] = 3 + parcels = 5 + + fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4)) + + for method in ['kmeans', 'ward', 'complete', 'average']: + parcellator = Parcellations(method=method, n_parcels=parcels) + parcellator.fit(fmri_img) + # transform to signals + signals = parcellator.transform(fmri_img) + # Test if the signals extracted are of same shape as inputs + # Here, we simply return numpy array for single subject input + assert_equal(signals.shape, (fmri_img.shape[3], parcels)) + + # Test for single subject but in a list. + signals = parcellator.transform([fmri_img]) + assert_equal(signals.shape, (fmri_img.shape[3], parcels)) + + +def test_parcellations_transform_multi_nifti_images(): + data = np.ones((10, 11, 12, 10)) + data[6, 7, 8] = 2 + data[9, 10, 11] = 3 + parcels = 5 + + fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4)) + fmri_imgs = [fmri_img, fmri_img, fmri_img] + + for method in ['kmeans', 'ward', 'complete', 'average']: + parcellator = Parcellations(method=method, n_parcels=parcels) + parcellator.fit(fmri_imgs) + # transform multi images to signals. In return, we have length + # equal to the number of images + signals = parcellator.transform(fmri_imgs) + assert_equal(signals[0].shape, (fmri_img.shape[3], parcels)) + assert_equal(signals[1].shape, (fmri_img.shape[3], parcels)) + assert_equal(signals[2].shape, (fmri_img.shape[3], parcels)) + + assert_equal(len(signals), len(fmri_imgs)) + + +def test_check_parameters_transform(): + rng = np.random.RandomState(0) + data = np.ones((10, 11, 12, 10)) + data[6, 7, 8] = 2 + data[9, 10, 11] = 3 + + # single image + fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4)) + # single confound + confounds = rng.randn(*(10, 3)) + # Tests to check whether imgs, confounds returned are + # list or not. 
Pre-check in parameters to work for list + # of multi images and multi confounds + imgs, confounds, single_subject = _check_parameters_transform(fmri_img, + confounds) + assert_true(isinstance(imgs, (list, tuple))) + assert_true(isinstance(confounds, (list, tuple))) + assert_true(single_subject, True) + + # multi images + fmri_imgs = [fmri_img, fmri_img, fmri_img] + confounds_list = [confounds, confounds, confounds] + imgs, confounds, _ = _check_parameters_transform(fmri_imgs, confounds_list) + assert_equal(imgs, fmri_imgs) + assert_equal(confounds_list, confounds) + + # Test the error when length of images and confounds are not same + msg = ("Number of confounds given does not match with the " + "given number of images") + not_match_confounds_list = [confounds, confounds] + assert_raises_regex(ValueError, msg, _check_parameters_transform, + fmri_imgs, not_match_confounds_list) + + +def test_parcellations_transform_with_multi_confounds_multi_images(): + rng = np.random.RandomState(0) + data = np.ones((10, 11, 12, 10)) + data[6, 7, 8] = 2 + data[9, 10, 11] = 3 + + fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4)) + fmri_imgs = [fmri_img, fmri_img, fmri_img] + + confounds = rng.randn(*(10, 3)) + confounds_list = (confounds, confounds, confounds) + + for method in ['kmeans', 'ward', 'complete', 'average']: + parcellator = Parcellations(method=method, n_parcels=5) + parcellator.fit(fmri_imgs) + + signals = parcellator.transform(fmri_imgs, + confounds=confounds_list) + assert_true(isinstance(signals, list)) + # n_parcels=5, length of data=10 + assert_equal(signals[0].shape, (10, 5)) + + +def test_fit_transform(): + rng = np.random.RandomState(0) + data = np.ones((10, 11, 12, 10)) + data[6, 7, 8] = 2 + data[9, 10, 11] = 3 + + fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4)) + fmri_imgs = [fmri_img, fmri_img, fmri_img] + + confounds = rng.randn(*(10, 3)) + confounds_list = [confounds, confounds, confounds] + + for method in ['kmeans', 'ward', 'complete', 'average']: + parcellator = Parcellations(method=method, n_parcels=5) + signals = parcellator.fit_transform(fmri_imgs) + assert_true(parcellator.labels_img_ is not None) + if method != 'kmeans': + assert_true(parcellator.connectivity_ is not None) + assert_true(parcellator.masker_ is not None) + # fit_transform with confounds + signals = parcellator.fit_transform(fmri_imgs, + confounds=confounds_list) + assert_true(isinstance(signals, list)) + assert_equal(signals[0].shape, (10, 5)) + + +def test_inverse_transform_single_nifti_image(): + data = np.ones((10, 11, 12, 10)) + data[6, 7, 8] = 2 + data[9, 10, 11] = 3 + + fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4)) + methods = ['kmeans', 'ward', 'complete', 'average'] + + for method in methods: + parcellate = Parcellations(method=method, n_parcels=5) + # Fit + parcellate.fit(fmri_img) + assert_true(parcellate.labels_img_ is not None) + # Transform + fmri_reduced = parcellate.transform(fmri_img) + assert_true(isinstance(fmri_reduced, np.ndarray)) + # Shape matching with (scans, regions) + assert_true(fmri_reduced.shape, (10, 5)) + # Inverse transform + fmri_compressed = parcellate.inverse_transform(fmri_reduced) + # A single Nifti image for single subject input + assert_true(isinstance(fmri_compressed, nibabel.Nifti1Image)) + # returns shape of fmri_img + assert_true(fmri_compressed.shape, (10, 11, 12, 10)) + + # fmri_reduced in a list + fmri_compressed = parcellate.inverse_transform([fmri_reduced]) + # A single Nifti image for single subject input + 
assert_true(isinstance(fmri_compressed, nibabel.Nifti1Image)) + # returns shape of fmri_img + assert_true(fmri_compressed.shape, (10, 11, 12, 10)) + + +def test_transform_3d_input_images(): + # test list of 3D images + data = np.ones((10, 11, 12)) + data[6, 7, 8] = 2 + data[9, 10, 11] = 3 + img = nibabel.Nifti1Image(data, affine=np.eye(4)) + # list of 3 + imgs = [img, img, img] + parcellate = Parcellations(method='ward', n_parcels=20) + X = parcellate.fit_transform(imgs) + assert_true(isinstance(X, list)) + # (number of samples, number of features) + assert_equal(np.concatenate(X).shape, (3, 20)) + # inverse transform + imgs_ = parcellate.inverse_transform(X) + assert_true(isinstance(imgs_, list)) + # test single 3D image + X = parcellate.fit_transform(imgs[0]) + assert_true(isinstance(X, np.ndarray)) + assert_equal(X.shape, (1, 20)) diff --git a/nilearn/regions/tests/test_region_extractor.py b/nilearn/regions/tests/test_region_extractor.py index 268c95f0e6..5d4de657d3 100644 --- a/nilearn/regions/tests/test_region_extractor.py +++ b/nilearn/regions/tests/test_region_extractor.py @@ -2,14 +2,18 @@ import numpy as np import nibabel +from scipy import ndimage -from nose.tools import assert_raises, assert_equal, assert_true, assert_not_equal +from nose.tools import assert_equal, assert_true, assert_not_equal -from nilearn.regions import connected_regions, RegionExtractor -from nilearn.regions.region_extractor import _threshold_maps_ratio -from nilearn.image import iter_img +from nilearn.regions import (connected_regions, RegionExtractor, + connected_label_regions) +from nilearn.regions.region_extractor import (_threshold_maps_ratio, + _remove_small_regions) +from nilearn._utils import testing from nilearn._utils.testing import assert_raises_regex, generate_maps +from nilearn._utils.exceptions import DimensionError def _make_random_data(shape): @@ -34,6 +38,15 @@ def test_invalid_thresholds_in_threshold_maps_ratio(): maps, threshold=invalid_threshold) +def test_nans_threshold_maps_ratio(): + maps, _ = generate_maps((10, 10, 10), n_regions=2) + data = maps.get_data() + data[:, :, 0] = np.nan + + maps_img = nibabel.Nifti1Image(data, np.eye(4)) + thr_maps = _threshold_maps_ratio(maps_img, threshold=0.8) + + def test_threshold_maps_ratio(): # smoke test for function _threshold_maps_ratio with randomly # generated maps @@ -69,12 +82,11 @@ def test_invalids_extract_types_in_connected_regions(): def test_connected_regions(): # 4D maps n_regions = 4 - maps, _ = generate_maps((30, 30, 30), n_regions=n_regions) + maps, mask_img = generate_maps((30, 30, 30), n_regions=n_regions) # 3D maps map_img = np.zeros((30, 30, 30)) + 0.1 * np.random.randn(30, 30, 30) map_img = nibabel.Nifti1Image(map_img, affine=np.eye(4)) - valid_extract_types = ['connected_components', 'local_regions'] # smoke test for function connected_regions and also to check # if the regions extracted should be equal or more than already present. 
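
These smoke tests boil down to the pattern below: synthetic maps in, a 4D image of separated regions out. The shapes and counts mirror the tests but are otherwise arbitrary:

    import numpy as np
    from nilearn._utils.testing import generate_maps
    from nilearn.regions import connected_regions

    maps, mask_img = generate_maps((30, 30, 30), n_regions=4)
    regions_img, index = connected_regions(
        maps, extract_type='connected_components')
    # One 3D volume per extracted region; the tests assert there are
    # at least as many regions as input maps.
    print(regions_img.shape[-1] >= 4)
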
# 4D image case @@ -88,6 +100,24 @@ def test_connected_regions(): extract_type=extract_type) assert_true(connected_extraction_3d_img.shape[-1] >= 1) + # Test input mask_img + extraction_with_mask_img, index = connected_regions(maps, + mask_img=mask_img) + assert_true(extraction_with_mask_img.shape[-1] >= 1) + + # mask_img with different shape + mask = np.zeros(shape=(10, 11, 12), dtype=np.int) + mask[1:-1, 1:-1, 1:-1] = 1 + affine = np.array([[2., 0., 0., 0.], + [0., 2., 0., 0.], + [0., 0., 2., 0.], + [0., 0., 0., 2.]]) + mask_img = nibabel.Nifti1Image(mask, affine=affine) + extraction_not_same_fov_mask, _ = connected_regions(maps, + mask_img=mask_img) + assert_equal(maps.shape[:3], extraction_not_same_fov_mask.shape[:3]) + assert_not_equal(mask_img.shape, extraction_not_same_fov_mask.shape[:3]) + def test_invalid_threshold_strategies(): maps, _ = generate_maps((6, 8, 10), n_regions=1) @@ -135,8 +165,6 @@ def test_region_extractor_fit_and_transform(): assert_true(extractor.regions_img_.shape[-1] >= 9) n_regions_extracted = extractor.regions_img_.shape[-1] - imgs = [] - signals = [] shape = (91, 109, 91, 7) expected_signal_shape = (7, n_regions_extracted) for id_ in range(n_subjects): @@ -144,3 +172,198 @@ def test_region_extractor_fit_and_transform(): # smoke test NiftiMapsMasker transform inherited in Region Extractor signal = extractor.transform(img) assert_equal(expected_signal_shape, signal.shape) + + # smoke test with high resolution image + maps, mask_img = generate_maps((20, 20, 20), n_regions=n_regions, + affine=.2 * np.eye(4)) + + extract_ratio = RegionExtractor(maps, + thresholding_strategy='ratio_n_voxels', + smoothing_fwhm=.6, + min_region_size=.4) + extract_ratio.fit() + assert_not_equal(extract_ratio.regions_img_, '') + assert_true(extract_ratio.regions_img_.shape[-1] >= 9) + + # smoke test with zeros on the diagonal of the affine + affine = np.eye(4) + affine[[0, 1]] = affine[[1, 0]] # permutes first and second lines + maps, mask_img = generate_maps((40, 40, 40), n_regions=n_regions, + affine=affine) + + extract_ratio = RegionExtractor(maps, threshold=0.2, + thresholding_strategy='ratio_n_voxels') + extract_ratio.fit() + assert_not_equal(extract_ratio.regions_img_, '') + assert_true(extract_ratio.regions_img_.shape[-1] >= 9) + + +def test_error_messages_connected_label_regions(): + shape = (13, 11, 12) + affine = np.eye(4) + n_regions = 2 + labels_img = testing.generate_labeled_regions(shape, affine=affine, + n_regions=n_regions) + assert_raises_regex(ValueError, + "Expected 'min_size' to be specified as integer.", + connected_label_regions, + labels_img=labels_img, min_size='a') + assert_raises_regex(ValueError, + "'connect_diag' must be specified as True or False.", + connected_label_regions, + labels_img=labels_img, connect_diag=None) + + +def test_remove_small_regions(): + data = np.array([[[0., 1., 0.], + [0., 1., 1.], + [0., 0., 0.]], + [[0., 0., 0.], + [1., 0., 0.], + [0., 1., 0.]], + [[0., 0., 1.], + [1., 0., 0.], + [0., 1., 1.]]]) + # To remove small regions, data should be labelled + label_map, n_labels = ndimage.label(data) + sum_label_data = np.sum(label_map) + + affine = np.eye(4) + min_size = 10 + # data can be act as mask_data to identify regions in label_map because + # features in label_map are built upon non-zeros in data + index = np.arange(n_labels + 1) + removed_data = _remove_small_regions(label_map, index, affine, min_size) + sum_removed_data = np.sum(removed_data) + + assert_true(sum_removed_data < sum_label_data) + + +def 
test_connected_label_regions(): + shape = (13, 11, 12) + affine = np.eye(4) + n_regions = 9 + labels_img = testing.generate_labeled_regions(shape, affine=affine, + n_regions=n_regions) + labels_data = labels_img.get_data() + n_labels_wo_reg_ext = len(np.unique(labels_data)) + + # region extraction without specifying min_size + extracted_regions_on_labels_img = connected_label_regions(labels_img) + extracted_regions_labels_data = extracted_regions_on_labels_img.get_data() + n_labels_wo_min = len(np.unique(extracted_regions_labels_data)) + + assert_true(n_labels_wo_reg_ext < n_labels_wo_min) + + # with specifying min_size + extracted_regions_with_min = connected_label_regions(labels_img, + min_size=100) + extracted_regions_with_min_data = extracted_regions_with_min.get_data() + n_labels_with_min = len(np.unique(extracted_regions_with_min_data)) + + assert_true(n_labels_wo_min > n_labels_with_min) + + # Test connect_diag=False + ext_reg_without_connect_diag = connected_label_regions(labels_img, + connect_diag=False) + data_wo_connect_diag = ext_reg_without_connect_diag.get_data() + n_labels_wo_connect_diag = len(np.unique(data_wo_connect_diag)) + assert_true(n_labels_wo_connect_diag > n_labels_wo_reg_ext) + + # If min_size is large and if all the regions are removed then empty image + # will be returned + extract_reg_min_size_large = connected_label_regions(labels_img, + min_size=500) + assert_true(np.unique(extract_reg_min_size_large.get_data()) == 0) + + # Test the names of the brain regions given in labels. + # Test labels for 9 regions in n_regions + labels = ['region_a', 'region_b', 'region_c', 'region_d', 'region_e', + 'region_f', 'region_g', 'region_h', 'region_i'] + + # If labels are provided, first return will contain extracted labels image + # and second return will contain list of new names generated based on same + # name with assigned on both hemispheres for example. + extracted_reg, new_labels = connected_label_regions(labels_img, + min_size=100, + labels=labels) + # The length of new_labels returned can differ depending upon min_size. If + # min_size given is more small regions can be removed therefore newly + # generated labels can be less than original size of labels. Or if min_size + # is less then newly generated labels can be more. + + # We test here whether labels returned are empty or not. + assert_not_equal(new_labels, '') + assert_true(len(new_labels) <= len(labels)) + + # labels given in numpy array + labels = np.asarray(labels) + extracted_reg2, new_labels2 = connected_label_regions(labels_img, + labels=labels) + assert_not_equal(new_labels, '') + # By default min_size is less, so newly generated labels can be more. + assert_true(len(new_labels2) >= len(labels)) + + # If number of labels provided are wrong (which means less than number of + # unique labels in labels_img), then we raise an error + + # Test whether error raises + unique_labels = set(np.unique(np.asarray(labels_img.get_data()))) + unique_labels.remove(0) + + # labels given are less than n_regions=9 + provided_labels = ['region_a', 'region_c', 'region_f', + 'region_g', 'region_h', 'region_i'] + + assert_true(len(provided_labels) < len(unique_labels)) + + np.testing.assert_raises(ValueError, connected_label_regions, + labels_img, labels=provided_labels) + + # Test if unknown/negative integers are provided as labels in labels_img, + # we raise an error and test the same whether error is raised. 
+ labels_data = np.zeros(shape, dtype=np.int) + h0 = shape[0] // 2 + h1 = shape[1] // 2 + h2 = shape[2] // 2 + labels_data[:h0, :h1, :h2] = 1 + labels_data[:h0, :h1, h2:] = 2 + labels_data[:h0, h1:, :h2] = 3 + labels_data[:h0, h1:, h2:] = 4 + labels_data[h0:, :h1, :h2] = 5 + labels_data[h0:, :h1, h2:] = 6 + labels_data[h0:, h1:, :h2] = np.nan + labels_data[h0:, h1:, h2:] = np.inf + + neg_labels_img = nibabel.Nifti1Image(labels_data, affine) + np.testing.assert_raises(ValueError, connected_label_regions, + labels_img=neg_labels_img) + + # If labels_img provided is 4D Nifti image, then test whether error is + # raised or not. Since this function accepts only 3D image. + labels_4d_data = np.zeros((shape) + (2, )) + labels_data[h0:, h1:, :h2] = 0 + labels_data[h0:, h1:, h2:] = 0 + labels_4d_data[..., 0] = labels_data + labels_4d_data[..., 1] = labels_data + labels_img_4d = nibabel.Nifti1Image(labels_4d_data, np.eye(4)) + np.testing.assert_raises(DimensionError, connected_label_regions, + labels_img=labels_img_4d) + + # Test if labels (or names to regions) given is a string without a list. + # Then, we expect it to be split to regions extracted and returned as list. + labels_in_str = 'region_a' + labels_img_in_str = testing.generate_labeled_regions(shape, affine=affine, + n_regions=1) + extract_regions, new_labels = connected_label_regions(labels_img_in_str, + labels=labels_in_str) + assert_true(isinstance(new_labels, list)) + + # If user has provided combination of labels, then function passes without + # breaking and new labels are returned based upon given labels and should + # be equal or more based on regions extracted + combined_labels = ['region_a', '1', 'region_b', '2', 'region_c', '3', + 'region_d', '4', 'region_e'] + ext_reg, new_labels = connected_label_regions(labels_img, + labels=combined_labels) + assert_true(len(new_labels) >= len(combined_labels)) diff --git a/nilearn/regions/tests/test_signal_extraction.py b/nilearn/regions/tests/test_signal_extraction.py index 7d7e2d70e1..171ae624dc 100644 --- a/nilearn/regions/tests/test_signal_extraction.py +++ b/nilearn/regions/tests/test_signal_extraction.py @@ -16,6 +16,10 @@ from nilearn._utils.testing import write_tmp_imgs, assert_raises_regex from nilearn._utils.exceptions import DimensionError +_TEST_DIM_ERROR_MSG = ("Input data has incompatible dimensionality: " + "Expected dimension is 3D and you provided " + "a 4D image") + def test_generate_regions_ts(): """Minimal testing of generate_regions_ts()""" @@ -110,7 +114,7 @@ def test_signals_extraction_with_labels(): assert_true(np.all(data.std(axis=-1) > 0)) # verify that 4D label images are refused - assert_raises_regex(DimensionError, "Data must be a 3D", + assert_raises_regex(DimensionError, _TEST_DIM_ERROR_MSG, signal_extraction.img_to_signals_labels, data_img, labels_4d_img) @@ -136,10 +140,10 @@ def test_signals_extraction_with_labels(): assert_true(labels_r == list(range(1, 9))) # Same thing, with mask. 
- assert_raises_regex(DimensionError, "Data must be a 3D", + assert_raises_regex(DimensionError, _TEST_DIM_ERROR_MSG, signal_extraction.img_to_signals_labels, data_img, labels_img, mask_img=mask_4d_img) - assert_raises_regex(DimensionError, "Data must be a 3D", + assert_raises_regex(DimensionError, _TEST_DIM_ERROR_MSG, signal_extraction.signals_to_img_labels, data_img, labels_img, mask_img=mask_4d_img) @@ -227,7 +231,7 @@ def test_signal_extraction_with_maps(): img = nibabel.Nifti1Image(data, np.eye(4)) # verify that 4d masks are refused - assert_raises_regex(TypeError, "Data must be a 3D", + assert_raises_regex(TypeError, _TEST_DIM_ERROR_MSG, signal_extraction.img_to_signals_maps, img, maps_img, mask_img=mask_4d_img) @@ -292,11 +296,11 @@ def test_signal_extraction_with_maps_and_labels(): maps_data[labels_data == l, n - 1] = 1 - maps_img = nibabel.Nifti1Image(maps_data, labels_img.get_affine()) + maps_img = nibabel.Nifti1Image(maps_data, labels_img.affine) # Generate fake data fmri_img, _ = generate_fake_fmri(shape=shape, length=length, - affine=labels_img.get_affine()) + affine=labels_img.affine) # Extract signals from maps and labels: results must be identical. maps_signals, maps_labels = signal_extraction.img_to_signals_maps( @@ -309,7 +313,7 @@ def test_signal_extraction_with_maps_and_labels(): # Same thing with a mask, containing only 3 regions. mask_data = (labels_data == 1) + (labels_data == 2) + (labels_data == 5) mask_img = nibabel.Nifti1Image(mask_data.astype(np.int8), - labels_img.get_affine()) + labels_img.affine) labels_signals, labels_labels = signal_extraction.img_to_signals_labels( fmri_img, labels_img, mask_img=mask_img) diff --git a/nilearn/signal.py b/nilearn/signal.py index ca8428e730..dfeb8c5eb4 100644 --- a/nilearn/signal.py +++ b/nilearn/signal.py @@ -11,13 +11,11 @@ import warnings import numpy as np -import scipy -from scipy import signal, stats, linalg -from sklearn.utils import gen_even_slices -from distutils.version import LooseVersion +from scipy import stats, linalg, signal as sp_signal +from sklearn.utils import gen_even_slices, as_float_array from ._utils.compat import _basestring -from ._utils.numpy_conversions import csv_to_array +from ._utils.numpy_conversions import csv_to_array, as_ndarray NP_VERSION = distutils.version.LooseVersion(np.version.short_version).version @@ -26,7 +24,7 @@ def _standardize(signals, detrend=False, normalize=True): """ Center and norm a given signal (time is along first axis) Parameters - ========== + ---------- signals: numpy.ndarray Timeseries to standardize @@ -38,7 +36,7 @@ def _standardize(signals, detrend=False, normalize=True): to unit energy (sum of squares). Returns - ======= + ------- std_signals: numpy.ndarray copy of signals, normalized. """ @@ -47,12 +45,13 @@ def _standardize(signals, detrend=False, normalize=True): signals = _detrend(signals, inplace=False) else: signals = signals.copy() - if signals.shape[0] == 1: - warnings.warn('Standardization of 3D signal has been requested but ' - 'would lead to zero values. Skipping.') - return signals if normalize: + if signals.shape[0] == 1: + warnings.warn('Standardization of 3D signal has been requested but ' + 'would lead to zero values. Skipping.') + return signals + if not detrend: # remove mean if not already detrended signals = signals - signals.mean(axis=0) @@ -74,7 +73,7 @@ def _mean_of_squares(signals, n_batches=20): but uses a lot less memory. 
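
The memory claim comes from slicing the feature axis into batches (the module imports `gen_even_slices` for this purpose); a sketch of the idea, assumed to mirror `_mean_of_squares` without reproducing it exactly:

    import numpy as np
    from sklearn.utils import gen_even_slices

    signals = np.random.randn(100, 1000)
    mean_sq = np.empty(signals.shape[1])
    for batch in gen_even_slices(signals.shape[1], 20):
        # Only one slice of columns is squared at a time.
        mean_sq[batch] = np.mean(signals[:, batch] ** 2, axis=0)
    print(np.allclose(mean_sq, np.mean(signals ** 2, axis=0)))  # True
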
Parameters - ========== + ---------- signals : numpy.ndarray, shape (n_samples, n_features) signal whose mean of squares must be computed. @@ -106,7 +105,7 @@ def _detrend(signals, inplace=False, type="linear", n_batches=10): case and uses a lot less memory. Parameters - ========== + ---------- signals : numpy.ndarray This parameter must be two-dimensional. Signals to detrend. A signal is a column. @@ -125,21 +124,20 @@ def _detrend(signals, inplace=False, type="linear", n_batches=10): the value, the lower the memory consumption. Returns - ======= + ------- detrended_signals: numpy.ndarray Detrended signals. The shape is that of 'signals'. Notes - ===== + ----- If a signal of lenght 1 is given, it is returned unchanged. """ - if not inplace: - signals = signals.copy() + signals = as_float_array(signals, copy=not inplace) if signals.shape[0] == 1: warnings.warn('Detrending of 3D signal has been requested but ' - 'would lead to zero values. Skipping.') + 'would lead to zero values. Skipping.') return signals signals -= np.mean(signals, axis=0) @@ -167,17 +165,32 @@ def _detrend(signals, inplace=False, type="linear", n_batches=10): def _check_wn(btype, freq, nyq): wn = freq / float(nyq) - if wn > 1.: - warnings.warn('The frequency specified for the %s pass filter is ' - 'too high to be handled by a digital filter (superior to ' - 'nyquist frequency). It has been lowered to %.2f (nyquist ' - 'frequency).' % (btype, nyq)) - wn = 1. + if wn >= 1.: + # results looked unstable when the critical frequencies are + # exactly at the Nyquist frequency. See issue at SciPy + # https://github.com/scipy/scipy/issues/6265. Before, SciPy 1.0.0 ("wn + # should be btw 0 and 1"). But, after ("0 < wn < 1"). Due to unstable + # results as pointed in the issue above. Hence, we forced the + # critical frequencies to be slightly less than 1. but not 1. + wn = 1 - 10 * np.finfo(1.).eps + warnings.warn( + 'The frequency specified for the %s pass filter is ' + 'too high to be handled by a digital filter (superior to ' + 'nyquist frequency). It has been lowered to %.2f (nyquist ' + 'frequency).' % (btype, wn)) + + if wn < 0.0: # equal to 0.0 is okay + wn = np.finfo(1.).eps + warnings.warn( + 'The frequency specified for the %s pass filter is ' + 'too low to be handled by a digital filter (must be non-negative).' + ' It has been set to eps: %.5e' % (btype, wn)) + return wn def butterworth(signals, sampling_rate, low_pass=None, high_pass=None, - order=5, copy=False, save_memory=False): + order=5, copy=False): """ Apply a low-pass, high-pass or band-pass Butterworth filter Apply a filter to remove signal below the `low` frequency and above the @@ -217,9 +230,9 @@ def butterworth(signals, sampling_rate, low_pass=None, high_pass=None, """ if low_pass is None and high_pass is None: if copy: - return signal.copy() + return signals.copy() else: - return signal + return signals if low_pass is not None and high_pass is not None \ and high_pass >= low_pass: @@ -245,75 +258,72 @@ def butterworth(signals, sampling_rate, low_pass=None, high_pass=None, else: critical_freq = critical_freq[0] - b, a = signal.butter(order, critical_freq, btype=btype) + b, a = sp_signal.butter(order, critical_freq, btype=btype, output='ba') if signals.ndim == 1: # 1D case - output = signal.filtfilt(b, a, signals) + output = sp_signal.filtfilt(b, a, signals) if copy: # filtfilt does a copy in all cases. signals = output else: signals[...] 
= output else: if copy: - if (LooseVersion(scipy.__version__) < LooseVersion('0.10.0')): - # filtfilt is 1D only in scipy 0.9.0 - signals = signals.copy() - for timeseries in signals.T: - timeseries[:] = signal.filtfilt(b, a, timeseries) - else: - # No way to save memory when a copy has been requested, - # because filtfilt does out-of-place processing - signals = signal.filtfilt(b, a, signals, axis=0) + # No way to save memory when a copy has been requested, + # because filtfilt does out-of-place processing + signals = sp_signal.filtfilt(b, a, signals, axis=0) else: # Lesser memory consumption, slower. for timeseries in signals.T: - timeseries[:] = signal.filtfilt(b, a, timeseries) + timeseries[:] = sp_signal.filtfilt(b, a, timeseries) + + # results returned in-place + return signals def high_variance_confounds(series, n_confounds=5, percentile=2., detrend=True): """ Return confounds time series extracted from series with highest - variance. - - Parameters - ========== - series: numpy.ndarray - Timeseries. A timeseries is a column in the "series" array. - shape (sample number, feature number) - - n_confounds: int, optional - Number of confounds to return - - percentile: float, optional - Highest-variance series percentile to keep before computing the - singular value decomposition, 0. <= `percentile` <= 100. - series.shape[0] * percentile / 100 must be greater than n_confounds - - detrend: bool, optional - If True, detrend timeseries before processing. - - Returns - ======= - v: numpy.ndarray - highest variance confounds. Shape: (samples, n_confounds) - - Notes - ====== - This method is related to what has been published in the literature - as 'CompCor' (Behzadi NeuroImage 2007). - - The implemented algorithm does the following: - - - compute sum of squares for each time series (no mean removal) - - keep a given percentile of series with highest variances (percentile) - - compute an svd of the extracted series - - return a given number (n_confounds) of series from the svd with - highest singular values. - - See also - ======== - nilearn.image.high_variance_confounds + variance. + + Parameters + ---------- + series: numpy.ndarray + Timeseries. A timeseries is a column in the "series" array. + shape (sample number, feature number) + + n_confounds: int, optional + Number of confounds to return + + percentile: float, optional + Highest-variance series percentile to keep before computing the + singular value decomposition, 0. <= `percentile` <= 100. + series.shape[0] * percentile / 100 must be greater than n_confounds + + detrend: bool, optional + If True, detrend timeseries before processing. + + Returns + ------- + v: numpy.ndarray + highest variance confounds. Shape: (samples, n_confounds) + + Notes + ----- + This method is related to what has been published in the literature + as 'CompCor' (Behzadi NeuroImage 2007). + + The implemented algorithm does the following: + + - compute sum of squares for each time series (no mean removal) + - keep a given percentile of series with highest variances (percentile) + - compute an svd of the extracted series + - return a given number (n_confounds) of series from the svd with + highest singular values. 
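
Those four steps condense to a few lines of NumPy. A toy sketch under the same definitions (not the library function itself, which also detrends and batches the computation):

    import numpy as np

    def high_variance_confounds_sketch(series, n_confounds=5, percentile=2.):
        var = (series ** 2).sum(axis=0)               # no mean removal
        limit = np.percentile(var, 100. - percentile)
        high_var = series[:, var >= limit]            # top `percentile` %
        u, _, _ = np.linalg.svd(high_var, full_matrices=False)
        return u[:, :n_confounds]                     # top singular vectors

    rng = np.random.RandomState(0)
    print(high_variance_confounds_sketch(rng.randn(50, 500)).shape)  # (50, 5)
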
+ + See also + -------- + nilearn.image.high_variance_confounds """ if detrend: @@ -345,76 +355,103 @@ def _ensure_float(data): def clean(signals, sessions=None, detrend=True, standardize=True, - confounds=None, low_pass=None, high_pass=None, t_r=2.5): + confounds=None, low_pass=None, high_pass=None, t_r=2.5, + ensure_finite=False): """Improve SNR on masked fMRI signals. - This function can do several things on the input signals, in - the following order: - - detrend - - standardize - - remove confounds - - low- and high-pass filter + This function can do several things on the input signals, in + the following order: - Low-pass filtering improves specificity. + - detrend + - standardize + - remove confounds + - low- and high-pass filter - High-pass filtering should be kept small, to keep some - sensitivity. + Low-pass filtering improves specificity. - Filtering is only meaningful on evenly-sampled signals. + High-pass filtering should be kept small, to keep some + sensitivity. - Parameters - ========== - signals: numpy.ndarray - Timeseries. Must have shape (instant number, features number). - This array is not modified. + Filtering is only meaningful on evenly-sampled signals. + + Parameters + ---------- + signals: numpy.ndarray + Timeseries. Must have shape (instant number, features number). + This array is not modified. sessions : numpy array, optional Add a session level to the cleaning process. Each session will be cleaned independently. Must be a 1D array of n_samples elements. - confounds: numpy.ndarray, str or list of - Confounds timeseries. Shape must be - (instant number, confound number), or just (instant number,) - The number of time instants in signals and confounds must be - identical (i.e. signals.shape[0] == confounds.shape[0]). - If a string is provided, it is assumed to be the name of a csv file - containing signals as columns, with an optional one-line header. - If a list is provided, all confounds are removed from the input - signal, as if all were in the same array. - - t_r: float - Repetition time, in second (sampling period). - - low_pass, high_pass: float - Respectively low and high cutoff frequencies, in Hertz. - - detrend: bool - If detrending should be applied on timeseries (before - confound removal) - - standardize: bool - If True, returned signals are set to unit variance. - - Returns - ======= - cleaned_signals: numpy.ndarray - Input signals, cleaned. Same shape as `signals`. - - Notes - ===== - Confounds removal is based on a projection on the orthogonal - of the signal space. See `Friston, K. J., A. P. Holmes, - K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak. - "Statistical Parametric Maps in Functional Imaging: A General - Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210. - `_ + confounds: numpy.ndarray, str or list of + Confounds timeseries. Shape must be + (instant number, confound number), or just (instant number,) + The number of time instants in signals and confounds must be + identical (i.e. signals.shape[0] == confounds.shape[0]). + If a string is provided, it is assumed to be the name of a csv file + containing signals as columns, with an optional one-line header. + If a list is provided, all confounds are removed from the input + signal, as if all were in the same array. + + t_r: float + Repetition time, in second (sampling period). + + low_pass, high_pass: float + Respectively low and high cutoff frequencies, in Hertz. 
+ + detrend: bool + If detrending should be applied on timeseries (before + confound removal) + + standardize: bool + If True, returned signals are set to unit variance. + + ensure_finite: bool + If True, the non-finite values (NANs and infs) found in the data + will be replaced by zeros. + + Returns + ------- + cleaned_signals: numpy.ndarray + Input signals, cleaned. Same shape as `signals`. + + Notes + ----- + Confounds removal is based on a projection on the orthogonal + of the signal space. See `Friston, K. J., A. P. Holmes, + K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak. + "Statistical Parametric Maps in Functional Imaging: A General + Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210. + `_ + + See Also + -------- + nilearn.image.clean_img """ + if isinstance(low_pass, bool): + raise TypeError("low pass must be float or None but you provided " + "low_pass='{0}'".format(low_pass)) + if isinstance(high_pass, bool): + raise TypeError("high pass must be float or None but you provided " + "high_pass='{0}'".format(high_pass)) + if not isinstance(confounds, (list, tuple, _basestring, np.ndarray, type(None))): raise TypeError("confounds keyword has an unhandled type: %s" % confounds.__class__) - + + if not isinstance(ensure_finite, bool): + raise ValueError("'ensure_finite' must be boolean type True or False " + "but you provided ensure_finite={0}".format(ensure_finite)) + + if not isinstance(signals, np.ndarray): + signals = as_ndarray(signals) + + if ensure_finite: + signals[np.logical_not(np.isfinite(signals))] = 0 + # Read confounds if confounds is not None: if not isinstance(confounds, (list, tuple)): @@ -465,7 +502,7 @@ def clean(signals, sessions=None, detrend=True, standardize=True, clean(signals[sessions == s], detrend=detrend, standardize=standardize, confounds=session_confounds, low_pass=low_pass, - high_pass=high_pass, t_r=2.5) + high_pass=high_pass, t_r=t_r) # detrend signals = _ensure_float(signals) @@ -474,25 +511,26 @@ def clean(signals, sessions=None, detrend=True, standardize=True, # Remove confounds if confounds is not None: confounds = _ensure_float(confounds) - confounds = _standardize(confounds, normalize=True, detrend=detrend) - - if (LooseVersion(scipy.__version__) > LooseVersion('0.9.0')): - # Pivoting in qr decomposition was added in scipy 0.10 - Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True) - Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float).eps * 100.] - signals -= Q.dot(Q.T).dot(signals) - else: - Q, R = linalg.qr(confounds, mode='economic') - non_null_diag = np.abs(np.diag(R)) > np.finfo(np.float).eps * 100. - if np.all(non_null_diag): - signals -= Q.dot(Q.T).dot(signals) - elif np.any(non_null_diag): - R = R[:, non_null_diag] - confounds = confounds[:, non_null_diag] - inv = scipy.linalg.inv(np.dot(R.T, R)) - signals -= confounds.dot(inv).dot(confounds.T).dot(signals) + confounds = _standardize(confounds, normalize=standardize, + detrend=detrend) + if not standardize: + # Improve numerical stability by controlling the range of + # confounds. We don't rely on _standardize as it removes any + # constant contribution to confounds. + confound_max = np.max(np.abs(confounds), axis=0) + confound_max[confound_max == 0] = 1 + confounds /= confound_max + + # Pivoting in qr decomposition was added in scipy 0.10 + Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True) + Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float).eps * 100.] 
+ signals -= Q.dot(Q.T).dot(signals) if low_pass is not None or high_pass is not None: + if t_r is None: + raise ValueError("Repetition time (t_r) must be specified for " + "filtering") + signals = butterworth(signals, sampling_rate=1. / t_r, low_pass=low_pass, high_pass=high_pass) @@ -501,3 +539,5 @@ def clean(signals, sessions=None, detrend=True, standardize=True, signals *= np.sqrt(signals.shape[0]) # for unit variance return signals + + diff --git a/nilearn/surface/__init__.py b/nilearn/surface/__init__.py new file mode 100644 index 0000000000..5d0f188ddb --- /dev/null +++ b/nilearn/surface/__init__.py @@ -0,0 +1,9 @@ +""" +Functions for surface manipulation. +""" + +from .surface import (vol_to_surf, load_surf_data, + load_surf_mesh, check_mesh_and_data) + +__all__ = ['vol_to_surf', 'load_surf_data', 'load_surf_mesh', + 'check_mesh_and_data'] diff --git a/nilearn/surface/data/README.txt b/nilearn/surface/data/README.txt new file mode 100644 index 0000000000..28fc1ea0f2 --- /dev/null +++ b/nilearn/surface/data/README.txt @@ -0,0 +1,18 @@ +Each file named ball_cloud_n_samples.csv contains the 3D coordinates of n points +evenly spaced in the unit ball. They have been precomputed and stored to save +time when using the 'ball' sampling in nilearn.surface.vol_to_surf. +They can be re-created like this: + +import numpy as np +from nilearn import surface + +for n in [10, 20, 40, 80, 160]: + ball_cloud = surface._uniform_ball_cloud(n_points=n) + np.savetxt('./ball_cloud_{}_samples.csv'.format(n), ball_cloud) + +test_load_uniform_ball_cloud in nilearn/surface/tests/test_surface.py compares +these loaded values and freshly computed ones. + +These values were computed with version 0.2 of scikit-learn, so positions +computed with scikit-learn < 0.18 would be different (but just as good for our +purposes), because the k-means implementation changed in 0.18. 
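
For reference, the confound-removal step that `clean` now applies unconditionally is a projection onto the orthogonal complement of the confound space, with pivoted QR discarding near-degenerate confound directions; a toy demonstration:

    import numpy as np
    from scipy import linalg

    rng = np.random.RandomState(0)
    signals = rng.randn(100, 10)     # scans x voxels
    confounds = rng.randn(100, 3)    # scans x confounds

    Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True)
    Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float64).eps * 100.]
    cleaned = signals - Q.dot(Q.T).dot(signals)
    # The residuals carry no confound contribution:
    print(np.abs(confounds.T.dot(cleaned)).max() < 1e-10)
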
diff --git a/nilearn/surface/data/__init__.py b/nilearn/surface/data/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/nilearn/surface/data/__init__.py @@ -0,0 +1 @@ + diff --git a/nilearn/surface/data/ball_cloud_10_samples.csv b/nilearn/surface/data/ball_cloud_10_samples.csv new file mode 100644 index 0000000000..6f6db4b565 --- /dev/null +++ b/nilearn/surface/data/ball_cloud_10_samples.csv @@ -0,0 +1,10 @@ +4.430692879471407331e-01 -6.727915282170289502e-02 5.217389116286745843e-01 +-5.317840981053942873e-01 -4.068448137668991826e-01 -8.569400476605214950e-03 +-4.922159923177735208e-01 4.405315020917301427e-01 1.164216441727377732e-01 +1.484607430814552376e-01 4.755451354444120149e-01 -4.550029691377610597e-01 +-2.694911402331963690e-01 -5.344932789330818917e-02 6.175229456732103106e-01 +6.490070953481569260e-01 -8.013888661041402434e-03 -1.770466541122445447e-01 +-4.396591656457559116e-01 8.092645769942548306e-02 -5.102020795701791567e-01 +1.544817889619344853e-01 -6.305953266357835485e-01 1.690989055075702852e-01 +2.195223141814307000e-01 5.678294290151263413e-01 2.729949839370077735e-01 +1.045641017925853095e-01 -3.893522723613875858e-01 -5.458543865437600617e-01 diff --git a/nilearn/surface/data/ball_cloud_160_samples.csv b/nilearn/surface/data/ball_cloud_160_samples.csv new file mode 100644 index 0000000000..e387ac757a --- /dev/null +++ b/nilearn/surface/data/ball_cloud_160_samples.csv @@ -0,0 +1,160 @@ +2.468214947373665313e-01 -2.792850579444242642e-01 7.644226031554264278e-01 +1.648811851375835691e-01 3.616226868928677507e-01 -5.157645439591338477e-01 +-6.421752485071410277e-01 -5.964630001035132745e-01 3.684182500200331584e-02 +3.098133088463730922e-01 -1.556702688221980602e-01 -1.868509332088056918e-01 +-6.694078139715229359e-01 2.877337087850644348e-01 5.112262704678833813e-01 +-2.982795964828102719e-01 6.995748968182190497e-01 1.679292639609455129e-01 +4.683911941561200720e-01 6.401872506633516924e-01 3.667912859733313269e-01 +-1.123444834661871672e-01 -1.625558343775632408e-01 -5.155715331026244908e-01 +4.351652218844926923e-01 -4.158680235419797144e-01 2.727055478028755919e-01 +-7.558753510293154676e-01 1.132205313861586876e-01 -4.222422687716799738e-01 +-1.870348481730470802e-01 -2.183136165729039069e-01 2.960101250829366010e-02 +8.464072766139388859e-01 2.637261705243060597e-02 -1.255928883824821152e-01 +8.094501200785853001e-02 -6.138955375785578594e-01 -1.384977215929950600e-01 +8.690841769531902627e-02 2.833922715363133560e-01 3.124778677202241228e-01 +1.500123143536800235e-02 -4.526430803330960817e-01 4.411353627408791667e-01 +-4.571504526262001877e-01 -3.649385158845419030e-01 6.145270027899357723e-01 +3.394919669151344466e-01 -5.474642580767447031e-01 -5.765913672740774487e-01 +9.140571202348847513e-02 6.262937330369138245e-01 -1.812513774153057255e-01 +3.470036166199403915e-01 4.257384055243893517e-01 -1.299675661180787756e-01 +-2.976965219792702122e-01 -7.071349012631434894e-01 -3.881158582899634224e-01 +3.390489656277657082e-01 -4.829266990248882313e-02 -7.761601132808267911e-01 +-2.309375199904392106e-01 1.449483491921083889e-01 -8.938795355593670555e-02 +-1.837116450402472567e-01 2.600023477836322927e-01 8.321252683298318020e-01 +8.782463555499016028e-01 -1.104622580592412584e-01 1.060906436299221822e-01 +-4.182090770253469869e-01 6.202743060412704290e-01 -4.515260298536341588e-01 +-8.282988768244941014e-01 -8.840818471237661680e-02 -1.887080043480907632e-01 +-3.923004183297359448e-01 -3.250216882983871652e-02 
4.812198031278616117e-01 +-2.552972289204707632e-02 3.645535496631692740e-01 -7.923197490243029106e-01 +-3.385209106958740333e-01 2.771121326049834355e-03 -4.628226600308524841e-01 +-5.663559401608060373e-01 6.180701265410207812e-01 2.274603204069024498e-01 +1.320819709094266381e-01 3.475857019364561595e-01 7.473864284767448574e-01 +7.985108336016614183e-01 -2.806506730934799076e-01 -1.745671254230875269e-01 +-1.439795810027073297e-01 -8.507212452391853530e-01 1.222092760833896896e-01 +-5.754891553434456943e-03 5.000614029166573771e-02 5.265861894786806607e-02 +5.879752914091816995e-01 8.209667474469410642e-02 6.525024140559799735e-01 +-5.079588816660038919e-01 9.323514279738794042e-02 -1.878042190797122590e-01 +4.673456034191262320e-01 5.584000892320106768e-01 -4.139346881877839501e-01 +3.652636862051302780e-01 -7.334859868623129220e-01 -3.365223201393693353e-01 +-5.135220924039070045e-01 6.339468484335901433e-02 -7.129873108632988066e-01 +1.239169968349617679e-01 -2.665713263768796151e-02 3.741128835641054362e-01 +5.904857231460559630e-01 1.513857747295261624e-01 -1.604818143804731534e-01 +-1.453386733208530679e-01 5.469290016433041579e-01 6.823073676037709756e-01 +-7.864711593445462556e-01 7.435003039976385841e-02 3.240844473582713547e-01 +5.339281525574167409e-02 -5.998355599661194804e-01 -6.431867619248139434e-01 +-2.918349165125022071e-01 -7.130525109615926116e-01 3.765915017990039138e-01 +-2.365124063189308623e-01 -5.094205725279126185e-01 -1.047602328789979942e-01 +-8.065439155000714289e-02 -8.504013525754576808e-01 -1.347623312809653473e-01 +1.927454944422858363e-01 8.670324814366928701e-01 -2.679127063901753925e-02 +-5.302080641343123180e-01 -1.935259306395876178e-01 -2.544859069173710520e-01 +6.735400215247770994e-01 3.534035763667250363e-01 -4.466532921578159065e-01 +-4.172125099047981345e-01 1.798382134076172423e-01 7.135167829844780707e-01 +-1.821432898043334747e-01 3.686276732025608460e-01 4.842679521162811529e-01 +7.606009482740337668e-02 2.790486207747422745e-02 -2.676882966778288409e-01 +8.082843982156702189e-01 2.165481178233469584e-01 2.022511250407308669e-01 +-3.138271296202648775e-01 4.112349304752613288e-01 -6.769540992745032293e-01 +-4.150293320715858214e-01 3.816598222173525068e-01 -1.228577213586428026e-02 +4.999977575998048041e-01 2.238158781221078197e-01 1.443146361648925402e-01 +7.003313251553200391e-01 -4.569438280391905916e-01 4.773927573237642975e-03 +-5.458361115861972701e-01 -6.202114867903514295e-01 -2.261447670030410362e-01 +-6.366842590768960752e-01 5.688579850534767335e-01 -8.651551879613575413e-02 +4.711508966071004112e-02 6.296005486882787494e-01 1.823678017198123447e-01 +5.954192395922288661e-01 -5.707751775556215712e-01 3.282951895750269022e-01 +5.853351856229285222e-01 -1.335266614750007796e-01 5.079644102743441358e-02 +4.303038216863468435e-01 7.048545171086511019e-01 1.009260704304623668e-01 +5.920889602706936455e-02 -4.205022037679206903e-02 -8.631068965031709617e-01 +6.407380600152227101e-01 -2.109159490119343339e-01 4.763987887343109073e-01 +-7.572624200813509177e-01 3.719318448297973534e-01 1.751053559571232321e-01 +-7.018952599924107583e-01 -1.699557048188446995e-01 -4.799025683573010514e-01 +-5.734289712534911265e-01 -5.191406976304878684e-01 3.738262444740965695e-01 +2.196210522411545241e-01 2.331889682521952056e-01 -8.104709363207512895e-01 +2.959040228839492914e-01 -7.047044723642490460e-01 4.214810425205403610e-01 +5.309929197105632548e-02 3.283756162280757063e-01 -2.622555845213042081e-01 +-4.759436734683606396e-01 
4.510983451346927131e-01 5.625293137069560245e-01 +-3.938106019927344881e-01 7.745299428052411228e-01 -1.028226922691525408e-01 +3.681347873318818742e-01 1.923060124837356055e-01 -3.553281716070366691e-01 +6.661319480525484948e-01 5.183720771078287326e-01 -1.428865712852173397e-01 +-5.213726141515270113e-02 -4.670760441316204048e-01 -4.402021306443001936e-01 +-8.200545534911118928e-02 8.328992336602568658e-01 -2.921045212511030886e-01 +-1.064890498681915626e-01 -4.223117349851726599e-01 7.642105013142310721e-01 +-6.302367537463960945e-01 -7.065814578095064302e-02 6.220258258883528679e-01 +6.100718842659123009e-01 3.396451626919621547e-01 4.721615099088823375e-01 +-8.612976828896171111e-02 6.290874956670777784e-01 -5.500073295049022759e-01 +-7.520388753130112514e-02 1.041413252002766354e-01 6.053251042949002736e-01 +2.369495338128007472e-02 -7.768555338421141432e-01 -4.002287767206850200e-01 +-6.807830147234464890e-01 4.051230109024499537e-01 -3.079060051500838724e-01 +8.581181584346736155e-03 -2.758755905162316724e-01 2.380411698997120795e-01 +1.066898388038960421e-01 -3.530861112851200745e-01 -7.728786344258047336e-01 +2.083728781094499327e-01 -8.406799658155466037e-01 -6.641630077411513067e-02 +-1.231967450261398922e-01 7.635102777258215845e-01 4.340332671140461618e-01 +-7.155301965960304544e-01 -2.065582778135153508e-01 3.549473996991046598e-01 +2.789336473770727409e-02 -2.980371267213346442e-01 -2.155511486601477955e-01 +-4.916444551490664616e-02 4.220976957082471226e-01 -9.814851625390088111e-03 +4.569326945368793469e-01 7.467668508769826907e-01 -1.510203195542751953e-01 +-1.373696474512782831e-02 7.693146030118874767e-02 8.824340335625189269e-01 +7.650763120111574445e-01 -3.234136723303453476e-01 2.669356473783646155e-01 +2.530924862189743996e-01 -1.749692729758766940e-01 1.062718806406038052e-01 +-5.035671050201951937e-01 -1.644007248577817118e-02 1.282983186083589411e-01 +7.211446017328163594e-01 -2.099486395562358554e-01 -4.565077390634975085e-01 +-7.983243128399025723e-01 2.208270092300380905e-01 -5.931041737608694353e-02 +4.593375041407231629e-01 -7.275080434543551577e-01 1.179482174670776345e-01 +-4.638289200512769050e-01 -2.384799710571752218e-01 -6.536041397015064369e-01 +3.669819618625125845e-01 4.969104497408465271e-01 5.359542848495167933e-01 +5.970786273129006227e-01 -4.680534710767403350e-01 -3.637091929637646937e-01 +4.067841164328352410e-01 2.782184703563588823e-01 7.546115963061841292e-01 +5.457758845684220761e-01 -6.647288636440732423e-01 -1.288193837764944061e-01 +5.849061595725044693e-01 7.800334371480646505e-03 -6.073912669715755275e-01 +2.881343154260685435e-01 4.478068849994625844e-01 1.740563450003982937e-01 +-5.066166631680895094e-01 -2.880226428457104970e-01 5.852096467941169528e-03 +-2.522682222299320043e-01 -4.985327174441900322e-01 -6.744249559398650673e-01 +-1.269400041067952900e-01 2.353700846748265352e-01 -4.740730498462337739e-01 +1.309874966533219609e-01 5.858628592998125217e-01 5.640061020832705108e-01 +4.568535145338776804e-01 2.737315484773085728e-01 -6.711626318151925341e-01 +-3.506755032090303481e-01 -3.298709593519617012e-01 2.811358057228456331e-01 +-2.219420818347545288e-01 5.638658666419928966e-01 -2.133276985894274336e-01 +4.530931617539931122e-01 2.845663153934350001e-02 3.248328399937018829e-01 +2.577969803053107856e-01 3.077749059797242268e-02 7.815861807769756142e-01 +2.346699780589680817e-01 -2.421109904959737880e-01 -5.081070469445037041e-01 +-7.843726188677656308e-02 -1.794222739461515392e-01 5.423577066293749915e-01 
+3.122410559116524098e-01 -2.426760449035975375e-01 4.284037251295017557e-01 +-3.484635442783707293e-01 6.133596453606899868e-01 4.143046923453875263e-01 +-2.529399500694943792e-01 -5.638243059815465363e-01 5.735409548714441863e-01 +1.120829745752853907e-01 5.120340505748527393e-02 -5.809703689704703855e-01 +-3.622550481418110868e-01 -8.149773266064591626e-01 -1.080471581753022214e-01 +-1.853408062026692393e-01 3.708998219166448340e-01 2.182880768325157206e-01 +-2.257001725852141039e-01 -9.309706670487845337e-02 -1.996596342827560433e-01 +-2.008584060224647416e-01 -2.075040301992876479e-01 -8.134693585302662511e-01 +1.649298085988213924e-01 7.821552396427334797e-01 3.511606273600750838e-01 +-5.489709082748043256e-01 -4.823561396678392454e-01 -4.940722557558900330e-01 +2.030012727824936636e-01 7.176283421105453808e-01 -4.279890122616394899e-01 +-7.669562805377801995e-01 -3.983170381191364906e-01 -2.104138197570341873e-01 +8.061134875002256006e-01 3.099455397398410694e-01 -6.618655562457691210e-02 +2.265538342430305566e-04 -7.538362707287225017e-01 4.498426707603629393e-01 +8.931433401460766447e-03 -1.878959994130762756e-01 8.723841776857562680e-01 +-4.199158990083718956e-01 -6.770122868209182965e-01 1.233473639343237588e-01 +2.694418010592212198e-01 1.249046905753480041e-01 1.310531483759063559e-02 +7.798778964064685404e-01 4.207426885888206763e-02 3.850585469343850153e-01 +-3.763845387380292395e-01 3.422740604127513797e-01 -3.598968981176510762e-01 +-8.599518475701178888e-02 8.499259403005439140e-01 6.082481889346037091e-02 +-2.204791281044143347e-01 1.062849323126351109e-01 -7.915744581551200643e-01 +-5.742473846094791057e-01 2.905248214049359623e-01 -5.603509224412124290e-01 +4.603913045119732983e-01 -1.451431000335821619e-01 6.954632396115836279e-01 +2.745124083136074589e-01 -4.816456989393068100e-01 -3.197265423036383947e-01 +4.563465192521649616e-01 -4.524959147008916216e-01 6.069929997245064879e-01 +1.500693245523793962e-01 -8.308148608219538822e-01 2.128737479430521662e-01 +-5.012038604949585752e-01 2.544265105200320676e-01 2.646634101322346000e-01 +2.034365116603361334e-01 -4.877363248568518661e-01 1.363474764350168233e-01 +1.644494555455654206e-01 -5.392335034104438485e-01 6.575922634376994980e-01 +-1.965526298587466081e-01 4.541537984281525386e-02 3.063549809837030069e-01 +5.223936277221787883e-01 -1.085479334645074179e-01 -3.185624259239997480e-01 +-7.935792144860089881e-01 -3.450292305076666510e-01 1.269266633384188436e-01 +-3.294802560459185736e-01 -3.896032553367997919e-01 -3.415786905955628949e-01 +6.766295192641297307e-01 5.044547361238745919e-01 1.839948362535360205e-01 +2.835588170606819780e-01 1.844072938727307209e-01 5.034443100482813849e-01 +2.712698599400479860e-01 5.530819865924837542e-01 -6.701390881303301850e-01 +3.912762540896945196e-01 -4.347992197582773999e-01 -5.949908193635398967e-02 +7.855949526701798691e-01 1.308624669259461082e-01 -3.959208239067619761e-01 +-9.250385008389788721e-02 -5.494664516439904389e-01 1.264457922344831287e-01 +4.551698116500383362e-01 -3.086125409484862248e-01 -6.794461883909840294e-01 +-3.192949948158811946e-01 -1.058942542901264022e-01 7.903204360151183661e-01 +-8.612147447600198014e-01 -5.786066842228993617e-02 7.198467807550619491e-02 diff --git a/nilearn/surface/data/ball_cloud_20_samples.csv b/nilearn/surface/data/ball_cloud_20_samples.csv new file mode 100644 index 0000000000..92bc638f9c --- /dev/null +++ b/nilearn/surface/data/ball_cloud_20_samples.csv @@ -0,0 +1,20 @@ +5.842216548676750776e-01 4.376164784861447665e-01 
1.663757942055911487e-02 +-2.194847686244247587e-01 -7.109372386034291669e-01 -6.537449345099249909e-02 +-4.471528844891878296e-02 7.064973136116384778e-01 -1.786583114313875786e-01 +-6.707160253685089391e-01 -2.367542857005384138e-01 -1.792212957579401300e-01 +2.178549901595938643e-01 -2.785285000717222514e-01 -6.560721021198115954e-01 +6.871573796912544552e-01 -2.228533963342110569e-01 1.723513240770535859e-01 +2.080943661175675696e-01 -5.279849681932176075e-01 4.646502878400943226e-01 +-3.486157656624189194e-01 5.515283507105400718e-01 3.393222567555757574e-01 +-1.483797620716929000e-01 2.554959811707789227e-02 7.213829985668764877e-01 +6.399999939748245437e-01 -4.124943553074976454e-02 -3.554664972590649286e-01 +3.473983519451797619e-01 -6.311387933802345973e-01 -1.511337279634965280e-01 +2.851339828515243591e-01 3.933725239953038133e-01 -5.586017146778773368e-01 +-2.767360871741600947e-01 1.978903233653403482e-01 -6.509497378458691808e-01 +1.926190211704723876e-01 5.646672295229083760e-01 4.209662240925434173e-01 +-2.788672784884560496e-01 -4.035829215738325204e-01 -5.581754700210062037e-01 +-3.954094300072152901e-01 -4.734944802989128343e-01 4.183409557265382328e-01 +4.370919278787561701e-01 6.281309763243778099e-02 6.011114535121920843e-01 +-5.973605410158238094e-01 3.709749576328147391e-01 -1.957838205252134534e-01 +-3.346005309885992721e-03 -2.204224341652283314e-02 1.993257851754927368e-02 +-6.589176106946886824e-01 5.817647547833440513e-02 3.533542909878894522e-01 diff --git a/nilearn/surface/data/ball_cloud_40_samples.csv b/nilearn/surface/data/ball_cloud_40_samples.csv new file mode 100644 index 0000000000..a9c7d0fb95 --- /dev/null +++ b/nilearn/surface/data/ball_cloud_40_samples.csv @@ -0,0 +1,40 @@ +-4.355925735060561799e-01 4.744387185092658332e-01 4.865608526234390663e-01 +-3.652317993224113435e-02 -7.908381270745744596e-01 -1.251777185204012988e-01 +2.333397505291869289e-01 2.802253440898189174e-01 3.600474979927715397e-02 +2.111812351590668796e-02 2.961613030236745492e-01 -7.508650501371789687e-01 +-4.766488848393080069e-01 -8.479208598375591321e-02 5.993662032710967980e-01 +3.141316285062427660e-01 -1.678132075039663051e-01 -9.064069879073227698e-02 +7.298781240946433657e-01 -2.630212394709173784e-01 -2.286430079318703401e-01 +-4.656458831270897381e-02 6.886613737175500960e-01 -3.245004469456924112e-01 +-4.441568138983346126e-01 -5.876342599005159734e-02 -6.461578053536478627e-01 +-3.348592051245692369e-01 -5.572140611078181260e-01 4.536692150796629575e-01 +7.163492809829055119e-01 2.935295978963431973e-01 -1.738748070031403969e-01 +-1.138927122007272058e-01 1.460799819052024107e-01 7.919312916851095618e-01 +-7.492691258603926263e-01 1.611454568055841752e-01 -2.534102745009135238e-01 +5.290834163851196870e-01 5.068273815856546571e-01 3.226095807699721485e-01 +3.575754697470022170e-01 7.257307657568657921e-01 -3.857038869544984760e-02 +-3.104237107461114697e-01 1.222528056235336058e-01 -7.513375731056939899e-02 +6.137718777492528338e-02 -1.609172754731106880e-01 3.350016021106668185e-01 +3.843526282477446321e-01 1.844676363336974767e-01 6.505544125182312065e-01 +3.795275138004317594e-01 4.867028817977501198e-01 -4.815570704861783513e-01 +5.871613686594342463e-01 -4.527320145979236155e-01 2.273577737549672129e-01 +1.492828191477571786e-01 -7.033946805226763965e-01 3.498588808145969287e-01 +2.427625040176864882e-02 -1.840277657726235583e-01 -7.857292897854342151e-01 +-6.528696478949383986e-01 -3.161253853198419117e-01 -2.924521439930459765e-01 +7.392272378701251201e-01 
4.257102296353597326e-02 2.476561477376221787e-01 +-2.381793617014762721e-01 -5.232800307892668812e-01 -5.280100914603548246e-01 +-4.024240850485799048e-01 4.152504770568481995e-01 -5.301851804296517923e-01 +4.631048310055247974e-01 -2.513511941777509251e-01 5.941151204256300833e-01 +2.830704807156398939e-01 -4.886571409792251353e-01 -5.411117627281345488e-01 +-7.190884146531447696e-01 2.031575934922867088e-01 2.468785592176648280e-01 +-1.520360951548836714e-01 -3.353976849406489769e-01 -2.215707978639088610e-02 +5.118089854881623380e-01 1.808855737854670256e-02 -5.920409525060285061e-01 +4.048638420577529984e-02 -3.278125647232136841e-01 7.507151173577555348e-01 +3.959345839124422950e-01 -6.653021545747872523e-01 -1.190012904514499564e-01 +-5.132697037326207079e-01 6.009270879951913447e-01 -4.781001945560951760e-02 +-1.117043936062098869e-02 3.505178394610215797e-02 -3.709295515413487121e-01 +-1.184643048675626253e-01 7.519029958046901108e-01 1.558972399018141719e-01 +-4.647612097508849738e-01 -6.727320587892747783e-01 -3.159514863799117029e-02 +-7.038654999077433860e-01 -2.507108609297006296e-01 1.780364813053967044e-01 +-1.159153922062834408e-01 2.400759284123138715e-01 3.067756561560779915e-01 +8.885437884847594003e-02 5.971260827465554311e-01 5.340625055704769641e-01 diff --git a/nilearn/surface/data/ball_cloud_80_samples.csv b/nilearn/surface/data/ball_cloud_80_samples.csv new file mode 100644 index 0000000000..5d74eae9f2 --- /dev/null +++ b/nilearn/surface/data/ball_cloud_80_samples.csv @@ -0,0 +1,80 @@ +3.262555142760086846e-01 -3.854636161803240979e-01 -6.405913920868614442e-01 +-3.332508030308519165e-01 2.397322704687279982e-01 -1.759194149338177415e-01 +3.946205752335784078e-01 4.469596871557266621e-02 2.475783731288624945e-01 +-2.698262853500476011e-01 -6.702922019782792118e-01 3.917143756213053440e-01 +-3.772515586198929016e-01 -5.154965029400488952e-01 -4.739218571501211952e-01 +6.147098388511126332e-01 4.914510555614984288e-01 2.188045787455347724e-01 +-5.575197499412268970e-02 1.483376456697227397e-01 8.213707485711944090e-01 +1.972455582767498050e-01 7.321495652968792456e-01 -3.000696732828219138e-01 +-7.512186315618207821e-02 7.479681693352847605e-01 3.802239381509175398e-01 +-8.228920647326849469e-01 1.044704429997933975e-01 1.545615843076205698e-01 +-6.038276170274072818e-01 3.287790237275440286e-01 4.627635126178621938e-01 +4.922148499181822623e-01 -5.486158829283857008e-01 3.201847413101034934e-01 +-2.404020158017483866e-01 9.860112333542396523e-02 -7.698187867920961436e-01 +6.425883456152909137e-03 -4.747621878385193117e-01 1.126924368580077412e-01 +7.821090662179631670e-01 1.673272549263735975e-01 2.219534981876404023e-01 +5.231871762794176178e-02 8.163108991953083038e-01 1.172786463441832620e-02 +3.302703311502487304e-01 1.550291269531196559e-01 7.438113705479559856e-01 +3.812002873204423103e-01 2.934803589349491704e-01 -6.569345041348834124e-01 +-3.435171503525978332e-01 -8.279828523635693327e-02 -3.306119449185566106e-01 +-5.043181294925269675e-01 -4.306633896467955136e-01 4.476122856102467407e-01 +-5.183106244606362489e-01 5.841391886205067330e-01 -2.169003386990010218e-01 +6.575267066155660034e-01 -4.954813310478879407e-01 -4.223476793674293267e-02 +7.329406638685140862e-01 -2.515797467053866310e-01 2.906347910343493046e-01 +-1.744079311377981223e-01 3.663860101700270322e-01 1.894222054519310861e-01 +-6.805253528342450897e-01 -4.521471349707480436e-01 -1.952663038227593351e-01 +-5.773067250476234813e-01 1.590944012837150201e-01 -5.940514691077994902e-01 
+-4.455140404174229990e-02 -3.324744962058523989e-01 -7.380776950989580421e-01 +2.592616770890403266e-02 8.841013134651969085e-02 -4.560627480591995520e-01 +3.213150846548978867e-01 -7.644932653869754846e-01 3.403045280897368935e-02 +2.291700544105458853e-03 -3.494251037871047849e-01 -2.843657153813490823e-01 +1.233803162885783178e-01 5.168491365870080401e-01 6.358608177699590636e-01 +2.517025147534251439e-01 -5.162970264425378319e-01 6.292509173747389273e-01 +-2.318209167164397566e-01 -3.026995005771295974e-02 4.078120467475971345e-01 +4.461373732622516686e-01 7.177293120218344313e-01 -4.556990930868482642e-02 +6.476714138224308748e-01 6.455329883597886453e-02 5.562676380144352395e-01 +-1.860326782314073585e-03 4.137848616614330788e-01 -2.304991323652210156e-01 +8.021401343655394367e-01 -7.084166892440788010e-02 -5.894136190358661986e-02 +7.349419441573475098e-02 -2.276451363781770987e-01 3.675758106983507689e-01 +-2.975633001797062249e-01 7.517144175645111837e-01 1.263782341998433492e-02 +-1.654806314595003991e-01 7.354338964201146966e-01 -3.396881656072840983e-01 +-1.120639096348921143e-01 -8.344382423010976435e-01 6.577606706062909259e-02 +-3.637489639080681014e-01 -1.767479509276985439e-01 7.311727593762991084e-01 +-4.021033756576388174e-01 1.529386000292534964e-01 7.130353977625731865e-01 +3.415615651866612534e-01 -3.007930900605256253e-01 4.073969792121461680e-02 +1.502188553525203585e-02 3.235836151696099838e-01 -7.877870216196302122e-01 +-3.345006802541954372e-01 4.558299016383890456e-01 -5.755683485402420807e-01 +1.203756082224029592e-02 2.627887156059839982e-02 -1.542444189930148972e-02 +-4.663240545186318475e-01 2.929327299897975870e-02 1.169613316765236999e-01 +4.473564721638071395e-01 -2.258065583148393396e-01 6.109621127614093261e-01 +-6.768460698420405430e-01 4.315266410020058840e-01 7.725832435508381280e-02 +-7.568922787390219842e-01 -3.119072325319900418e-01 1.778584871590067895e-01 +-4.461819946533983061e-01 6.252691311717589739e-01 2.881396758171432260e-01 +5.057441613625023313e-01 5.273956235309013030e-01 -3.929472433882583715e-01 +3.181508175870436661e-01 6.899555095762603951e-01 3.339656790967062139e-01 +4.788324441630524841e-01 4.041751499797106018e-01 5.249591625440308551e-01 +-7.880995627272365800e-01 -9.689671450119406726e-02 -1.727745844121635899e-01 +6.524771249266615725e-02 -7.667568209801455259e-01 -2.303001221313602798e-01 +6.863938253770456299e-01 -3.016940030855146393e-01 -3.609839592462449342e-01 +5.382811868381239817e-01 -4.656238114393324717e-02 -6.279592111782595909e-01 +-6.337394409017619301e-01 -2.042583410740495542e-01 -5.177854902903320511e-01 +1.423836081915543991e-02 -6.227240795530529782e-01 -5.727832240846502110e-01 +-4.920209959256136023e-01 -6.609401457507861899e-01 7.994743993583935948e-02 +-7.419867464825107284e-01 2.566204459980288011e-01 -2.680642576349015482e-01 +4.040763122972179655e-01 2.104976972928298828e-01 -1.649815270206576034e-01 +-3.202796101404180029e-01 -7.329760198396092497e-01 -2.272786033767917324e-01 +-2.600236201539403158e-01 4.611380693255858199e-01 5.907607132512578962e-01 +3.403009680656256530e-01 -1.308499373870331828e-01 -2.817626826700376785e-01 +9.911633677629802519e-02 -1.983687757144718955e-01 8.099661750503094337e-01 +9.931747735602791671e-02 1.874480194406295819e-01 4.172954251140651438e-01 +7.377533848158671637e-01 4.063241974571336068e-01 -1.021876308804053474e-01 +1.604103558010863684e-01 -4.396206357012594906e-02 -7.997339754708899795e-01 +2.174087251782576280e-01 4.273786499174761500e-01 
1.036442893327847309e-01 +-3.419068379666388435e-01 -2.570568273197633147e-01 -7.386581700006532580e-01 +-6.716836479592275255e-01 -5.198136093900693894e-02 4.843655667794159458e-01 +-1.478851135845805076e-01 -4.783618056577351174e-01 6.528436707760401081e-01 +1.182268438677049893e-01 5.938126576606220786e-01 -5.849609632701525674e-01 +7.177074680573874987e-01 1.587754580273513239e-01 -3.933932296629837899e-01 +4.035343034611867941e-01 -6.122033662110912822e-01 -3.533445368316120438e-01 +-3.114489072184036811e-01 -2.986883311327169221e-01 1.577086348400994278e-02 +1.164072967095364369e-01 -7.538461188639004051e-01 3.909952695742038853e-01 diff --git a/nilearn/surface/surface.py b/nilearn/surface/surface.py new file mode 100644 index 0000000000..00c1f795d2 --- /dev/null +++ b/nilearn/surface/surface.py @@ -0,0 +1,721 @@ +""" +Functions for surface manipulation. +""" +import os +import warnings +import gzip +from distutils.version import LooseVersion + +import numpy as np +from scipy import sparse, interpolate +import sklearn.preprocessing +import sklearn.cluster +try: + from sklearn.exceptions import EfficiencyWarning +except ImportError: + class EfficiencyWarning(UserWarning): + """Warning used to notify the user of inefficient computation.""" + +import nibabel +from nibabel import gifti + +from ..image import load_img +from ..image import resampling +from .._utils.compat import _basestring +from .. import _utils + + +def _uniform_ball_cloud(n_points=20, dim=3, n_monte_carlo=50000): + """Get points uniformly spaced in the unit ball.""" + rng = np.random.RandomState(0) + mc_cube = rng.uniform(-1, 1, size=(n_monte_carlo, dim)) + mc_ball = mc_cube[(mc_cube**2).sum(axis=1) <= 1.] + centroids, assignments, _ = sklearn.cluster.k_means( + mc_ball, n_clusters=n_points, random_state=0) + return centroids + + +def _load_uniform_ball_cloud(n_points=20): + stored_points = os.path.abspath( + os.path.join(__file__, '..', 'data', + 'ball_cloud_{}_samples.csv'.format(n_points))) + if os.path.isfile(stored_points): + points = np.loadtxt(stored_points) + return points + warnings.warn( + 'Cached sample positions are provided for ' + 'n_samples = 10, 20, 40, 80, 160. Since the number of samples does ' + 'have a big impact on the result, we strongly recommend using one ' + 'of these values when using kind="ball" for much better performance.', + EfficiencyWarning) + return _uniform_ball_cloud(n_points=n_points) + + +def _face_outer_normals(mesh): + """Get the normal to each triangle in a mesh. + + They are the outer normals if the mesh respects the convention that the + direction given by the direct order of a triangle's vertices (right-hand + rule) points outwards. + """ + vertices, faces = load_surf_mesh(mesh) + face_vertices = vertices[faces] + # The right-hand rule gives the direction of the outer normal + normals = np.cross(face_vertices[:, 1, :] - face_vertices[:, 0, :], + face_vertices[:, 2, :] - face_vertices[:, 0, :]) + normals = sklearn.preprocessing.normalize(normals) + return normals + + +def _surrounding_faces(mesh): + """Get matrix indicating which faces the nodes belong to. + + i, j is set if node i is a vertex of triangle j. + """ + vertices, faces = load_surf_mesh(mesh) + n_faces = faces.shape[0] + return sparse.csr_matrix((np.ones(3 * n_faces), (faces.ravel(), np.tile( + np.arange(n_faces), (3, 1)).T.ravel())), (vertices.shape[0], n_faces)) + + +def _vertex_outer_normals(mesh): + """Get the normal at each vertex in a triangular mesh. 
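+
+    In this implementation, the normal at a vertex is computed by summing
+    the normals of the faces that contain it (through the vertex/face
+    incidence matrix) and renormalizing the result.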
+ + They are the outer normals if the mesh respects the convention that the + direction given by the direct order of a triangle's vertices (right-hand + rule) points outwards. + """ + vertices, faces = load_surf_mesh(mesh) + vertex_faces = _surrounding_faces(mesh) + face_normals = _face_outer_normals(mesh) + normals = vertex_faces.dot(face_normals) + return sklearn.preprocessing.normalize(normals) + + +def _ball_sample_locations(mesh, affine, ball_radius=3., n_points=20): + """Locations to draw samples from to project volume data onto a mesh. + + For each mesh vertex, the locations of `n_points` points evenly spread in a + ball around the vertex are returned. + + Parameters + ---------- + mesh : pair of np arrays. + mesh[0] contains the 3d coordinates of the vertices + (shape n_vertices, 3) + mesh[1] contains, for each triangle, the indices into mesh[0] of its + vertices (shape n_triangles, 3) + + affine : array of shape (4, 4) + affine transformation from image voxels to the vertices' coordinate + space. + + ball_radius : float, optional (default=3.) + size in mm of the neighbourhood around each vertex in which to draw + samples + + n_points : int, optional (default=20) + number of samples to draw for each vertex. + + Returns + ------- + numpy array, shape (n_vertices, n_points, 3) + The locations, in voxel space, from which to draw samples. + First dimension iterates over mesh vertices, second dimension iterates + over the sample points associated to a vertex, third dimension is x, y, + z in voxel space. + + """ + vertices, faces = mesh + offsets_world_space = _load_uniform_ball_cloud( + n_points=n_points) * ball_radius + mesh_voxel_space = np.asarray( + resampling.coord_transform(*vertices.T, + affine=np.linalg.inv(affine))).T + linear_map = np.eye(affine.shape[0]) + linear_map[:-1, :-1] = affine[:-1, :-1] + offsets_voxel_space = np.asarray( + resampling.coord_transform(*offsets_world_space.T, + affine=np.linalg.inv(linear_map))).T + sample_locations_voxel_space = (mesh_voxel_space[:, np.newaxis, :] + + offsets_voxel_space[np.newaxis, :]) + return sample_locations_voxel_space + + +def _line_sample_locations( + mesh, affine, segment_half_width=3., n_points=10): + """Locations to draw samples from to project volume data onto a mesh. + + For each mesh vertex, the locations of `n_points` points evenly spread in a + segment of the normal to the vertex are returned. The line segment has + length 2 * `segment_half_width` and is centered at the vertex. + + Parameters + ---------- + mesh : pair of numpy.ndarray. + mesh[0] contains the 3d coordinates of the vertices + (shape n_vertices, 3) + mesh[1] contains, for each triangle, the indices into mesh[0] of its + vertices (shape n_triangles, 3) + + affine : numpy.ndarray of shape (4, 4) + affine transformation from image voxels to the vertices' coordinate + space. + + segment_half_width : float, optional (default=3.) + size in mm of the neighbourhood around each vertex in which to draw + samples + + n_points : int, optional (default=10) + number of samples to draw for each vertex. + + Returns + ------- + numpy array, shape (n_vertices, n_points, 3) + The locations, in voxel space, from which to draw samples. + First dimension iterates over mesh vertices, second dimension iterates + over the sample points associated to a vertex, third dimension is x, y, + z in voxel space. 
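+
+        For example, with the default n_points=10 and a mesh of 5 vertices,
+        the returned array has shape (5, 10, 3).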
+ + """ + vertices, faces = mesh + normals = _vertex_outer_normals(mesh) + offsets = np.linspace(-segment_half_width, segment_half_width, n_points) + sample_locations = vertices[ + np.newaxis, :, :] + normals * offsets[:, np.newaxis, np.newaxis] + sample_locations = np.rollaxis(sample_locations, 1) + sample_locations_voxel_space = np.asarray( + resampling.coord_transform( + *np.vstack(sample_locations).T, + affine=np.linalg.inv(affine))).T.reshape(sample_locations.shape) + return sample_locations_voxel_space + + +def _sample_locations(mesh, affine, radius, kind='line', n_points=None): + """Get either ball or line sample locations.""" + projectors = { + 'line': _line_sample_locations, + 'ball': _ball_sample_locations + } + if kind not in projectors: + raise ValueError( + '"kind" must be one of {}'.format(tuple(projectors.keys()))) + projector = projectors[kind] + # let the projector choose the default for n_points + # (for example a ball probably needs more than a line) + loc_kwargs = ({} if n_points is None else {'n_points': n_points}) + sample_locations = projector( + mesh, affine, radius, **loc_kwargs) + return sample_locations + + +def _masked_indices(sample_locations, img_shape, mask=None): + """Get the indices of sample points which should be ignored. + + Parameters: + ----------- + sample_locations : array, shape(n_sample_locations, 3) + The coordinates of candidate interpolation points + + img_shape : tuple + The dimensions of the image to be sampled + + mask : array of shape img_shape or None + Part of the image to be masked. If None, don't apply any mask. + + Returns + ------- + array of shape (n_sample_locations,) + True if this particular location should be ignored (outside of image or + masked). + + """ + kept = (sample_locations >= 0).all(axis=1) + for dim, size in enumerate(img_shape): + kept = np.logical_and(kept, sample_locations[:, dim] < size) + if mask is not None: + indices = np.asarray(np.round(sample_locations[kept]), dtype=int) + kept[kept] = mask[ + indices[:, 0], indices[:, 1], indices[:, 2]] != 0 + return ~kept + + +def _projection_matrix(mesh, affine, img_shape, + kind='line', radius=3., n_points=None, mask=None): + """Get a sparse matrix that projects volume data onto a mesh. + + Parameters + ---------- + mesh : str or numpy.ndarray + Either a file containing surface mesh geometry (valid formats + are .gii or Freesurfer specific files such as .orig, .pial, + .sphere, .white, .inflated) or a list of two Numpy arrays, + the first containing the x-y-z coordinates of the mesh + vertices, the second containing the indices (into coords) + of the mesh faces. + + affine : array of shape (4, 4) + affine transformation from image voxels to the vertices' coordinate + space. + + img_shape : 3-tuple of integers + The shape of the image to be projected. + + kind : {'line', 'ball'} + The strategy used to sample image intensities around each vertex. + + - 'line' (the default): + samples are regularly spaced along the normal to the mesh, over the + interval [-radius, +radius]. + - 'ball': + samples are regularly spaced inside a ball centered at the mesh + vertex. + + radius : float, optional (default=3.). + The size (in mm) of the neighbourhood from which samples are drawn + around each node. + + n_points : int or None, optional (default=None) + How many samples are drawn around each vertex and averaged. If None, + use a reasonable default for the chosen sampling strategy (20 for + 'ball' or 10 for 'line'). 
+        For performance reasons, if using kind="ball", choose `n_points` in
+        [10, 20, 40, 80, 160] (default is 20), because cached positions are
+        available.
+
+    mask : array of shape img_shape or None
+        Part of the image to be masked. If None, don't apply any mask.
+
+    Returns
+    -------
+    scipy.sparse.csr_matrix
+        Shape (n_mesh_vertices, n_voxels). The dot product of this matrix with
+        an image (represented as a column vector) gives the projection onto
+        mesh vertices.
+
+    See Also
+    --------
+    nilearn.surface.vol_to_surf
+        Compute the projection for one or several images.
+
+    """
+    # A user might want to call this function directly so check mask size.
+    if mask is not None and tuple(mask.shape) != img_shape:
+        raise ValueError('mask should have shape img_shape')
+    mesh = load_surf_mesh(mesh)
+    sample_locations = _sample_locations(
+        mesh, affine, kind=kind, radius=radius, n_points=n_points)
+    sample_locations = np.asarray(np.round(sample_locations), dtype=int)
+    n_vertices, n_points, img_dim = sample_locations.shape
+    masked = _masked_indices(np.vstack(sample_locations), img_shape, mask=mask)
+    sample_locations = np.rollaxis(sample_locations, -1)
+    sample_indices = np.ravel_multi_index(
+        sample_locations, img_shape, mode='clip').ravel()
+    row_indices, _ = np.mgrid[:n_vertices, :n_points]
+    row_indices = row_indices.ravel()
+    row_indices = row_indices[~masked]
+    sample_indices = sample_indices[~masked]
+    weights = np.ones(len(row_indices))
+    proj = sparse.csr_matrix(
+        (weights, (row_indices, sample_indices.ravel())),
+        shape=(n_vertices, np.prod(img_shape)))
+    proj = sklearn.preprocessing.normalize(proj, axis=1, norm='l1')
+    return proj
+
+
+def _nearest_voxel_sampling(images, mesh, affine, kind='ball', radius=3.,
+                            n_points=None, mask=None):
+    """In each image, measure the intensity at each node of the mesh.
+
+    Image intensity at each sample point is that of the nearest voxel.
+    A 2-d array is returned, where each row corresponds to an image and each
+    column to a mesh vertex.
+    See documentation of vol_to_surf for details.
+
+    """
+    proj = _projection_matrix(
+        mesh, affine, images[0].shape, kind=kind, radius=radius,
+        n_points=n_points, mask=mask)
+    data = np.asarray(images).reshape(len(images), -1).T
+    texture = proj.dot(data)
+    # if all samples around a mesh vertex are outside the image,
+    # there is no reasonable value to assign to this vertex.
+    # in this case we return NaN for this vertex.
+    texture[np.asarray(proj.sum(axis=1) == 0).ravel()] = np.nan
+    return texture.T
+
+
+def _interpolation_sampling(images, mesh, affine, kind='ball', radius=3,
+                            n_points=None, mask=None):
+    """In each image, measure the intensity at each node of the mesh.
+
+    Image intensity at each sample point is computed with trilinear
+    interpolation.
+    A 2-d array is returned, where each row corresponds to an image and each
+    column to a mesh vertex.
+    See documentation of vol_to_surf for details.
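+
+    Interpolation is delegated to scipy.interpolate.RegularGridInterpolator
+    (method='linear'); images are processed one at a time to limit memory use.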
+ + """ + sample_locations = _sample_locations( + mesh, affine, kind=kind, radius=radius, n_points=n_points) + n_vertices, n_points, img_dim = sample_locations.shape + grid = [np.arange(size) for size in images[0].shape] + interp_locations = np.vstack(sample_locations) + masked = _masked_indices(interp_locations, images[0].shape, mask=mask) + # loop over images rather than building a big array to use less memory + all_samples = [] + for img in images: + interpolator = interpolate.RegularGridInterpolator( + grid, img, + bounds_error=False, method='linear', fill_value=None) + samples = interpolator(interp_locations) + # if all samples around a mesh vertex are outside the image, + # there is no reasonable value to assign to this vertex. + # in this case we return NaN for this vertex. + samples[masked] = np.nan + all_samples.append(samples) + all_samples = np.asarray(all_samples) + all_samples = all_samples.reshape((len(images), n_vertices, n_points)) + texture = np.nanmean(all_samples, axis=2) + return texture + + +def vol_to_surf(img, surf_mesh, + radius=3., interpolation='linear', kind='line', + n_samples=None, mask_img=None): + """Extract surface data from a Nifti image. + + .. versionadded:: 0.4.0 + + Parameters + ---------- + + img : Niimg-like object, 3d or 4d. + See http://nilearn.github.io/manipulating_images/input_output.html + + surf_mesh : str or numpy.ndarray + Either a file containing surface mesh geometry (valid formats + are .gii or Freesurfer specific files such as .orig, .pial, + .sphere, .white, .inflated) or a list of two Numpy arrays, + the first containing the x-y-z coordinates of the mesh + vertices, the second containing the indices (into coords) + of the mesh faces. + + radius : float, optional (default=3.). + The size (in mm) of the neighbourhood from which samples are drawn + around each node. + + interpolation : {'linear', 'nearest'} + How the image intensity is measured at a sample point. + + - 'linear' (the default): + Use a trilinear interpolation of neighboring voxels. + - 'nearest': + Use the intensity of the nearest voxel. + + For one image, the speed difference is small, 'linear' takes about x1.5 + more time. For many images, 'nearest' scales much better, up to x20 + faster. + + kind : {'line', 'ball'} + The strategy used to sample image intensities around each vertex. + + - 'line' (the default): + samples are regularly spaced along the normal to the mesh, over the + interval [- `radius`, + `radius`]. + (sometimes called thickness sampling) + - 'ball': + samples are regularly spaced inside a ball centered at the mesh + vertex. + + n_samples : int or None, optional (default=None) + How many samples are drawn around each vertex and averaged. If + ``None``, use a reasonable default for the chosen sampling strategy + (20 for 'ball' or 10 for 'line'). + For performance reasons, if using `kind` ="ball", choose `n_samples` in + [10, 20, 40, 80, 160] (default is 20), because cached positions are + available. + + mask_img : Niimg-like object or None, optional (default=None) + Samples falling out of this mask or out of the image are ignored. + If ``None``, don't apply any mask. + + Returns + ------- + texture : numpy.ndarray, 1d or 2d. + If 3D image is provided, a 1d vector is returned, containing one value + for each mesh node. + If 4D image is provided, a 2d array is returned, where each row + corresponds to a mesh node. + + Notes + ----- + This function computes a value for each vertex of the mesh. 
In order to do
+    so, it selects a few points in the volume surrounding that vertex,
+    interpolates the image intensities at these sampling positions, and
+    averages the results.
+
+    Two strategies are available to select these positions.
+        - 'ball' uses points regularly spaced in a ball centered at the mesh
+          vertex. The radius of the ball is controlled by the parameter
+          `radius`.
+        - 'line' starts by drawing the normal to the mesh passing through this
+          vertex. It then selects a segment of this normal, centered at the
+          vertex, of length 2 * `radius`. Image intensities are measured at
+          points regularly spaced on this normal segment.
+
+    You can control how many samples are drawn by setting `n_samples`.
+
+    Once the sampling positions are chosen, those that fall outside of the 3d
+    image (or outside of the mask if you provided one) are discarded. If all
+    sample positions are discarded (which can happen, for example, if the
+    vertex itself is outside of the support of the image), the projection at
+    this vertex will be ``numpy.nan``.
+
+    The 3d image then needs to be interpolated at each of the remaining points.
+    Two options are available: 'nearest' selects the value of the nearest
+    voxel, and 'linear' performs trilinear interpolation of neighbouring
+    voxels. 'linear' may give better results - for example, the projected
+    values are more stable when resampling the 3d image or applying affine
+    transformations to it. For one image, the speed difference is small,
+    'linear' takes about x1.5 more time. For many images, 'nearest' scales much
+    better, up to x20 faster.
+
+    Once the 3d image has been interpolated at each sample point, the
+    interpolated values are averaged to produce the value associated with this
+    particular mesh vertex.
+
+    WARNING: This function is experimental and details such as the
+    interpolation method are subject to change.
+
+    """
+    sampling_schemes = {'linear': _interpolation_sampling,
+                        'nearest': _nearest_voxel_sampling}
+    if interpolation not in sampling_schemes:
+        raise ValueError('"interpolation" should be one of {}'.format(
+            tuple(sampling_schemes.keys())))
+    img = load_img(img)
+    if mask_img is not None:
+        mask_img = _utils.check_niimg(mask_img)
+        mask = resampling.resample_to_img(
+            mask_img, img, interpolation='nearest', copy=False).get_data()
+    else:
+        mask = None
+    original_dimension = len(img.shape)
+    img = _utils.check_niimg(img, atleast_4d=True)
+    frames = np.rollaxis(img.get_data(), -1)
+    mesh = load_surf_mesh(surf_mesh)
+    sampling = sampling_schemes[interpolation]
+    texture = sampling(
+        frames, mesh, img.affine, radius=radius, kind=kind,
+        n_points=n_samples, mask=mask)
+    if original_dimension == 3:
+        texture = texture[0]
+    return texture.T
+
+
+def _load_surf_files_gifti_gzip(surf_file):
+    """Load surface data from gzipped Gifti files.
+
+    Used by load_surf_mesh and load_surf_data to extract
+    gzipped files.
+
+    Part of this code can be removed once the minimum supported
+    nibabel version is above 2.0.2.
+    """
+    with gzip.open(surf_file) as f:
+        as_bytes = f.read()
+    if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):
+        parser = gifti.GiftiImage.parser()
+        parser.parse(as_bytes)
+        gifti_img = parser.img
+    else:
+        from nibabel.gifti.parse_gifti_fast import ParserCreate, Outputter
+        parser = ParserCreate()
+        parser.buffer_text = True
+        out = Outputter()
+        parser.StartElementHandler = out.StartElementHandler
+        parser.EndElementHandler = out.EndElementHandler
+        parser.CharacterDataHandler = out.CharacterDataHandler
+        parser.Parse(as_bytes)
+        gifti_img = out.img
+    return gifti_img
+
+
+def _gifti_img_to_data(gifti_img):
+    """Extract data (e.g. sulcal depth or a statistical map) from a
+    nibabel.gifti.GiftiImage.
+
+    Used by load_surf_data to handle surface data from both .gii and
+    .gii.gz files.
+    """
+    if not gifti_img.darrays:
+        raise ValueError('Gifti must contain at least one data array')
+    return np.asarray([arr.data for arr in gifti_img.darrays]).T.squeeze()
+
+
+# function to figure out datatype and load data
+def load_surf_data(surf_data):
+    """Load data to be represented on a surface mesh.
+
+    Parameters
+    ----------
+    surf_data : str or numpy.ndarray
+        Either a file containing surface data (valid formats are .gii,
+        .gii.gz, .mgz, .nii, .nii.gz, or Freesurfer specific files such as
+        .thickness, .curv, .sulc, .annot, .label) or
+        a Numpy array containing surface data.
+
+    Returns
+    -------
+    data : numpy.ndarray
+        An array containing surface data
+    """
+    # if the input is a filename, load it
+    if isinstance(surf_data, _basestring):
+        if (surf_data.endswith('nii') or surf_data.endswith('nii.gz') or
+                surf_data.endswith('mgz')):
+            data = np.squeeze(nibabel.load(surf_data).get_data())
+        elif (surf_data.endswith('curv') or surf_data.endswith('sulc') or
+                surf_data.endswith('thickness')):
+            data = nibabel.freesurfer.io.read_morph_data(surf_data)
+        elif surf_data.endswith('annot'):
+            data = nibabel.freesurfer.io.read_annot(surf_data)[0]
+        elif surf_data.endswith('label'):
+            data = nibabel.freesurfer.io.read_label(surf_data)
+        elif surf_data.endswith('gii'):
+            if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):
+                gii = nibabel.load(surf_data)
+            else:
+                gii = gifti.read(surf_data)
+            data = _gifti_img_to_data(gii)
+        elif surf_data.endswith('gii.gz'):
+            gii = _load_surf_files_gifti_gzip(surf_data)
+            data = _gifti_img_to_data(gii)
+        else:
+            raise ValueError(('The input type is not recognized. %r was given '
+                              'while valid inputs are a Numpy array or one of '
+                              'the following file formats: .gii, .gii.gz, '
+                              '.mgz, .nii, .nii.gz, Freesurfer specific files '
+                              'such as .curv, .sulc, .thickness, .annot, '
+                              '.label') % surf_data)
+    # if the input is a numpy array
+    elif isinstance(surf_data, np.ndarray):
+        data = np.squeeze(surf_data)
+    else:
+        raise ValueError('The input type is not recognized. '
+                         'Valid inputs are a Numpy array or one of the '
+                         'following file formats: .gii, .gii.gz, .mgz, .nii, '
+                         '.nii.gz, Freesurfer specific files such as .curv, '
+                         '.sulc, .thickness, .annot, .label')
+    return data
+
+
+def _gifti_img_to_mesh(gifti_img):
+    """Extract mesh coordinates and faces from a nibabel.gifti.GiftiImage.
+
+    Used by load_surf_mesh to handle surface meshes from both .gii and
+    .gii.gz files.
+    """
+    if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):
+        try:
+            coords = gifti_img.get_arrays_from_intent(
+                nibabel.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])[0].data
+        except IndexError:
+            raise ValueError('Gifti file needs to contain a data array '
+                             'with intent NIFTI_INTENT_POINTSET')
+        try:
+            faces = gifti_img.get_arrays_from_intent(
+                nibabel.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])[0].data
+        except IndexError:
+            raise ValueError('Gifti file needs to contain a data array '
+                             'with intent NIFTI_INTENT_TRIANGLE')
+    else:
+        try:
+            coords = gifti_img.getArraysFromIntent(
+                nibabel.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])[0].data
+        except IndexError:
+            raise ValueError('Gifti file needs to contain a data array '
+                             'with intent NIFTI_INTENT_POINTSET')
+        try:
+            faces = gifti_img.getArraysFromIntent(
+                nibabel.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])[0].data
+        except IndexError:
+            raise ValueError('Gifti file needs to contain a data array '
+                             'with intent NIFTI_INTENT_TRIANGLE')
+
+    return coords, faces
+
+
+# function to figure out datatype and load data
+def load_surf_mesh(surf_mesh):
+    """Load a surface mesh geometry.
+
+    Parameters
+    ----------
+    surf_mesh : str or numpy.ndarray
+        Either a file containing surface mesh geometry (valid formats
+        are .gii, .gii.gz or Freesurfer specific files such as .orig, .pial,
+        .sphere, .white, .inflated) or a list or tuple of two Numpy arrays,
+        the first containing the x-y-z coordinates of the mesh
+        vertices, the second containing the indices (into coords)
+        of the mesh faces.
+
+    Returns
+    -------
+    [coords, faces] : List of two numpy.ndarray
+        The first containing the x-y-z coordinates of the mesh vertices,
+        the second containing the indices (into coords) of the mesh faces.
+    """
+    # if input is a filename, try to load it
+    if isinstance(surf_mesh, _basestring):
+        if (surf_mesh.endswith('orig') or surf_mesh.endswith('pial') or
+                surf_mesh.endswith('white') or surf_mesh.endswith('sphere') or
+                surf_mesh.endswith('inflated')):
+            coords, faces = nibabel.freesurfer.io.read_geometry(surf_mesh)
+        elif surf_mesh.endswith('gii'):
+            if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):
+                gifti_img = nibabel.load(surf_mesh)
+            else:
+                gifti_img = gifti.read(surf_mesh)
+            coords, faces = _gifti_img_to_mesh(gifti_img)
+        elif surf_mesh.endswith('.gii.gz'):
+            gifti_img = _load_surf_files_gifti_gzip(surf_mesh)
+            coords, faces = _gifti_img_to_mesh(gifti_img)
+        else:
+            raise ValueError(('The input type is not recognized. %r was given '
+                              'while valid inputs are one of the following '
+                              'file formats: .gii, .gii.gz, Freesurfer specific'
+                              ' files such as .orig, .pial, .sphere, .white, '
+                              '.inflated or a list containing two Numpy '
+                              'arrays [vertex coordinates, face indices]'
+                              ) % surf_mesh)
+    elif isinstance(surf_mesh, (list, tuple)):
+        try:
+            coords, faces = surf_mesh
+        except Exception:
+            raise ValueError(('If a list or tuple is given as input, '
+                              'it must have two elements, the first is '
+                              'a Numpy array containing the x-y-z coordinates '
+                              'of the mesh vertices, the second is a Numpy '
+                              'array containing the indices (into coords) of '
+                              'the mesh faces. The input was a list with '
+                              '%r elements.') % len(surf_mesh))
+    else:
+        raise ValueError('The input type is not recognized. '
+                         'Valid inputs are one of the following file '
+                         'formats: .gii, .gii.gz, Freesurfer specific files '
+                         'such as .orig, .pial, .sphere, .white, .inflated '
+                         'or a list containing two Numpy arrays '
+                         '[vertex coordinates, face indices]')
+
+    return [coords, faces]
+
+
+def check_mesh_and_data(mesh, data):
+    """Load surface mesh and data, check that they have compatible shapes."""
+    mesh = load_surf_mesh(mesh)
+    nodes, faces = mesh
+    data = load_surf_data(data)
+    if len(data) != len(nodes):
+        raise ValueError(
+            'Mismatch between number of nodes in mesh ({}) and '
+            'size of surface data ({})'.format(len(nodes), len(data)))
+    return mesh, data
diff --git a/nilearn/surface/tests/__init__.py b/nilearn/surface/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nilearn/surface/tests/data/__init__.py b/nilearn/surface/tests/data/__init__.py
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/nilearn/surface/tests/data/__init__.py
@@ -0,0 +1 @@
+
diff --git a/nilearn/surface/tests/data/test.annot b/nilearn/surface/tests/data/test.annot
new file mode 100644
index 0000000000..016592fa19
Binary files /dev/null and b/nilearn/surface/tests/data/test.annot differ
diff --git a/nilearn/surface/tests/data/test.label b/nilearn/surface/tests/data/test.label
new file mode 100644
index 0000000000..4feb5ed57c
--- /dev/null
+++ b/nilearn/surface/tests/data/test.label
@@ -0,0 +1,12 @@
+#!ascii label , from subject fsaverage5 vox2ras=TkReg
+326
+5900 -15.869 -33.770 74.187 0.4444440007
+5899 -16.323 -32.170 73.531 0.5555559993
+5901 -15.718 -36.573 72.549 0.4444440007
+5902 -17.190 -39.268 70.851 0.3333329856
+2638 -18.197 -33.185 73.204 0.5555559993
+8756 -61.004 -17.019 24.824 0.1111110002
+6241 -60.198 -18.047 23.736 0.1111110002
+8757 -61.604 -15.225 23.800 0.1111110002
+1896 -58.260 -24.190 21.811 0.1111110002
+6243 -58.792 -18.143 22.817 0.1111110002
diff --git a/nilearn/surface/tests/test_surface.py b/nilearn/surface/tests/test_surface.py
new file mode 100644
index 0000000000..d8bf0ae57e
--- /dev/null
+++ b/nilearn/surface/tests/test_surface.py
@@ -0,0 +1,459 @@
+# Tests for functions in surface.py
+
+import os
+import tempfile
+import warnings
+import itertools
+
+from distutils.version import LooseVersion
+from nose import SkipTest
+from numpy.testing import (assert_array_equal, assert_array_almost_equal,
+                           assert_equal)
+from nose.tools import assert_true, assert_raises
+from nilearn._utils.testing import assert_raises_regex, assert_warns
+
+import numpy as np
+from scipy.spatial import Delaunay
+import sklearn
+
+import nibabel as nb
+from nibabel import gifti
+
+from nilearn import datasets
+from nilearn import image
+from nilearn.image import resampling
+from
nilearn.image.tests.test_resampling import rotation +from nilearn.surface import surface +from nilearn.surface import load_surf_data, load_surf_mesh, vol_to_surf +from nilearn.surface.surface import (_gifti_img_to_mesh, + _load_surf_files_gifti_gzip) + +currdir = os.path.dirname(os.path.abspath(__file__)) +datadir = os.path.join(currdir, 'data') + + +def _generate_surf(): + rng = np.random.RandomState(42) + coords = rng.rand(20, 3) + faces = rng.randint(coords.shape[0], size=(30, 3)) + return [coords, faces] + + +def test_load_surf_data_array(): + # test loading and squeezing data from numpy array + data_flat = np.zeros((20, )) + data_squeeze = np.zeros((20, 1, 3)) + assert_array_equal(load_surf_data(data_flat), np.zeros((20, ))) + assert_array_equal(load_surf_data(data_squeeze), np.zeros((20, 3))) + + +def test_load_surf_data_file_nii_gii(): + # test loading of fake data from gifti file + filename_gii = tempfile.mktemp(suffix='.gii') + if LooseVersion(nb.__version__) > LooseVersion('2.0.2'): + darray = gifti.GiftiDataArray(data=np.zeros((20, ))) + else: + # Avoid a bug in nibabel 1.2.0 where GiftiDataArray were not + # initialized properly: + darray = gifti.GiftiDataArray.from_array(np.zeros((20, )), + intent='t test') + gii = gifti.GiftiImage(darrays=[darray]) + gifti.write(gii, filename_gii) + assert_array_equal(load_surf_data(filename_gii), np.zeros((20, ))) + os.remove(filename_gii) + + # test loading of data from empty gifti file + filename_gii_empty = tempfile.mktemp(suffix='.gii') + gii_empty = gifti.GiftiImage() + gifti.write(gii_empty, filename_gii_empty) + assert_raises_regex(ValueError, + 'must contain at least one data array', + load_surf_data, filename_gii_empty) + os.remove(filename_gii_empty) + + # test loading of fake data from nifti file + filename_nii = tempfile.mktemp(suffix='.nii') + filename_niigz = tempfile.mktemp(suffix='.nii.gz') + nii = nb.Nifti1Image(np.zeros((20, )), affine=None) + nb.save(nii, filename_nii) + nb.save(nii, filename_niigz) + assert_array_equal(load_surf_data(filename_nii), np.zeros((20, ))) + assert_array_equal(load_surf_data(filename_niigz), np.zeros((20, ))) + os.remove(filename_nii) + os.remove(filename_niigz) + + +def test_load_surf_data_gii_gz(): + # Test the loader `load_surf_data` with gzipped fsaverage5 files + + # surface data + fsaverage = datasets.fetch_surf_fsaverage().sulc_left + gii = _load_surf_files_gifti_gzip(fsaverage) + assert_true(isinstance(gii, gifti.GiftiImage)) + + data = load_surf_data(fsaverage) + assert_true(isinstance(data, np.ndarray)) + + # surface mesh + fsaverage = datasets.fetch_surf_fsaverage().pial_left + gii = _load_surf_files_gifti_gzip(fsaverage) + assert_true(isinstance(gii, gifti.GiftiImage)) + + +def test_load_surf_data_file_freesurfer(): + # test loading of fake data from sulc and thickness files + # using load_surf_data. 
+ # We test load_surf_data by creating fake data with function + # 'write_morph_data' that works only if nibabel + # version is recent with nibabel >= 2.1.0 + if LooseVersion(nb.__version__) >= LooseVersion('2.1.0'): + data = np.zeros((20, )) + filename_sulc = tempfile.mktemp(suffix='.sulc') + nb.freesurfer.io.write_morph_data(filename_sulc, data) + assert_array_equal(load_surf_data(filename_sulc), np.zeros((20, ))) + os.remove(filename_sulc) + + filename_thick = tempfile.mktemp(suffix='.thickness') + nb.freesurfer.io.write_morph_data(filename_thick, data) + assert_array_equal(load_surf_data(filename_thick), np.zeros((20, ))) + os.remove(filename_thick) + + # test loading of data from real label and annot files + label_start = np.array([5900, 5899, 5901, 5902, 2638]) + label_end = np.array([8756, 6241, 8757, 1896, 6243]) + label = load_surf_data(os.path.join(datadir, 'test.label')) + assert_array_equal(label[:5], label_start) + assert_array_equal(label[-5:], label_end) + assert_equal(label.shape, (10, )) + del label, label_start, label_end + + annot_start = np.array([24, 29, 28, 27, 24, 31, 11, 25, 0, 12]) + annot_end = np.array([16, 16, 16, 16, 16, 16, 16, 16, 16, 16]) + annot = load_surf_data(os.path.join(datadir, 'test.annot')) + assert_array_equal(annot[:10], annot_start) + assert_array_equal(annot[-10:], annot_end) + assert_equal(annot.shape, (10242, )) + del annot, annot_start, annot_end + + +def test_load_surf_data_file_error(): + # test if files with unexpected suffixes raise errors + data = np.zeros((20, )) + wrong_suff = ['.vtk', '.obj', '.mnc', '.txt'] + for suff in wrong_suff: + filename_wrong = tempfile.mktemp(suffix=suff) + np.savetxt(filename_wrong, data) + assert_raises_regex(ValueError, + 'input type is not recognized', + load_surf_data, filename_wrong) + os.remove(filename_wrong) + + +def test_load_surf_mesh_list(): + # test if correct list is returned + mesh = _generate_surf() + assert_equal(len(load_surf_mesh(mesh)), 2) + assert_array_equal(load_surf_mesh(mesh)[0], mesh[0]) + assert_array_equal(load_surf_mesh(mesh)[1], mesh[1]) + # test if incorrect list, array or dict raises error + assert_raises_regex(ValueError, 'it must have two elements', + load_surf_mesh, []) + assert_raises_regex(ValueError, 'it must have two elements', + load_surf_mesh, [mesh[0]]) + assert_raises_regex(ValueError, 'it must have two elements', + load_surf_mesh, [mesh[0], mesh[1], mesh[1]]) + assert_raises_regex(ValueError, 'input type is not recognized', + load_surf_mesh, mesh[0]) + assert_raises_regex(ValueError, 'input type is not recognized', + load_surf_mesh, dict()) + del mesh + + +def test_gifti_img_to_mesh(): + mesh = _generate_surf() + + coord_array = gifti.GiftiDataArray(data=mesh[0]) + coord_array.intent = nb.nifti1.intent_codes['NIFTI_INTENT_POINTSET'] + + face_array = gifti.GiftiDataArray(data=mesh[1]) + face_array.intent = nb.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'] + + gii = gifti.GiftiImage(darrays=[coord_array, face_array]) + coords, faces = _gifti_img_to_mesh(gii) + assert_array_equal(coords, mesh[0]) + assert_array_equal(faces, mesh[1]) + + +def test_load_surf_mesh_file_gii_gz(): + # Test the loader `load_surf_mesh` with gzipped fsaverage5 files + + fsaverage = datasets.fetch_surf_fsaverage().pial_left + coords, faces = load_surf_mesh(fsaverage) + assert_true(isinstance(coords, np.ndarray)) + assert_true(isinstance(faces, np.ndarray)) + + +def test_load_surf_mesh_file_gii(): + # Test the loader `load_surf_mesh` + + # If nibabel is of older version we skip tests as nibabel 
does not
+    # support the intent argument, and intent codes are not handled properly
+    # in older versions
+
+    if not LooseVersion(nb.__version__) >= LooseVersion('2.1.0'):
+        raise SkipTest
+
+    mesh = _generate_surf()
+
+    # test if correct gii is loaded into correct list
+    filename_gii_mesh = tempfile.mktemp(suffix='.gii')
+
+    coord_array = gifti.GiftiDataArray(data=mesh[0],
+                                       intent=nb.nifti1.intent_codes[
+                                           'NIFTI_INTENT_POINTSET'])
+    face_array = gifti.GiftiDataArray(data=mesh[1],
+                                      intent=nb.nifti1.intent_codes[
+                                          'NIFTI_INTENT_TRIANGLE'])
+
+    gii = gifti.GiftiImage(darrays=[coord_array, face_array])
+    gifti.write(gii, filename_gii_mesh)
+    assert_array_equal(load_surf_mesh(filename_gii_mesh)[0], mesh[0])
+    assert_array_equal(load_surf_mesh(filename_gii_mesh)[1], mesh[1])
+    os.remove(filename_gii_mesh)
+
+    # test if incorrect gii raises error
+    filename_gii_mesh_no_point = tempfile.mktemp(suffix='.gii')
+    gifti.write(gifti.GiftiImage(darrays=[face_array, face_array]),
+                filename_gii_mesh_no_point)
+    assert_raises_regex(ValueError, 'NIFTI_INTENT_POINTSET',
+                        load_surf_mesh, filename_gii_mesh_no_point)
+    os.remove(filename_gii_mesh_no_point)
+
+    filename_gii_mesh_no_face = tempfile.mktemp(suffix='.gii')
+    gifti.write(gifti.GiftiImage(darrays=[coord_array, coord_array]),
+                filename_gii_mesh_no_face)
+    assert_raises_regex(ValueError, 'NIFTI_INTENT_TRIANGLE',
+                        load_surf_mesh, filename_gii_mesh_no_face)
+    os.remove(filename_gii_mesh_no_face)
+
+
+def test_load_surf_mesh_file_freesurfer():
+    # Older nibabel versions do not support 'write_geometry'
+    if LooseVersion(nb.__version__) <= LooseVersion('1.2.0'):
+        raise SkipTest
+
+    mesh = _generate_surf()
+    for suff in ['.pial', '.inflated', '.white', '.orig', 'sphere']:
+        filename_fs_mesh = tempfile.mktemp(suffix=suff)
+        nb.freesurfer.write_geometry(filename_fs_mesh, mesh[0], mesh[1])
+        assert_equal(len(load_surf_mesh(filename_fs_mesh)), 2)
+        assert_array_almost_equal(load_surf_mesh(filename_fs_mesh)[0],
+                                  mesh[0])
+        assert_array_almost_equal(load_surf_mesh(filename_fs_mesh)[1],
+                                  mesh[1])
+        os.remove(filename_fs_mesh)
+
+
+def test_load_surf_mesh_file_error():
+    if LooseVersion(nb.__version__) <= LooseVersion('1.2.0'):
+        raise SkipTest
+
+    # test if files with unexpected suffixes raise errors
+    mesh = _generate_surf()
+    wrong_suff = ['.vtk', '.obj', '.mnc', '.txt']
+    for suff in wrong_suff:
+        filename_wrong = tempfile.mktemp(suffix=suff)
+        nb.freesurfer.write_geometry(filename_wrong, mesh[0], mesh[1])
+        assert_raises_regex(ValueError,
+                            'input type is not recognized',
+                            load_surf_data, filename_wrong)
+        os.remove(filename_wrong)
+
+
+def _flat_mesh(x_s, y_s, z=0):
+    x, y = np.mgrid[:x_s, :y_s]
+    x, y = x.ravel(), y.ravel()
+    z = np.ones(len(x)) * z
+    vertices = np.asarray([x, y, z]).T
+    triangulation = Delaunay(vertices[:, :2]).simplices
+    mesh = [vertices, triangulation]
+    return mesh
+
+
+def _z_const_img(x_s, y_s, z_s):
+    hslice = np.arange(x_s * y_s).reshape((x_s, y_s))
+    return np.ones((x_s, y_s, z_s)) * hslice[:, :, np.newaxis]
+
+
+def test_vertex_outer_normals():
+    # compute normals for a flat horizontal mesh, they should all be (0, 0, 1)
+    mesh = _flat_mesh(5, 7)
+    computed_normals = surface._vertex_outer_normals(mesh)
+    true_normals = np.zeros((len(mesh[0]), 3))
+    true_normals[:, 2] = 1
+    assert_array_almost_equal(computed_normals, true_normals)
+
+
+def test_load_uniform_ball_cloud():
+    for n_points in [10, 20, 40, 80, 160]:
+        with warnings.catch_warnings(record=True) as w:
+            points = surface._load_uniform_ball_cloud(n_points=n_points)
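+            # a cached cloud should load without recording any
+            # EfficiencyWarning (len(w) stays 0, checked just below)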
+ assert_array_equal(points.shape, (n_points, 3)) + assert_equal(len(w), 0) + assert_warns(surface.EfficiencyWarning, + surface._load_uniform_ball_cloud, n_points=3) + for n_points in [3, 10, 20]: + computed = surface._uniform_ball_cloud(n_points) + loaded = surface._load_uniform_ball_cloud(n_points) + assert_array_almost_equal(computed, loaded) + + +def test_sample_locations(): + # check positions of samples on toy example, with an affine != identity + # flat horizontal mesh + mesh = _flat_mesh(5, 7) + affine = np.diagflat([10, 20, 30, 1]) + inv_affine = np.linalg.inv(affine) + # transform vertices to world space + vertices = np.asarray( + resampling.coord_transform(*mesh[0].T, affine=affine)).T + # compute by hand the true offsets in voxel space + # (transformed by affine^-1) + ball_offsets = surface._load_uniform_ball_cloud(10) + ball_offsets = np.asarray( + resampling.coord_transform(*ball_offsets.T, affine=inv_affine)).T + line_offsets = np.zeros((10, 3)) + line_offsets[:, 2] = np.linspace(-1, 1, 10) + line_offsets = np.asarray( + resampling.coord_transform(*line_offsets.T, affine=inv_affine)).T + # check we get the same locations + for kind, offsets in [('line', line_offsets), ('ball', ball_offsets)]: + locations = surface._sample_locations( + [vertices, mesh[1]], affine, 1., kind=kind, n_points=10) + true_locations = np.asarray([vertex + offsets for vertex in mesh[0]]) + assert_array_equal(locations.shape, true_locations.shape) + assert_array_almost_equal(true_locations, locations) + assert_raises(ValueError, surface._sample_locations, + mesh, affine, 1., kind='bad_kind') + + +def test_masked_indices(): + mask = np.ones((4, 3, 8)) + mask[:, :, ::2] = 0 + locations = np.mgrid[:5, :3, :8].ravel().reshape((3, -1)) + masked = surface._masked_indices(locations.T, mask.shape, mask) + # These elements are masked by the mask + assert_true((masked[::2] == 1).all()) + # The last element of locations is one row beyond first image dimension + assert_true((masked[-24:] == 1).all()) + # 4 * 3 * 8 / 2 elements should remain unmasked + assert_true((1 - masked).sum() == 48) + + +def test_projection_matrix(): + mesh = _flat_mesh(5, 7, 4) + img = _z_const_img(5, 7, 13) + proj = surface._projection_matrix( + mesh, np.eye(4), img.shape, radius=2., n_points=10) + # proj matrix has shape (n_vertices, img_size) + assert_equal(proj.shape, (5 * 7, 5 * 7 * 13)) + # proj.dot(img) should give the values of img at the vertices' locations + values = proj.dot(img.ravel()).reshape((5, 7)) + assert_array_almost_equal(values, img[:, :, 0]) + mesh = _flat_mesh(5, 7) + proj = surface._projection_matrix( + mesh, np.eye(4), (5, 7, 1), radius=.1, n_points=10) + assert_array_almost_equal(proj.toarray(), np.eye(proj.shape[0])) + mask = np.ones(img.shape, dtype=int) + mask[0] = 0 + proj = surface._projection_matrix( + mesh, np.eye(4), img.shape, radius=2., n_points=10, mask=mask) + proj = proj.toarray() + # first row of the mesh is masked + assert_array_almost_equal(proj.sum(axis=1)[:7], np.zeros(7)) + assert_array_almost_equal(proj.sum(axis=1)[7:], np.ones(proj.shape[0] - 7)) + # mask and img should have the same shape + assert_raises(ValueError, surface._projection_matrix, + mesh, np.eye(4), img.shape, mask=np.ones((3, 3, 2))) + + +def test_sampling_affine(): + # check sampled (projected) values on a toy image + img = np.ones((4, 4, 4)) + img[1, :, :] = 2 + nodes = [[1, 1, 2], [10, 10, 20], [30, 30, 30]] + mesh = [np.asarray(nodes), None] + affine = 10 * np.eye(4) + affine[-1, -1] = 1 + texture = 
surface._nearest_voxel_sampling( + [img], mesh, affine=affine, radius=1, kind='ball') + assert_array_equal(texture[0], [1., 2., 1.]) + texture = surface._interpolation_sampling( + [img], mesh, affine=affine, radius=0, kind='ball') + assert_array_almost_equal(texture[0], [1.1, 2., 1.]) + + +def test_sampling(): + mesh = _flat_mesh(5, 7, 4) + img = _z_const_img(5, 7, 13) + mask = np.ones(img.shape, dtype=int) + mask[0] = 0 + projectors = [surface._nearest_voxel_sampling, + surface._interpolation_sampling] + for kind in ('line', 'ball'): + for projector in projectors: + projection = projector([img], mesh, np.eye(4), + kind=kind, radius=0.) + assert_array_almost_equal(projection.ravel(), img[:, :, 0].ravel()) + projection = projector([img], mesh, np.eye(4), + kind=kind, radius=0., mask=mask) + assert_array_almost_equal(projection.ravel()[7:], + img[1:, :, 0].ravel()) + assert_true(np.isnan(projection.ravel()[:7]).all()) + + +def test_vol_to_surf(): + # test 3d niimg to cortical surface projection and invariance to a change + # of affine + mni = datasets.load_mni152_template() + mesh = _generate_surf() + _check_vol_to_surf_results(mni, mesh) + fsaverage = datasets.fetch_surf_fsaverage5().pial_left + _check_vol_to_surf_results(mni, fsaverage) + + +def _check_vol_to_surf_results(img, mesh): + mni_mask = datasets.load_mni152_brain_mask() + for kind, interpolation, mask_img in itertools.product( + ['ball', 'line'], ['linear', 'nearest'], [mni_mask, None]): + proj_1 = vol_to_surf( + img, mesh, kind=kind, interpolation=interpolation, + mask_img=mask_img) + assert_true(proj_1.ndim == 1) + img_rot = image.resample_img( + img, target_affine=rotation(np.pi / 3., np.pi / 4.)) + proj_2 = vol_to_surf( + img_rot, mesh, kind=kind, interpolation=interpolation, + mask_img=mask_img) + # The projection values for the rotated image should be close + # to the projection for the original image + diff = np.abs(proj_1 - proj_2) / np.abs(proj_1) + assert_true(np.mean(diff[diff < np.inf]) < .03) + img_4d = image.concat_imgs([img, img]) + proj_4d = vol_to_surf( + img_4d, mesh, kind=kind, interpolation=interpolation, + mask_img=mask_img) + nodes, _ = surface.load_surf_mesh(mesh) + assert_array_equal(proj_4d.shape, [nodes.shape[0], 2]) + assert_array_almost_equal(proj_4d[:, 0], proj_1, 3) + + +def test_check_mesh_and_data(): + mesh = _generate_surf() + data = mesh[0][:, 0] + m, d = surface.check_mesh_and_data(mesh, data) + assert (m[0] == mesh[0]).all() + assert (m[1] == mesh[1]).all() + assert (d == data).all() + data = mesh[0][::2, 0] + assert_raises(ValueError, surface.check_mesh_and_data, mesh, data) diff --git a/nilearn/tests/test_cache_mixin.py b/nilearn/tests/test_cache_mixin.py index ec0e2c70a2..0578c60a68 100644 --- a/nilearn/tests/test_cache_mixin.py +++ b/nilearn/tests/test_cache_mixin.py @@ -1,18 +1,21 @@ """ Test the _utils.cache_mixin module """ +import glob +import json import os import shutil import tempfile -import json -import glob +from distutils.version import LooseVersion +import sklearn from nose.tools import assert_false, assert_true, assert_equal - from sklearn.externals.joblib import Memory import nilearn -from nilearn._utils import cache_mixin +from nilearn._utils import cache_mixin, CacheMixin +from nilearn._utils.testing import assert_raises_regex + def f(x): @@ -20,6 +23,31 @@ def f(x): return x +def test_check_memory(): + # Test if _check_memory returns a memory object with the cachedir equal to + # input path + try: + temp_dir = tempfile.mkdtemp() + + mem_none = Memory(cachedir=None) + 
mem_temp = Memory(cachedir=temp_dir)
+
+        for mem in [None, mem_none]:
+            memory = cache_mixin._check_memory(mem, verbose=False)
+            assert_true(isinstance(memory, Memory))
+            assert_equal(memory.cachedir, mem_none.cachedir)
+
+        for mem in [temp_dir, mem_temp]:
+            memory = cache_mixin._check_memory(mem, verbose=False)
+            assert_equal(memory.cachedir, mem_temp.cachedir)
+            assert_true(isinstance(memory, Memory))
+
+    finally:
+        if os.path.exists(temp_dir):
+            shutil.rmtree(temp_dir)
+
+
 def test__safe_cache_dir_creation():
     # Test the _safe_cache function that is supposed to flush the
     # cache if the nibabel version changes
@@ -87,3 +115,84 @@ def test_cache_memory_level():
     assert_equal(len(glob.glob(job_glob)), 2)
     cache_mixin.cache(f, mem)(3)
     assert_equal(len(glob.glob(job_glob)), 3)
+
+
+class CacheMixinTest(CacheMixin):
+    """Dummy mock object that wraps a CacheMixin."""
+
+    def __init__(self, memory=None, memory_level=1):
+        self.memory = memory
+        self.memory_level = memory_level
+
+    def run(self):
+        self._cache(f)
+
+
+def test_cache_mixin_with_expand_user():
+    # Test the memory cache is correctly created when using ~.
+    cache_dir = "~/nilearn_data/test_cache"
+    expand_cache_dir = os.path.expanduser(cache_dir)
+    mixin_mock = CacheMixinTest(cache_dir)
+
+    try:
+        assert_false(os.path.exists(expand_cache_dir))
+        mixin_mock.run()
+        assert_true(os.path.exists(expand_cache_dir))
+    finally:
+        if os.path.exists(expand_cache_dir):
+            shutil.rmtree(expand_cache_dir)
+
+
+def test_cache_mixin_without_expand_user():
+    # Test that a ValueError is raised and no cache is created when the
+    # expansion of ~ is disabled via nilearn.EXPAND_PATH_WILDCARDS.
+    cache_dir = "~/nilearn_data/test_cache"
+    expand_cache_dir = os.path.expanduser(cache_dir)
+    mixin_mock = CacheMixinTest(cache_dir)
+
+    try:
+        assert_false(os.path.exists(expand_cache_dir))
+        nilearn.EXPAND_PATH_WILDCARDS = False
+        assert_raises_regex(ValueError,
+                            "Given cache path parent directory doesn't",
+                            mixin_mock.run)
+        assert_false(os.path.exists(expand_cache_dir))
+        nilearn.EXPAND_PATH_WILDCARDS = True
+    finally:
+        if os.path.exists(expand_cache_dir):
+            shutil.rmtree(expand_cache_dir)
+
+
+def test_cache_mixin_wrong_dirs():
+    # Test that the memory cache raises a ValueError when the input base
+    # path doesn't exist.
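+    # Covers both an absolute path with a nonexistent parent and a '~'-based
+    # path whose parent directory is missing.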
+ + for cache_dir in ("/bad_dir/cache", + "~/nilearn_data/tmp/test_cache"): + expand_cache_dir = os.path.expanduser(cache_dir) + mixin_mock = CacheMixinTest(cache_dir) + + try: + assert_raises_regex(ValueError, + "Given cache path parent directory doesn't", + mixin_mock.run) + assert_false(os.path.exists(expand_cache_dir)) + finally: + if os.path.exists(expand_cache_dir): + shutil.rmtree(expand_cache_dir) + + +def test_cache_shelving(): + try: + temp_dir = tempfile.mkdtemp() + job_glob = os.path.join(temp_dir, 'joblib', 'nilearn', 'tests', + 'test_cache_mixin', 'f', '*') + mem = Memory(cachedir=temp_dir, verbose=0) + res = cache_mixin.cache(f, mem, shelve=True)(2) + assert_equal(res.get(), 2) + assert_equal(len(glob.glob(job_glob)), 1) + res = cache_mixin.cache(f, mem, shelve=True)(2) + assert_equal(res.get(), 2) + assert_equal(len(glob.glob(job_glob)), 1) + finally: + del mem + shutil.rmtree(temp_dir, ignore_errors=True) diff --git a/nilearn/tests/test_masking.py b/nilearn/tests/test_masking.py index 5cd62095da..0ef8ba05d0 100644 --- a/nilearn/tests/test_masking.py +++ b/nilearn/tests/test_masking.py @@ -6,16 +6,23 @@ import numpy as np from numpy.testing import assert_array_equal -from nose.tools import assert_true, assert_false, assert_equal, \ - assert_raises +from nose.tools import ( + assert_true, + assert_false, + assert_equal, + assert_raises, + ) from nibabel import Nifti1Image from nilearn import masking from nilearn.masking import (compute_epi_mask, compute_multi_epi_mask, - compute_background_mask, unmask, _unmask_3d, - _unmask_4d, intersect_masks, MaskWarning) + compute_background_mask, compute_gray_matter_mask, + compute_multi_gray_matter_mask, + unmask, _unmask_3d, _unmask_4d, intersect_masks, + MaskWarning, _extrapolate_out_mask) from nilearn._utils.testing import (write_tmp_imgs, assert_raises_regex) +from nilearn._utils.testing import assert_warns from nilearn._utils.exceptions import DimensionError from nilearn.input_data import NiftiMasker @@ -23,7 +30,11 @@ else np.version.short_version) np_version = distutils.version.LooseVersion(np_version).version +_TEST_DIM_ERROR_MSG = ("Input data has incompatible dimensionality: " + "Expected dimension is 3D and you provided " + "a %s image") + def test_compute_epi_mask(): mean_image = np.ones((9, 9, 3)) mean_image[3:-2, 3:-2, :] = 10 @@ -93,6 +104,28 @@ def test_compute_background_mask(): assert_true(isinstance(w[0].message, masking.MaskWarning)) +def test_compute_gray_matter_mask(): + image = Nifti1Image(np.ones((9, 9, 9)), np.eye(4)) + + mask = compute_gray_matter_mask(image, threshold=-1) + mask1 = np.zeros((9, 9, 9)) + mask1[2:-2, 2:-2, 2:-2] = 1 + + np.testing.assert_array_equal(mask1, mask.get_data()) + + # Check that we get a useful warning for empty masks + assert_warns(masking.MaskWarning, compute_gray_matter_mask, image, threshold=1) + + # Check that masks obtained from same FOV are the same + img1 = Nifti1Image(np.full((9, 9, 9), np.random.rand()), np.eye(4)) + img2 = Nifti1Image(np.full((9, 9, 9), np.random.rand()), np.eye(4)) + + mask_img1 = compute_gray_matter_mask(img1) + mask_img2 = compute_gray_matter_mask(img2) + np.testing.assert_array_equal(mask_img1.get_data(), + mask_img2.get_data()) + + def test_apply_mask(): """ Test smoothing of timeseries extraction """ @@ -133,7 +166,7 @@ def test_apply_mask(): # veriy that 4D masks are rejected mask_img_4d = Nifti1Image(np.ones((40, 40, 40, 2)), np.eye(4)) - assert_raises_regex(DimensionError, "Data must be a 3D", + assert_raises_regex(DimensionError, 
_TEST_DIM_ERROR_MSG % "4D",
                         masking.apply_mask, data_img, mask_img_4d)
 
     # Check that 3D data is accepted
@@ -146,7 +179,7 @@ def test_apply_mask():
     assert_equal(sorted(data_3d.tolist()), [3., 4., 12.])
 
     # Check data shape and affine
-    assert_raises_regex(DimensionError, "Data must be a 3D",
+    assert_raises_regex(DimensionError, _TEST_DIM_ERROR_MSG % "2D",
                         masking.apply_mask, data_img,
                         Nifti1Image(mask[20, ...], affine))
     assert_raises(ValueError, masking.apply_mask,
@@ -175,10 +208,10 @@ def test_unmask():
     masked4D = data4D[mask, :].T
     unmasked4D = data4D.copy()
-    unmasked4D[-mask, :] = 0
+    unmasked4D[np.logical_not(mask), :] = 0
     masked3D = data3D[mask]
     unmasked3D = data3D.copy()
-    unmasked3D[-mask] = 0
+    unmasked3D[np.logical_not(mask)] = 0
 
     # 4D Test, test value ordering at the same time.
     t = unmask(masked4D, mask_img, order="C").get_data()
@@ -321,6 +354,17 @@ def test_intersect_masks():
     mask_ab[2, 2] = 1
     mask_ab_ = intersect_masks([mask_a_img, mask_b_img], threshold=1.)
     assert_array_equal(mask_ab, mask_ab_.get_data())
+    # Test intersect_masks on mask images with a big-endian dtype ('>f8'):
+    # with connected=True (the default), intersect_masks goes through
+    # largest_connected_component
+    mask_a_img_change_dtype = Nifti1Image(mask_a_img.get_data().astype('>f8'),
+                                          affine=mask_a_img.affine)
+    mask_b_img_change_dtype = Nifti1Image(mask_b_img.get_data().astype('>f8'),
+                                          affine=mask_b_img.affine)
+    mask_ab_change_type = intersect_masks([mask_a_img_change_dtype,
+                                           mask_b_img_change_dtype],
+                                          threshold=1.)
+    assert_array_equal(mask_ab, mask_ab_change_type.get_data())
 
     mask_abc = mask_a + mask_b + mask_c
     mask_abc_ = intersect_masks([mask_a_img, mask_b_img, mask_c_img],
@@ -369,6 +413,26 @@ def test_compute_multi_epi_mask():
     assert_array_equal(mask_ab, mask_ab_.get_data())
+
+def test_compute_multi_gray_matter_mask():
+    assert_raises(TypeError, compute_multi_gray_matter_mask, [])
+
+    # Check error raised if images with different shapes are given as input
+    imgs = [Nifti1Image(np.ones((9, 9, 9)), np.eye(4)),
+            Nifti1Image(np.ones((9, 9, 8)), np.eye(4))]
+    assert_raises(ValueError, compute_multi_gray_matter_mask, imgs)
+
+    # Check results are the same if affine is the same
+    imgs1 = [Nifti1Image(np.random.randn(9, 9, 9), np.eye(4)),
+             Nifti1Image(np.random.randn(9, 9, 9), np.eye(4))]
+    mask1 = compute_multi_gray_matter_mask(imgs1)
+
+    imgs2 = [Nifti1Image(np.random.randn(9, 9, 9), np.eye(4)),
+             Nifti1Image(np.random.randn(9, 9, 9), np.eye(4))]
+    mask2 = compute_multi_gray_matter_mask(imgs2)
+
+    assert_array_equal(mask1.get_data(), mask2.get_data())
+
+
 def test_error_shape(random_state=42, shape=(3, 5, 7, 11)):
     # open-ended `if ..
elif` in masking.unmask @@ -397,3 +461,94 @@ def test_nifti_masker_empty_mask_warning(): ValueError, "The mask is invalid as it is empty: it masks all data", NiftiMasker(mask_strategy="epi").fit_transform, X) + + +def test_unmask_list(random_state=42): + rng = np.random.RandomState(random_state) + shape = (3, 4, 5) + affine = np.eye(4) + mask_data = (rng.rand(*shape) < .5) + mask_img = Nifti1Image(mask_data.astype(np.uint8), affine) + a = unmask(mask_data[mask_data], mask_img) + b = unmask(mask_data[mask_data].tolist(), mask_img) # shouldn't crash + assert_array_equal(a.get_data(), b.get_data()) + + +def test__extrapolate_out_mask(): + # Input data: + initial_data = np.zeros((5,5,5)) + initial_data[1,2,2] = 1 + initial_data[2,1,2] = 2 + initial_data[2,2,1] = 3 + initial_data[3,2,2] = 4 + initial_data[2,3,2] = 5 + initial_data[2,2,3] = 6 + initial_mask = initial_data.copy() != 0 + + # Expected result + target_data = np.array([[[0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 1. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ]], + + [[0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 1.5, 0. , 0. ], + [0. , 2. , 1. , 3.5, 0. ], + [0. , 0. , 3. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ]], + + [[0. , 0. , 2. , 0. , 0. ], + [0. , 2.5, 2. , 4. , 0. ], + [3. , 3. , 3.5, 6. , 6. ], + [0. , 4. , 5. , 5.5, 0. ], + [0. , 0. , 5. , 0. , 0. ]], + + [[0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 3. , 0. , 0. ], + [0. , 3.5, 4. , 5. , 0. ], + [0. , 0. , 4.5, 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ]], + + [[0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 4. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ]]]) + target_mask = np.array([[[False, False, False, False, False], + [False, False, False, False, False], + [False, False, True, False, False], + [False, False, False, False, False], + [False, False, False, False, False]], + + [[False, False, False, False, False], + [False, False, True, False, False], + [False, True, True, True, False], + [False, False, True, False, False], + [False, False, False, False, False]], + + [[False, False, True, False, False], + [False, True, True, True, False], + [ True, True, True, True, True], + [False, True, True, True, False], + [False, False, True, False, False]], + + [[False, False, False, False, False], + [False, False, True, False, False], + [False, True, True, True, False], + [False, False, True, False, False], + [False, False, False, False, False]], + + [[False, False, False, False, False], + [False, False, False, False, False], + [False, False, True, False, False], + [False, False, False, False, False], + [False, False, False, False, False]]]) + + + # Test: + extrapolated_data, extrapolated_mask = _extrapolate_out_mask(initial_data, + initial_mask, + iterations=1) + assert_array_equal(extrapolated_data, target_data) + assert_array_equal(extrapolated_mask, target_mask) diff --git a/nilearn/tests/test_ndimage.py b/nilearn/tests/test_ndimage.py index 6ad18d8c49..6f35b2f41c 100644 --- a/nilearn/tests/test_ndimage.py +++ b/nilearn/tests/test_ndimage.py @@ -3,12 +3,12 @@ This test file is in nilearn/tests because nosetests ignores modules whose name starts with an underscore """ -from scipy import ndimage from nose.tools import assert_raises - import numpy as np -from nilearn._utils.ndimage import largest_connected_component, _peak_local_max +from nilearn._utils.ndimage import (largest_connected_component, + _peak_local_max) +from nilearn._utils import testing def test_largest_cc(): @@ -18,9 +18,23 @@ def 
test_largest_cc():
     assert_raises(ValueError, largest_connected_component, a)
     a[1:3, 1:3, 1:3] = 1
     np.testing.assert_equal(a, largest_connected_component(a))
+    # A simple test with non-native dtype
+    a_change_type = a.astype('>f8')
+    np.testing.assert_equal(a, largest_connected_component(a_change_type))
+
     b = a.copy()
     b[5, 5, 5] = 1
     np.testing.assert_equal(a, largest_connected_component(b))
+    # A simple test with non-native dtype
+    b_change_type = b.astype('>f8')
+    np.testing.assert_equal(a, largest_connected_component(b_change_type))
+
+    # Test that the correct errors are raised when an image or a string is
+    # passed
+    img = testing.generate_labeled_regions(shape=(10, 11, 12),
+                                           n_regions=2)
+
+    assert_raises(ValueError, largest_connected_component, img)
+    assert_raises(ValueError, largest_connected_component, "Test String")
 
 
 def test_empty_peak_local_max():
diff --git a/nilearn/tests/test_niimg.py b/nilearn/tests/test_niimg.py
index ea081b29db..4c63c3f50e 100644
--- a/nilearn/tests/test_niimg.py
+++ b/nilearn/tests/test_niimg.py
@@ -11,7 +11,6 @@
 from nilearn._utils.testing import assert_raises_regex
 
-
 currdir = os.path.dirname(os.path.abspath(__file__))
 
 
@@ -23,7 +22,7 @@ def test_copy_img():
 def test_copy_img_side_effect():
     img1 = Nifti1Image(np.ones((2, 2, 2, 2)), affine=np.eye(4))
     hash1 = joblib.hash(img1)
-    img2 = niimg.copy_img(img1)
+    niimg.copy_img(img1)
     hash2 = joblib.hash(img1)
     assert_equal(hash1, hash2)
 
@@ -31,6 +30,21 @@ def test_new_img_like_side_effect():
     img1 = Nifti1Image(np.ones((2, 2, 2, 2)), affine=np.eye(4))
     hash1 = joblib.hash(img1)
-    img2 = new_img_like(img1, np.ones((2, 2, 2, 2)), img1.get_affine().copy(), copy_header=True)
+    new_img_like(img1, np.ones((2, 2, 2, 2)), img1.affine.copy(),
+                 copy_header=True)
     hash2 = joblib.hash(img1)
     assert_equal(hash1, hash2)
+
+
+def test_get_target_dtype():
+    img = Nifti1Image(np.ones((2, 2, 2), dtype=np.float64), affine=np.eye(4))
+    assert_equal(img.get_data().dtype.kind, 'f')
+    dtype_kind_float = niimg._get_target_dtype(img.get_data().dtype,
+                                               target_dtype='auto')
+    assert_equal(dtype_kind_float, np.float32)
+
+    img2 = Nifti1Image(np.ones((2, 2, 2), dtype=np.int64), affine=np.eye(4))
+    assert_equal(img2.get_data().dtype.kind, 'i')
+    dtype_kind_int = niimg._get_target_dtype(img2.get_data().dtype,
+                                             target_dtype='auto')
+    assert_equal(dtype_kind_int, np.int32)
diff --git a/nilearn/tests/test_niimg_conversions.py b/nilearn/tests/test_niimg_conversions.py
index 4fe56b67d9..e359915622 100644
--- a/nilearn/tests/test_niimg_conversions.py
+++ b/nilearn/tests/test_niimg_conversions.py
@@ -24,6 +24,8 @@
 from nilearn._utils.exceptions import DimensionError
 from nilearn._utils import testing, niimg_conversions
 from nilearn._utils.testing import assert_raises_regex
+from nilearn._utils.testing import with_memory_profiler
+from nilearn._utils.testing import assert_memory_less_than
 from nilearn._utils.niimg_conversions import _iter_check_niimg
 
 
@@ -98,7 +100,10 @@ def test_check_niimg_3d():
     # Test dimensionality error
     img = Nifti1Image(np.zeros((10, 10, 10)), np.eye(4))
-    assert_raises_regex(TypeError, 'Data must be a 3D',
+    assert_raises_regex(TypeError,
+                        "Input data has incompatible dimensionality: "
+                        "Expected dimension is 3D and you provided a list "
+                        "of 3D images \(4D\).",
                         _utils.check_niimg_3d, [img, img])
 
     # Check that a filename does not raise an error
@@ -109,6 +114,10 @@ def test_check_niimg_3d():
     with testing.write_tmp_imgs(data_img, create_files=True) as filename:
         _utils.check_niimg_3d(filename)
 
+    # check data dtype
equal with dtype='auto' + img_check = _utils.check_niimg_3d(img, dtype='auto') + assert_equal(img.get_data().dtype.kind, img_check.get_data().dtype.kind) + def test_check_niimg_4d(): assert_raises_regex(TypeError, 'nibabel format', @@ -123,11 +132,11 @@ def test_check_niimg_4d(): # Tests with return_iterator=False img_4d_1 = _utils.check_niimg_4d([img_3d, img_3d]) assert_true(img_4d_1.get_data().shape == (10, 10, 10, 2)) - assert_array_equal(img_4d_1.get_affine(), affine) + assert_array_equal(img_4d_1.affine, affine) img_4d_2 = _utils.check_niimg_4d(img_4d_1) assert_array_equal(img_4d_2.get_data(), img_4d_2.get_data()) - assert_array_equal(img_4d_2.get_affine(), img_4d_2.get_affine()) + assert_array_equal(img_4d_2.affine, img_4d_2.affine) # Tests with return_iterator=True img_3d_iterator = _utils.check_niimg_4d([img_3d, img_3d], @@ -142,7 +151,7 @@ def test_check_niimg_4d(): for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2): assert_true(img_1.get_data().shape == (10, 10, 10)) assert_array_equal(img_1.get_data(), img_2.get_data()) - assert_array_equal(img_1.get_affine(), img_2.get_affine()) + assert_array_equal(img_1.affine, img_2.affine) img_3d_iterator_1 = _utils.check_niimg_4d([img_3d, img_3d], return_iterator=True) @@ -151,12 +160,14 @@ def test_check_niimg_4d(): for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2): assert_true(img_1.get_data().shape == (10, 10, 10)) assert_array_equal(img_1.get_data(), img_2.get_data()) - assert_array_equal(img_1.get_affine(), img_2.get_affine()) + assert_array_equal(img_1.affine, img_2.affine) # This should raise an error: a 3D img is given and we want a 4D - assert_raises_regex(DimensionError, 'Data must be a 4D Niimg-like object ' - 'but you provided a 3D', - _utils.check_niimg_4d, img_3d) + assert_raises_regex(DimensionError, + "Input data has incompatible dimensionality: " + "Expected dimension is 4D and you provided a " + "3D image.", + _utils.check_niimg_4d, img_3d) # Test a Niimg-like object that does not hold a shape attribute phony_img = PhonyNiimage() @@ -185,13 +196,24 @@ def test_check_niimg(): assert_raises_regex( DimensionError, - 'Data must be a 2D Niimg-like object but you provided a list of list ' - 'of list of 3D images.', _utils.check_niimg, img_3_3d, ensure_ndim=2) + "Input data has incompatible dimensionality: " + "Expected dimension is 2D and you provided " + "a list of list of list of 3D images \(6D\)", + _utils.check_niimg, img_3_3d, ensure_ndim=2) assert_raises_regex( DimensionError, - 'Data must be a 4D Niimg-like object but you provided a list of list ' - 'of 4D images.', _utils.check_niimg, img_2_4d, ensure_ndim=4) + "Input data has incompatible dimensionality: " + "Expected dimension is 4D and you provided " + "a list of list of 4D images \(6D\)", + _utils.check_niimg, img_2_4d, ensure_ndim=4) + + # check data dtype equal with dtype='auto' + img_3d_check = _utils.check_niimg(img_3d, dtype='auto') + assert_equal(img_3d.get_data().dtype.kind, img_3d_check.get_data().dtype.kind) + + img_4d_check = _utils.check_niimg(img_4d, dtype='auto') + assert_equal(img_4d.get_data().dtype.kind, img_4d_check.get_data().dtype.kind) def test_check_niimg_wildcards(): @@ -341,6 +363,23 @@ def test_iter_check_niimgs(): _utils.check_niimg(img_2_4d).get_data()) +def _check_memory(list_img_3d): + # We intentionally add an offset of memory usage to avoid non trustable + # measures with memory_profiler. 
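+    # Allocating ~100 MiB up front keeps the measured usage well above the
+    # profiler's noise floor; the iteration itself should add almost nothing.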
+ mem_offset = b'a' * 100 * 1024 ** 2 + list(_iter_check_niimg(list_img_3d)) + return mem_offset + + +@with_memory_profiler +def test_iter_check_niimgs_memory(): + # Verify that iterating over a list of images doesn't consume extra + # memory. + assert_memory_less_than(100, 0.1, _check_memory, + [Nifti1Image(np.ones((100, 100, 200)), np.eye(4)) + for i in range(10)]) + + def test_repr_niimgs(): # Test with file path assert_equal(_utils._repr_niimgs("test"), "test") @@ -383,15 +422,16 @@ def test_concat_niimgs(): # Regression test for #601. Dimensionality of first image was not checked # properly - assert_raises_regex(DimensionError, 'Data must be a 4D Niimg-like object but ' - 'you provided', + _dimension_error_msg = ("Input data has incompatible dimensionality: " + "Expected dimension is 4D and you provided " + "a list of 4D images \(5D\)") + assert_raises_regex(DimensionError, _dimension_error_msg, _utils.concat_niimgs, [img4d], ensure_ndim=4) # check basic concatenation with equal shape/affine concatenated = _utils.concat_niimgs((img1, img3, img1)) - assert_raises_regex(DimensionError, 'Data must be a 4D Niimg-like object but ' - 'you provided', + assert_raises_regex(DimensionError, _dimension_error_msg, _utils.concat_niimgs, [img1, img4d]) # smoke-test auto_resample @@ -405,12 +445,13 @@ def test_concat_niimgs(): auto_resample=False) # test list of 4D niimgs as input - tmpimg1 = tempfile.mktemp(suffix='.nii') - tmpimg2 = tempfile.mktemp(suffix='.nii') + tempdir = tempfile.mkdtemp() + tmpimg1 = os.path.join(tempdir, '1.nii') + tmpimg2 = os.path.join(tempdir, '2.nii') try: nibabel.save(img1, tmpimg1) nibabel.save(img3, tmpimg2) - concatenated = _utils.concat_niimgs([tmpimg1, tmpimg2]) + concatenated = _utils.concat_niimgs(os.path.join(tempdir, '*')) assert_array_equal( concatenated.get_data()[..., 0], img1.get_data()) assert_array_equal( @@ -418,6 +459,8 @@ def test_concat_niimgs(): finally: _remove_if_exists(tmpimg1) _remove_if_exists(tmpimg2) + if os.path.exists(tempdir): + os.removedirs(tempdir) img5d = Nifti1Image(np.ones((2, 2, 2, 2, 2)), affine) assert_raises_regex(TypeError, 'Concatenated images must be 3D or 4D. 
' @@ -425,6 +468,17 @@ def test_concat_niimgs(): [img5d, img5d]) +def test_concat_niimg_dtype(): + shape = [2, 3, 4] + vols = [nibabel.Nifti1Image( + np.zeros(shape + [n_scans]).astype(np.int16), np.eye(4)) + for n_scans in [1, 5]] + nimg = _utils.concat_niimgs(vols) + assert_equal(nimg.get_data().dtype, np.float32) + nimg = _utils.concat_niimgs(vols, dtype=None) + assert_equal(nimg.get_data().dtype, np.int16) + + def nifti_generator(buffer): for i in range(10): buffer.append(Nifti1Image(np.random.random((10, 10, 10)), np.eye(4))) diff --git a/nilearn/tests/test_param_validation.py b/nilearn/tests/test_param_validation.py index f44d6a8b07..05efd3ee01 100644 --- a/nilearn/tests/test_param_validation.py +++ b/nilearn/tests/test_param_validation.py @@ -2,15 +2,24 @@ Test the _utils.param_validation module """ -import warnings import numpy as np +import warnings +import os +import nibabel -from nose.tools import assert_true, assert_equal +from nose.tools import assert_equal, assert_true, assert_raises +from sklearn.base import BaseEstimator from nilearn._utils.testing import assert_raises_regex, assert_warns from nilearn._utils.extmath import fast_abs_percentile -from nilearn._utils.param_validation import check_threshold +from nilearn._utils.param_validation import (MNI152_BRAIN_VOLUME, + _get_mask_volume, + check_feature_screening, + check_threshold) + +mni152_brain_mask = ( + "/usr/share/fsl/data/standard/MNI152_T1_1mm_brain_mask.nii.gz") def test_check_threshold(): @@ -48,10 +57,42 @@ def test_check_threshold(): threshold_numpy_scalar = np.float64(threshold) assert_equal( check_threshold(threshold, matrix, percentile_func=fast_abs_percentile), - check_threshold(threshold_numpy_scalar, matrix, percentile_func=fast_abs_percentile)) + check_threshold(threshold_numpy_scalar, matrix, + percentile_func=fast_abs_percentile)) # Test for threshold provided as a percentile of the data (str ending with a # %) assert_true(1. < check_threshold("50%", matrix, percentile_func=fast_abs_percentile, name=name) <= 2.) 
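+    # "50%" is interpreted as the 50th percentile of np.abs(matrix), computed
+    # here by fast_abs_percentile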
+
+
+def test_get_mask_volume():
+    # Test that the hard-coded standard mask volume can be correctly computed
+    if os.path.isfile(mni152_brain_mask):
+        assert_equal(MNI152_BRAIN_VOLUME, _get_mask_volume(nibabel.load(
+            mni152_brain_mask)))
+    else:
+        warnings.warn("Couldn't find %s (for testing)" % (mni152_brain_mask))
+
+
+def test_feature_screening():
+    # dummy mask
+    mask_img_data = np.zeros((182, 218, 182))
+    mask_img_data[30:-30, 30:-30, 30:-30] = 1
+    affine = np.eye(4)
+    mask_img = nibabel.Nifti1Image(mask_img_data, affine=affine)
+
+    for is_classif in [True, False]:
+        for screening_percentile in [100, None, 20, 101, -1, 10]:
+
+            if screening_percentile == 100 or screening_percentile is None:
+                assert_equal(check_feature_screening(
+                    screening_percentile, mask_img, is_classif), None)
+            elif screening_percentile == 101 or screening_percentile == -1:
+                assert_raises(ValueError, check_feature_screening,
+                              screening_percentile, mask_img, is_classif)
+            elif screening_percentile == 20:
+                assert_true(isinstance(check_feature_screening(
+                    screening_percentile, mask_img, is_classif),
+                    BaseEstimator))
diff --git a/nilearn/tests/test_segmentation.py b/nilearn/tests/test_segmentation.py
index 80d227847c..40a7e4988a 100644
--- a/nilearn/tests/test_segmentation.py
+++ b/nilearn/tests/test_segmentation.py
@@ -54,3 +54,20 @@ def test_bad_inputs():
     labels[6, 8] = 5
     np.testing.assert_raises(ValueError, _random_walker, img, labels,
                              spacing=(1,))
+
+
+def test_reorder_labels():
+    # When the labels array contains non-consecutive integers, they are
+    # reordered to be consecutive (no gaps between values). The reordered
+    # labels must keep the same shape as the input.
+    # Issue #938, comment #14.
+    data = np.zeros((5, 5)) + 0.1 * np.random.randn(5, 5)
+    data[1:5, 1:5] = 1
+
+    labels = np.zeros_like(data)
+    labels[3, 3] = 1
+    labels[1, 4] = 4  # deliberately non-consecutive label value
+
+    labels = _random_walker(data, labels)
+    assert data.shape == labels.shape
+
diff --git a/nilearn/tests/test_signal.py b/nilearn/tests/test_signal.py
index 572d51b5b9..ea915b1cb1 100644
--- a/nilearn/tests/test_signal.py
+++ b/nilearn/tests/test_signal.py
@@ -8,6 +8,8 @@
 import numpy as np
 from nose.tools import assert_true, assert_false, assert_raises
+from sklearn.utils.testing import assert_less
+import nibabel
 
 # Use nisignal here to avoid name collisions (using nilearn.signal is
 # not possible)
@@ -23,7 +25,7 @@ def generate_signals(n_features=17, n_confounds=5, length=41,
     All returned signals have no trends at all (to machine precision).
 
     Parameters
-    ==========
+    ----------
     n_features, n_confounds : int, optional
         respectively number of features to generate, and number of confounds
         to use for generating noise signals.
@@ -39,7 +41,7 @@
         gives the contiguousness of the output arrays.
 
     Returns
-    =======
+    -------
     signals : numpy.ndarray, shape (length, n_features)
         unperturbed signals.
 
@@ -83,12 +85,12 @@ def generate_trends(n_features=17, length=41):
     """Generate linearly-varying signals, with zero mean.
 
     Parameters
-    ==========
+    ----------
     n_features, length : int
         respectively number of signals and number of samples to generate.
 
     Returns
-    =======
+    -------
     trends : numpy.ndarray, shape (length, n_features)
         output signals, one per column.
""" @@ -99,6 +101,15 @@ def generate_trends(n_features=17, length=41): return trends * factors +def generate_signals_plus_trends(n_features=17, n_samples=41): + + signals, _, _ = generate_signals(n_features=n_features, + length=n_samples) + trends = generate_trends(n_features=n_features, + length=n_samples) + return signals + trends + + def test_butterworth(): rand_gen = np.random.RandomState(0) n_features = 20000 @@ -119,8 +130,9 @@ def test_butterworth(): np.testing.assert_almost_equal(data, data_original) nisignal.butterworth(data, sampling, low_pass=low_pass, high_pass=high_pass, - copy=False, save_memory=True) + copy=False) np.testing.assert_almost_equal(out_single, data) + np.testing.assert_(id(out_single) != id(data)) # multiple timeseries data = rand_gen.randn(n_samples, n_features) @@ -131,6 +143,8 @@ def test_butterworth(): low_pass=low_pass, high_pass=high_pass, copy=True) np.testing.assert_almost_equal(data, data_original) + np.testing.assert_(id(out1) != id(data_original)) + # check that multiple- and single-timeseries filtering do the same thing. np.testing.assert_almost_equal(out1[:, 0], out_single) nisignal.butterworth(data, sampling, @@ -146,6 +160,7 @@ def test_butterworth(): low_pass=80., # Greater than nyq frequency copy=True) np.testing.assert_almost_equal(out1, out2) + np.testing.assert_(id(out1) != id(out2)) def test_standardize(): @@ -215,6 +230,12 @@ def test_detrend(): np.testing.assert_array_equal(length_1_signal, nisignal._detrend(length_1_signal)) + # Mean removal on integers + detrended = nisignal._detrend(x.astype(np.int64), inplace=True, + type="constant") + assert_less(abs(detrended.mean(axis=0)).max(), + 20. * np.finfo(np.float).eps) + def test_mean_of_squares(): """Test _mean_of_squares.""" @@ -243,6 +264,18 @@ def test_clean_detrending(): length=n_samples) x = signals + trends + # if NANs, data out should be False with ensure_finite=True + y = signals + trends + y[20, 150] = np.nan + y[5, 500] = np.nan + y[15, 14] = np.inf + y = nisignal.clean(y, ensure_finite=True) + assert_true(np.any(np.isfinite(y)), True) + + # test boolean is not given to signal.clean + assert_raises(TypeError, nisignal.clean, x, low_pass=False) + assert_raises(TypeError, nisignal.clean, x, high_pass=False) + # This should remove trends x_detrended = nisignal.clean(x, standardize=False, detrend=True, low_pass=None, high_pass=None) @@ -254,6 +287,37 @@ def test_clean_detrending(): assert_false(abs(x_undetrended - signals).max() < 0.06) +def test_clean_t_r(): + """Different TRs must produce different results after filtering""" + rng = np.random.RandomState(0) + n_samples = 34 + # n_features Must be higher than 500 + n_features = 501 + x_orig = generate_signals_plus_trends(n_features=n_features, + n_samples=n_samples) + random_tr_list1 = np.round(rng.rand(3) * 10, decimals=2) + random_tr_list2 = np.round(rng.rand(3) * 10, decimals=2) + for tr1, tr2 in zip(random_tr_list1, random_tr_list2): + low_pass_freq_list = tr1 * np.array([1.0 / 100, 1.0 / 110]) + high_pass_freq_list = tr1 * np.array([1.0 / 210, 1.0 / 190]) + for low_cutoff, high_cutoff in zip(low_pass_freq_list, + high_pass_freq_list): + det_one_tr = nisignal.clean(x_orig, t_r=tr1, low_pass=low_cutoff, + high_pass=high_cutoff) + det_diff_tr = nisignal.clean(x_orig, t_r=tr2, low_pass=low_cutoff, + high_pass=high_cutoff) + + if not np.isclose(tr1, tr2, atol=0.3): + msg = ('results do not differ for different TRs: {} and {} ' + 'at cutoffs: low_pass={}, high_pass={} ' + 'n_samples={}, n_features={}'.format( + tr1, tr2, low_cutoff, 
high_cutoff, + n_samples, n_features)) + np.testing.assert_(np.any(np.not_equal(det_one_tr, det_diff_tr)), + msg) + del det_one_tr, det_diff_tr + + def test_clean_frequencies(): sx1 = np.sin(np.linspace(0, 100, 2000)) sx2 = np.sin(np.linspace(0, 100, 2000)) @@ -349,9 +413,21 @@ def test_clean_confounds(): confounds=filename1) assert_raises(TypeError, nisignal.clean, signals, confounds=[None]) + assert_raises(ValueError, nisignal.clean, signals, t_r=None, + low_pass=.01) + + # Test without standardizing that constant parts of confounds are + # accounted for + np.testing.assert_almost_equal(nisignal.clean(np.ones((20, 2)), + standardize=False, + confounds=np.ones(20), + detrend=False, + ).mean(), + np.zeros((20, 2))) def test_high_variance_confounds(): + # C and F order might take different paths in the function. Check that the # result is identical. n_features = 1001 diff --git a/nilearn/tests/test_testing.py b/nilearn/tests/test_testing.py index f7fb842514..446c8c62d3 100644 --- a/nilearn/tests/test_testing.py +++ b/nilearn/tests/test_testing.py @@ -1,10 +1,37 @@ import itertools - import numpy as np from nose.tools import assert_equal, assert_raises -from nilearn._utils.testing import generate_fake_fmri +from nilearn._utils.testing import generate_fake_fmri, with_memory_profiler +from nilearn._utils.testing import assert_memory_less_than, assert_raises_regex + + +def create_object(size): + """Just create and return an object containing `size` bytes.""" + mem_use = b'a' * size + return mem_use + + +@with_memory_profiler +def test_memory_usage(): + # Valid measures (larger objects) + for mem in (500, 200): + assert_memory_less_than(mem, 0.1, create_object, mem * 1024 ** 2) + + # Ensure an exception is raised with too small objects as + # memory_profiler can return non trustable memory measure in this case. + assert_raises_regex(ValueError, + "Memory profiler measured an untrustable memory", + assert_memory_less_than, 50, 0.1, + create_object, 25 * 1024 ** 2) + + # Ensure ValueError is raised if memory used is above expected memory + # limit. + assert_raises_regex(ValueError, + "Memory consumption measured", + assert_memory_less_than, 100, 0.1, + create_object, 200 * 1024 ** 2) def test_generate_fake_fmri(): diff --git a/nilearn/version.py b/nilearn/version.py index 3a1475ed1b..52f21101dd 100644 --- a/nilearn/version.py +++ b/nilearn/version.py @@ -2,7 +2,7 @@ """ nilearn version, required package versions, and utilities for checking """ -# Author: Loïc Estève, Ben Cipollini +# Author: Loic Esteve, Ben Cipollini # License: simplified BSD # PEP0440 compatible formatted version, see: @@ -21,32 +21,31 @@ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # -__version__ = '0.2.1' +__version__ = '0.5.0a' _NILEARN_INSTALL_MSG = 'See %s for installation information.' % ( 'http://nilearn.github.io/introduction.html#installation') # This is a tuple to preserve order, so that dependencies are checked -# in some meaningful order (more => less 'core'). We avoid using -# collections.OrderedDict to preserve Python 2.6 compatibility. +# in some meaningful order (more => less 'core'). 
+# in some meaningful order (more => less 'core').
REQUIRED_MODULE_METADATA = ( ('numpy', { - 'min_version': '1.6.1', + 'min_version': '1.11', 'required_at_installation': True, 'install_info': _NILEARN_INSTALL_MSG}), ('scipy', { - 'min_version': '0.9.0', + 'min_version': '0.17', 'required_at_installation': True, 'install_info': _NILEARN_INSTALL_MSG}), ('sklearn', { - 'min_version': '0.13', + 'min_version': '0.18', 'required_at_installation': True, 'install_info': _NILEARN_INSTALL_MSG}), ('nibabel', { - 'min_version': '1.1.0', + 'min_version': '2.0.2', 'required_at_installation': False})) -OPTIONAL_MATPLOTLIB_MIN_VERSION = '1.1.1' +OPTIONAL_MATPLOTLIB_MIN_VERSION = '1.5.1' def _import_module_with_version_check( @@ -64,6 +63,10 @@ def _import_module_with_version_check( module_name, install_info or 'Please install it properly to use nilearn.') exc.args += (user_friendly_info,) + # Necessary for Python 3 because the repr/str of ImportError + # objects was changed in Python 3 + if hasattr(exc, 'msg'): + exc.msg += '. ' + user_friendly_info raise # Avoid choking on modules with no __version__ attribute diff --git a/setup.cfg b/setup.cfg index 245652c083..48ddd82a97 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,3 +16,9 @@ ignore-files=(plot_.*.py|conf\.py) [wheel] universal=1 + +[flake8] +# For PEP8 error codes see +# http://pep8.readthedocs.org/en/latest/intro.html#error-codes +# E402: module level import not at top of file +ignore=E402 diff --git a/setup.py b/setup.py index 1a13dd27fa..e2c106980d 100755 --- a/setup.py +++ b/setup.py @@ -79,16 +79,21 @@ def is_installing(): 'Operating System :: Unix', 'Operating System :: MacOS', 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', ], packages=find_packages(), package_data={'nilearn.datasets.data': ['*.nii.gz', '*.csv'], + 'nilearn.datasets.data.fsaverage5': ['*.gz'], + 'nilearn.surface.data': ['*.csv'], + 'nilearn.plotting.data.js': ['*.js'], + 'nilearn.plotting.data.html': ['*.html'], 'nilearn.plotting.glass_brain_files': ['*.json'], 'nilearn.tests.data': ['*'], 'nilearn.image.tests.data': ['*.mgz'], + 'nilearn.surface.tests.data': ['*.annot', '*.label'], 'nilearn.datasets.tests.data': ['*.*'], 'nilearn.datasets.description': ['*.rst']}, install_requires=install_requires,)
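For reference, the dependency table in nilearn/version.py above is what drives the
import-time version checks. A minimal sketch of that walk, assuming
_import_module_with_version_check keeps the (module_name, minimum_version,
install_info) signature referenced in the hunk above:

    # Sketch: how REQUIRED_MODULE_METADATA feeds the import-time checks.
    from nilearn.version import (REQUIRED_MODULE_METADATA,
                                 _import_module_with_version_check)

    for module_name, metadata in REQUIRED_MODULE_METADATA:
        if metadata['required_at_installation']:
            # Raises ImportError (with install_info appended to the message)
            # when the module is missing or older than min_version.
            _import_module_with_version_check(
                module_name=module_name,
                minimum_version=metadata['min_version'],
                install_info=metadata.get('install_info'))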