diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 0000000000..0ff888cf54
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,63 @@
+version: 2
+
+jobs:
+ build:
+ docker:
+ - image: circleci/python:3.6
+ environment:
+ DISTRIB: "conda"
+ PYTHON_VERSION: "3.6"
+ NUMPY_VERSION: "*"
+ SCIPY_VERSION: "*"
+ SCIKIT_LEARN_VERSION: "*"
+ MATPLOTLIB_VERSION: "*"
+
+ steps:
+ - checkout
+ # Get rid of existing virtualenvs on CircleCI as they conflict with conda.
+ # Trick found here:
+ # https://discuss.circleci.com/t/disable-autodetection-of-project-or-application-of-python-venv/235/10
+ - run: cd && rm -rf ~/.pyenv && rm -rf ~/virtualenvs
+ # We need to remove conflicting texlive packages.
+ - run: sudo -E apt-get -yq remove texlive-binaries --purge
+ # Install the packages required for the `make -C doc check` command to work.
+ - run: sudo -E apt-get -yq update
+ - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra
+ - restore_cache:
+ key: v1-packages+datasets-{{ .Branch }}
+ - run: wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh
+ - run: chmod +x ~/miniconda.sh && ~/miniconda.sh -b
+ - run:
+ name: Setup conda path in env variables
+ command: |
+ echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV
+ - run:
+ name: Create conda env
+ command: |
+ conda create -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \
+ flake8 lxml nose cython mkl sphinx coverage pillow -yq
+ conda install -n testenv nibabel nose-timer -c conda-forge -yq
+ - run:
+ name: Running CircleCI test (make html)
+ command: |
+ source activate testenv
+ pip install -e .
+ set -o pipefail && cd doc && make html-strict 2>&1 | tee ~/log.txt
+ no_output_timeout: 5h
+ - save_cache:
+ key: v1-packages+datasets-{{ .Branch }}
+ paths:
+ - $HOME/nilearn_data
+ - $HOME/miniconda3
+
+ - store_artifacts:
+ path: doc/_build/html
+ - store_artifacts:
+ path: coverage
+ - store_artifacts:
+ path: $HOME/log.txt
+ destination: log.txt
+
+
+
+
diff --git a/.gitignore b/.gitignore
index e8abe21658..4beefc864a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,3 +31,7 @@ tags
*.nt.bz2
*.tar.gz
*.tgz
+
+.idea/
+
+doc/themes/nilearn/static/jquery.js
\ No newline at end of file
diff --git a/.mailmap b/.mailmap
index fd860f4f0d..e0b2d2bbb3 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1,39 +1,43 @@
-Alexandre Abraham
-Alexandre Abraham
-Alexandre Gramfort
+Aina Frau Pascual
+Alexandre Abadie
+Alexandre Abraham
+Alexandre Gramfort
Alexandre Savio
+Arthur Mensch
Ben Cipollini
Bertrand Thirion
-Chris Filo Gorgolewski
+Chris Filo Gorgolewski
Danilo Bzdok
+Demian Wassermann
+Dimitri Papadopoulos Orfanos
Elvis Dohmatob
Fabian Pedregosa
-Fabian Pedregosa
-Fabian Pedregosa
-Gael Varoquaux
-GaelVaroquaux
Gael Varoquaux
-Jan Margeta
+Jan Margeta
Jaques Grobler
Jason Gors
+Jona Sassenhagen
Jean Kossaifi
-Jean Kossaifi
+Jean Remi King
+Jeff Chiang
+Julia Huntenburg
+J Necus
+Kamalakar Daddy
Konstantin Shmelkov
Loïc Estève
+Martin Perez-Guevara
Matthias Ekman
+Mehdi Rahim
Mehdi Rahim
-Mehdi Rahim
Michael Eickenberg
+Michael Hanke
Michael Waskom
-Philippe Gervais
+Moritz Boos
+Moritz Boos
+Óscar Nájera
+Philippe Gervais
Ronald Phlypo
-Salma Bougacha
+Salma Bougacha
Vincent Michel
Virgile Fritsch
-Yannick Schwartz
-schwarty
Yannick Schwartz
-Óscar Nájera
-Kamalakar Daddy
-Fabian Pedregosa
-Fabian Pedregosa
diff --git a/.travis.yml b/.travis.yml
index 2057fe42d2..4d36ea38c7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,3 +1,6 @@
+sudo: required
+dist: xenial
+
language: python
virtualenv:
@@ -6,54 +9,47 @@ virtualenv:
env:
global:
- TEST_RUN_FOLDER="/tmp" # folder where the tests are run from
- matrix:
- # Ubuntu 14.04 versions
- - DISTRIB="conda" PYTHON_VERSION="2.7"
- NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.13.3"
- SCIKIT_LEARN_VERSION="0.14.1" MATPLOTLIB_VERSION="1.3.1"
- # Ubuntu 14.04 versions without matplotlib
- - DISTRIB="conda" PYTHON_VERSION="2.7"
- NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.13.3"
- SCIKIT_LEARN_VERSION="0.14.1"
- - DISTRIB="neurodebian" PYTHON_VERSION="2.7"
- # Trying to get as close to the minimum required versions while
- # still having the package version available through conda
- - DISTRIB="conda" PYTHON_VERSION="2.6"
- NUMPY_VERSION="1.6.2" SCIPY_VERSION="0.11.0"
- SCIKIT_LEARN_VERSION="0.13" MATPLOTLIB_VERSION="1.1.1"
- NIBABEL_VERSION="1.1.0"
+
+matrix:
+ # Do not wait for the allowed_failures entry to finish before
+ # setting the status
+ fast_finish: true
+ allow_failures:
+ # allow_failures seems to be keyed on the python version
+ - python: 2.7
+ include:
+ # Oldest supported versions (with neurodebian)
+ - env: DISTRIB="conda" PYTHON_VERSION="2.7"
+ NUMPY_VERSION="1.11.2" SCIPY_VERSION="0.17"
+ SCIKIT_LEARN_VERSION="0.18" MATPLOTLIB_VERSION="1.5.1"
+ PANDAS_VERSION="0.18.0" NIBABEL_VERSION="2.0.2" COVERAGE="true"
+ # Oldest supported versions without matplotlib
+ - env: DISTRIB="conda" PYTHON_VERSION="2.7"
+ NUMPY_VERSION="1.11.2" SCIPY_VERSION="0.17"
+ SCIKIT_LEARN_VERSION="0.18"
+ # Fake Ubuntu Xenial (Travis doesn't support Xenial yet)
+ - env: DISTRIB="conda" PYTHON_VERSION="2.7"
+ NUMPY_VERSION="1.13" SCIPY_VERSION="0.19"
+ SCIKIT_LEARN_VERSION="0.18.1"
+ NIBABEL_VERSION="2.0.2"
# Python 3.4 with intermediary versions
- - DISTRIB="conda" PYTHON_VERSION="3.4"
- NUMPY_VERSION="1.8" SCIPY_VERSION="0.14"
- SCIKIT_LEARN_VERSION="0.15" MATPLOTLIB_VERSION="1.4"
+ - env: DISTRIB="conda" PYTHON_VERSION="3.4"
+ NUMPY_VERSION="1.11.2" SCIPY_VERSION="0.17"
+ SCIKIT_LEARN_VERSION="0.18" MATPLOTLIB_VERSION="1.5.1"
# Most recent versions
- - DISTRIB="conda" PYTHON_VERSION="3.5"
- NUMPY_VERSION="*" SCIPY_VERSION="*"
- SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true"
-
-install:
- - source continuous_integration/install.sh
-
-before_script:
- - make clean
-
-script:
- - python continuous_integration/show-python-packages-versions.py
- # Copy setup.cfg to TEST_RUN_FOLDER where we are going to run the tests from
- # Mainly for nose config settings
- - cp setup.cfg "$TEST_RUN_FOLDER"
- # We want to back out of the current working directory to make
- # sure we are using nilearn installed in site-packages rather
- # than the one from the current working directory
- # Parentheses (run in a subshell) are used to leave
- # the current directory unchanged
- - (cd "$TEST_RUN_FOLDER" && make -f $OLDPWD/Makefile test-code)
- - test "$MATPLOTLIB_VERSION" == "" || make test-doc
-
-after_success:
- # Ignore coveralls failures as the coveralls server is not very reliable
- # but we don't want travis to report a failure in the github UI just
- # because the coverage report failed to be published.
- # coveralls need to be run from the git checkout
- # so we need to copy the coverage results from TEST_RUN_FOLDER
- - if [[ "$COVERAGE" == "true" ]]; then cp "$TEST_RUN_FOLDER/.coverage" .; coveralls || echo "failed"; fi
+ - env: DISTRIB="conda" PYTHON_VERSION="3.5"
+ NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*"
+ SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true"
+ LXML_VERSION="*"
+ # FLAKE8 linting on diff wrt common ancestor with upstream/master
+ # Note: the python value is only there to trigger allow_failures
+ - python: 2.7
+ env: DISTRIB="conda" PYTHON_VERSION="2.7" FLAKE8_VERSION="*" SKIP_TESTS="true"
+
+install: source continuous_integration/install.sh
+
+before_script: make clean
+
+script: source continuous_integration/test_script.sh
+
+after_success: source continuous_integration/after_success.sh
diff --git a/AUTHORS.rst b/AUTHORS.rst
index 9575538012..c67751492c 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -16,13 +16,37 @@ particular:
* `Gael Varoquaux `_
* Philippe Gervais
* Michael Eickenberg
-* `Chris Filo Gorgolewski `_
* Danilo Bzdok
* Loïc Estève
+* Kamalakar Reddy Daddy
+* Elvis Dohmatob
+* Alexandre Abadie
+* Andres Hoyos Idrobo
+* Salma Bougacha
+* Mehdi Rahim
+* Sylvain Lanuzel
+* `Kshitij Chawla `_
+
+Many also contributed outside of Parietal, notably:
+
+* `Chris Filo Gorgolewski `_
* `Ben Cipollini `_
+* Julia Huntenburg
+* Martin Perez-Guevara
Thanks to M. Hanke and Y. Halchenko for data and packaging.
+Funding
+........
+
+Alexandre Abraham, Gael Varoquaux, Kamalakar Reddy Daddy, Loïc Estève,
+Mehdi Rahim, Philippe Gervais were paid by the `NiConnect
+`_
+project, funded by the French `Investissement d'Avenir
+`_.
+
+NiLearn is also supported by `DigiCosme `_ |digicosme logo|
+
.. _citing:
Citing nilearn
@@ -49,3 +73,7 @@ guarantee the future of the toolkit, if you use it, please cite it.
See the scikit-learn documentation on `how to cite
`_.
+
+.. |digicosme logo| image:: logos/digi-saclay-logo-small.png
+ :height: 25
+ :alt: DigiCosme Logo
\ No newline at end of file
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000000..5d6c4220a6
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,49 @@
+.. _contributing:
+
+============
+Contributing
+============
+
+This project is a community effort, and everyone is welcome to
+contribute.
+
+The project is hosted on https://github.com/nilearn/nilearn
+
+The best way to contribute and to help the project is to start working on known
+issues.
+See `Easy issues `_ to get
+started.
+
+Submitting a bug report
+=======================
+
+In case you experience issues using this package, do not hesitate to submit a
+ticket to the
+`Bug Tracker `_. You are
+also welcome to post feature requests or pull requests.
+
+.. _git_repo:
+
+Retrieving the latest code
+==========================
+
+We use `Git `_ for version control and
+`GitHub `_ for hosting our main repository. If you are
+new to GitHub and don't know how to work with it, please first
+have a look at `this `_ to get the basics.
+
+
+You can check out the latest sources with the command::
+
+ git clone git://github.com/nilearn/nilearn.git
+
+or if you have write privileges::
+
+ git clone git@github.com:nilearn/nilearn.git
+
+Coding guidelines
+=================
+
+Nilearn follows the coding conventions used by scikit-learn. `Please read them
+`_
+before you start implementing your changes.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000000..d6af1ad4eb
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,3 @@
+include AUTHORS.rst
+include LICENSE
+include README.rst
diff --git a/Makefile b/Makefile
index 97cebc5699..15d6f8d4dd 100644
--- a/Makefile
+++ b/Makefile
@@ -13,6 +13,7 @@ all: clean test doc-noplot
clean-pyc:
find . -name "*.pyc" | xargs rm -f
+ find . -name "__pycache__" | xargs rm -rf
clean-so:
find . -name "*.so" | xargs rm -f
@@ -66,5 +67,3 @@ doc:
pdf:
make -C doc pdf
-install:
- cd doc && make install
diff --git a/README.rst b/README.rst
index 952a089154..7885ed878b 100644
--- a/README.rst
+++ b/README.rst
@@ -8,8 +8,8 @@
:target: https://ci.appveyor.com/project/nilearn-ci/nilearn
:alt: AppVeyor Build Status
-.. image:: https://coveralls.io/repos/nilearn/nilearn/badge.svg?branch=master
- :target: https://coveralls.io/r/nilearn/nilearn
+.. image:: https://codecov.io/gh/nilearn/nilearn/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/nilearn/nilearn
nilearn
=======
@@ -25,7 +25,7 @@ This work is made available by a community of people, amongst which
the INRIA Parietal Project Team and the scikit-learn folks, in particular
P. Gervais, A. Abraham, V. Michel, A.
Gramfort, G. Varoquaux, F. Pedregosa, B. Thirion, M. Eickenberg, C. F. Gorgolewski,
-D. Bzdok, L. Estève and B. Cipollini.
+D. Bzdok, L. Esteve and B. Cipollini.
Important links
===============
@@ -38,15 +38,15 @@ Dependencies
The required dependencies to use the software are:
-* Python >= 2.6,
+* Python >= 2.7,
* setuptools
-* Numpy >= 1.6.1
-* SciPy >= 0.9
-* Scikit-learn >= 0.13 (Some examples require 0.14 to run)
-* Nibabel >= 1.1.0
+* Numpy >= 1.11
+* SciPy >= 0.17
+* Scikit-learn >= 0.18
+* Nibabel >= 2.0.2
If you are using nilearn plotting functionalities or running the
-examples, matplotlib >= 1.1.1 is required.
+examples, matplotlib >= 1.5.1 is required.
If you want to run the tests, you need nose >= 1.2.1 and coverage >= 3.6.
@@ -66,16 +66,5 @@ http://nilearn.github.io/introduction.html#installation.
Development
===========
-Code
-----
-
-GIT
-~~~
-
-You can check the latest sources with the command::
-
- git clone git://github.com/nilearn/nilearn
-
-or if you have write privileges::
-
- git clone git@github.com:nilearn/nilearn
+Detailed instructions on how to contribute are available at
+http://nilearn.github.io/contributing.html
diff --git a/appveyor.yml b/appveyor.yml
index ae181aff5f..c62ba4e777 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -9,8 +9,8 @@ environment:
PYTHON_VERSION: "2.7.x"
PYTHON_ARCH: "64"
- - PYTHON: "C:\\Miniconda3-x64"
- PYTHON_VERSION: "3.4.x"
+ - PYTHON: "C:\\Miniconda35-x64"
+ PYTHON_VERSION: "3.5.x"
PYTHON_ARCH: "64"
install:
@@ -24,6 +24,10 @@ install:
- "python -c \"import struct; print(struct.calcsize('P') * 8)\""
# Install prebuilt dependencies from conda
+ # A temporary workaround for failures related to matplotlib 2.1.0.
+ # See the similar fix made for Travis and CircleCI:
+ # https://github.com/nilearn/nilearn/pull/1525
+ # Should be removed once matplotlib 2.1.1 is released.
- "conda install pip numpy scipy scikit-learn nose wheel matplotlib -y -q"
# Install other nilearn dependencies
diff --git a/circle.yml b/circle.yml
deleted file mode 100644
index 2426370514..0000000000
--- a/circle.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-dependencies:
- cache_directories:
- - "~/nilearn_data"
-
- pre:
- # We need to remove conflicting texlive packages.
- - sudo -E apt-get -yq remove texlive-binaries --purge
- # Installing required packages for `make -C doc check command` to work.
- - sudo -E apt-get -yq update
- - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra
-
- override:
- - pip install --upgrade pip
- # Installing sphinx 1.2.3 to work-around autosummary issues in 1.3
- # They should be fixed in sphinx 1.4
- - pip install sphinx==1.2.3 matplotlib coverage Pillow
- - pip install scipy
- - pip install scikit-learn
- - pip install nose-timer
- - pip install -e .
- # we need to do this here so the datasets will be cached
- # pipefail is necessary to propagate exit codes
- - set -o pipefail && cd doc && make html-strict 2>&1 | tee ~/log.txt
-
-test:
- override:
- - make clean test test-coverage
- # workaround - make html returns 0 even if examples fail to build
- # (see https://github.com/sphinx-gallery/sphinx-gallery/issues/45)
- - cat ~/log.txt && if grep -q "Traceback (most recent call last):" ~/log.txt; then false; else true; fi
-
-general:
- artifacts:
- - "doc/_build/html"
- - "coverage"
- - "~/log.txt"
diff --git a/continuous_integration/after_success.sh b/continuous_integration/after_success.sh
new file mode 100755
index 0000000000..a7475f623e
--- /dev/null
+++ b/continuous_integration/after_success.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -e
+
+# Ignore codecov failures because we don't want travis to report a failure
+# in the github UI just because the coverage report failed to be published.
+# codecov needs to be run from the git checkout
+# so we need to copy the coverage results from TEST_RUN_FOLDER
+if [[ "$SKIP_TESTS" != "true" && "$COVERAGE" == "true" ]]; then
+ cp "$TEST_RUN_FOLDER/.coverage" .
+ codecov || echo "Codecov upload failed"
+fi
diff --git a/continuous_integration/flake8_diff.sh b/continuous_integration/flake8_diff.sh
new file mode 100755
index 0000000000..b91e3bd13d
--- /dev/null
+++ b/continuous_integration/flake8_diff.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+set -e
+
+PROJECT=nilearn/nilearn
+PROJECT_URL=https://github.com/$PROJECT.git
+
+echo "Remotes:"
+git remote --verbose
+
+# Find the remote with the project name (upstream in most cases)
+REMOTE=$(git remote -v | grep $PROJECT | cut -f1 | head -1 || echo '')
+
+# Add a temporary remote if needed. For example this is necessary when
+# Travis is configured to run in a fork. In this case 'origin' is the
+# fork and not the reference repo we want to diff against.
+if [[ -z "$REMOTE" ]]; then
+ TMP_REMOTE=tmp_reference_upstream
+ REMOTE=$TMP_REMOTE
+ git remote add $REMOTE $PROJECT_URL
+fi
+
+if [[ "$TRAVIS" == "true" ]]; then
+ if [[ "$TRAVIS_PULL_REQUEST" == "false" ]]
+ then
+ # Travis does the git clone with a limited depth (50 at the time of
+ # writing). This may not be enough to find the common ancestor with
+ # $REMOTE/master so we unshallow the git checkout
+ git fetch --unshallow || echo "Unshallowing the git checkout failed"
+ else
+ # We want to fetch the code as it is in the PR branch and not
+ # the result of the merge into master. This way line numbers
+ # reported by Travis will match with the local code.
+ BRANCH_NAME=travis_pr_$TRAVIS_PULL_REQUEST
+ git fetch $REMOTE pull/$TRAVIS_PULL_REQUEST/head:$BRANCH_NAME
+ git checkout $BRANCH_NAME
+ fi
+fi
+
+
+echo -e '\nLast 2 commits:'
+echo '--------------------------------------------------------------------------------'
+git log -2 --pretty=short
+
+git fetch $REMOTE master
+REMOTE_MASTER_REF="$REMOTE/master"
+
+# Find common ancestor between HEAD and remotes/$REMOTE/master
+COMMIT=$(git merge-base @ $REMOTE_MASTER_REF) || \
+ echo "No common ancestor found for $(git show @ -q) and $(git show $REMOTE_MASTER_REF -q)"
+
+if [[ -n "$TMP_REMOTE" ]]; then
+ git remote remove $TMP_REMOTE
+fi
+
+if [ -z "$COMMIT" ]; then
+ exit 1
+fi
+
+echo -e "\nCommon ancestor between HEAD and $REMOTE_MASTER_REF is:"
+echo '--------------------------------------------------------------------------------'
+git show --no-patch $COMMIT
+
+echo -e '\nRunning flake8 on the diff in the range'\
+ "$(git rev-parse --short $COMMIT)..$(git rev-parse --short @)" \
+ "($(git rev-list $COMMIT.. | wc -l) commit(s)):"
+echo '--------------------------------------------------------------------------------'
+
+# Conservative approach: diff without context so that code that was
+# not changed does not create failures
+git diff --unified=0 $COMMIT | flake8 --diff --show-source
+echo -e "No problem detected by flake8\n"
diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh
index da8f427a56..512cbdf2f3 100755
--- a/continuous_integration/install.sh
+++ b/continuous_integration/install.sh
@@ -27,7 +27,7 @@ create_new_venv() {
print_conda_requirements() {
# Echo a conda requirement string for example
- # "pip nose python='.7.3 scikit-learn=*". It has a hardcoded
+ # "pip nose python='2.7.3 scikit-learn=*". It has a hardcoded
# list of possible packages to install and looks at _VERSION
# environment variables to know whether to install a given package and
# if yes which version to install. For example:
@@ -35,7 +35,8 @@ print_conda_requirements() {
# - for scikit-learn, SCIKIT_LEARN_VERSION is used
TO_INSTALL_ALWAYS="pip nose"
REQUIREMENTS="$TO_INSTALL_ALWAYS"
- TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn"
+ TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn pandas \
+flake8 lxml"
for PACKAGE in $TO_INSTALL_MAYBE; do
# Capitalize package name and add _VERSION
PACKAGE_VERSION_VARNAME="${PACKAGE^^}_VERSION"
@@ -52,29 +53,35 @@ print_conda_requirements() {
}
create_new_conda_env() {
- # Deactivate the travis-provided virtual environment and setup a
- # conda-based environment instead
- deactivate
+ # Skip Travis-related code on CircleCI.
+ if [ -z $CIRCLECI ]; then
+ # Deactivate the travis-provided virtual environment and setup a
+ # conda-based environment instead
+ deactivate
+ fi
# Use the miniconda installer for faster download / install of conda
# itself
- wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh \
- -O miniconda.sh
- chmod +x miniconda.sh && ./miniconda.sh -b
- export PATH=/home/travis/miniconda2/bin:$PATH
- conda update --yes conda
+ wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \
+ -O ~/miniconda.sh
+ chmod +x ~/miniconda.sh && ~/miniconda.sh -b
+ export PATH=$HOME/miniconda3/bin:$PATH
+ echo $PATH
+ conda update --quiet --yes conda
# Configure the conda environment and put it in the path using the
# provided versions
REQUIREMENTS=$(print_conda_requirements)
echo "conda requirements string: $REQUIREMENTS"
- conda create -n testenv --yes $REQUIREMENTS
+ conda create -n testenv --quiet --yes $REQUIREMENTS
source activate testenv
if [[ "$INSTALL_MKL" == "true" ]]; then
# Make sure that MKL is used
- conda install --yes mkl
- else
+ conda install --quiet --yes mkl
+ elif [[ -z $CIRCLECI ]]; then
+ # Travis doesn't use MKL but CircleCI does, to speed up example
+ # generation in the html documentation.
# Make sure that MKL is not used
conda remove --yes --features mkl || echo "MKL not installed"
fi
@@ -98,12 +105,18 @@ elif [[ "$DISTRIB" == "conda" ]]; then
fi
else
- echo "Unrecognized distribution ($DISTRIB); cannot setup travis environment."
+ echo "Unrecognized distribution ($DISTRIB); cannot setup CI environment."
exit 1
fi
+pip install psutil memory_profiler
+
if [[ "$COVERAGE" == "true" ]]; then
- pip install coverage coveralls
+ pip install codecov
fi
-python setup.py install
+# numpy not installed when skipping the tests so we do not want to run
+# setup.py install
+if [[ "$SKIP_TESTS" != "true" ]]; then
+ python setup.py install
+fi
diff --git a/continuous_integration/test_script.sh b/continuous_integration/test_script.sh
new file mode 100755
index 0000000000..1dfa2578d1
--- /dev/null
+++ b/continuous_integration/test_script.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -e
+
+if [[ -n "$FLAKE8_VERSION" ]]; then
+ source continuous_integration/flake8_diff.sh
+fi
+
+if [[ "$SKIP_TESTS" != "true" ]]; then
+ python continuous_integration/show-python-packages-versions.py
+ # Copy setup.cfg to TEST_RUN_FOLDER where we are going to run the tests from
+ # Mainly for nose config settings
+ cp setup.cfg "$TEST_RUN_FOLDER"
+ # We want to back out of the current working directory to make
+ # sure we are using nilearn installed in site-packages rather
+ # than the one from the current working directory
+ # Parentheses (run in a subshell) are used to leave
+ # the current directory unchanged
+ (cd "$TEST_RUN_FOLDER" && make -f $OLDPWD/Makefile test-code)
+ test "$MATPLOTLIB_VERSION" == "" || make test-doc
+fi
diff --git a/doc/Makefile b/doc/Makefile
index 1d8127fef6..f5e4288c71 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -38,31 +38,41 @@ clean:
-rm -rf generated/*
-rm -rf modules/generated/*
+sym_links:
+ # Make sym-links to share the cache across various example
+ # directories
+ -cd ../examples/ && mkdir -p nilearn_cache
+ -cd ../examples/01_plotting/ && ln -sf ../nilearn_cache
+ -cd ../examples/02_decoding/ && ln -sf ../nilearn_cache
+ -cd ../examples/03_connectivity/ && ln -sf ../nilearn_cache
+ -cd ../examples/04_manipulating_images/ && ln -sf ../nilearn_cache
+ -cd ../examples/05_advanced/ && ln -sf ../nilearn_cache
+
force_html: force html
force:
find . -name \*.rst -exec touch {} \;
-html:
+html: sym_links
# These two lines make the build a bit more lengthy, and
# the embedding of images more robust
rm -rf $(BUILDDIR)/html/_images
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- touch $(BUILDDIR)/html .nojekyll
+ touch $(BUILDDIR)/html/.nojekyll
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-html-strict:
+html-strict: sym_links
# Build html documentation using a strict mode: Warnings are
# considered as errors.
make check
- touch $(BUILDDIR)/html .nojekyll
+ touch $(BUILDDIR)/html/.nojekyll
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
html-noplot:
$(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- touch $(BUILDDIR)/html .nojekyll
+ touch $(BUILDDIR)/html/.nojekyll
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
@@ -161,5 +171,6 @@ install:
cp -r html/* nilearn.github.io && \
cd nilearn.github.io && \
git add * && \
+ git add .nojekyll && \
git commit -a -m 'Make install' && \
git push
diff --git a/doc/authors.rst b/doc/authors.rst
new file mode 100644
index 0000000000..e122f914a8
--- /dev/null
+++ b/doc/authors.rst
@@ -0,0 +1 @@
+.. include:: ../AUTHORS.rst
diff --git a/doc/building_blocks/index.rst b/doc/building_blocks/index.rst
index d939521119..73aea4b47d 100644
--- a/doc/building_blocks/index.rst
+++ b/doc/building_blocks/index.rst
@@ -15,4 +15,5 @@ terms of data processing.
.. toctree::
manual_pipeline.rst
+ neurovault.rst
diff --git a/doc/building_blocks/manual_pipeline.rst b/doc/building_blocks/manual_pipeline.rst
index 2b3da743e7..610fba1785 100644
--- a/doc/building_blocks/manual_pipeline.rst
+++ b/doc/building_blocks/manual_pipeline.rst
@@ -40,13 +40,13 @@ example, we can download the data from the
`dataset.func` contains filenames referring to dataset files on the disk::
>>> list(sorted(dataset.keys())) # doctest: +SKIP
- ['anat', 'description', 'func', 'mask_face', 'mask_face_little', 'mask_house', 'mask_house_little', 'mask_vt', 'session_target']
+ ['anat', 'description', 'func', 'mask', 'mask_face', 'mask_face_little', 'mask_house', 'mask_house_little', 'mask_vt', 'session_target']
>>> dataset.func # doctest: +ELLIPSIS +SKIP
- ['.../haxby2001/subj1/bold.nii.gz']
+ ['.../haxby2001/subj2/bold.nii.gz']
Access supplementary information on the dataset:
- >>> print haxby_dataset['description'] # doctest: +SKIP
+ >>> print(haxby_dataset['description']) # doctest: +SKIP
The complete list of the data-downloading functions can be found in the
:ref:`reference documentation for the datasets `.
@@ -60,19 +60,24 @@ presenting different category of pictures to the subject (face, cat, ...)
and the goal of this experiment is to predict which category is presented
to the subjects from the brain activation.
-These conditions are presented as string into a CSV file. The numpy function
-`recfromcsv` is very useful to load this kind of data.
+These conditions are presented as strings in a CSV file. The `pandas
+`__ function
+`read_csv` is very useful to load this kind of data.
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # Load the behavioral labels
- :end-before: # Keep only data corresponding to faces or cats
+.. literalinclude:: ../../examples/plot_decoding_tutorial.py
+ :start-after: # We use pandas to load them in an array.
+ :end-before: ###########################################################################
+
+.. seealso::
+ * `pandas `_ is a very useful Python
+ library to load CSV files and process their data.
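+
+For reference, outside of the example script the loading step looks
+roughly like this (a minimal sketch, assuming the Haxby ``dataset``
+object from above; the label file is space-separated):
+
+    >>> import pandas as pd  # doctest: +SKIP
+    >>> # assuming `dataset` was returned by nilearn.datasets.fetch_haxby
+    >>> behavioral = pd.read_csv(dataset.session_target[0], sep=" ")  # doctest: +SKIP
+    >>> conditions = behavioral['labels']  # doctest: +SKIP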
-For example, we will now remove the *rest* condition from our dataset.
+For example, we will now consider only the conditions *cat* and *face* from our dataset.
This can be done as follows:
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # Keep only data corresponding to faces or cats
+.. literalinclude:: ../../examples/plot_decoding_tutorial.py
+ :start-after: # mask of the samples belonging to the condition.
:end-before: ###########################################################################
@@ -116,8 +121,8 @@ We use masking to convert 4D data (i.e. 3D volume over time) into 2D data
Applying a mask
................
-.. figure:: ../auto_examples/images/sphx_glr_plot_haxby_simple_002.png
- :target: ../auto_examples/plot_haxby_simple.html
+.. figure:: ../auto_examples/images/sphx_glr_plot_decoding_tutorial_002.png
+ :target: ../auto_examples/plot_decoding_tutorial.html
:align: right
:scale: 30%
@@ -130,8 +135,8 @@ The :class:`NiftiMasker` can be seen as a *tube* that transforms data
from 4D images to 2D arrays, but first it needs to 'fit' this data in
order to learn simple parameters from it, such as its shape:
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # Prepare the data: apply the mask
+.. literalinclude:: ../../examples/plot_decoding_tutorial.py
+ :start-after: # Now we use the NiftiMasker.
:end-before: ###########################################################################
@@ -158,9 +163,9 @@ scikit-learn, using its `fit`, `predict` or `transform` methods.
Here, we use scikit-learn Support Vector Classification to learn how to
predict the category of picture seen by the subject:
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # The decoding
- :end-before: ###########################################################################
+.. literalinclude:: ../../examples/plot_decoding_tutorial.py
+ :start-after: # We first fit it on the data
+ :end-before: # Let's measure the error rate:
We will not detail it here since there is a very good documentation about it in the
@@ -176,8 +181,8 @@ masked but also the results of an algorithm), the masker is clever and
can take data of dimension 1D (resp. 2D) to convert it back to 3D
(resp. 4D).
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # Retrieve the discriminating weights and save them
+.. literalinclude:: ../../examples/plot_decoding_tutorial.py
+ :start-after: # For this, we can call inverse_transform on the NiftiMasker:
:end-before: ###########################################################################
Here we want to see the discriminating weights of some voxels.
@@ -189,11 +194,11 @@ Again the visualization code is simple. We can use an fMRI slice as a
background and plot the weights. Brighter points have a higher
discriminating weight.
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # Visualize the discriminating weights over the mean EPI
+.. literalinclude:: ../../examples/plot_decoding_tutorial.py
+ :start-after: # We can plot the weights, using the subject's anatomical as a background
:end-before: ###########################################################################
-.. figure:: ../auto_examples/images/sphx_glr_plot_haxby_simple_001.png
- :target: ../auto_examples/plot_haxby_simple.html
+.. figure:: ../auto_examples/images/sphx_glr_plot_decoding_tutorial_002.png
+ :target: ../auto_examples/plot_decoding_tutorial.html
:align: center
:scale: 50%
diff --git a/doc/building_blocks/neurovault.rst b/doc/building_blocks/neurovault.rst
new file mode 100644
index 0000000000..719b89430b
--- /dev/null
+++ b/doc/building_blocks/neurovault.rst
@@ -0,0 +1,220 @@
+.. _neurovault:
+
+===========================================================
+Downloading statistical maps from the Neurovault repository
+===========================================================
+
+Neurovault is a public repository of unthresholded statistical maps,
+parcellations, and atlases of the human brain. You can read about it
+and browse the images it contains at http://www.neurovault.org. You
+can download maps from Neurovault with Nilearn.
+
+Neurovault was introduced in [1]_.
+
+Neurovault contains collections of images. We can get information
+about each collection - such as who uploaded it, a link to a paper, a
+description - and about each image - the modality, number of subjects,
+some tags, and more. The nilearn downloaders will fetch this metadata
+and the images themselves.
+
+Nilearn provides two functions to download statistical maps from
+Neurovault.
+
+Specific images or collections
+------------------------------
+
+In the simplest case, you already know the "id" of the collections or
+images you want. Maybe you liked a paper and went to
+http://www.neurovault.org looking for the data. Once on the relevant
+collection's webpage, you can click 'Details' to see its id
+(and more). You can then download it using
+:func:`nilearn.datasets.fetch_neurovault_ids`:
+
+ >>> from nilearn.datasets import fetch_neurovault_ids
+ >>> brainpedia = fetch_neurovault_ids(collection_ids=[1952]) # doctest: +SKIP
+
+Or if you want some images in particular, rather than whole
+collections:
+
+ >>> brainpedia_subset = fetch_neurovault_ids(image_ids=[32015, 32016]) # doctest: +SKIP
+
+Selection filters
+-----------------
+
+You may not know which collections or images you want. For example,
+you may be conducting a meta-analysis and want to grab all the images
+that are related to "language". Using
+:func:`nilearn.datasets.fetch_neurovault`, you can fetch all the images and
+collections that match your criteria - you don't need to know their
+ids.
+
+The filters are applied to images' and collections' metadata.
+
+You can describe filters with dictionaries. Each collection's
+metadata is compared to the parameter ``collection_terms``. Collections
+for which ``collection_metadata['key'] == value`` is not ``True`` for
+every key, value pair in ``collection_terms`` will be discarded. We use
+``image_terms`` in the same way to filter images.
+
+For example, many images on Neurovault have a "modality" field in their
+metadata. BOLD images should have it set to "fMRI-BOLD". We can ask for BOLD
+images only:
+
+ >>> bold = fetch_neurovault(image_terms={'modality': 'fMRI-BOLD'}, # doctest: +SKIP
+ ... max_images=7) # doctest: +SKIP
+
+Here we set the max_images parameter to 7, so that you can try this snippet
+without waiting for a long time. To get all the images which match your
+filters, you should set max_images to ``None``, which means "get as many
+images as possible". The default for max_images is 100.
+
+The default values for the ``collection_terms`` and ``image_terms`` parameters
+filter out empty collections, and exclude an image if one of the following is
+true:
+
+ - it is not in MNI space.
+ - its metadata field "is_valid" is cleared.
+ - it is thresholded.
+ - its map type is one of "ROI/mask", "anatomical", or "parcellation".
+ - its image type is "atlas".
+
+Extra keyword arguments are treated as additional image filters, so if we want
+to keep the default filters, and add the requirement that the modality should
+be "fMRI-BOLD", we can write:
+
+ >>> bold = fetch_neurovault(modality='fMRI-BOLD', max_images=7) # doctest: +SKIP
+
+
+Sometimes the selection criteria are more complex than a simple
+comparison to a single value. For example, we may also be interested
+in CBF and CBV images. In ``nilearn``, the ``dataset.neurovault`` module
+provides ``IsIn``, which makes this easy:
+
+ >>> from nilearn.datasets import neurovault
+ >>> fmri = fetch_neurovault( # doctest: +SKIP
+ ... modality=neurovault.IsIn('fMRI-BOLD', 'fMRI-CBF', 'fMRI-CBV'), # doctest: +SKIP
+ ... max_images=100) # doctest: +SKIP
+
+We could also have used ``Contains``:
+
+ >>> fmri = fetch_neurovault( # doctest: +SKIP
+ ... modality=neurovault.Contains('fMRI'), # doctest: +SKIP
+ ... max_images=7) # doctest: +SKIP
+
+If we need regular expressions, we can also use ``Pattern``:
+
+ >>> fmri = fetch_neurovault( # doctest: +SKIP
+ ... modality=neurovault.Pattern('fmri(-.*)?', neurovault.re.IGNORECASE), # doctest: +SKIP
+ ... max_images=7) # doctest: +SKIP
+
+The complete list of such special values available in
+``nilearn.datasets.neurovault`` is:
+``IsNull``, ``NotNull``, ``NotEqual``, ``GreaterOrEqual``,
+``GreaterThan``, ``LessOrEqual``, ``LessThan``, ``IsIn``, ``NotIn``,
+``Contains``, ``NotContains``, ``Pattern``.
+
+You can also use ``ResultFilter`` to easily express boolean logic
+(AND, OR, XOR, NOT).
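+
+As a minimal sketch, assuming ``ResultFilter`` accepts metadata fields
+as keyword arguments (like the dictionary filters above) and exposes
+the boolean operations as methods; ``analysis_level`` is an assumed
+metadata field, used only for illustration:
+
+    >>> nv_filter = neurovault.ResultFilter(modality='fMRI-BOLD').AND(  # doctest: +SKIP
+    ...     neurovault.ResultFilter(analysis_level='group'))  # doctest: +SKIP
+    >>> bold_group = fetch_neurovault(image_filter=nv_filter,  # doctest: +SKIP
+    ...                               image_terms={}, max_images=7)  # doctest: +SKIP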
+
+
+**If you need more complex filters**, and using dictionaries as shown above is
+not convenient, you can express filters as functions. The parameter
+``collection_filter`` should be a callable, which will be called once for each
+collection. The sole argument will be a dictionary containing the metadata for
+the collection. The filter should return ``True`` if the collection is to be
+kept, and ``False`` if it is to be discarded. ``image_filter`` does the same
+job for images. The default values for these parameters don't filter out
+anything.
+Using a filter rather than a dictionary, the first example becomes:
+
+ >>> bold = fetch_neurovault( # doctest: +SKIP
+ ... image_filter=lambda meta: meta.get('modality') == 'fMRI-BOLD', # doctest: +SKIP
+ ... image_terms={}, max_images=7) # doctest: +SKIP
+
+.. note::
+
+ Even if you specify a filter as a function, the default filters for
+ ``image_terms`` and ``collection_terms`` still apply; pass an empty
+ dictionary if you want to disable them. Without ``image_terms={}`` in the
+ call above, parcellations, images not in MNI space, etc. would still be
+ filtered out.
+
+
+The example above can be rewritten using dictionaries, but in some cases you
+will need to use ``image_filter`` or ``collection_filter``. For example,
+suppose that for some weird reason you only want images that don't have too
+many metadata fields - say, an image should only be kept if its metadata has
+less than 50 fields. This cannot be done by simply comparing each key in a
+metadata dictionary to a required value, so we need to write our own filter:
+
+ >>> small_meta_images = fetch_neurovault(image_filter=lambda meta: len(meta) < 50, # doctest: +SKIP
+ ... max_images=7) # doctest: +SKIP
+
+
+Output
+------
+
+Both functions return a dict-like object which exposes its items as
+attributes.
+
+It contains:
+
+ - ``images``, the paths to downloaded files.
+ - ``images_meta``, the metadata for the images in a list of
+ dictionaries.
+ - ``collections_meta``, the metadata for the collections.
+ - ``description``, a short description of the Neurovault dataset.
+
+Note to ``pandas`` users: passing ``images_meta`` or ``collections_meta``
+to the ``DataFrame`` constructor yields the expected result, with
+images (or collections) as rows and metadata fields as columns.
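+
+A minimal sketch of that use, assuming the ``bold`` result fetched
+above:
+
+    >>> import pandas as pd  # doctest: +SKIP
+    >>> images_df = pd.DataFrame(bold.images_meta)  # doctest: +SKIP
+    >>> collections_df = pd.DataFrame(bold.collections_meta)  # doctest: +SKIP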
+
+Neurosynth annotations
+----------------------
+
+It is also possible to ask Neurosynth to annotate the maps found on
+Neurovault. Neurosynth is a platform for large-scale, automated
+synthesis of fMRI data. It can be used to perform decoding. You can
+learn more about Neurosynth at http://www.neurosynth.org.
+
+Neurosynth was introduced in [2]_.
+
+If you set the parameter ``fetch_neurosynth_words`` when calling
+``fetch_neurovault`` or ``fetch_neurovault_ids``, we will also
+download the annotations for the resulting images. They will be stored
+as json files on your disk. The result will also contain (unless you
+clear the ``vectorize_words`` parameter to save computation time):
+
+ - ``vocabulary``, a list of words
+ - ``word_frequencies``, the weight of the words returned by
+ neurosynth.org for each image, such that the weight of word
+ ``vocabulary[j]`` for the image found in ``images[i]`` is
+ ``word_frequencies[i, j]`` (see the sketch below)
+
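+For instance, a sketch of listing the five strongest words for the
+first image, using only the names described above:
+
+    >>> import numpy as np  # doctest: +SKIP
+    >>> nv = fetch_neurovault(max_images=7, fetch_neurosynth_words=True)  # doctest: +SKIP
+    >>> # indices of the five largest weights for the first image
+    >>> strongest = np.argsort(nv.word_frequencies[0])[::-1][:5]  # doctest: +SKIP
+    >>> [nv.vocabulary[j] for j in strongest]  # doctest: +SKIP
+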
+Examples using Neurovault
+-------------------------
+
+ - :ref:`sphx_glr_auto_examples_05_advanced_plot_ica_neurovault.py`
+ Download images from Neurovault and extract some networks
+ using ICA.
+
+ - :ref:`sphx_glr_auto_examples_05_advanced_plot_neurovault_meta_analysis.py`
+ Meta-analysis of "Stop minus go" studies available on
+ Neurovault.
+
+References
+----------
+
+.. [1] Gorgolewski KJ, Varoquaux G, Rivera G, Schwartz Y, Ghosh SS,
+ Maumet C, Sochat VV, Nichols TE, Poldrack RA, Poline J-B,
+ Yarkoni T and Margulies DS (2015) NeuroVault.org: a web-based
+ repository for collecting and sharing unthresholded
+ statistical maps of the human brain. Front. Neuroinform. 9:8.
+ doi: 10.3389/fninf.2015.00008
+
+.. [2] Yarkoni, Tal, Russell A. Poldrack, Thomas E. Nichols, David
+ C. Van Essen, and Tor D. Wager. "Large-scale automated synthesis
+ of human functional neuroimaging data." Nature methods 8, no. 8
+ (2011): 665-670.
+
+
diff --git a/doc/conf.py b/doc/conf.py
index 87278cc30b..f95d6f2abf 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -15,6 +15,18 @@
import sys
import os
import shutil
+import sphinx
+from distutils.version import LooseVersion
+
+# jquery is included in plotting package data because it is needed for
+# interactive plots. It is also needed by the documentation, so we copy
+# it to the themes/nilearn/static folder.
+shutil.copy(
+ os.path.join(os.path.dirname(os.path.dirname(__file__)),
+ 'nilearn', 'plotting', 'data', 'js', 'jquery.min.js'),
+ os.path.join(os.path.dirname(__file__), 'themes', 'nilearn', 'static',
+ 'jquery.js'))
+
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
@@ -26,20 +38,17 @@
# We also add the directory just above to enable local imports of nilearn
sys.path.insert(0, os.path.abspath('..'))
-try:
- shutil.copy('../AUTHORS.rst', '.')
-except IOError:
- # When nose scans this file, it is not in the right working
- # directory, and thus the line above fails
- pass
-
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
- 'sphinx.ext.pngmath', 'sphinx.ext.intersphinx',
- 'numpy_ext.numpydoc',
+extensions = ['sphinx.ext.autodoc',
+ 'sphinx.ext.autosummary',
+ ('sphinx.ext.imgmath' # only available for sphinx >= 1.4
+ if sphinx.version_info[:2] >= (1, 4)
+ else 'sphinx.ext.pngmath'),
+ 'sphinx.ext.intersphinx',
+ 'numpydoc.numpydoc',
'sphinx_gallery.gen_gallery',
]
@@ -91,7 +100,10 @@
# List of documents that shouldn't be included in the build.
#unused_docs = []
-exclude_patterns = ['tune_toc.rst', ]
+exclude_patterns = ['tune_toc.rst',
+ 'includes/big_toc_css.rst',
+ 'includes/bigger_toc_css.rst',
+ ]
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
@@ -228,11 +240,6 @@
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
-latex_preamble = r"""
-\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
-\let\oldfootnote\footnote
-\def\footnote#1{\oldfootnote{\small #1}}
-"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
@@ -243,8 +250,24 @@
'printindex': '',
}
+if LooseVersion(sphinx.__version__) < LooseVersion('1.5'):
+ latex_preamble = r"""
+ \usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
+ \let\oldfootnote\footnote
+ \def\footnote#1{\oldfootnote{\small #1}}
+ """
+else:
+ latex_elements['preamble'] = r"""
+ \usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
+ \let\oldfootnote\footnote
+ \def\footnote#1{\oldfootnote{\small #1}}
+ """
+
+
# If false, no module index is generated.
-latex_use_modindex = False
+if LooseVersion(sphinx.__version__) < LooseVersion('1.5'):
+ latex_use_modindex = False
+
latex_domain_indices = False
# Show the page numbers in the references
@@ -255,7 +278,7 @@
trim_doctests_flags = True
-_python_doc_base = 'http://docs.python.org/2.7'
+_python_doc_base = 'http://docs.python.org/3.6'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
@@ -263,12 +286,12 @@
'http://docs.scipy.org/doc/numpy': None,
'http://docs.scipy.org/doc/scipy/reference': None,
'http://matplotlib.org/': None,
- 'http://scikit-learn.org/stable': None,
+ 'http://scikit-learn.org/0.18': None,
'http://nipy.org/nibabel': None,
+ 'http://pandas.pydata.org': None,
#'http://scikit-image.org/docs/0.8.0/': None,
#'http://docs.enthought.com/mayavi/mayavi/': None,
#'http://statsmodels.sourceforge.net/': None,
- #'http://pandas.pydata.org': None,
}
extlinks = {
@@ -278,15 +301,23 @@
sphinx_gallery_conf = {
'doc_module' : 'nilearn',
+ 'backreferences_dir': os.path.join('modules', 'generated'),
'reference_url' : {
'nilearn': None,
'matplotlib': 'http://matplotlib.org',
- 'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
- 'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
+ 'numpy': 'http://docs.scipy.org/doc/numpy-1.11.0',
+ 'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference',
'nibabel': 'http://nipy.org/nibabel',
- 'sklearn': 'http://scikit-learn.org/stable'}
+ 'sklearn': 'http://scikit-learn.org/0.18/',
+ 'pandas': 'http://pandas.pydata.org'}
}
+# Get rid of spurious warnings due to some interaction between
+# autosummary and numpydoc. See
+# https://github.com/phn/pytpm/issues/3#issuecomment-12133978 for more
+# details
+numpydoc_show_class_members = False
+
def touch_example_backreferences(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
@@ -299,6 +330,8 @@ def touch_example_backreferences(app, what, name, obj, options, lines):
# Add the 'copybutton' javascript, to hide/show the prompt in code
# examples
+
+
def setup(app):
app.add_javascript('copybutton.js')
app.connect('autodoc-process-docstring', touch_example_backreferences)
diff --git a/doc/connectivity/connectome_extraction.rst b/doc/connectivity/connectome_extraction.rst
index 1c79848eee..1c523ab4b2 100644
--- a/doc/connectivity/connectome_extraction.rst
+++ b/doc/connectivity/connectome_extraction.rst
@@ -33,7 +33,7 @@ covariance (or correlation) matrix for signals from different brain
regions. The same information can be represented as a weighted graph,
vertices being brain regions, weights on edges being covariances
(gaussian graphical model). However, coefficients in a covariance matrix
-reflects direct as well as indirect connections. Covariance matrices form
+reflect direct as well as indirect connections. Covariance matrices form
very dense brain connectomes, and it is rather difficult to extract from
them only the direct connections between two regions.
@@ -68,19 +68,19 @@ of the estimator::
>>> estimator.precision_ # doctest: +SKIP
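+
+For context, a minimal sketch of fitting such an estimator on extracted
+time series (``GraphLassoCV`` is the scikit-learn 0.18 name, renamed
+``GraphicalLassoCV`` in later releases; ``time_series`` is assumed to
+be a samples-by-regions array produced by a masker):
+
+ >>> from sklearn.covariance import GraphLassoCV # doctest: +SKIP
+ >>> estimator = GraphLassoCV() # doctest: +SKIP
+ >>> estimator.fit(time_series) # doctest: +SKIP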
-.. |covariance| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_001.png
- :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html
+.. |covariance| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_001.png
+ :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html
:scale: 40
-.. |precision| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_003.png
- :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html
+.. |precision| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_003.png
+ :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html
:scale: 40
-.. |covariance_graph| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png
- :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html
+.. |covariance_graph| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png
+ :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html
:scale: 55
-.. |precision_graph| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png
- :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html
+.. |precision_graph| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png
+ :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html
:scale: 55
.. centered:: |covariance| |precision|
@@ -99,7 +99,7 @@ of the estimator::
.. topic:: **Full example**
See the following example for a full file running the analysis:
- :ref:`sphx_glr_auto_examples_connectivity_plot_inverse_covariance_connectome.py`
+ :ref:`sphx_glr_auto_examples_03_connectivity_plot_inverse_covariance_connectome.py`
.. topic:: **Exercise: computing sparse inverse covariance**
:class: green
@@ -144,7 +144,7 @@ One specific case where this may be interesting is for group analysis
across multiple subjects. Indeed, one challenge when doing statistics on
the coefficients of a connectivity matrix is that the number of
coefficients to compare grows quickly with the number of regions, and as
-a result correcting for multiple comparisions takes a heavy toll on
+a result correcting for multiple comparisons takes a heavy toll on
statistical power.
In such a situation, you can use the :class:`GroupSparseCovariance` and
@@ -157,7 +157,7 @@ group analysis only on the non zero coefficients.
.. topic:: **Full example**
See the following example for a full file running the analysis:
- :ref:`sphx_glr_auto_examples_connectivity_plot_multi_subject_connectome.py`
+ :ref:`sphx_glr_auto_examples_03_connectivity_plot_multi_subject_connectome.py`
.. topic:: **Exercise: computing the correlation matrix of rest fmri**
@@ -196,8 +196,8 @@ Finally, we use the
The results are the following:
-.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_simulated_connectome_001.png
- :target: ../auto_examples/connectivity/plot_simulated_connectome.html
+.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_simulated_connectome_001.png
+ :target: ../auto_examples/03_connectivity/plot_simulated_connectome.html
:scale: 60
The group-sparse estimation outputs matrices with
@@ -211,7 +211,7 @@ information.
.. topic:: **Full Example**
The complete source code for this example can be found here:
- :ref:`sphx_glr_auto_examples_connectivity_plot_simulated_connectome.py`
+ :ref:`sphx_glr_auto_examples_03_connectivity_plot_simulated_connectome.py`
.. [#] A lot of technical details on the algorithm used for group-sparse
@@ -250,7 +250,7 @@ Deviations from this mean in the tangent space are provided in the connectivitie
.. topic:: **Full example**
See the following example for a full file running the analysis:
- :ref:`sphx_glr_auto_examples_connectivity_plot_connectivity_measures.py`
+ :ref:`sphx_glr_auto_examples_03_connectivity_plot_group_level_connectivity.py`
.. topic:: **Exercise: computing connectivity in tangent space**
:class: green
diff --git a/doc/connectivity/functional_connectomes.rst b/doc/connectivity/functional_connectomes.rst
index d31714f8ae..e816d1a081 100644
--- a/doc/connectivity/functional_connectomes.rst
+++ b/doc/connectivity/functional_connectomes.rst
@@ -1,8 +1,8 @@
.. _functional_connectomes:
-===============================================================
+========================================================
Extracting times series to build a functional connectome
-===============================================================
+========================================================
.. topic:: **Page summary**
@@ -17,55 +17,56 @@ Extracting times series to build a functional connectome
.. topic:: **References**
- * `Varoquaux and Craddock, Learning and comparing functional
- connectomes across subjects, NeuroImage 2013
- `_
+ * `Varoquaux and Craddock, "Learning and comparing functional
+ connectomes across subjects", NeuroImage 2013
+ `_.
.. _parcellation_time_series:
Time-series from a brain parcellation or "MaxProb" atlas
-===========================================================
+========================================================
Brain parcellations
---------------------
+-------------------
.. currentmodule:: nilearn.datasets
Regions used to extract the signal can be defined by a "hard"
parcellation. For instance, the :mod:`nilearn.datasets` module has functions to
-download atlases forming reference parcellation, eg
+download atlases forming reference parcellation, e.g.,
:func:`fetch_atlas_craddock_2012`, :func:`fetch_atlas_harvard_oxford`,
:func:`fetch_atlas_yeo_2011`.
-For instance to retrieve the Harvard-Oxford cortical parcelation, sampled
-at 2mm, and with a threshold of a probability of .25::
+For instance, to retrieve the Harvard-Oxford cortical parcellation, sampled
+at 2mm, and with a threshold of a probability of 0.25::
from nilearn import datasets
dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
- atlas_filename, labels = dataset.maps, dataset.labels
+ atlas_filename = dataset.maps
+ labels = dataset.labels
Plotting can then be done as::
from nilearn import plotting
plotting.plot_roi(atlas_filename)
-.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_atlas_001.png
- :target: ../auto_examples/manipulating_visualizing/plot_atlas.html
+.. image:: ../auto_examples/01_plotting/images/sphx_glr_plot_atlas_001.png
+ :target: ../auto_examples/01_plotting/plot_atlas.html
:scale: 60
.. seealso::
- * The :ref:`plotting documentation `
+ * The :ref:`plotting documentation `;
- * The :ref:`dataset downloaders `
+ * The :ref:`dataset downloaders `.
Extracting signals on a parcellation
-----------------------------------------
+------------------------------------
.. currentmodule:: nilearn.input_data
To extract signal on the parcellation, the easiest option is to use the
-:class:`nilearn.input_data.NiftiLabelsMasker`. As any ''maskers'' in
+:class:`nilearn.input_data.NiftiLabelsMasker`. As any "maskers" in
nilearn, it is a processing object that is created by specifying all
the important parameters, but not the data::
@@ -86,17 +87,17 @@ obtain time series that capture well the functional interactions between
regions, regressing out noise sources is indeed very important
`[Varoquaux & Craddock 2013] `_.
-.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_signal_extraction_001.png
- :target: ../auto_examples/connectivity/plot_signal_extraction.html
+.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_signal_extraction_001.png
+ :target: ../auto_examples/03_connectivity/plot_signal_extraction.html
:scale: 40
-.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_signal_extraction_002.png
- :target: ../auto_examples/connectivity/plot_signal_extraction.html
+.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_signal_extraction_002.png
+ :target: ../auto_examples/03_connectivity/plot_signal_extraction.html
:scale: 40
.. topic:: **Full example**
See the following example for a full file running the analysis:
- :ref:`sphx_glr_auto_examples_connectivity_plot_signal_extraction.py`
+ :ref:`sphx_glr_auto_examples_03_connectivity_plot_signal_extraction.py`.
.. topic:: **Exercise: computing the correlation matrix of rest fmri**
@@ -104,46 +105,48 @@ regions, regressing out noise sources is indeed very important
Try using the information above to compute the correlation matrix of
the first subject of the ADHD dataset downloaded with
- :func:`nilearn.datasets.fetch_adhd`
+ :func:`nilearn.datasets.fetch_adhd`.
**Hints:**
* Inspect the '.keys()' of the object returned by
- :func:`nilearn.datasets.fetch_adhd`
+ :func:`nilearn.datasets.fetch_adhd`.
- * :func:`numpy.corrcoef` can be used to compute a correlation matrix
- (check the shape of your matrices)
+ * :class:`nilearn.connectome.ConnectivityMeasure` can be used to compute
+ a correlation matrix (check the shape of your matrices; a sketch follows this exercise).
- * :func:`matplotlib.pyplot.imshow` can show a correlation matrix
+ * :func:`matplotlib.pyplot.imshow` can show a correlation matrix.
- * The example above has the solution
+ * The example above has the solution.
|
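+A sketch of the hinted approach, assuming a ``time_series`` array
+extracted with one of the maskers above:
+
+ >>> from nilearn.connectome import ConnectivityMeasure # doctest: +SKIP
+ >>> correlation_measure = ConnectivityMeasure(kind='correlation') # doctest: +SKIP
+ >>> # fit_transform takes a list of subjects' time series
+ >>> correlation_matrix = correlation_measure.fit_transform([time_series])[0] # doctest: +SKIP
+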
Time-series from a probabilistic atlas
-========================================
+======================================
Probabilistic atlases
-----------------------
+---------------------
The definition of regions by a continuous probability map better
captures our imperfect knowledge of boundaries in brain images (notably
because of inter-subject registration errors). One example of such an
atlas well suited to resting-state data analysis is the `MSDL atlas
-`_ (:func:`nilearn.datasets.fetch_atlas_msdl`).
+`_
+(:func:`nilearn.datasets.fetch_atlas_msdl`).
Probabilistic atlases are represented as a set of continuous maps, in a
4D nifti image. Visualizing the atlas thus requires visualizing each
of these maps, which requires accessing them with
-:func:`nilearn.image.index_img` (see the :ref:`corresponding example `).
+:func:`nilearn.image.index_img` (see the :ref:`corresponding example
+`).
-.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_overlay_001.png
- :target: ../auto_examples/manipulating_visualizing/plot_overlay.html
+.. image:: ../auto_examples/01_plotting/images/sphx_glr_plot_overlay_001.png
+ :target: ../auto_examples/01_plotting/plot_overlay.html
:scale: 60
Extracting signals from a probabilistic atlas
-----------------------------------------------
+---------------------------------------------
.. currentmodule:: nilearn.input_data
@@ -164,33 +167,33 @@ The procedure is the same as with `brain parcellations
`_ but using the :class:`NiftiMapsMasker`, and
the same considerations on using confounds regressors apply.
-.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_001.png
- :target: ../auto_examples/connectivity/plot_probabilistic_atlas_extraction.html
+.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_001.png
+ :target: ../auto_examples/03_connectivity/plot_probabilistic_atlas_extraction.html
:scale: 30
.. topic:: **Full example**
A full example of extracting signals on a probabilistic atlas:
- :ref:`sphx_glr_auto_examples_connectivity_plot_probabilistic_atlas_extraction.py`
+ :ref:`sphx_glr_auto_examples_03_connectivity_plot_probabilistic_atlas_extraction.py`.
-.. topic:: **Exercise: correlation matrix of rest fmri on probabilistic atlas**
+.. topic:: **Exercise: correlation matrix of rest fMRI on probabilistic atlas**
:class: green
Try to compute the correlation matrix of the first subject of the ADHD
dataset downloaded with :func:`nilearn.datasets.fetch_adhd`
with the MSDL atlas downloaded via
- :func:`nilearn.datasets.fetch_atlas_msdl`
+ :func:`nilearn.datasets.fetch_atlas_msdl`.
- **Hint:** The example above has the solution
+ **Hint:** The example above has the solution.
A functional connectome: a graph of interactions
-====================================================
+================================================
A square matrix, such as a correlation matrix, can also be seen as a
-`"graph" `_: a set
+`"graph" `_: a set
of "nodes", connected by "edges". When these nodes are brain regions, and
the edges capture interactions between them, this graph is a "functional
connectome".
@@ -200,7 +203,10 @@ function that take the matrix, and coordinates of the nodes in MNI space.
In the case of the MSDL atlas
(:func:`nilearn.datasets.fetch_atlas_msdl`), the CSV file readily comes
with MNI coordinates for each region (see for instance example:
-:ref:`sphx_glr_auto_examples_connectivity_plot_probabilistic_atlas_extraction.py`).
+:ref:`sphx_glr_auto_examples_03_connectivity_plot_probabilistic_atlas_extraction.py`).
+
+.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_002.png
+ :target: ../auto_examples/03_connectivity/plot_probabilistic_atlas_extraction.html
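+
+A minimal sketch of plotting such a graph with
+:func:`nilearn.plotting.plot_connectome` (``correlation_matrix`` and ``coords``
+are placeholders for the connectivity matrix and the region coordinates)::
+
+ >>> from nilearn import plotting # doctest: +SKIP
+ >>> plotting.plot_connectome(correlation_matrix, coords, edge_threshold="80%") # doctest: +SKIP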
..
For doctesting
@@ -208,31 +214,35 @@ with MNI coordinates for each region (see for instance example:
>>> from nilearn import datasets
>>> atlas_filename = datasets.fetch_atlas_msdl().maps # doctest: +SKIP
-For another atlas this information can be computed for each region with
-the :func:`nilearn.plotting.find_xyz_cut_coords` function
-(see example:
-:ref:`sphx_glr_auto_examples_connectivity_plot_multi_subject_connectome.py`)::
+As you can see, the correlation matrix gives a very "full" graph: every
+node is connected to every other one. This is because it also captures
+indirect connections. In the next section we will see how to focus on
+direct connections only.
- >>> from nilearn import image, plotting
- >>> atlas_region_coords = [plotting.find_xyz_cut_coords(img) for img in image.iter_img(atlas_filename)] # doctest: +SKIP
+A functional connectome: extracting coordinates of regions
+==========================================================
+For atlases without readily available label coordinates, center coordinates
+can be computed for each region on hard parcellation or probabilistic atlases.
+ * For hard parcellation atlases (e.g. :func:`nilearn.datasets.fetch_atlas_destrieux_2009`),
+ use the :func:`nilearn.plotting.find_parcellation_cut_coords`
+ function (see the sketch after this list). See example:
+ :ref:`sphx_glr_auto_examples_03_connectivity_plot_atlas_comparison.py`
+ * For probabilistic atlases (e.g. :func:`nilearn.datasets.fetch_atlas_msdl`), use the
+ :func:`nilearn.plotting.find_probabilistic_atlas_cut_coords` function.
+ See example: :ref:`sphx_glr_auto_examples_03_connectivity_plot_multi_subject_connectome.py`::
-.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_002.png
- :target: ../auto_examples/connectivity/plot_probabilistic_atlas_extraction.html
+ >>> from nilearn import plotting
+ >>> atlas_region_coords = plotting.find_probabilistic_atlas_cut_coords(atlas_filename) # doctest: +SKIP
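+
+For hard parcellation atlases, a minimal sketch along the same lines
+(``labels_img`` is a placeholder for the parcellation's labels image)::
+
+ >>> from nilearn import plotting # doctest: +SKIP
+ >>> coords = plotting.find_parcellation_cut_coords(labels_img) # doctest: +SKIP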
-As you can see, the correlation matrix gives a very "full" graph: every
-node is connected to every other one. This is because is also captures
-indirect connections. In the next section we will see how to focus on
-only direct connections.
|
.. topic:: **References**
- * `Zalesky NeuroImage 2012 "On the use of correlation as a measure of
- network connectivity" `_
+ * `Zalesky et al., NeuroImage 2012, "On the use of correlation as a measure of
+ network connectivity" `_.
- * `Varoquaux NeuroImage 2013, Learning and comparing functional
- connectomes across subjects,
- `_
+ * `Varoquaux et al., NeuroImage 2013, "Learning and comparing functional
+ connectomes across subjects" `_.
diff --git a/doc/connectivity/index.rst b/doc/connectivity/index.rst
index 71f2a99cf9..ce88855ea0 100644
--- a/doc/connectivity/index.rst
+++ b/doc/connectivity/index.rst
@@ -22,6 +22,6 @@ and networks, via resting-state networks or parcellations.
functional_connectomes.rst
connectome_extraction.rst
resting_state_networks.rst
- parcellating.rst
region_extraction.rst
+ parcellating.rst
diff --git a/doc/connectivity/parcellating.rst b/doc/connectivity/parcellating.rst
index cdcd732481..406b4dc2ee 100644
--- a/doc/connectivity/parcellating.rst
+++ b/doc/connectivity/parcellating.rst
@@ -1,51 +1,71 @@
.. _parcellating_brain:
-==================================
-Parcellating the brain in regions
-==================================
+==============================================
+Clustering to parcellate the brain in regions
+==============================================
-.. topic:: **Page summary**
+This page discusses how clustering can be used to parcellate the brain
+into homogeneous regions from functional imaging data.
- This page demonstrates how clustering can be used to parcellate the
- brain into homogeneous regions from resting-state time series.
+|
+.. topic:: **Reference**
-A resting-state dataset
-========================
+ A big-picture reference on the use of clustering for brain
+ parcellations.
+
+ Thirion, et al. `"Which fMRI clustering gives good brain
+ parcellations?."
+ `_
+ Frontiers in neuroscience 8.167 (2014): 13.
+
+Data loading: Resting-state data
+=================================
.. currentmodule:: nilearn.datasets
-Here, we use a `resting-state `_
-dataset from test-retest study performed at NYU. Details on the data
-can be found in the documentation for the downloading function
-:func:`fetch_nyu_rest`.
+Clustering is commonly applied to resting-state data, but any functional
+brain data will give rise to a functional parcellation; in the case of
+resting-state data, it captures intrinsic brain architecture.
+In the examples, we use rest data downloaded with the function
+:func:`fetch_adhd` (see :ref:`loading_data`).
+
+Applying clustering
+====================
-Preprocessing: loading and masking
-==================================
+.. topic:: **Which clustering to use**
-We fetch the data from Internet and load it with a dedicated function
-(see :ref:`loading_data`):
+ The question of which clustering method to use is in itself subject
+ to debate. There are many clustering methods; their computational
+ cost will vary, as well as their results. A `well-cited empirical
+ comparison paper, Thirion et al. 2014
+ `_
+ suggests that:
-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
- :start-after: ### Load nyu_rest dataset #####################################################
- :end-before: ### Ward ######################################################################
+ * For a large number of clusters, it is preferable to use Ward
+ agglomerative clustering with spatial constraints
-No mask is given with the data so we let the masker compute one.
-The result is a niimg from which we extract a numpy array that is
-used to mask our original images.
+ * For a small number of clusters, it is preferable to use Kmeans
+ clustering after spatially-smoothing the data.
-Applying Ward clustering
-==========================
+ Both clustering algorithms (as well as many others) are provided by the
+ :class:`nilearn.regions.Parcellations` object, and a full code example
+ can be found :ref:`here`.
+ Ward clustering is the easiest to use, as it can be done with the Feature
+ agglomeration object. It is also quite fast. We detail it below.
+
+|
**Compute a connectivity matrix**
Before applying Ward's method, we compute a spatial neighborhood matrix,
aka connectivity matrix. This is useful to constrain clusters to form
contiguous parcels (see `the scikit-learn documentation
-`_)
+`_)
+
+This is done from the mask computed by the masker: a niimg from which we
+extract a numpy array, and then compute the connectivity matrix, as
+sketched below.
-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
- :start-after: # Compute connectivity matrix: which voxel is connected to which
- :end-before: # Computing the ward for the first time, this is long...
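+
+A minimal sketch of this step (``mask`` is a placeholder for the 3D boolean
+numpy array extracted from the masker's mask image)::
+
+ >>> from sklearn.feature_extraction import image # doctest: +SKIP
+ >>> shape = mask.shape # doctest: +SKIP
+ >>> connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1], n_z=shape[2], mask=mask) # doctest: +SKIP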
**Ward clustering principle**
Ward's algorithm is a hierarchical clustering algorithm: it
@@ -62,81 +82,63 @@ the *memory* parameter is used to cache the computed component tree. You
can give it either a *joblib.Memory* instance or the name of a directory
used for caching.
-Running the Ward algorithm
----------------------------
-Here we simply launch Ward's algorithm to find 1000 clusters and we time it.
+.. note::
-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
- :start-after: # Computing the ward for the first time, this is long...
- :end-before: # Compute the ward with more clusters, should be faster
+ Ward clustering computing 1000 parcels typically runs in about 10
+ seconds. Admittedly, this is very fast.
-This runs in about 10 seconds (depending on your computer configuration). Now,
-we are not satisfied of the result and we want to cluster the picture in 2000
-elements.
+.. note::
-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
- :start-after: # Compute the ward with more clusters, should be faster
- :end-before: ### Show result ###############################################################
+ The steps detailed above, such as computing the connectivity matrix for
+ Ward, caching, and clustering, are all implemented within the
+ :class:`nilearn.regions.Parcellations` object.
-Now that the component tree has been computed, computation is must faster
-thanks to caching. You should have the result in less than 1 second.
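+
+A minimal sketch of using this object (``func_filenames`` is a placeholder
+for a list of 4D functional images; the ``memory`` argument enables the
+caching discussed above)::
+
+ >>> from nilearn.regions import Parcellations # doctest: +SKIP
+ >>> ward = Parcellations(method='ward', n_parcels=1000, memory='nilearn_cache') # doctest: +SKIP
+ >>> ward.fit(func_filenames) # doctest: +SKIP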
+.. seealso::
-Post-Processing and visualizing the parcels
-============================================
+ * The function :func:`nilearn.regions.connected_label_regions`, which can be useful to
+ break down connected components into separate regions. This is relevant, for
+ instance, for clusters defined using KMeans, whereas it is not necessary for
+ Ward clustering, which already enforces spatial connectivity.
-Unmasking
----------
-After applying the ward, we must unmask the data. This can be done simply :
+Using and visualizing the resulting parcellation
+==================================================
-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
- :start-after: # Unmask data
- :end-before: # Display the labels
+.. currentmodule:: nilearn.input_data
-You can see that masked data is filled with -1 values. This is done for the
-sake of visualization. In fact, clusters are labeled from 0 to
-(n_clusters - 1). By putting every background value to -1, we assure that
-they will not mess with the visualization.
+Visualizing the parcellation
+-----------------------------
-Label visualization
---------------------
+The labels of the parcellation are found in the `labels_img_` attribute of
+the :class:`nilearn.regions.Parcellations` object after fitting it to the data
+using *ward.fit*. We directly use the result for visualization.
To visualize the clusters, we assign random colors to each cluster
for the labels visualization.
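+
+A minimal sketch of this visualization (assuming ``ward`` is the fitted
+:class:`nilearn.regions.Parcellations` object)::
+
+ >>> from nilearn import plotting # doctest: +SKIP
+ >>> plotting.plot_roi(ward.labels_img_, title="Ward parcellation") # doctest: +SKIP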
-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
- :start-after: ### Show result ###############################################################
- :end-before: # Display the original data
-
-
-.. figure:: ../auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_001.png
- :target: ../auto_examples/connectivity/plot_rest_clustering.html
+.. figure:: ../auto_examples/03_connectivity/images/sphx_glr_plot_rest_parcellations_001.png
+ :target: ../auto_examples/03_connectivity/plot_rest_parcellations.html
:align: center
:scale: 80
-Compressed picture
-------------------
+Compressed representation
+--------------------------
-By transforming a picture in a new one in which the value of each voxel
-is the mean value of the cluster it belongs to, we are creating a
-compressed version of the original picture. We can obtain this
-representation thanks to a two-step procedure :
+The clustering can be used to transform the data into a smaller
+representation, taking the average on each parcel (see the sketch after this list):
- call *ward.transform* to obtain the mean value of each cluster (for each
scan)
- call *ward.inverse_transform* on the previous result to turn it back into
the masked picture shape
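+
+A minimal sketch of these two calls (``ward`` and ``func_filenames`` as
+above)::
+
+ >>> fmri_reduced = ward.transform(func_filenames) # doctest: +SKIP
+ >>> fmri_compressed = ward.inverse_transform(fmri_reduced) # doctest: +SKIP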
-.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py
- :start-after: # Display the original data
-
-.. |left_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_002.png
- :target: ../auto_examples/connectivity/plot_rest_clustering.html
+.. |left_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_rest_parcellations_002.png
+ :target: ../auto_examples/03_connectivity/plot_rest_parcellations.html
:width: 49%
-.. |right_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_003.png
- :target: ../auto_examples/connectivity/plot_rest_clustering.html
+.. |right_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_rest_parcellations_003.png
+ :target: ../auto_examples/03_connectivity/plot_rest_parcellations.html
:width: 49%
|left_img| |right_img|
@@ -144,3 +146,12 @@ representation thanks to a two-step procedure :
We can see that using only 2000 parcels, the original image is well
approximated.
+|
+
+.. topic:: **Example code**
+
+ All the steps discussed in this section can be seen implemented in
+ :ref:`a full code example
+ `.
+
+
diff --git a/doc/connectivity/region_extraction.rst b/doc/connectivity/region_extraction.rst
index 3b5ccf9881..1b51e04ee2 100644
--- a/doc/connectivity/region_extraction.rst
+++ b/doc/connectivity/region_extraction.rst
@@ -6,10 +6,10 @@ Region Extraction for better brain parcellations
.. topic:: **Page summary**
- This section shows how to use Region Extractor to extract each connected
- brain regions/components into a separate brain activation regions and also
+ This section shows how to use Region Extractor to extract connected brain
+ regions/components into separate brain activation regions, and also
shows how to learn functional connectivity interactions between each
- separate regions.
+ separate region.
.. contents:: **Contents**
:local:
@@ -34,50 +34,50 @@ which is already preprocessed and publicly available at
datasets.
-.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py
- :start-after: # utilities
+.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
+ :start-after: # We use nilearn's datasets downloading utilities
:end-before: ################################################################################
.. currentmodule:: nilearn.decomposition
-Data decomposition using Canonical ICA
-======================================
+Brain maps using Dictionary Learning
+====================================
-Here, we use :class:`CanICA`, a multi subject model to decompose previously
-fetched multi subjects datasets. We do this by setting the parameters in the
-object and calling fit on the functional filenames without necessarily
-converting each filename to Nifti1Image object.
+Here, we use the :class:`DictLearning` object, a multi-subject model to
+decompose multi-subject fMRI datasets into functionally defined maps. We do
+this by setting the parameters and calling `fit` on the filenames of the
+datasets, without necessarily converting each file to a Nifti1Image object.
-.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py
- :start-after: # decomposition module
- :end-before: # Visualization
+.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
+ :start-after: # object and fit the model to the functional datasets
+ :end-before: # Visualization of resting state networks
.. currentmodule:: nilearn.plotting
-Visualization of Canonical ICA maps
-===================================
+Visualization of Dictionary Learning maps
+=========================================
-Showing ICA maps stored in components_img using nilearn plotting utilities.
+Showing maps stored in components_img using nilearn plotting utilities.
Here, we use :func:`plot_prob_atlas` for easy visualization of 4D atlas maps
-onto the anatomical standard template. Each ICA map is displayed in different
+onto the standard anatomical template. Each map is displayed in a different
color and colors are random and automatically picked.
-.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py
- :start-after: # Show ICA maps by using plotting utilities
+.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
+ :start-after: # Show networks using plotting utilities
:end-before: ################################################################################
-.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_001.png
- :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html
+.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_001.png
+ :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
:scale: 60
.. currentmodule:: nilearn.regions
-Region Extraction with CanICA maps
-==================================
+Region Extraction with Dictionary Learning maps
+===============================================
We use object :class:`RegionExtractor` for extracting brain connected regions
-from ICA maps into separated brain activation regions with automatic
+from dictionary maps into separated brain activation regions with automatic
thresholding strategy selected as thresholding_strategy='ratio_n_voxels'. We use
thresholding strategy to first get foreground information present in the maps and
then followed by robust region extraction on foreground information using
@@ -93,9 +93,9 @@ regions. We control the small spurious regions size by thresholding in voxel uni
to adapt well to the resolution of the image. Please see the documentation of
nilearn.regions.connected_regions for more details.
-.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py
- :start-after: # regions, both can be done by importing Region Extractor from regions module
- :end-before: # Visualization
+.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
+ :start-after: # maps, less the threshold means that more intense non-voxels will be survived.
+ :end-before: # Visualization of region extraction results
.. currentmodule:: nilearn.plotting
@@ -107,12 +107,12 @@ for visualizing extracted regions on a standard template. Each extracted brain
region is assigned a color and, as you can see, the visual cortex area is extracted
quite nicely into each hemisphere.
-.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py
- :start-after: # Show region extraction results
+.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
+ :start-after: # Visualization of region extraction results
:end-before: ################################################################################
-.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_002.png
- :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html
+.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_002.png
+ :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
:scale: 60
.. currentmodule:: nilearn.connectome
@@ -133,9 +133,9 @@ shape=(176, 23) where 176 is the length of time series and 23 is the number of
extracted regions. Likewise, we have a total of 20 subject specific time series signals.
In the third step, we compute the mean correlation across all subjects.
-.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py
+.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
:start-after: # To estimate correlation matrices we import connectome utilities from nilearn
- :end-before: # Visualization
+ :end-before: #################################################################
.. currentmodule:: nilearn.plotting
@@ -148,16 +148,16 @@ automatically the coordinates required, for plotting connectome relations.
Left image is the correlations in a matrix form and right image is the
connectivity relations to brain regions plotted using :func:`plot_connectome`
-.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py
- :start-after: # Import image utilities in utilising to operate on 4th dimension
+.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
+ :start-after: # Plot resulting connectomes
:end-before: ################################################################################
-.. |matrix| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_003.png
- :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html
+.. |matrix| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_003.png
+ :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
:scale: 60
-.. |connectome| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_004.png
- :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html
+.. |connectome| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_004.png
+ :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
:scale: 60
.. centered:: |matrix| |connectome|
@@ -165,22 +165,22 @@ connectivity relations to brain regions plotted using :func:`plot_connectome`
Validating results
==================
-Showing only Default Mode Network (DMN) regions before and after region
-extraction by manually identifying the index of DMN in ICA decomposed maps.
+Showing the regions of only one specific network before and after region extraction.
-Left image displays the DMN regions without region extraction and right image
-displays the DMN regions after region extraction. Here, we can validate that
-the DMN regions are nicely separated displaying each extracted region in different color.
+The left image displays the regions of one specific resting-state network without
+region extraction, and the right image displays the regions split apart after region
+extraction. Here, we can validate that the regions are nicely separated, with each
+extracted region shown in a different color.
-.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py
- :start-after: # First we plot DMN without region extraction, interested in only index=[3]
+.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
+ :start-after: # First, we plot a network of index=4 without region extraction
-.. |dmn| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_005.png
- :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html
+.. |dmn| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_005.png
+ :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
:scale: 50
-.. |dmn_reg| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_006.png
- :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html
+.. |dmn_reg| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_006.png
+ :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
:scale: 50
.. centered:: |dmn| |dmn_reg|
@@ -188,4 +188,4 @@ the DMN regions are nicely separated displaying each extracted region in differe
.. seealso::
The full code can be found as an example:
- :ref:`sphx_glr_auto_examples_connectivity_plot_extract_regions_canica_maps.py`
+ :ref:`sphx_glr_auto_examples_03_connectivity_plot_extract_regions_dictlearning_maps.py`
diff --git a/doc/connectivity/resting_state_networks.rst b/doc/connectivity/resting_state_networks.rst
index ab40765733..b4c6ffb104 100644
--- a/doc/connectivity/resting_state_networks.rst
+++ b/doc/connectivity/resting_state_networks.rst
@@ -34,9 +34,9 @@ functions to fetch data from Internet and get the filenames (:ref:`more
on data loading `):
-.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py
+.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py
:start-after: # First we load the ADHD200 data
- :end-before: # Here we apply CanICA on the data
+ :end-before: ####################################################################
Applying CanICA
---------------
@@ -47,12 +47,19 @@ perform a multi-subject ICA decomposition following the CanICA model.
As with every object in nilearn, we give its parameters at construction,
and then fit it on the data.
-.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py
+.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py
:start-after: # Here we apply CanICA on the data
- :end-before: # To visualize we plot the outline of all components on one figure
+ :end-before: ####################################################################
-The components estimated are found as the `components_` attribute of the
-object.
+The estimated components are found in the `components_img_` attribute
+of the object, as a 4D Nifti image.
+
+.. note::
+ The `components_img_` attribute is available from version 0.4.1, making
+ visualization easy without any additional unmasking step.
+ With older versions, the components image can be obtained by
+ unmasking the `components_` attribute. See :ref:`section Inverse transform:
+ unmasking data `.
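+
+ A minimal sketch of that unmasking step for older versions (assuming
+ ``canica`` is the fitted estimator; its ``masker_`` attribute holds the
+ masker used internally)::
+
+  >>> components_img = canica.masker_.inverse_transform(canica.components_) # doctest: +SKIP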
Visualizing the results
-----------------------
@@ -61,23 +68,23 @@ We can visualize the components as in the previous examples. The first plot
shows a map generated from all the components. Then we plot an axial cut for
each component separately.
-.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py
+.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py
:start-after: # To visualize we plot the outline of all components on one figure
- :end-before: # Finally, we plot the map for each ICA component separately
+ :end-before: ####################################################################
-.. figure:: ../auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_001.png
+.. figure:: ../auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_001.png
:align: center
- :target: ../auto_examples/connectivity/plot_canica_resting_state.html
+ :target: ../auto_examples/03_connectivity/plot_canica_resting_state.html
Finally, we can plot the map for different ICA components separately:
-.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py
+.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py
:start-after: # Finally, we plot the map for each ICA component separately
-.. |left_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_003.png
+.. |left_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_003.png
:width: 23%
-.. |right_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_004.png
+.. |right_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_004.png
:width: 23%
.. centered:: |left_img| |right_img|
@@ -85,7 +92,7 @@ Finally, we can plot the map for different ICA components separately:
.. seealso::
The full code can be found as an example:
- :ref:`sphx_glr_auto_examples_connectivity_plot_canica_resting_state.py`
+ :ref:`sphx_glr_auto_examples_03_connectivity_plot_canica_resting_state.py`
.. note::
@@ -106,47 +113,46 @@ good extracted maps.
.. topic:: **References**
- * Gael Varoquaux et al. `Multi-subject dictionary learning to segment an atlas of brain spontaneous activity `_,
- IPMI 2011, pp. 562-573, Lecture
- Notes in Computer Science
+ * Arthur Mensch et al. `Compressed online dictionary learning for fast resting-state fMRI decomposition `_,
+ ISBI 2016, Lecture Notes in Computer Science
Applying DictLearning
---------------------
-:class:'DictLearning' is a ready-to-use class with the same interface as CanICA.
+:class:`DictLearning` is a ready-to-use class with the same interface as CanICA.
Sparsity of output map is controlled by a parameter alpha: using a
larger alpha yields sparser maps.
-.. literalinclude:: ../../examples/connectivity/plot_compare_resting_state_decomposition.py
+.. literalinclude:: ../../examples/03_connectivity/plot_compare_resting_state_decomposition.py
:start-after: # Dictionary learning
- :end-before: # CanICA
+ :end-before: ###############################################################################
We can fit both estimators to compare them
-.. literalinclude:: ../../examples/connectivity/plot_compare_resting_state_decomposition.py
+.. literalinclude:: ../../examples/03_connectivity/plot_compare_resting_state_decomposition.py
:start-after: # Fit both estimators
- :end-before: # Visualize the results
+ :end-before: ###############################################################################
Visualizing the results
-----------------------
4D plotting offers an efficient way to compare both resulting outputs
-.. literalinclude:: ../../examples/connectivity/plot_compare_resting_state_decomposition.py
+.. literalinclude:: ../../examples/03_connectivity/plot_compare_resting_state_decomposition.py
:start-after: # Visualize the results
-.. |left_img_decomp| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_001.png
- :target: ../auto_examples/plot_compare_resting_state_decomposition.html
+.. |left_img_decomp| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_001.png
+ :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html
:width: 50%
-.. |right_img_decomp| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_003.png
- :target: ../auto_examples/plot_compare_resting_state_decomposition.html
+.. |right_img_decomp| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_003.png
+ :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html
:width: 50%
-.. |left_img_decomp_single| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_002.png
- :target: ../auto_examples/plot_compare_resting_state_decomposition.html
+.. |left_img_decomp_single| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_002.png
+ :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html
:width: 50%
-.. |right_img_decomp_single| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_004.png
- :target: ../auto_examples/plot_compare_resting_state_decomposition.html
+.. |right_img_decomp_single| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_004.png
+ :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html
:width: 50%
@@ -163,4 +169,4 @@ classification tasks.
.. seealso::
The full code can be found as an example:
- :ref:`sphx_glr_auto_examples_connectivity_plot_compare_resting_state_decomposition.py`
+ :ref:`sphx_glr_auto_examples_03_connectivity_plot_compare_resting_state_decomposition.py`
diff --git a/doc/contributing.rst b/doc/contributing.rst
new file mode 100644
index 0000000000..e582053ea0
--- /dev/null
+++ b/doc/contributing.rst
@@ -0,0 +1 @@
+.. include:: ../CONTRIBUTING.rst
diff --git a/doc/decoding/decoding_intro.rst b/doc/decoding/decoding_intro.rst
new file mode 100644
index 0000000000..4b92f5ff76
--- /dev/null
+++ b/doc/decoding/decoding_intro.rst
@@ -0,0 +1,528 @@
+.. for doctests to run, we need to define variables that are defined in
+ the literal includes
+ >>> import numpy as np
+ >>> from sklearn import datasets
+ >>> iris = datasets.load_iris()
+ >>> fmri_masked = iris.data
+ >>> target = iris.target
+ >>> session = np.ones_like(target)
+ >>> n_samples = len(target)
+
+.. Remove doctest: +SKIP at LDA when dropping support for sklearn versions
+ older than 0.17
+
+.. _decoding_intro:
+
+=============================
+An introduction to decoding
+=============================
+
+This section gives an introduction to the main concept of decoding:
+predicting from brain images.
+
+The discussion and examples are built around the analysis of the Haxby
+2001 dataset, showing how to predict from fMRI images the stimuli that
+the subject is viewing. However, the process is the same in other settings,
+such as predicting from other brain imaging modalities: for instance predicting
+phenotype or diagnostic status from VBM (Voxel Based Morphometry) maps
+(as illustrated in :ref:`a more complex example
+`), or from FA maps
+to capture diffusion mapping.
+
+
+.. contents:: **Contents**
+ :local:
+ :depth: 1
+
+
+Loading and preparing the data
+===============================
+
+The Haxby 2001 experiment
+-------------------------
+
+In the Haxby experiment,
+subjects were presented with visual stimuli from different categories. We are
+going to predict which category the subject is seeing from the fMRI
+activity recorded in masks of the ventral stream. Significant prediction
+shows that the signal in the region contains information on the
+corresponding category.
+
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_stimuli_004.png
+ :target: ../auto_examples/02_decoding/plot_haxby_stimuli.html
+ :scale: 30
+ :align: left
+
+ Face stimuli
+
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_stimuli_002.png
+ :target: ../auto_examples/02_decoding/plot_haxby_stimuli.html
+ :scale: 30
+ :align: left
+
+ Cat stimuli
+
+.. figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_haxby_masks_001.png
+ :target: ../auto_examples/01_plotting/plot_haxby_masks.html
+ :scale: 30
+ :align: left
+
+ Masks
+
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_full_analysis_001.png
+ :target: ../auto_examples/02_decoding/plot_haxby_full_analysis.html
+ :scale: 35
+ :align: left
+
+ Decoding scores per mask
+
+_____
+
+.. topic:: **fMRI: using beta maps of a first-level analysis**
+
+ The Haxby experiment is unusual because the experimental paradigm is
+ made of many blocks of continuous stimulation. Most cognitive
+ experiments have a more complex temporal structure with rich sequences
+ of events.
+
+ The standard approach to decoding consists in fitting a first-level
+ GLM to retrieve one response map (a beta map) per trial. This is
+ sometimes known as "beta-series regressions" (see Mumford et al,
+ *Deconvolving bold activation in event-related designs for multivoxel
+ pattern classification analyses*, NeuroImage 2012). These maps can
+ then be input to the decoder as below, predicting the conditions
+ associated with each trial.
+
+ For simplicity, we will work on the raw time-series of the data.
+ However, **it is strongly recommended that you fit a first-level GLM to
+ include an HRF model and isolate the responses from various
+ confounds**.
+
+
+Loading the data into nilearn
+-----------------------------
+
+.. topic:: **Full code example**
+
+ The documentation here just gives the big idea. A full code example,
+ with explanation, can be found on
+ :ref:`sphx_glr_auto_examples_plot_decoding_tutorial.py`
+
+* **Starting an environment**: Launch IPython via "ipython --matplotlib"
+ in a terminal, or use the Jupyter notebook.
+
+* **Retrieving the data**: In the tutorial, we load the data using nilearn
+ data downloading function, :func:`nilearn.datasets.fetch_haxby`.
+ However, all this function does is to download the data and return
+ paths to the files downloaded on the disk. To input your own data to
+ nilearn, you can pass in the path to your own files
+ (:ref:`more on data input `).
+
+* **Loading the behavioral labels**: Behavioral information is often stored
+ in a text file such as a CSV, and must be loaded with
+ **numpy.recfromcsv** or `pandas `_ (see the sketch after this list)
+
+* **Extracting the fMRI data**: we then use the
+ :class:`nilearn.input_data.NiftiMasker`: we extract only the voxels on
+ the mask of the ventral temporal cortex that comes with the data,
+ applying the `mask_vt` mask to
+ the 4D fMRI data. The resulting data is then a matrix with a shape that is
+ (n_timepoints, n_voxels)
+ (see :ref:`mask_4d_2_3d` for a discussion on using masks).
+
+* **Sample mask**: Masking some of the time points may be useful to
+ restrict to a specific pair of conditions (*e.g.* cats versus faces).
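+
+A minimal sketch of these preparation steps (the file paths and the
+``labels`` column follow the layout of :func:`nilearn.datasets.fetch_haxby`;
+adapt them to your own data)::
+
+ >>> import pandas as pd # doctest: +SKIP
+ >>> from nilearn import datasets # doctest: +SKIP
+ >>> from nilearn.input_data import NiftiMasker # doctest: +SKIP
+ >>> haxby = datasets.fetch_haxby() # doctest: +SKIP
+ >>> behavioral = pd.read_csv(haxby.session_target[0], sep=" ") # doctest: +SKIP
+ >>> target = behavioral["labels"] # doctest: +SKIP
+ >>> masker = NiftiMasker(mask_img=haxby.mask_vt[0], standardize=True) # doctest: +SKIP
+ >>> fmri_masked = masker.fit_transform(haxby.func[0]) # doctest: +SKIP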
+
+.. note::
+
+ Seemingly minor data preparation can matter a lot for the final score,
+ for instance standardizing the data.
+
+
+.. seealso::
+
+ * :ref:`loading_data`
+ * :ref:`masking`
+
+
+
+Performing a simple decoding analysis
+=======================================
+
+The prediction engine
+---------------------
+
+An estimator object
+...................
+
+To perform decoding we need to use an estimator from the `scikit-learn
+` machine-learning library. This object can
+predict a condition label **y** given a set **X** of imaging data.
+
+A simple and yet performant choice is the `Support Vector Classifier
+`_ (or SVC) with a
+linear kernel. The corresponding class, :class:`sklearn.svm.SVC`, needs
+to be imported from scikit-learn.
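+
+A minimal sketch of constructing this estimator::
+
+ >>> from sklearn.svm import SVC
+ >>> svc = SVC(kernel='linear')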
+
+Note that the documentation of the object details all parameters. In
+IPython, it can be displayed as follows::
+
+ In [10]: svc?
+ Type: SVC
+ Base Class:
+ String Form:
+ SVC(kernel=linear, C=1.0, probability=False, degree=3, coef0=0.0, tol=0.001,
+ cache_size=200, shrinking=True, gamma=0.0)
+ Namespace: Interactive
+ Docstring:
+ C-Support Vector Classification.
+ Parameters
+ ----------
+ C : float, optional (default=1.0)
+ penalty parameter C of the error term.
+ ...
+
+.. seealso::
+
+ the `scikit-learn documentation on SVMs
+ `_
+
+
+Applying it to data: fit (train) and predict (test)
+...................................................
+
+The prediction objects have two important methods:
+
+- a `fit` function that "learns" the parameters of the model from the data.
+ Thus, we need to give some training data to `fit`.
+- a `predict` function that "predicts" a target from new data.
+ Here, we just have to give the new set of images (as the target should be
+ unknown):
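+
+A minimal sketch of these two calls (``fmri_masked`` and ``target`` as
+prepared above; the last 30 samples are held out for prediction)::
+
+ >>> svc.fit(fmri_masked[:-30], target[:-30]) # doctest: +SKIP
+ >>> prediction = svc.predict(fmri_masked[-30:]) # doctest: +SKIP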
+
+.. warning::
+
+ **Do not predict on data used by the fit: this would yield misleadingly optimistic scores.**
+
+.. for doctests (smoke testing):
+ >>> from sklearn.svm import SVC
+ >>> svc = SVC()
+
+Measuring prediction performance
+--------------------------------
+
+Cross-validation
+................
+
+We cannot measure a prediction error on the same set of data that we have
+used to fit the estimator: it would be much easier than on new data, and
+the result would be meaningless. We need to use a technique called
+*cross-validation* to split the data into different sets, called "folds",
+in a `K-Fold strategy
+`_.
+
+.. for doctests:
+ >>> cv = 2
+
+There is a specific function,
+:func:`sklearn.model_selection.cross_val_score` that computes for you
+the score for the different folds of cross-validation::
+
+ >>> from sklearn.model_selection import cross_val_score # doctest: +SKIP
+ >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=5) # doctest: +SKIP
+
+`cv=5` stipulates a 5-fold cross-validation. Note that in versions of
+scikit-learn older than 0.18, this function was located in
+`sklearn.cross_validation`.
+
+You can speed up the computation by using n_jobs=-1, which will spread
+the computation equally across all processors (but might not work under
+Windows)::
+
+ >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=5, n_jobs=-1, verbose=10) #doctest: +SKIP
+
+**Prediction accuracy**: We can take a look at the results of the
+`cross_val_score` function::
+
+ >>> print(cv_scores) # doctest: +SKIP
+ [0.72727272727272729, 0.46511627906976744, 0.72093023255813948, 0.58139534883720934, 0.7441860465116279]
+
+This is simply the prediction score for each fold, i.e. the fraction of
+correct predictions on the left-out data.
+
+Choosing a good cross-validation strategy
+.........................................
+
+There are many cross-validation strategies possible, including K-Fold or
+leave-one-out. When choosing a strategy, keep in mind that:
+
+* The test set should be as little correlated as possible with the train
+ set
+* The test set needs to have enough samples to enable a good measure of
+ the prediction error (a rule of thumb is to use 10 to 20% of the data).
+
+In this regard, leave-one-out is often one of the worst options (see
+Varoquaux et al, *Assessing and tuning brain decoders: cross-validation,
+caveats, and guidelines*, Neuroimage 2017).
+
+Here, in the Haxby example, we are going to leave a session out, in order
+to have a test set independent from the train set. For this, we are going
+to use the session label, present in the behavioral data file, and
+:class:`sklearn.model_selection.LeaveOneGroupOut`.
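+
+A minimal sketch (``session`` is a placeholder for the array of session
+labels, one per sample)::
+
+ >>> from sklearn.model_selection import LeaveOneGroupOut # doctest: +SKIP
+ >>> cv = LeaveOneGroupOut() # doctest: +SKIP
+ >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv, groups=session) # doctest: +SKIP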
+
+.. note::
+
+ Full code for the above can be found on
+ :ref:`sphx_glr_auto_examples_plot_decoding_tutorial.py`
+
+|
+
+.. topic:: **Exercise**
+ :class: green
+
+ Compute the mean prediction accuracy using `cv_scores`.
+
+.. topic:: Solution
+
+ >>> classification_accuracy = np.mean(cv_scores) # doctest: +SKIP
+ >>> classification_accuracy # doctest: +SKIP
+ 0.76851...
+
+For discriminating human faces from cats, we measure a total prediction
+accuracy of *77%* across the different sessions.
+
+Choice of the prediction accuracy measure
+.........................................
+
+The default metric used for measuring performance is the accuracy score,
+i.e. the fraction of correct predictions. It is not always a sensible
+metric, especially in the case of very imbalanced classes, as in such
+situations always predicting the dominant class achieves a deceptively
+high score.
+
+Other metrics, such as the AUC (Area Under the Curve, for the ROC: the
+Receiver Operating Characteristic), can be used::
+
+ >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv, scoring='roc_auc') # doctest: +SKIP
+
+.. seealso::
+
+ the `list of scoring options
+ `_
+
+Measuring the chance level
+..........................
+
+**Dummy estimators**: The simplest way to measure prediction performance
+at chance is to use a *"dummy"* classifier,
+:class:`sklearn.dummy.DummyClassifier` (purely random)::
+
+ >>> from sklearn.dummy import DummyClassifier
+ >>> null_cv_scores = cross_val_score(DummyClassifier(), fmri_masked, target, cv=cv) # doctest: +SKIP
+
+**Permutation testing**: A more controlled way, but slower, is to do
+permutation testing on the labels, with
+:func:`sklearn.model_selection.permutation_test_score`::
+
+ >>> from sklearn.model_selection import permutation_test_score
+ >>> null_cv_scores = permutation_test_score(svc, fmri_masked, target, cv=cv) # doctest: +SKIP
+
+|
+
+.. topic:: **Putting it all together**
+
+ The :ref:`ROI-based decoding example
+ ` does a decoding analysis per
+ mask, giving the f1-score of the prediction for each object.
+
+ It uses all the notions presented above, with a ``for`` loop to iterate
+ over masks and categories and Python dictionaries to store the
+ scores.
+
+
+.. figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_haxby_masks_001.png
+ :target: ../auto_examples/01_plotting/plot_haxby_masks.html
+ :scale: 55
+ :align: left
+
+ Masks
+
+
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_full_analysis_001.png
+ :target: ../auto_examples/02_decoding/plot_haxby_full_analysis.html
+ :scale: 70
+ :align: left
+
+
+
+Visualizing the decoder's weights
+---------------------------------
+
+We can visualize the weights of the decoder:
+
+- we first invert the masking operation, to retrieve a 3D brain volume
+ of the SVC's weights.
+- we then create a figure and plot the first EPI image as a background
+- finally we plot the SVC's weights after masking out the zero values
+
+
+.. figure:: ../auto_examples/images/sphx_glr_plot_decoding_tutorial_002.png
+ :target: ../auto_examples/plot_decoding_tutorial.html
+ :scale: 65
+
+.. note::
+
+ Full code for the above can be found on
+ :ref:`sphx_glr_auto_examples_plot_decoding_tutorial.py`
+
+
+.. seealso::
+
+ * :ref:`plotting`
+
+
+Decoding without a mask: Anova-SVM
+==================================
+
+Dimension reduction with feature selection
+------------------------------------------
+
+If we do not start from a mask of the relevant regions, there is a very
+large number of voxels and not all are useful for
+face vs cat prediction. We thus add a `feature selection
+`_
+procedure. The idea is to select the `k` voxels most correlated to the
+task.
+
+For this, we need to import the :mod:`sklearn.feature_selection` module and use
+:func:`sklearn.feature_selection.f_classif`, a simple F-score
+based feature selection (a.k.a.
+`Anova `_),
+that we will put before the SVC in a `pipeline`
+(:class:`sklearn.pipeline.Pipeline`):
+
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_anova_svm.py
+ :start-after: # Build the decoder
+ :end-before: # Visualize the results
+
+
+
+We can use our ``anova_svc`` object exactly as we were using our ``svc``
+object previously.
+
+Visualizing the results
+-----------------------
+
+To visualize the results, we need to:
+
+- first get the support vectors of the SVC and invert the feature
+ selection mechanism
+- then, as before, inverse the masking process to retrieve the weights
+ and plot them.
+
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_anova_svm.py
+ :start-after: # Visualize the results
+ :end-before: # Saving the results as a Nifti file may also be important
+
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_anova_svm_001.png
+ :target: ../auto_examples/02_decoding/plot_haxby_anova_svm.html
+ :scale: 65
+
+.. seealso::
+
+ * :ref:`plotting`
+
+
+.. topic:: **Final script**
+
+ The complete script to do an SVM-Anova analysis can be found as
+ :ref:`an example `.
+
+
+.. seealso::
+
+ * :ref:`space_net`
+ * :ref:`searchlight`
+
+
+Going further with scikit-learn
+===============================
+
+We have seen a very simple analysis with scikit-learn, but it may be
+interesting to explore the `wide variety of supervised learning
+algorithms in the scikit-learn
+`_.
+
+Changing the prediction engine
+------------------------------
+
+.. for doctest:
+ >>> from sklearn.feature_selection import SelectKBest, f_classif
+ >>> from sklearn.svm import LinearSVC
+ >>> feature_selection = SelectKBest(f_classif, k=4)
+
+
+We now see how one can easily change the prediction engine, if needed.
+We can try Fisher's `Linear Discriminant Analysis (LDA)
+`_
+
+Import the module::
+
+ >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis # doctest: +SKIP
+
+Construct the new estimator object and use it in a pipeline::
+
+ >>> from sklearn.pipeline import Pipeline
+ >>> lda = LinearDiscriminantAnalysis() # doctest: +SKIP
+ >>> anova_lda = Pipeline([('anova', feature_selection), ('LDA', lda)]) # doctest: +SKIP
+
+.. note::
+ Import the Linear Discriminant Analysis class with ``from sklearn.lda import LDA``
+ if you are using scikit-learn older than version 0.17.
+
+and recompute the cross-validation score::
+
+ >>> cv_scores = cross_val_score(anova_lda, fmri_masked, target, cv=cv, verbose=1) # doctest: +SKIP
+ >>> classification_accuracy = np.mean(cv_scores) # doctest: +SKIP
+ >>> n_conditions = len(set(target)) # number of target classes
+ >>> print("Classification accuracy: %.4f / Chance Level: %.4f" % \
+ ... (classification_accuracy, 1. / n_conditions)) # doctest: +SKIP
+ Classification accuracy: 0.7846 / Chance Level: 0.5000
+
+
+Changing the feature selection
+------------------------------
+Let's start by defining a linear SVM as a first classifier::
+
+ >>> clf = LinearSVC()
+
+
+Let's say that you want a more sophisticated feature selection, for example a
+`Recursive Feature Elimination (RFE)
+`_
+
+Import the module::
+
+ >>> from sklearn.feature_selection import RFE
+
+Construct your new fancy selection::
+
+ >>> rfe = RFE(estimator=SVC(kernel='linear', C=1.), n_features_to_select=50, step=0.25)
+
+and create a new pipeline, composing the two classifiers `rfe` and `clf`::
+
+ >>> rfe_svc = Pipeline([('rfe', rfe), ('svc', clf)])
+
+and recompute the cross-validation score::
+
+ >>> cv_scores = cross_val_score(rfe_svc, fmri_masked, target, cv=cv,
+ ... n_jobs=-1, verbose=1) # doctest: +SKIP
+
+But, be aware that this can take *A WHILE*...
+
+|
+
+.. seealso::
+
+ * The `scikit-learn documentation `_
+ has very detailed explanations on a large variety of estimators and
+ machine learning techniques. To become better at decoding, you need
+ to study it.
diff --git a/doc/decoding/decoding_simulated.rst b/doc/decoding/decoding_simulated.rst
deleted file mode 100644
index 3377824fcd..0000000000
--- a/doc/decoding/decoding_simulated.rst
+++ /dev/null
@@ -1,113 +0,0 @@
-.. _decoding_simulated:
-
-==========================
-Decoding on simulated data
-==========================
-
-.. topic:: Objectives
-
- 1. Understand linear estimators (SVM, elastic net, ridge)
- 2. Use the scikit-learn's linear models
-
-Simple NeuroImaging-like simulations
-=====================================
-
-We simulate data as in
-`Michel et al. 2012 `_ :
-a linear model with a random design matrix **X**:
-
-.. math::
-
- \mathbf{y} = \mathbf{X} \mathbf{w} + \mathbf{e}
-
-* **w**: the weights of the linear model correspond to the predictive
- brain regions. Here, in the simulations, they form a 3D image with 5, four
- of which in opposite corners and one in the middle.
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_001.png
- :target: auto_examples/decoding/plot_simulated_data.html
- :align: center
- :scale: 90
-
-* **X**: the design matrix corresponds to the observed fMRI data. Here
- we simulate random normal variables and smooth them as in Gaussian
- fields.
-
-* **e** is random normal noise.
-
-We provide a black-box function to create the data in the
-:ref:`example script `.
-
-
-Running various estimators
-===========================
-
-We can now run different estimators and look at their prediction score,
-as well as the feature maps that they recover. Namely, we will use
-
-* A support vector regression (`SVM
- `_)
-
-* An `elastic-net
- `_
-
-* A *Bayesian* ridge estimator, i.e. a ridge estimator that sets its
- parameter according to a metaprior
-
-* A ridge estimator that set its parameter by cross-validation
-
-Note that the `RidgeCV` and the `ElasticNetCV` have names ending in `CV`
-that stands for `cross-validation`: in the list of possible `alpha`
-values that they are given, they choose the best by cross-validation.
-
-As the estimators expose a fairly consistent API, we can all fit them in
-a for loop: they all have a `fit` method for fitting the data, a `score`
-method to retrieve the prediction score, and because they are all linear
-models, a `coef_` attribute that stores the coefficients **w** estimated
-(see the :ref:`code of the simulation
-`).
-
-.. note:: All parameters estimated from the data end with an underscore
-
-.. |estimator1| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_002.png
- :target: ../auto_examples/decoding/plot_simulated_data.html
- :scale: 60
-
-.. |estimator2| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_003.png
- :target: ../auto_examples/decoding/plot_simulated_data.html
- :scale: 60
-
-.. |estimator3| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_004.png
- :target: ../auto_examples/decoding/plot_simulated_data.html
- :scale: 60
-
-.. |estimator4| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_005.png
- :target: ../auto_examples/decoding/plot_simulated_data.html
- :scale: 60
-
-|estimator1| |estimator2| |estimator3| |estimator4|
-
-.. topic:: **Exercise**
- :class: green
-
- Use recursive feature elimination (RFE) with the SVM::
-
- >>> from sklearn.feature_selection import RFE
-
- Read the object's documentation to find out how to use RFE.
-
- **Performance tip**: increase the `step` parameter, or it will be very
- slow.
-
-
-.. topic:: **Source code to run the simulation**
-
- The full file to run the simulation can be found in
- :ref:`sphx_glr_auto_examples_decoding_plot_simulated_data.py`
-
-.. seealso::
-
- * :ref:`space_net`
- * :ref:`searchlight`
-
-
diff --git a/doc/decoding/decoding_tutorial.rst b/doc/decoding/decoding_tutorial.rst
deleted file mode 100644
index 72b434cb83..0000000000
--- a/doc/decoding/decoding_tutorial.rst
+++ /dev/null
@@ -1,510 +0,0 @@
-.. for doctests to run, we need to define variables that are define in
- the literal includes
- >>> import numpy as np
- >>> from sklearn import datasets
- >>> iris = datasets.load_iris()
- >>> fmri_masked = iris.data
- >>> target = iris.target
- >>> session = np.ones_like(target)
- >>> n_samples = len(target)
-
-.. _decoding_tutorial:
-
-=====================
-A decoding tutorial
-=====================
-
-This page is a decoding tutorial articulated on the analysis of the Haxby
-2001 dataset. It shows how to predict from brain activity images the
-stimuli that the subject is viewing.
-
-
-.. contents:: **Contents**
- :local:
- :depth: 1
-
-
-Data loading and preparation
-================================
-
-The Haxby 2001 experiment
--------------------------
-
-Subjects are presented visual stimuli from different categories. We are
-going to predict which category the subject is seeing from the fMRI
-activity recorded in masks of the ventral stream. Significant prediction
-shows that the signal in the region contains information on the
-corresponding category.
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_stimuli_004.png
- :target: ../auto_examples/decoding/plot_haxby_stimuli.html
- :scale: 30
- :align: left
-
- Face stimuli
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_stimuli_002.png
- :target: ../auto_examples/decoding/plot_haxby_stimuli.html
- :scale: 30
- :align: left
-
- Cat stimuli
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_haxby_masks_001.png
- :target: ../auto_examples/manipulating_visualizing/plot_haxby_masks.html
- :scale: 30
- :align: left
-
- Masks
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_full_analysis_001.png
- :target: ../auto_examples/decoding/plot_haxby_full_analysis.html
- :scale: 35
- :align: left
-
- Decoding scores per mask
-
-
-Loading the data into Python
------------------------------
-
-Launch IPython::
-
- ipython --matplotlib
-
-First, load the data using nilearn data downloading function,
-:func:`nilearn.datasets.fetch_haxby`:
-
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # Retrieve and load the Haxby dataset
- :end-before: # Load the behavioral labels
-
-The ``haxby_dataset`` object has several entries that contain paths to the files
-downloaded on the disk::
-
- >>> print(haxby_dataset) # doctest: +SKIP
- {'anat': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/anat.nii.gz'],
- 'func': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/bold.nii.gz'],
- 'mask_face': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/mask8b_face_vt.nii.gz'],
- 'mask_face_little': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/mask8_face_vt.nii.gz'],
- 'mask_house': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/mask8b_house_vt.nii.gz'],
- 'mask_house_little': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/mask8_house_vt.nii.gz'],
- 'mask_vt': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/mask4_vt.nii.gz'],
- 'session_target': ['/home/varoquau/dev/nilearn/nilearn_data/haxby2001/subj1/labels.txt']}
-
-
-We load the behavioral labels from the corresponding text file and limit
-our analysis to the `face` and `cat` conditions:
-
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # Load the behavioral labels
- :end-before: # Prepare the data: apply the mask
-
-.. currentmodule:: nilearn.input_data
-
-Then we prepare the fMRI data: we use the :class:`NiftiMasker` to apply the
-`mask_vt` mask to the 4D fMRI data, so that its shape becomes (n_samples,
-n_features) (see :ref:`mask_4d_2_3d` for a discussion on using masks).
-
-
-.. note::
-
- seemingly minor data preparation can matter a lot for the final score,
- for instance standardizing the data.
-
-
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # Prepare the data: apply the mask
- :end-before: # The decoding
-
-.. seealso::
-
- * :ref:`loading_data`
- * :ref:`masking`
-
-
-
-Performing the decoding analysis
-====================================
-
-The prediction engine
------------------------
-
-An estimator object
-....................
-
-To perform decoding we construct an estimator, predicting a condition
-label **y** given a set **X** of images.
-
-We use here a simple `Support Vector Classification
-`_ (or SVC) with a
-linear kernel. We first import the correct module from scikit-learn and we
-define the classifier, :class:`sklearn.svm.SVC`:
-
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # Here we use a Support Vector Classification, with a linear kernel
- :end-before: # And we run it
-
-
-The documentation of the object details all parameters. In IPython, it
-can be displayed as follows::
-
- In [10]: svc?
- Type: SVC
- Base Class:
- String Form:
- SVC(kernel=linear, C=1.0, probability=False, degree=3, coef0=0.0, eps=0.001,
- cache_size=100.0, shrinking=True, gamma=0.0)
- Namespace: Interactive
- Docstring:
- C-Support Vector Classification.
- Parameters
- ----------
- C : float, optional (default=1.0)
- penalty parameter C of the error term.
- ...
-
-.. seealso::
-
- the `scikit-learn documentation on SVMs
- `_
-
-
-Applying it to data: fit (train) and predict (test)
-....................................................
-
-In scikit-learn, the prediction objects have two important methods:
-
-- a *fit* function that "learns" the parameters of the model from the data.
- Thus, we need to give some training data to *fit*.
-- a *predict* function that "predicts" a target from new data.
- Here, we just have to give the new set of images (as the target should be
- unknown):
-
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # And we run it
- :end-before: # Compute prediction scores using cross-validation
-
-.. warning::
-
- **Do not predict on data used by the fit:** the prediction that we obtain here
- is too good to be true (see next paragraph). Here we are just doing a sanity
- check.
-
-.. for doctests (smoke testing):
- >>> from sklearn.svm import SVC
- >>> svc = SVC()
-
-Measuring prediction performance
----------------------------------
-
-Cross-validation
-.................
-
-However, the last analysis is *wrong*, as we have learned and tested on
-the same set of data. We need to use cross-validation to split the data
-into different sets, called "folds", in a `K-Fold strategy
-`_.
-
-We use a cross-validation object,
-:class:`sklearn.cross_validation.KFold`, that simply generates the
-indices of the folds within a loop.
-
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # Compute prediction scores using cross-validation
- :end-before: print(cv_scores)
-
-
-.. for doctests:
- >>> cv = 2
-
-There is a specific function,
-:func:`sklearn.cross_validation.cross_val_score` that computes for you
-the score for the different folds of cross-validation::
-
- >>> from sklearn.cross_validation import cross_val_score
- >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv) # doctest: +SKIP
-
-You can speed up the computation by using n_jobs=-1, which will spread
-the computation equally across all processors (but will probably not work
-under Windows)::
-
- >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv, n_jobs=-1, verbose=10) #doctest: +SKIP
-
-**Prediction accuracy**: We can take a look at the results of the
-*cross_val_score* function::
-
- >>> print(cv_scores) # doctest: +SKIP
- [0.72727272727272729, 0.46511627906976744, 0.72093023255813948, 0.58139534883720934, 0.7441860465116279]
-
-This is simply the prediction score for each fold, i.e. the fraction of
-correct predictions on the left-out data.
-
-Choosing a good cross-validation strategy
-.........................................
-
-There are many cross-validation strategies possible, including K-Fold or
-leave-one-out. When choosing a strategy, keep in mind that:
-
-* The test set should be as little correlated as possible with the train
- set
-* The test set needs to have enough samples to enable a good measure of
- the prediction error (a rule of thumb is to use 10 to 20% of the data).
-
-In this regard, leave-one-out is often one of the worst options.
-
-Here, in the Haxby example, we are going to leave a session out, in order
-to have a test set independent from the train set. For this, we are going
-to use the session label, present in the behavioral data file, and
-:class:`sklearn.cross_validation.LeaveOneLabelOut`::
-
- >>> from sklearn.cross_validation import LeaveOneLabelOut
- >>> session_label = labels['chunks'] # doctest: +SKIP
- >>> # We need to remember to remove the rest conditions
- >>> session_label = session_label[condition_mask] # doctest: +SKIP
- >>> cv = LeaveOneLabelOut(labels=session_label) # doctest: +SKIP
- >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv) # doctest: +SKIP
- >>> print(cv_scores) # doctest: +SKIP
- [ 1. 0.61111111 0.94444444 0.88888889 0.88888889 0.94444444
- 0.72222222 0.94444444 0.5 0.72222222 0.5 0.55555556]
-
-.. topic:: **Exercise**
- :class: green
-
- Compute the mean prediction accuracy using *cv_scores*
-
-.. topic:: Solution
-
- >>> classification_accuracy = np.mean(cv_scores) # doctest: +SKIP
- >>> classification_accuracy # doctest: +SKIP
- 0.76851851851851849
-
-We have a total prediction accuracy of 77% across the different sessions.
-
-Choice of the prediction accuracy measure
-..........................................
-
-The default metric used for measuring performance is the accuracy score,
-i.e. the fraction of correct predictions. It is not always a sensible
-metric, especially with very imbalanced classes, as in such situations
-always predicting the dominant class yields a high accuracy without any
-real predictive power.
-
-Other metrics, such as the f1-score, can be used::
-
- >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=cv, scoring='f1') # doctest: +SKIP
-
-.. seealso::
-
- the `list of scoring options
- `_
-
-Measuring the chance level
-...........................
-
-**Dummy estimators**: The simplest way to measure prediction performance
-at chance is to use a dummy classifier,
-:class:`sklearn.dummy.DummyClassifier`::
-
- >>> from sklearn.dummy import DummyClassifier
- >>> null_cv_scores = cross_val_score(DummyClassifier(), fmri_masked, target, cv=cv) # doctest: +SKIP
-
-**Permutation testing**: A more controlled way, but slower, is to do
-permutation testing on the labels, with
-:func:`sklearn.cross_validation.permutation_test_score`::
-
- >>> from sklearn.cross_validation import permutation_test_score
- >>> null_cv_scores = permutation_test_score(svc, fmri_masked, target, cv=cv) # doctest: +SKIP
-
-|
-
-.. topic:: **Putting it all together**
-
- The :ref:`ROI-based decoding example
- ` does a decoding analysis per
- mask, giving the f1-score of the prediction for each object.
-
- It uses all the notions presented above, with a ``for`` loop to iterate
- over masks and categories and Python dictionaries to store the
- scores.
-
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_haxby_masks_001.png
- :target: ../auto_examples/manipulating_visualizing/plot_haxby_masks.html
- :scale: 55
- :align: left
-
- Masks
-
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_full_analysis_001.png
- :target: ../auto_examples/decoding/plot_haxby_full_analysis.html
- :scale: 70
- :align: left
-
-
-
-Visualizing the decoder's weights
----------------------------------
-
-We can visualize the weights of the decoder:
-
-- we first invert the masking operation, to retrieve a 3D brain volume
- of the SVC's weights.
-- we then create a figure and plot the first EPI image as a background
-- finally we plot the SVC's weights after masking out the zero values
-
-.. literalinclude:: ../../examples/plot_haxby_simple.py
- :start-after: # Retrieve the discriminating weights and save them
- :end-before: # Visualize the discriminating weights over the mean EPI
-
-.. figure:: ../auto_examples/images/sphx_glr_plot_haxby_simple_001.png
- :target: ../auto_examples/plot_haxby_simple.html
- :scale: 65
-
-
-.. seealso::
-
- * :ref:`plotting`
-
-
-Decoding without a mask: Anova-SVM
-===================================
-
-Dimension reduction with feature selection
--------------------------------------------
-
-If we do not start from a mask of the relevant regions, there is a very
-large number of voxels and not all are useful for
-face vs cat prediction. We thus add a `feature selection
-`_
-procedure. The idea is to select the `k` voxels most correlated to the
-task.
-
-For this, we need to import the :mod:`sklearn.feature_selection` module and use
-:func:`sklearn.feature_selection.f_classif`, a simple F-score
-based feature selection (a.k.a.
-`Anova `_),
-that we will put before the SVC in a `pipeline`
-(:class:`sklearn.pipeline.Pipeline`):
-
-.. literalinclude:: ../../examples/decoding/plot_haxby_anova_svm.py
- :start-after: # Build the decoder
- :end-before: # Visualize the results
-
-
-
-We can use our ``anova_svc`` object exactly as we were using our ``svc``
-object previously.
-
-Visualizing the results
--------------------------
-
-To visualize the results, we need to:
-
-- first get the support vectors of the SVC and invert the feature
- selection mechanism
-- then, as before, invert the masking process to retrieve the weights
- and plot them.
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_anova_svm_001.png
- :target: ../auto_examples/decoding/plot_haxby_anova_svm.html
- :align: right
- :scale: 65
-
-.. literalinclude:: ../../examples/decoding/plot_haxby_anova_svm.py
- :start-after: # Visualize the results
- :end-before: # Obtain prediction scores via cross validation
-
-.. seealso::
-
- * :ref:`plotting`
-
-
-.. topic:: **Final script**
-
- The complete script to do an SVM-Anova analysis can be found as
- :ref:`an example `.
-
-
-.. seealso::
-
- * :ref:`decoding_simulated`
- * :ref:`space_net`
- * :ref:`searchlight`
-
-
-Going further with scikit-learn
-===================================
-
-We have seen a very simple analysis with scikit-learn, but it may be
-interesting to explore the `wide variety of supervised learning
-algorithms in scikit-learn
-`_.
-
-Changing the prediction engine
---------------------------------
-
-.. for doctest:
- >>> from sklearn.feature_selection import SelectKBest, f_classif
- >>> from sklearn.svm import LinearSVC
- >>> feature_selection = SelectKBest(f_classif, k=4)
- >>> clf = LinearSVC()
-
-We now see how one can easily change the prediction engine, if needed.
-We can try Fisher's `Linear Discriminant Analysis (LDA)
-`_.
-
-Import the module::
-
- >>> from sklearn.lda import LDA
-
-Construct the new estimator object and use it in a pipeline::
-
- >>> from sklearn.pipeline import Pipeline
- >>> lda = LDA()
- >>> anova_lda = Pipeline([('anova', feature_selection), ('LDA', lda)])
-
-and recompute the cross-validation score::
-
- >>> cv_scores = cross_val_score(anova_lda, X, y, cv=cv, verbose=1) # doctest: +SKIP
- >>> classification_accuracy = np.mean(cv_scores) # doctest: +SKIP
- >>> print("Classification accuracy: %.4f / Chance Level: %.4f" % \
- ... (classification_accuracy, 1. / n_conditions)) # doctest: +SKIP
- Classification accuracy: 1.0000 / Chance level: 0.5000
-
-
-Changing the feature selection
-------------------------------
-
-Let's say that you want a more sophisticated feature selection, for example a
-`Recursive Feature Elimination (RFE)
-`_.
-
-Import the module::
-
- >>> from sklearn.feature_selection import RFE
-
-Construct your new fancy selection::
-
- >>> rfe = RFE(SVC(kernel='linear', C=1.), 50, step=0.25)
-
-and create a new pipeline::
-
- >>> rfe_svc = Pipeline([('rfe', rfe), ('svc', clf)])
-
-and recompute the cross-validation score::
-
- >>> cv_scores = cross_val_score(rfe_svc, X, y, cv=cv, n_jobs=-1,
- ... verbose=1) # doctest: +SKIP
-
-But, be aware that this can take A WHILE...
-
-|
-
-.. seealso::
-
- * The `scikit-learn documentation `_
- has very detailed explanations on a large variety of estimators and
- machine learning techniques. To become better at decoding, you need
- to study it.
-
diff --git a/doc/decoding/estimator_choice.rst b/doc/decoding/estimator_choice.rst
index 23181b794b..78490a0f6b 100644
--- a/doc/decoding/estimator_choice.rst
+++ b/doc/decoding/estimator_choice.rst
@@ -10,7 +10,7 @@ It is slightly oriented towards a *decoding* application, that is the
prediction of external variables such as behavior or clinical traits from
brain images. For a didactic introduction to decoding with nilearn, see
the :ref:`dedicated section of the nilearn documentation
-`.
+`.
.. contents:: **Contents**
:local:
@@ -68,7 +68,7 @@ There are two noteworthy strategies:
The "One vs One" strategy is more computationally costly than the "One
vs All". The former scales as the square of the number of classes,
-whereas the former is linear with the number of classes.
+whereas the latter is linear with the number of classes.
.. seealso::
@@ -81,18 +81,18 @@ whereas the former is linear with the number of classes.
:func:`sklearn.metrics.confusion_matrix` is a useful tool to
understand the classifier's errors in a multiclass problem.
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_multiclass_001.png
- :target: ../auto_examples/decoding/plot_haxby_multiclass.html
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_multiclass_001.png
+ :target: ../auto_examples/02_decoding/plot_haxby_multiclass.html
:align: left
:scale: 60
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_multiclass_002.png
- :target: ../auto_examples/decoding/plot_haxby_multiclass.html
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_multiclass_002.png
+ :target: ../auto_examples/02_decoding/plot_haxby_multiclass.html
:align: left
:scale: 40
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_multiclass_003.png
- :target: ../auto_examples/decoding/plot_haxby_multiclass.html
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_multiclass_003.png
+ :target: ../auto_examples/02_decoding/plot_haxby_multiclass.html
:align: left
:scale: 40
@@ -109,13 +109,13 @@ will have bumps and peaks due to this noise. These will not generalize to
new data and chances are that the corresponding choice of parameter will
not perform as well on new data.
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_grid_search_001.png
- :target: ../auto_examples/decoding/plot_haxby_grid_search.html
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_grid_search_001.png
+ :target: ../auto_examples/02_decoding/plot_haxby_grid_search.html
:align: center
:scale: 60
With scikit-learn nested cross-validation is done via
-:class:`sklearn.grid_search.GridSearchCV`. It is unfortunately time
+:class:`sklearn.model_selection.GridSearchCV`. It is unfortunately time
consuming, but the ``n_jobs`` argument can spread the load on multiple
CPUs.
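+
+A minimal sketch of such a nested cross-validation (assuming ``X`` and
+``y`` hold the masked data and the target labels)::
+
+ >>> from sklearn.model_selection import GridSearchCV
+ >>> from sklearn.svm import SVC
+ >>> param_grid = {'C': [0.1, 1., 10.]}
+ >>> grid = GridSearchCV(SVC(kernel='linear'), param_grid, cv=5, n_jobs=-1) # doctest: +SKIP
+ >>> grid.fit(X, y) # doctest: +SKIP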
@@ -125,7 +125,7 @@ CPUs.
* `The scikit-learn documentation on parameter selection
`_
- * The example :ref:`sphx_glr_auto_examples_decoding_plot_haxby_grid_search.py`
+ * The example :ref:`sphx_glr_auto_examples_02_decoding_plot_haxby_grid_search.py`
Different linear models
=======================
@@ -163,8 +163,8 @@ Here we apply a few linear models to fMRI data:
in every situation.
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_001.png
- :target: ../auto_examples/decoding/plot_haxby_different_estimators.html
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_001.png
+ :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html
:align: center
:scale: 80
@@ -181,52 +181,50 @@ the other, although the prediction scores are fairly similar. In other
terms, a well-performing estimator in terms of prediction error gives us
little guarantee on the brain maps.
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_007.png
- :target: ../auto_examples/decoding/plot_haxby_different_estimators.html
- :align: left
+.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_007.png
+ :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html
:scale: 70
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_008.png
- :target: ../auto_examples/decoding/plot_haxby_different_estimators.html
- :align: left
+.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_008.png
+ :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html
:scale: 70
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_005.png
- :target: ../auto_examples/decoding/plot_haxby_different_estimators.html
- :align: left
+.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_005.png
+ :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html
:scale: 70
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_006.png
- :target: ../auto_examples/decoding/plot_haxby_different_estimators.html
- :align: left
+.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_006.png
+ :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html
:scale: 70
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_004.png
- :target: ../auto_examples/decoding/plot_haxby_different_estimators.html
- :align: left
+.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_004.png
+ :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html
:scale: 70
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_002.png
- :target: ../auto_examples/decoding/plot_haxby_different_estimators.html
- :align: left
+.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_002.png
+ :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html
:scale: 70
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_003.png
- :target: ../auto_examples/decoding/plot_haxby_different_estimators.html
- :align: left
+.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_003.png
+ :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html
:scale: 70
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_009.png
- :target: ../auto_examples/decoding/plot_haxby_different_estimators.html
- :align: left
+.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_009.png
+ :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html
:scale: 70
-
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_010.png
- :target: ../auto_examples/decoding/plot_haxby_different_estimators.html
- :align: left
+.. image:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_010.png
+ :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html
:scale: 70
+
.. seealso::
* :ref:`space_net`
+|
+
+.. topic:: **Decoding on simulated data**
+
+ Simple simulations may be useful to understand the behavior of a given
+ decoder on data. In particular, simulations enable us to set the true
+ weight maps and compare them to the ones retrieved by decoders. A full
+ example running simulations and discussing them can be found in
+ :ref:`sphx_glr_auto_examples_02_decoding_plot_simulated_data.py`.
+ Simulated data cannot easily mimic all properties of brain data. An
+ important aspect, however, is its spatial structure, which we create in
+ the simulations.
+
+
diff --git a/doc/decoding/index.rst b/doc/decoding/index.rst
index d11838fb5a..7a3b371b22 100644
--- a/doc/decoding/index.rst
+++ b/doc/decoding/index.rst
@@ -21,9 +21,8 @@ predicting an output value.
.. toctree::
- decoding_tutorial.rst
+ decoding_intro.rst
estimator_choice.rst
- decoding_simulated.rst
space_net.rst
searchlight.rst
diff --git a/doc/decoding/searchlight.rst b/doc/decoding/searchlight.rst
index bc4267ab95..a5a8ddba97 100644
--- a/doc/decoding/searchlight.rst
+++ b/doc/decoding/searchlight.rst
@@ -25,7 +25,7 @@ Loading
Fetching the data from internet and loading it can be done with the
provided functions (see :ref:`loading_data`):
-.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py
:start-after: # Load Haxby dataset
:end-before: # Restrict to faces and houses
@@ -37,9 +37,9 @@ For this example we need:
- to put X in the form *n_samples* x *n_features*
- compute a mean image for visualization background
- limit our analysis to the `face` and `house` conditions
- (like in the :ref:`decoding tutorial `)
+ (like in the :ref:`introduction to decoding `)
-.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py
:start-after: # Restrict to faces and houses
:end-before: # Prepare masks
@@ -62,7 +62,7 @@ be used here :
back of the brain. *mask_img* will ensure that no value outside the brain is
taken into account when iterating with the sphere.
-.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py
:start-after: # brain to speed up computation)
:end-before: # Searchlight computation
@@ -99,7 +99,7 @@ validation method that does not take too much time.
*K*-Fold along with *K* = 4 is a
good compromise between running time and quality.
-.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py
:start-after: # set once and the others as learning sets
:end-before: import nilearn.decoding
@@ -109,11 +109,11 @@ Running Searchlight
Running :class:`SearchLight` is straightforward now that everything is set.
The only
parameter left is the radius of the ball that will run through the data.
-Kriegskorte et al. use a 4mm radius because it yielded the best detection
+Kriegeskorte et al. use a 5.6mm radius because it yielded the best detection
performance in their simulation.
-.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py
- :start-after: import nilearn.decoding
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py
+ :start-after: cv = KFold(n_splits=4)
:end-before: # F-scores computation
Visualization
@@ -127,12 +127,12 @@ background. We can see here that voxels in the visual cortex contain
information to distinguish pictures shown to the volunteers, which was the
expected result.
-.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py
:start-after: # Visualization
:end-before: # F_score results
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_searchlight_001.png
- :target: ../auto_examples/decoding/plot_haxby_searchlight.html
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_searchlight_001.png
+ :target: ../auto_examples/02_decoding/plot_haxby_searchlight.html
:align: center
:scale: 80
@@ -149,11 +149,11 @@ parametric tests (F-tests or t-tests).
Here we compute the *p-values* of the voxels [1]_.
To display the results, we use the negative log of the p-value.
-.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py
:start-after: # F_score results
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_searchlight_002.png
- :target: ../auto_examples/decoding/plot_haxby_searchlight.html
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_searchlight_002.png
+ :target: ../auto_examples/02_decoding/plot_haxby_searchlight.html
:align: center
:scale: 80
@@ -185,7 +185,7 @@ is its associated p-value. The
:func:`nilearn.mass_univariate.permuted_ols` function returns the
p-values computed with a permutation test.
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_haxby_mass_univariate.py
+.. literalinclude:: ../../examples/05_advanced/plot_haxby_mass_univariate.py
:start-after: # Perform massively univariate analysis with permuted OLS
:end-before: neg_log_pvals_unmasked
@@ -206,8 +206,8 @@ every voxel so that the F-statistics are comparable. This correction
strategy is applied in nilearn
:func:`nilearn.mass_univariate.permuted_ols` function.
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_haxby_mass_univariate_001.png
- :target: ../auto_examples/manipulating_visualizing/plot_haxby_searchlight.html
+.. figure:: ../auto_examples/05_advanced/images/sphx_glr_plot_haxby_mass_univariate_001.png
+ :target: ../auto_examples/05_advanced/plot_haxby_mass_univariate.html
:align: center
:scale: 60
@@ -227,7 +227,7 @@ on the original (non-permuted) data. Thus, we can perform two one-sided tests
(a given contrast and its opposite) for the price of one single run.
The example results can be interpreted as follows: viewing faces significantly
activates the Fusiform Face Area as compared to viewing houses, while viewing
-houses does not reveals significant supplementary activations as compared to
+houses does not reveal significant supplementary activations as compared to
viewing faces.
diff --git a/doc/decoding/space_net.rst b/doc/decoding/space_net.rst
index 5f6e7a3660..8c5aebd714 100644
--- a/doc/decoding/space_net.rst
+++ b/doc/decoding/space_net.rst
@@ -10,24 +10,26 @@
.. _space_net:
-=====================================
-Multivariate decoding with SpaceNet
-=====================================
+==========================================================
+SpaceNet: decoding with spatial structure for better maps
+==========================================================
The SpaceNet decoder
---------------------
-SpaceNet implements a suite of multi-variate priors which for improved
-brain decoding. It uses priors like TV (Total Variation) `[Michel et
-al. 2011] `_, TV-L1
-`[Baldassarre et al. 2012]
-`_,
-`[Gramfort et al. 2013] `_
-(option: penalty="tvl1"), and Graph-Net `[Hebiri et al. 2011]
-`_ (known
-as GraphNet in neuroimaging `[Grosenick et al. 2013]
-`_) (option:
-penalty="graph-net") to regularize classification and regression
-problems in brain imaging. The result are brain maps which are both
+=====================
+
+SpaceNet implements spatial penalties which improve brain decoding power as well as decoder maps:
+
+* penalty="tvl1": priors inspired from TV (Total Variation) `[Michel et
+ al. 2011] `_, TV-L1
+ `[Baldassarre et al. 2012]
+ `_,
+ `[Gramfort et al. 2013] `_ (option: ),
+
+* penalty="graph-net": GraphNet prior `[Grosenick et al. 2013]
+ `_)
+
+These regularize classification and regression
+problems in brain imaging. The results are brain maps which are both
sparse (i.e. regression coefficients are zero everywhere, except at
predictive voxels) and structured (blobby). The superiority of TV-L1
over methods without structured priors like the Lasso, SVM, ANOVA,
@@ -35,18 +37,11 @@ Ridge, etc. for yielding more interpretable maps and improved
prediction scores is now well established `[Baldassarre et al. 2012]
`_,
`[Gramfort et al. 2013] `_,
-`[Grosenick et al. 2013] `_.
+`[Grosenick et al. 2013] `_.
-The following table summarizes the parameter(s) used to activate a
-given penalty:
-
-- TV-L1: `penalty="tv-l1"`
-- Graph-Net: `penalty="graph-net"` (this is the default prior in
- SpaceNet)
-
-Note that TV-L1 prior leads to a hard optimization problem, and so can
-be slow to run. Under the hood, a few heuristics are used to make
+Note that the TV-L1 prior leads to a difficult optimization problem, and so
+can be slow to run. Under the hood, a few heuristics are used to make
things a bit faster. These include:
- Feature preprocessing, where an F-test is used to eliminate
@@ -55,7 +50,7 @@ things a bit faster. These include:
- Continuation is used along the regularization path, where the
solution of the optimization problem for a given value of the
regularization parameter `alpha` is used as initialization
- of for next the regularization (smaller) value on the regularization
+ for the next regularization (smaller) value on the regularization
grid.
**Implementation:** See `[Dohmatob et al. 2015 (PRNI)]
@@ -63,40 +58,44 @@ things a bit faster. These include:
et al. 2014 (PRNI)] `_ for
technical details regarding the implementation of SpaceNet.
-Mixed gambles
-.............
+Empirical comparisons
+=====================
+
+
+Comparison on mixed gambles study
+----------------------------------
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_mixed_gambles_space_net_001.png
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_mixed_gambles_space_net_001.png
:align: right
:scale: 60
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_mixed_gambles_space_net_002.png
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_mixed_gambles_space_net_002.png
:scale: 60
.. topic:: **Code**
The complete script can be found
- :ref:`here `.
+ :ref:`here `.
-Haxby
-.....
+Comparison on Haxby study
+--------------------------
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_space_net_001.png
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_space_net_001.png
:align: right
:scale: 60
-.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_space_net_002.png
+.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_space_net_002.png
:scale: 60
.. topic:: **Code**
The complete script can be found
- :ref:`here `.
+ :ref:`here `.
.. seealso::
- * :ref:`Age prediction on OASIS dataset with SpaceNet `.
+ * :ref:`Age prediction on OASIS dataset with SpaceNet `.
* The `scikit-learn documentation `_
has very detailed explanations on a large variety of estimators and
diff --git a/doc/images/papaya_stat_map_plot_screenshot.png b/doc/images/papaya_stat_map_plot_screenshot.png
new file mode 100644
index 0000000000..7950348745
Binary files /dev/null and b/doc/images/papaya_stat_map_plot_screenshot.png differ
diff --git a/doc/images/papaya_stat_map_plot_screenshot_notebook.png b/doc/images/papaya_stat_map_plot_screenshot_notebook.png
new file mode 100644
index 0000000000..b703dc597b
Binary files /dev/null and b/doc/images/papaya_stat_map_plot_screenshot_notebook.png differ
diff --git a/doc/images/plotly_connectome_plot.png b/doc/images/plotly_connectome_plot.png
new file mode 100644
index 0000000000..e56d9b47eb
Binary files /dev/null and b/doc/images/plotly_connectome_plot.png differ
diff --git a/doc/images/plotly_markers_plot.png b/doc/images/plotly_markers_plot.png
new file mode 100644
index 0000000000..be0e34d3cb
Binary files /dev/null and b/doc/images/plotly_markers_plot.png differ
diff --git a/doc/images/plotly_surface_atlas_plot.png b/doc/images/plotly_surface_atlas_plot.png
new file mode 100644
index 0000000000..44058d0f66
Binary files /dev/null and b/doc/images/plotly_surface_atlas_plot.png differ
diff --git a/doc/images/plotly_surface_plot.png b/doc/images/plotly_surface_plot.png
new file mode 100644
index 0000000000..3a9b357009
Binary files /dev/null and b/doc/images/plotly_surface_plot.png differ
diff --git a/doc/images/plotly_surface_plot_notebook_screenshot.png b/doc/images/plotly_surface_plot_notebook_screenshot.png
new file mode 100644
index 0000000000..38f72c8c3c
Binary files /dev/null and b/doc/images/plotly_surface_plot_notebook_screenshot.png differ
diff --git a/doc/index.rst b/doc/index.rst
index 69004cdfe0..4d5683b53e 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -15,29 +15,32 @@
.. Here we are building the carousel
-.. |glass_brain| image:: auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_glass_brain_002.png
- :target: auto_examples/manipulating_visualizing/plot_demo_glass_brain.html
+.. |glass_brain| image:: auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_002.png
+ :target: auto_examples/01_plotting/plot_demo_glass_brain.html
-.. |connectome| image:: auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png
- :target: auto_examples/connectivity/plot_inverse_covariance_connectome.html
+.. |connectome| image:: auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png
+ :target: auto_examples/03_connectivity/plot_inverse_covariance_connectome.html
-.. |haxby_weights| image:: auto_examples/images/sphx_glr_plot_haxby_simple_001.png
- :target: auto_examples/plot_haxby_simple.html
+.. |surface_plot| image:: auto_examples/01_plotting/images/sphx_glr_plot_3d_map_to_surface_projection_001.png
+ :target: auto_examples/01_plotting/plot_3d_map_to_surface_projection.html
-.. |oasis_weights| image:: auto_examples/decoding/images/sphx_glr_plot_oasis_vbm_002.png
- :target: auto_examples/decoding/plot_oasis_vbm.html
+.. |haxby_weights| image:: auto_examples/images/sphx_glr_plot_decoding_tutorial_002.png
+ :target: auto_examples/plot_decoding_tutorial.html
-.. |rest_clustering| image:: auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_001.png
- :target: auto_examples/connectivity/plot_rest_clustering.html
+.. |oasis_weights| image:: auto_examples/02_decoding/images/sphx_glr_plot_oasis_vbm_002.png
+ :target: auto_examples/02_decoding/plot_oasis_vbm.html
-.. |canica| image:: auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_011.png
- :target: auto_examples/connectivity/plot_canica_resting_state.html
+.. |rest_parcellations| image:: auto_examples/03_connectivity/images/sphx_glr_plot_rest_parcellations_001.png
+ :target: auto_examples/03_connectivity/plot_rest_parcellations.html
-.. |tvl1_haxby| image:: auto_examples/decoding/images/sphx_glr_plot_haxby_space_net_002.png
- :target: auto_examples/decoding/plot_haxby_space_net.html
+.. |canica| image:: auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_011.png
+ :target: auto_examples/03_connectivity/plot_canica_resting_state.html
-.. |searchlight| image:: auto_examples/decoding/images/sphx_glr_plot_haxby_searchlight_001.png
- :target: auto_examples/decoding/plot_haxby_searchlight.html
+.. |tvl1_haxby| image:: auto_examples/02_decoding/images/sphx_glr_plot_haxby_space_net_002.png
+ :target: auto_examples/02_decoding/plot_haxby_space_net.html
+
+.. |searchlight| image:: auto_examples/02_decoding/images/sphx_glr_plot_haxby_searchlight_001.png
+ :target: auto_examples/02_decoding/plot_haxby_searchlight.html
.. raw:: html
@@ -58,29 +61,31 @@
* |glass_brain|
-* |haxby_weights|
+* |surface_plot|
* |oasis_weights|
* |connectome|
-* |rest_clustering|
+* |rest_parcellations|
* |canica|
* |tvl1_haxby|
+* |haxby_weights|
+
* |searchlight|
.. raw:: html
-
+
-
+
-
+
@@ -92,10 +97,10 @@
.. toctree::
:hidden:
- AUTHORS.rst
+ authors.rst
user_guide.rst
auto_examples/index.rst
whats_new.rst
+ contributing.rst
Nilearn is part of the `NiPy ecosystem `_.
-
diff --git a/doc/install_doc_component.html b/doc/install_doc_component.html
index 82f8ec85f5..a81b5869f5 100644
--- a/doc/install_doc_component.html
+++ b/doc/install_doc_component.html
@@ -43,16 +43,17 @@
Anaconda
-
We recommend that you install a complete scientific Python
- distribution like 64 bit Anaconda
- . Since it meets all the requirements of nilearn, it will save
+
We recommend that you install a complete
+ 64 bit scientific Python
+ distribution like Anaconda
+ . Since it meets all the requirements of nilearn, it will save
you time and trouble. You could also check PythonXY
+ href="http://python-xy.github.io/" target="_blank">PythonXY
as an alternative.
Nilearn requires a Python installation and the following
- dependencies: ipython, scikit-learn, matplotlib and nibabel
+ dependencies: ipython, scikit-learn, matplotlib and nibabel.
Second: open a Command Prompt
(Press "Win-R", type "cmd" and press "Enter". This will open
@@ -74,14 +75,15 @@
reference external" href="https://store.continuum.io/cshop/anaconda/"
target="_blank">Anaconda
Nilearn requires a Python installation and the following
- dependencies: ipython, scikit-learn, matplotlib and nibabel
+ dependencies: ipython, scikit-learn, matplotlib and nibabel.
Second: open a Terminal
(Navigate to /Applications/Utilities and double-click on
@@ -103,7 +105,7 @@
If you are using Ubuntu or Debian
and you have access to
- Neurodebian, then simply install the
+ Neurodebian, then simply install the
python-nilearn package through Neurodebian.
@@ -113,15 +115,16 @@
packages using the distribution package manager: ipython
, scikit-learn (sometimes called sklearn,
or python-sklearn), matplotlib (sometimes
- called python-matplotlib) and nibabel
- (sometimes called python-nibabel)
+ called python-matplotlib) and nibabel
+ (sometimes called python-nibabel).
If you do not have access to the package manager we recommend
- that you install a complete scientific Python distribution like 64 bit
+ that you install a complete 64 bit
+ scientific Python distribution like
- Anaconda. Since it meets all the requirements of nilearn,
- it will save you time and trouble..
+ Anaconda. Since it meets all the requirements of nilearn,
+ it will save you time and trouble.
Second: open a Terminal
(Press ctrl+alt+t and a Terminal console will pop up)
diff --git a/doc/introduction.rst b/doc/introduction.rst
index aa3a46b248..a9f354b468 100644
--- a/doc/introduction.rst
+++ b/doc/introduction.rst
@@ -1,3 +1,10 @@
+.. for doc tests to run with recent NumPy 1.14, we need to set print options
+ to older versions. See issue #1593 for more details
+ >>> import numpy as np
+ >>> from distutils.version import LooseVersion
+ >>> if LooseVersion(np.__version__) >= LooseVersion('1.14'):
+ ... np.set_printoptions(legacy='1.13')
+
=====================================
Introduction: nilearn in a nutshell
=====================================
@@ -22,9 +29,9 @@ What is nilearn: MVPA, decoding, predictive models, functional connectivity
:ref:`brain parcellations `,
:ref:`connectomes `.
- Nilearn can readily be used on :ref:`task fMRI `,
+ Nilearn can readily be used on :ref:`task fMRI `,
:ref:`resting-state `, or
- :ref:`VBM ` data.
+ :ref:`VBM ` data.
For a machine-learning expert, the value of nilearn can be seen as
domain-specific **feature engineering** construction, that is, shaping
@@ -201,11 +208,11 @@ the file name::
The filename could be given as "~/t_map000.nii" as nilearn expands "~" to
the home directory.
- :ref:`See more on file name matchings `.
+ :ref:`See more on file name matchings `.
-.. image:: auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_glass_brain_001.png
- :target: auto_examples/manipulating_visualizing/plot_demo_glass_brain.html
+.. image:: auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_001.png
+ :target: auto_examples/01_plotting/plot_demo_glass_brain.html
:align: center
:scale: 60
@@ -264,10 +271,20 @@ To loop over each individual volume of a 4D image, use :func:`image.iter_img`::
* To perform a for loop in Python, you can use the "range" function
* The solution can be found :ref:`here
- `
+ `
|
+
+.. topic:: **Warm up examples**
+
+ The two following examples may be useful to get familiar with data
+ representation in nilearn:
+
+ * :ref:`sphx_glr_auto_examples_plot_nilearn_101.py`
+
+ * :ref:`sphx_glr_auto_examples_plot_3d_and_4d_niimg.py`
+
____
Now, if you want out-of-the-box methods to process neuroimaging data, jump
@@ -450,4 +467,4 @@ Finding help
* For machine-learning and scikit-learn questions, expertise can be
found on the scikit-learn mailing list:
- https://lists.sourceforge.net/lists/listinfo/scikit-learn-general
+ https://mail.python.org/mailman/listinfo/scikit-learn
diff --git a/doc/logos/digi-saclay-logo-small.png b/doc/logos/digi-saclay-logo-small.png
new file mode 100644
index 0000000000..2190fc5a51
Binary files /dev/null and b/doc/logos/digi-saclay-logo-small.png differ
diff --git a/doc/logos/nilearn-logo-tagline.svg b/doc/logos/nilearn-logo-tagline.svg
new file mode 100644
index 0000000000..1bbe279035
--- /dev/null
+++ b/doc/logos/nilearn-logo-tagline.svg
@@ -0,0 +1,117 @@
+
+
+
+
\ No newline at end of file
diff --git a/doc/logos/nilearn_logo_tagline.png b/doc/logos/nilearn_logo_tagline.png
new file mode 100644
index 0000000000..c0c381cbd2
Binary files /dev/null and b/doc/logos/nilearn_logo_tagline.png differ
diff --git a/doc/manipulating_images/index.rst b/doc/manipulating_images/index.rst
new file mode 100644
index 0000000000..28870327fe
--- /dev/null
+++ b/doc/manipulating_images/index.rst
@@ -0,0 +1,22 @@
+.. include:: ../tune_toc.rst
+
+
+.. _image_manipulation:
+
+========================================
+Manipulating brain volumes with nilearn
+========================================
+
+In this section, we detail the general tools for manipulating
+brain images with nilearn.
+
+|
+
+.. include:: ../includes/big_toc_css.rst
+
+
+.. toctree::
+
+ input_output.rst
+ manipulating_images.rst
+ masker_objects.rst
diff --git a/doc/manipulating_images/input_output.rst b/doc/manipulating_images/input_output.rst
new file mode 100644
index 0000000000..0254e0711d
--- /dev/null
+++ b/doc/manipulating_images/input_output.rst
@@ -0,0 +1,264 @@
+.. _extracting_data:
+
+=====================================================
+Input and output: neuroimaging data representation
+=====================================================
+
+.. contents:: **Contents**
+ :local:
+ :depth: 1
+
+|
+
+.. currentmodule:: nilearn.image
+
+.. _loading_data:
+
+Inputting data: file names or image objects
+===========================================
+
+File names and objects, 3D and 4D images
+-----------------------------------------
+
+All Nilearn functions accept file names as arguments::
+
+ >>> from nilearn import image
+ >>> smoothed_img = image.smooth_img('/home/user/t_map001.nii') # doctest: +SKIP
+
+Nilearn can operate on either file names or `NiftiImage objects
+`_. The latter represent the
+data loaded in memory. In the example above, the
+function :func:`smooth_img` returns a Nifti1Image object, which can then
+be readily passed to other nilearn functions.
+
+In nilearn, we often use the term *"niimg"* as an abbreviation that denotes
+either a file name or a `NiftiImage object
+`_.
+
+Niimgs can be 3D or 4D. A 4D niimg may for instance represent a time
+series of 3D images. It can be **a list of file names**, if these contain
+3D information::
+
+ >>> # dataset folder contains subject1.nii and subject2.nii
+ >>> from nilearn.image import smooth_img
+ >>> result_img = smooth_img(['dataset/subject1.nii', 'dataset/subject2.nii']) # doctest: +SKIP
+
+``result_img`` is a 4D in-memory image, containing the data of both
+subjects.
+
+
+.. _filename_matching:
+
+File name matching: "globbing" and user path expansion
+------------------------------------------------------
+
+You can specify files with *wildcard* matching patterns (as in Unix
+shell):
+
+ * **Matching multiple files**: suppose the dataset folder contains
+ subject_01.nii, subject_02.nii, and subject_03.nii;
+ ``dataset/subject_*.nii`` is a glob expression matching all filenames::
+
+ >>> # Example with a smoothing process:
+ >>> from nilearn.image import smooth_img
+ >>> result_img = smooth_img("dataset/subject_*.nii") # doctest: +SKIP
+
+ Note that the result is a 4D image.
+
+ * **Expanding the home directory** ``~`` is expanded to your home
+ directory::
+
+ >>> result_img = smooth_img("~/dataset/subject_01.nii") # doctest: +SKIP
+
+ Using ``~`` rather than specifying the details of the path is good
+ practice, as it will make it more likely that your script works on
+ different computers.
+
+
+.. topic:: **Python globbing**
+
+ For more complicated use cases, Python also provides functions to work
+ with file paths, in particular, :func:`glob.glob`.
+
+ .. warning::
+
+ Unlike nilearn's path expansion, the results of :func:`glob.glob` are
+ not sorted and, depending on the computer you are running on, might
+ not be in alphabetical order. We advise you to rely on
+ nilearn's path expansion.
+
+ To load data with globbing, we suggest that you use
+ :func:`nilearn.image.load_img`.
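+
+ As a minimal sketch (assuming files matching ``dataset/subject_*.nii``
+ exist on disk), sort the globbing result explicitly before loading::
+
+ >>> import glob
+ >>> from nilearn.image import load_img
+ >>> filenames = sorted(glob.glob('dataset/subject_*.nii')) # doctest: +SKIP
+ >>> img_4d = load_img(filenames) # doctest: +SKIP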
+
+
+.. currentmodule:: nilearn.datasets
+
+.. _datasets:
+
+Fetching open datasets from Internet
+=====================================
+
+Nilearn provides dataset fetching functions that
+automatically download reference
+datasets and atlases. They can be imported from
+:mod:`nilearn.datasets`::
+
+ >>> from nilearn import datasets
+ >>> haxby_dataset = datasets.fetch_haxby() # doctest: +SKIP
+
+They return a data structure that contains different pieces of
+information on the retrieved dataset, including the
+file names on hard disk::
+
+ >>> # The different files
+ >>> print(sorted(list(haxby_dataset.keys()))) # doctest: +SKIP
+ ['anat', 'description', 'func', 'mask', 'mask_face', 'mask_face_little',
+ 'mask_house', 'mask_house_little', 'mask_vt', 'session_target']
+ >>> # Path to first functional file
+ >>> print(haxby_dataset.func[0]) # doctest: +SKIP
+ /.../nilearn_data/haxby2001/subj1/bold.nii.gz
+
+Explanations and further resources for the dataset at hand can be retrieved as
+follows::
+
+ >>> print(haxby_dataset.description) # doctest: +SKIP
+ Haxby 2001 results
+
+
+ Notes
+ -----
+ Results from a classical fMRI study that...
+
+|
+
+.. seealso::
+
+ For a list of all the data fetching functions in nilearn, see
+ :ref:`datasets_ref`.
+
+|
+
+.. topic:: **nilearn_data: Where is the downloaded data stored?**
+
+ The fetching functions download the reference datasets to the disk.
+ They save them locally for future use, in one of the
+ following directories (in order of priority, if present):
+
+ * the folder specified by `data_dir` parameter in the fetching function
+ * the global environment variable `NILEARN_SHARED_DATA`
+ * the user environment variable `NILEARN_DATA`
+ * the `nilearn_data` folder in the user home folder
+
+ The two different environment variables (NILEARN_SHARED_DATA and
+ NILEARN_DATA) are provided for multi-user systems, to distinguish a
+ global dataset repository that may be read-only at the user-level.
+ Note that you can copy that folder to another user's computer to
+ avoid the initial dataset download on the first fetching call.
+
+ You can check in which directory nilearn will store the data with the
+ function :func:`nilearn.datasets.get_data_dirs`.
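+
+ For instance::
+
+ >>> from nilearn import datasets
+ >>> print(datasets.get_data_dirs()) # doctest: +SKIP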
+
+
+|
+
+Understanding neuroimaging data
+===============================
+
+Nifti and Analyze data
+-----------------------
+
+For volumetric data, nilearn works with data stored in the Nifti
+structure (via the nibabel_ package).
+
+The `NIfTI `_ data structure (also used in
+Analyze files) is the standard way of sharing data in neuroimaging
+research. Its three main components are:
+
+:data:
+ raw scans in the form of a numpy array: ``data = img.get_data()``
+:affine:
+ returns the transformation matrix that maps
+ from voxel indices of the numpy array to actual real-world
+ locations of the brain:
+ ``affine = img.affine``
+:header:
+ low-level information about the data (slice duration, etc.):
+ ``header = img.header``
+
+If you need to load the data without using nilearn, read the nibabel_
+documentation.
+
+Note: For older versions of nibabel_, affine and header can be retrieved
+with ``get_affine()`` and ``get_header()``.
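+
+As a minimal sketch with nibabel_ (``t_map001.nii`` being a hypothetical
+local file)::
+
+ >>> import nibabel as nib
+ >>> img = nib.load('t_map001.nii') # doctest: +SKIP
+ >>> data = img.get_data() # doctest: +SKIP
+ >>> affine = img.affine # doctest: +SKIP
+ >>> header = img.header # doctest: +SKIP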
+
+
+.. topic:: **Dataset formatting: data shape**
+
+ It is important to appreciate two main representations for
+ storing and accessing more than one Nifti image, that is, sets
+ of MRI scans:
+
+ - a big 4D matrix representing (3D MRI + 1D for time), stored in a single
+ Nifti file.
+ `FSL `_ users tend to
+ prefer this format.
+ - several 3D matrices representing each time point (single 3D volume) of the
+ session, stored in a set of 3D Nifti or Analyze files.
+ `SPM `_ users tend
+ to prefer this format.
+
+.. _niimg:
+
+Niimg-like objects
+-------------------
+
+Nilearn functions take as input argument what we call "Niimg-like
+objects":
+
+**Niimg:** A Niimg-like object can be one of the following:
+
+ * A string with a file path to a Nifti or Analyze image
+ * A ``SpatialImage`` from nibabel, i.e. an object exposing a ``get_data()``
+ method and an ``affine`` attribute, typically a ``Nifti1Image`` from nibabel_.
+
+**Niimg-4D:** Similarly, some functions require 4D Nifti-like
+data, which we call Niimgs or Niimg-4D. Accepted input arguments are:
+
+ * A path to a 4D Nifti image
+ * List of paths to 3D Nifti images
+ * 4D Nifti-like object
+ * List of 3D Nifti-like objects
+
+.. topic:: **Image affines**
+
+ If you provide a sequence of Nifti images, all of them must have the same
+ affine.
+
+Text files: phenotype or behavior
+----------------------------------
+
+Phenotypic or behavioral data are often provided as text or CSV
+(Comma Separated Values) files. They
+can be loaded with `pd.read_csv` but you may have to specify some options
+(typically `sep` if fields aren't delimited with a comma).
+
+For the Haxby datasets, we can load the categories of the images
+presented to the subject::
+
+ >>> from nilearn import datasets
+ >>> haxby_dataset = datasets.fetch_haxby() # doctest: +SKIP
+ >>> import pandas as pd # doctest: +SKIP
+ >>> labels = pd.read_csv(haxby_dataset.session_target[0], sep=" ") # doctest: +SKIP
+ >>> stimuli = labels['labels'] # doctest: +SKIP
+ >>> print(stimuli.unique()) # doctest: +SKIP
+ ['bottle' 'cat' 'chair' 'face' 'house' 'rest' 'scissors' 'scrambledpix'
+ 'shoe']
+
+.. topic:: **Reading CSV with pandas**
+
+ `Pandas `_ is a powerful package to read
+ data from CSV files and manipulate them.
+
+|
+
+.. _nibabel: http://nipy.sourceforge.net/nibabel/
diff --git a/doc/manipulating_images/manipulating_images.rst b/doc/manipulating_images/manipulating_images.rst
new file mode 100644
index 0000000000..57993de8ad
--- /dev/null
+++ b/doc/manipulating_images/manipulating_images.rst
@@ -0,0 +1,274 @@
+.. _data_manipulation:
+
+=====================================================================
+Manipulating images: resampling, smoothing, masking, ROIs...
+=====================================================================
+
+This chapter discusses how nilearn can be used to do simple operations on
+brain images.
+
+
+.. contents:: **Chapter contents**
+ :local:
+ :depth: 1
+
+.. _preprocessing_functions:
+
+Functions for data preparation and image transformation
+=========================================================
+
+Nilearn comes with many simple functions for data preparation and
+transformation (a short sketch chaining a few of them follows the list
+below). Note that if you want to perform these operations while
+loading the data into a data matrix, most are also integrated in the
+:ref:`masker objects `.
+
+.. currentmodule:: nilearn
+
+
+* Computing the mean of images (along the time/4th dimension):
+ :func:`nilearn.image.mean_img`
+* Applying numpy functions on an image or a list of images:
+ :func:`nilearn.image.math_img`
+* Swapping voxels of both hemispheres (e.g., useful to homogenize masks
+ inter-hemispherically):
+ :func:`nilearn.image.swap_img_hemispheres`
+* Smoothing: :func:`nilearn.image.smooth_img`
+* Cleaning signals (e.g., linear detrending, standardization,
+ confound removal, low/high pass filtering):
+ :func:`nilearn.image.clean_img`
+
+ .. seealso::
+
+ To apply this cleaning on signal matrices rather than images:
+ :func:`nilearn.signal.clean`
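+
+As a quick illustration, a minimal sketch chaining a few of these
+functions (``func.nii`` being a hypothetical 4D functional image on
+disk)::
+
+ >>> from nilearn import image
+ >>> mean = image.mean_img('func.nii') # doctest: +SKIP
+ >>> smoothed = image.smooth_img(mean, fwhm=6) # doctest: +SKIP
+ >>> thresholded = image.math_img('img > 2', img=smoothed) # doctest: +SKIP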
+
+.. _resampling:
+
+Resampling images
+=================
+
+Resampling one image to match another one
+------------------------------------------
+
+:func:`nilearn.image.resample_to_img` resamples an image to a reference
+image.
+
+.. topic:: **Example**
+
+ * :ref:`sphx_glr_auto_examples_04_manipulating_images_plot_resample_to_template.py`
+
+.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_resample_to_template_001.png
+ :target: ../auto_examples/04_manipulating_images/plot_resample_to_template.html
+ :width: 45%
+.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_resample_to_template_002.png
+ :target: ../auto_examples/04_manipulating_images/plot_resample_to_template.html
+ :width: 45%
+
+This can be useful to display two images as overlays in some
+viewers (e.g., FSLView) that require all images to be on the same grid.
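+
+A minimal sketch (``source_img`` and ``reference_img`` being any two
+niimgs)::
+
+ >>> from nilearn.image import resample_to_img
+ >>> resampled_img = resample_to_img(source_img, reference_img) # doctest: +SKIP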
+
+Resampling to a specific target affine, shape, or resolution
+-------------------------------------------------------------
+
+:func:`nilearn.image.resample_img` specifies the resampling in terms of
+the `target_affine` to match the spatial configuration defined by the new
+affine.
+
+Additionally, a `target_shape` can be used to resize images
+(i.e., cropping or padding with zeros) to match the expected image
+dimensions (shape composed of x, y, and z).
+
+Resampling can be useful to downsample images to increase processing
+speed and lower memory consumption.
+
+On an advanced note, automatic computation of offset and bounding box
+can be performed by specifying a 3x3 matrix instead of the 4x4 affine.
+In this case, nilearn automatically computes the translation part
+of the transformation matrix (i.e., affine).
+
+.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_affine_transformation_002.png
+ :target: ../auto_examples/04_manipulating_images/plot_affine_transformation.html
+ :width: 30%
+.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_affine_transformation_004.png
+ :target: ../auto_examples/04_manipulating_images/plot_affine_transformation.html
+ :width: 30%
+.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_affine_transformation_003.png
+ :target: ../auto_examples/04_manipulating_images/plot_affine_transformation.html
+ :width: 30%
+
+
+.. topic:: **Special case: resampling to a given voxel size**
+
+ Specifying a 3x3 matrix that is diagonal as a target_affine fixes the
+ voxel size. For instance to resample to 3x3x3 mm voxels::
+
+ >>> import numpy as np
+ >>> target_affine = np.diag((3, 3, 3))
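+
+ and then, as a sketch, pass it to :func:`nilearn.image.resample_img`
+ (``img`` being any niimg)::
+
+ >>> from nilearn.image import resample_img
+ >>> resampled_img = resample_img(img, target_affine=target_affine) # doctest: +SKIP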
+
+.. seealso::
+
+ :ref:`An example illustrating affine transforms on data and bounding boxes `
+
+Accessing individual volumes in 4D images
+===========================================
+
+* :func:`nilearn.image.index_img`: selects one or more volumes in a 4D
+ image.
+
+* :func:`nilearn.image.iter_img`: loops over all the volumes of a 4D
+ image.
+
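+A minimal sketch (``img_4d`` being any 4D niimg)::
+
+ >>> from nilearn.image import index_img, iter_img
+ >>> first_volume = index_img(img_4d, 0) # doctest: +SKIP
+ >>> for volume in iter_img(img_4d): # doctest: +SKIP
+ ...     print(volume.shape) # doctest: +SKIP
+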
+.. seealso::
+
+ * :func:`nilearn.image.concat_imgs`: merge multiple 3D (or 4D) images
+ into one 4D image by concatenation along the 4th (time) axis
+
+ * :func:`nilearn.image.load_img`: load an image into memory. The
+ benefit of this function is that it will convert various
+ representations, such as filename, list of filenames, wildcards,
+ list of in-memory objects, to an in-memory NiftiImage.
+
+ * :func:`nilearn.image.new_img_like`: given data in a numpy array,
+ creates a new image using an existing reference image for the
+ metadata.
+
+|
+
+.. topic:: **Examples**
+
+ * :ref:`sphx_glr_auto_examples_plot_3d_and_4d_niimg.py`
+
+ * :ref:`sphx_glr_auto_examples_01_plotting_plot_overlay.py`
+
+Computing and applying spatial masks
+=====================================
+
+Relevant functions (a minimal round-trip sketch follows this list):
+
+* compute a mask from EPI images: :func:`nilearn.masking.compute_epi_mask`
+* compute a grey-matter mask using the MNI template:
+ :func:`nilearn.masking.compute_gray_matter_mask`.
+* compute a mask from images with a flat background:
+ :func:`nilearn.masking.compute_background_mask`
+* compute for multiple sessions/subjects:
+ :func:`nilearn.masking.compute_multi_epi_mask`
+ :func:`nilearn.masking.compute_multi_background_mask`
+* apply: :func:`nilearn.masking.apply_mask`
+* intersect several masks (useful for multi sessions/subjects): :func:`nilearn.masking.intersect_masks`
+* unmasking: :func:`nilearn.masking.unmask`
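+
+A minimal round-trip sketch (``func.nii`` being a hypothetical 4D EPI
+image on disk)::
+
+ >>> from nilearn.masking import compute_epi_mask, apply_mask, unmask
+ >>> mask_img = compute_epi_mask('func.nii') # doctest: +SKIP
+ >>> data_2d = apply_mask('func.nii', mask_img) # doctest: +SKIP
+ >>> img_4d = unmask(data_2d, mask_img) # doctest: +SKIP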
+
+
+Extracting a brain mask
+------------------------
+
+If we do not have a spatial mask of the target regions, a brain mask
+can be computed from the data:
+
+- :func:`nilearn.masking.compute_background_mask` for brain images where
+  the brain stands out from a constant background. This is typically the
+  case with statistical maps obtained after brain extraction
+- :func:`nilearn.masking.compute_epi_mask` for EPI images
+- :func:`nilearn.masking.compute_gray_matter_mask` to compute a
+ gray-matter mask using the MNI template.
+
+
+.. literalinclude:: ../../examples/01_plotting/plot_visualization.py
+ :start-after: # Simple computation of a mask from the fMRI data
+ :end-before: # Applying the mask to extract the corresponding time series
+
+.. figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_visualization_002.png
+ :target: ../auto_examples/01_plotting/plot_visualization.html
+ :scale: 50%
+
+
+.. _mask_4d_2_3d:
+
+Masking data: from 4D Nifti images to 2D data arrays
+---------------------------------------------------------------
+
+fMRI data is usually represented as a 4D block of data: 3 spatial
+dimensions and one time dimension. In practice, we are usually
+interested in working on the voxel time-series in the
+brain. It is thus convenient to apply a brain mask in order to convert the
+4D brain images representation into a restructured 2D data representation,
+`voxel` **x** `time`, as depicted below:
+
+.. image:: ../images/masking.jpg
+ :align: center
+ :width: 100%
+
+Note that in an analysis pipeline, this operation is best done using the
+:ref:`masker objects <masker_objects>`. For completeness, we give code to
+do it manually below:
+
+.. literalinclude:: ../../examples/01_plotting/plot_visualization.py
+ :start-after: # Applying the mask to extract the corresponding time series
+
+.. figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_visualization_003.png
+ :target: ../auto_examples/01_plotting/plot_visualization.html
+ :align: center
+ :scale: 40
+
+
+
+Image operations: creating a ROI mask manually
+===============================================
+
+A region of interest (ROI) mask can be computed, for instance, with a
+statistical test. This requires a chain of image
+operations on the input data. Here is a possible recipe for computing an
+ROI mask (a minimal code sketch follows the list):
+
+ * **Smoothing**: Before a statistical test, it is often useful to smooth
+   the image a bit using :func:`nilearn.image.smooth_img`, typically with
+   fwhm=6 for fMRI.
+
+ * **Selecting voxels**: Given the smoothed data, we can select voxels
+   with a statistical test (e.g., opposing the face and house experimental
+   conditions), for instance with a simple Student's t-test using the scipy
+   function :func:`scipy.stats.ttest_ind`.
+
+ * **Thresholding**: Then we need to threshold the statistical map to get
+   a better representation of the voxels of interest.
+
+ * **Mask intersection and dilation**: Post-processing the results with
+ simple morphological operations, mask intersection and dilation.
+
+   * we can use another mask, such as a grey-matter mask, to select
+     only the voxels which are common to both masks.
+
+   * we can do `morphological dilation
+     `_ to achieve
+     more compact blobs with more regular boundaries, using
+     :func:`scipy.ndimage.binary_dilation`.
+
+ * **Extracting connected components**: We end by splitting the connected
+   ROIs into two separate regions (ROIs), one in each hemisphere, with
+   the function :func:`scipy.ndimage.label` from the scipy library.
+
+ * **Saving the result**: The final voxel mask is saved to disk using
+   the 'to_filename' method of the image object (or **nibabel.save**).
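+
+A minimal sketch of these steps; the file name, the threshold, and the
+condition masks (``face_cond`` and ``house_cond``, boolean arrays over time
+points) are illustrative assumptions, and the complete script linked below
+shows the real version::
+
+    >>> from scipy import stats, ndimage  # doctest: +SKIP
+    >>> from nilearn import image  # doctest: +SKIP
+    >>> fmri_img = image.smooth_img('func.nii.gz', fwhm=6)  # smoothing  # doctest: +SKIP
+    >>> data = fmri_img.get_data()  # doctest: +SKIP
+    >>> # Selecting voxels: t-test between the two conditions
+    >>> _, p_values = stats.ttest_ind(data[..., face_cond],
+    ...                               data[..., house_cond], axis=-1)  # doctest: +SKIP
+    >>> roi_mask = p_values < 1e-5  # thresholding  # doctest: +SKIP
+    >>> roi_mask = ndimage.binary_dilation(roi_mask)  # dilation  # doctest: +SKIP
+    >>> labels, n_rois = ndimage.label(roi_mask)  # connected components  # doctest: +SKIP
+    >>> label_img = image.new_img_like(fmri_img, labels)  # doctest: +SKIP
+    >>> label_img.to_filename('roi_atlas.nii')  # saving  # doctest: +SKIP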
+
+
+.. seealso::
+
+ For extracting connected components:
+
+   * :func:`nilearn.regions.connected_regions` can be readily used on
+     probabilistic atlas Nifti-like images, whereas
+
+   * :func:`nilearn.regions.connected_label_regions` can be used on
+     atlases denoted as labels, for instance atlases labelled using KMeans.
+
+.. _nibabel: http://nipy.sourceforge.net/nibabel/
+
+.. topic:: **Code**
+
+   A complete script covering the above steps, with full description, can
+   be found :ref:`here
+   <sphx_glr_auto_examples_04_manipulating_images_plot_roi_extraction.py>`.
+
+.. seealso::
+
+ * :ref:`Automatic region extraction on 4D atlas images
+ `.
diff --git a/doc/manipulating_images/masker_objects.rst b/doc/manipulating_images/masker_objects.rst
new file mode 100644
index 0000000000..d968254b4b
--- /dev/null
+++ b/doc/manipulating_images/masker_objects.rst
@@ -0,0 +1,423 @@
+.. _masker_objects:
+
+=====================================================================
+From neuroimaging volumes to data matrices: the masker objects
+=====================================================================
+
+This chapter introduces the maskers: objects that go from
+neuroimaging volumes, on disk or in memory, to data matrices, e.g., of
+time series.
+
+.. contents:: **Chapters contents**
+ :local:
+ :depth: 1
+
+
+The concept of "masker" objects
+===============================
+
+In any analysis, the first step is to load the data.
+It is often convenient to apply some basic data
+transformations and to turn the data into a 2D (samples x features) matrix,
+where the samples could be different time points, and the features derived
+from different voxels (e.g., restrict analysis to the ventral visual stream),
+regions of interest (e.g., extract local signals from spheres/cubes), or
+pre-specified networks (e.g., look at data from all voxels of a set of
+network nodes). Think of masker objects as Swiss Army knives for shaping
+the raw neuroimaging data in 3D space into the units of observation
+relevant for the research questions at hand.
+
+
+.. |niimgs| image:: ../images/niimgs.jpg
+ :scale: 50%
+
+.. |arrays| image:: ../images/feature_array.jpg
+ :scale: 35%
+
+.. |arrow| raw:: html
+
+ →
+
+.. centered:: |niimgs| |arrow| |arrays|
+
+
+
+"masker" objects (found in modules :mod:`nilearn.input_data`)
+simplify these "data folding" steps that often preceed the
+statistical analysis.
+
+Note that the masker objects may not cover all the image transformations
+needed for specific tasks. Users who need some specific processing may
+have to call :ref:`dedicated functions <preprocessing_functions>`
+(modules :mod:`nilearn.signal`, :mod:`nilearn.masking`).
+
+|
+
+.. topic:: **Advanced: Design philosophy of "Maskers"**
+
+ The design of these classes is similar to `scikit-learn
+ `_\ 's transformers. First, objects are
+ initialized with some parameters guiding the transformation
+ (unrelated to the data). Then the `fit()` method should be called,
+ possibly specifying some data-related information (such as number of
+ images to process), to perform some initial computation (e.g.,
+   fitting a mask based on the data). Finally, `transform()` can be
+   called, with the data as argument, to perform some computation on
+   the data itself (e.g., extracting time series from images).
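+
+   A minimal sketch of this pattern (the file name is a stand-in)::
+
+    >>> from nilearn.input_data import NiftiMasker  # doctest: +SKIP
+    >>> masker = NiftiMasker(smoothing_fwhm=6, standardize=True)  # parameters only  # doctest: +SKIP
+    >>> masker.fit('func.nii.gz')  # computes the mask from the data  # doctest: +SKIP
+    >>> data_2d = masker.transform('func.nii.gz')  # time points x voxels  # doctest: +SKIP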
+
+
+.. currentmodule:: nilearn.input_data
+
+.. _nifti_masker:
+
+:class:`NiftiMasker`: applying a mask to load time-series
+==========================================================
+
+:class:`NiftiMasker` is a powerful tool to load images and
+extract voxel signals in the area defined by the mask.
+It applies some basic preprocessing
+steps with commonly used parameters as defaults.
+But it is *very important* to look at your data to see the effects
+of the preprocessing steps and validate them.
+
+.. topic:: **Advanced: scikit-learn Pipelines**
+
+ :class:`NiftiMasker` is a `scikit-learn
+ `_ compliant
+ transformer so that you can directly plug it into a `scikit-learn
+ pipeline `_.
+
+
+Custom data loading: loading only the first 100 time points
+------------------------------------------------------------
+
+Suppose we want to restrict a dataset to the first 100 frames. Below, we load
+a resting-state dataset with :func:`fetch_adhd()
+<nilearn.datasets.fetch_adhd>`, restrict it to 100 frames and
+build a new niimg object that we can give to the masker. Although
+possible, there is no need to save your data to a file to pass it to a
+:class:`NiftiMasker`. Simply use :func:`nilearn.image.index_img` to apply a
+slice and create a :ref:`Niimg <niimg>` in memory:
+
+
+.. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py
+ :start-after: Load ADHD resting-state dataset
+ :end-before: # To display the background
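+
+A minimal sketch of the idea (assuming the dataset download succeeds)::
+
+    >>> from nilearn import datasets, image  # doctest: +SKIP
+    >>> adhd = datasets.fetch_adhd(n_subjects=1)  # doctest: +SKIP
+    >>> first_100 = image.index_img(adhd.func[0], slice(0, 100))  # doctest: +SKIP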
+
+Controlling how the mask is computed from the data
+--------------------------------------------------
+
+In this section, we show how the masker object can compute a mask
+automatically for subsequent statistical analysis.
+On some datasets, the default algorithm may however perform poorly.
+This is why it is very important to
+**always look at your data** before and after feature
+engineering using masker objects.
+
+.. note::
+
+ The full example described in this section can be found here:
+ :doc:`plot_mask_computation.py <../auto_examples/04_manipulating_images/plot_mask_computation>`.
+ It is also related to this example:
+ :doc:`plot_nifti_simple.py <../auto_examples/04_manipulating_images/plot_nifti_simple>`.
+
+
+Visualizing the computed mask
+..............................
+
+If a mask is not specified as an argument, :class:`NiftiMasker` will try to
+compute one from the provided neuroimaging data.
+It is *very important* to verify the quality of the generated mask by
+visualization, to check whether it is suitable for your data and intended
+analyses. If it is not, the mask computation parameters can be modified.
+See the :class:`NiftiMasker` documentation for a complete list of
+mask computation parameters.
+
+The mask can be retrieved and visualized from the `mask_img_` attribute
+of the masker:
+
+.. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py
+ :start-after: # We need to specify an 'epi' mask_strategy, as this is raw EPI data
+ :end-before: # Generate mask with strong opening
+
+
+.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_004.png
+ :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html
+ :scale: 50%
+
+Different masking strategies
+.............................
+
+The `mask_strategy` argument controls how the mask is computed:
+
+* `background`: detects a continuous background
+* `epi`: suitable for EPI images
+* `template`: uses an MNI grey-matter template
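+
+For instance, to request the EPI strategy (a one-line sketch)::
+
+    >>> from nilearn import input_data  # doctest: +SKIP
+    >>> masker = input_data.NiftiMasker(mask_strategy='epi')  # doctest: +SKIP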
+
+Extra mask parameters: opening, cutoff...
+..........................................
+
+The underlying function is :func:`nilearn.masking.compute_epi_mask`,
+whose arguments can be passed via the `mask_args` parameter of the
+:class:`NiftiMasker`. Controlling these arguments sets the finer aspects
+of the mask. See the function's documentation, or :doc:`the NiftiMasker
+example <../auto_examples/04_manipulating_images/plot_mask_computation>`.
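+
+For instance, a sketch passing a stronger morphological opening (the value is
+illustrative)::
+
+    >>> from nilearn import input_data  # doctest: +SKIP
+    >>> masker = input_data.NiftiMasker(mask_strategy='epi',
+    ...                                 mask_args=dict(opening=10))  # doctest: +SKIP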
+
+.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_005.png
+ :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html
+ :scale: 50%
+
+.. _masker_preprocessing_steps:
+
+Common data preparation steps: smoothing, filtering, resampling
+----------------------------------------------------------------
+
+:class:`NiftiMasker` comes with many parameters that enable data
+preparation::
+
+ >>> from nilearn import input_data
+ >>> masker = input_data.NiftiMasker()
+ >>> masker
+ NiftiMasker(detrend=False, dtype=None, high_pass=None, low_pass=None,
+ mask_args=None, mask_img=None, mask_strategy='background',
+ memory=Memory(cachedir=None), memory_level=1, sample_mask=None,
+ sessions=None, smoothing_fwhm=None, standardize=False, t_r=None,
+ target_affine=None, target_shape=None, verbose=0)
+
+The meaning of each parameter is described in the documentation of
+:class:`NiftiMasker` (click on the name :class:`NiftiMasker`); here we
+comment on the most important ones.
+
+.. topic:: **`dtype` argument**
+
+   Forcing your data to have a `dtype` of **float32** can help
+   save memory and usually offers good-enough numerical precision.
+   You can force this cast by setting `dtype` to 'auto'.
+   In the future this cast will be the default behaviour.
+
+
+.. seealso::
+
+ If you do not want to use the :class:`NiftiMasker` to perform these
+ simple operations on data, note that they can also be manually
+   accessed in nilearn, for example via the
+   :ref:`corresponding functions <preprocessing_functions>`.
+
+Smoothing
+.........
+
+:class:`NiftiMasker` can apply Gaussian spatial smoothing to the
+neuroimaging data, useful to reduce noise or to compensate for
+inter-individual differences in neuroanatomy. Smoothing is requested by
+specifying the full-width half maximum (FWHM; in millimeters) with the
+`smoothing_fwhm` parameter. Anisotropic filtering is also possible by
+passing 3 scalars ``(x, y, z)``, the FWHM along the x, y, and z directions.
+
+The underlying function properly handles non-cubic voxels by scaling the
+given widths appropriately.
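+
+For instance (sketch)::
+
+    >>> from nilearn import input_data  # doctest: +SKIP
+    >>> iso = input_data.NiftiMasker(smoothing_fwhm=6)  # isotropic, 6 mm  # doctest: +SKIP
+    >>> aniso = input_data.NiftiMasker(smoothing_fwhm=(6, 6, 9))  # per-axis FWHM  # doctest: +SKIP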
+
+.. seealso::
+
+ :func:`nilearn.image.smooth_img`
+
+.. _temporal_filtering:
+
+Temporal Filtering and confound removal
+........................................
+
+:class:`NiftiMasker` can also improve temporal properties of the data
+before returning the voxel signals.
+
+- **Standardization**. Parameter ``standardize``: Signals can be
+ standardized (scaled to unit variance).
+
+- **Frequency filtering**. Low-pass and high-pass filters can be used to
+  remove artifacts. Parameters: ``high_pass`` and ``low_pass``, specified
+  in Hz (note that you must specify the repetition time in seconds with
+  the ``t_r`` parameter: ``low_pass=.5, t_r=2.1``).
+
+- **Confound removal**. Two ways of removing confounds are provided: simple
+ detrending or using prespecified confounds, such as behavioral or movement
+ information.
+
+ * Linear trends can be removed by activating the `detrend` parameter.
+ This accounts for slow (as opposed to abrupt or transient) changes
+ in voxel values along a series of brain images that are unrelated to the
+ signal of interest (e.g., the neural correlates of cognitive tasks).
+ It is not activated by default in :class:`NiftiMasker` but is recommended
+ in almost all scenarios.
+
+  * More complex confounds, measured during the acquisition, can be removed
+    by passing them to :meth:`NiftiMasker.transform`. If the dataset
+    provides a confounds file, just pass its path to the masker (see the
+    sketch after this list).
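+
+A sketch combining these options (file names and values are stand-ins)::
+
+    >>> from nilearn import input_data  # doctest: +SKIP
+    >>> masker = input_data.NiftiMasker(standardize=True, detrend=True,
+    ...                                 low_pass=0.1, high_pass=0.01, t_r=2.5)  # doctest: +SKIP
+    >>> cleaned = masker.fit_transform('func.nii.gz',
+    ...                                confounds='confounds.csv')  # doctest: +SKIP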
+
+.. topic:: **Exercise**
+ :class: green
+
+ You can, more as a training than as an exercise, try to play with
+ the parameters in
+ :ref:`sphx_glr_auto_examples_plot_decoding_tutorial.py`.
+ Try to enable detrending and run the script:
+ does it have a big impact on the result?
+
+
+.. seealso::
+
+ :func:`nilearn.signal.clean`
+
+
+
+
+Resampling: resizing and changing resolutions of images
+.......................................................
+
+:class:`NiftiMasker` and many similar classes enable resampling
+(recasting of images into different resolutions and transformations of
+brain voxel data). Two parameters control resampling:
+
+* `target_affine` to resample (resize, rotate...) images in order to match
+ the spatial configuration defined by the new affine (i.e., matrix
+ transforming from voxel space into world space).
+
+* Additionally, a `target_shape` can be used to resize images
+  (i.e., cropping or padding with zeros) to match expected image
+  dimensions (a shape composed of x, y, and z).
+
+How to combine these parameters to obtain the desired resampling
+is explained in detail in :ref:`resampling`.
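+
+A sketch fixing a 3 mm isotropic voxel size (the target shape is
+illustrative)::
+
+    >>> import numpy as np  # doctest: +SKIP
+    >>> from nilearn import input_data  # doctest: +SKIP
+    >>> masker = input_data.NiftiMasker(target_affine=np.diag((3, 3, 3)),
+    ...                                 target_shape=(61, 73, 61))  # doctest: +SKIP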
+
+.. seealso::
+
+ :func:`nilearn.image.resample_img`, :func:`nilearn.image.resample_to_img`
+
+.. _unmasking_step:
+
+Inverse transform: unmasking data
+---------------------------------
+
+Once voxel signals have been processed, the result can be visualized as
+images after unmasking (masked-reduced data transformed back into
+the original whole-brain space). This step is present in many
+:ref:`examples ` provided in nilearn. Below you will find
+an excerpt of :ref:`the example performing Anova-SVM on the Haxby data
+<sphx_glr_auto_examples_02_decoding_plot_haxby_anova_svm.py>`:
+
+.. literalinclude:: ../../examples/02_decoding/plot_haxby_anova_svm.py
+ :start-after: # Look at the SVC's discriminating weights
+ :end-before: # Use the mean image as a background
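+
+In code, unmasking is a single call; a sketch, where ``weights`` stands for
+any 1D array of per-voxel values produced by the analysis::
+
+    >>> weight_img = masker.inverse_transform(weights)  # doctest: +SKIP
+    >>> weight_img.to_filename('weights.nii')  # doctest: +SKIP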
+
+|
+
+.. topic:: **Examples to better understand the NiftiMasker**
+
+ * :ref:`sphx_glr_auto_examples_04_manipulating_images_plot_nifti_simple.py`
+
+ * :ref:`sphx_glr_auto_examples_04_manipulating_images_plot_mask_computation.py`
+
+|
+
+.. _region:
+
+Extraction of signals from regions:\ :class:`NiftiLabelsMasker`, :class:`NiftiMapsMasker`
+==========================================================================================
+
+The purpose of :class:`NiftiLabelsMasker` and :class:`NiftiMapsMasker` is to
+compute signals from regions containing many voxels. They make it easy to get
+these signals once you have an atlas or a parcellation into brain regions.
+
+Regions definition
+------------------
+
+Nilearn understands two different ways of defining regions, which are called
+labels and maps, handled by :class:`NiftiLabelsMasker` and
+:class:`NiftiMapsMasker`, respectively.
+
+- labels: a single region is defined as the set of all the voxels that have a
+ common label (e.g., anatomical brain region definitions as integers)
+ in the region definition array. The set of
+ regions is defined by a single 3D array, containing a voxel-wise
+ dictionary of label numbers that denote what
+ region a given voxel belongs to. This technique has a big advantage: the
+ required memory load is independent of the number of regions, allowing
+ for a large number of regions. On the other hand, there are
+ several disadvantages: regions cannot spatially overlap
+ and are represented in a binary present/nonpresent coding (no weighting).
+
+- maps: a single region is defined as the set of all the voxels that have a
+ non-zero weight. A set of regions is thus defined by a set of 3D images (or a
+ single 4D image), one 3D image per region (as opposed to all regions in a
+ single 3D image such as for labels, cf. above).
+ While these defined weighted regions can exhibit spatial
+ overlap (as opposed to labels), storage cost scales linearly with the
+ number of regions. Handling a large number (e.g., thousands)
+ of regions will prove difficult with this data transformation of
+ whole-brain voxel data into weighted region-wise data.
+
+.. note::
+
+   These usages are illustrated in the section :ref:`functional_connectomes`.
+
+:class:`NiftiLabelsMasker` Usage
+--------------------------------
+
+Usage of :class:`NiftiLabelsMasker` is similar to that of
+:class:`NiftiMapsMasker`. The main difference is that it requires a labels image
+instead of a set of maps as input.
+
+The `background_label` keyword of :class:`NiftiLabelsMasker` deserves
+some explanation. The voxels that correspond to the brain or a region
+of interest in an fMRI image do not fill the entire image.
+Consequently, in the labels image, there must be a label value that corresponds
+to "outside" the brain (for which no signal should be extracted).
+By default, this label is set to zero in nilearn (referred to as "background").
+Should some non-zero value encoding be necessary, it is possible
+to change the background value with the `background_label` keyword.
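+
+A minimal usage sketch (file names are stand-ins)::
+
+    >>> from nilearn.input_data import NiftiLabelsMasker  # doctest: +SKIP
+    >>> masker = NiftiLabelsMasker(labels_img='atlas_labels.nii',
+    ...                            standardize=True)  # doctest: +SKIP
+    >>> signals = masker.fit_transform('func.nii.gz')  # time points x regions  # doctest: +SKIP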
+
+.. topic:: **Examples**
+
+ * :ref:`sphx_glr_auto_examples_03_connectivity_plot_signal_extraction.py`
+
+:class:`NiftiMapsMasker` Usage
+------------------------------
+
+A maps masker is used with atlases that define their regions as maps. The
+path to the corresponding file is given in the `maps_img` argument.
+
+One important thing that happens transparently during the execution of
+:meth:`NiftiMapsMasker.fit_transform` is resampling. Initially, the images
+and the atlas typically do not have the same shape or the same affine.
+Casting them into the same format is required for successful signal
+extraction. The keyword argument `resampling_target` specifies which format
+(i.e., dimensions and affine) the data should be resampled to.
+See the reference documentation for :class:`NiftiMapsMasker` for every
+possible option.
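+
+A minimal usage sketch (file names are stand-ins)::
+
+    >>> from nilearn.input_data import NiftiMapsMasker  # doctest: +SKIP
+    >>> masker = NiftiMapsMasker(maps_img='atlas_maps.nii',
+    ...                          resampling_target='data')  # doctest: +SKIP
+    >>> signals = masker.fit_transform('func.nii.gz')  # time points x maps  # doctest: +SKIP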
+
+.. topic:: **Examples**
+
+ * :ref:`sphx_glr_auto_examples_03_connectivity_plot_probabilistic_atlas_extraction.py`
+
+Extraction of signals from seeds:\ :class:`NiftiSpheresMasker`
+===============================================================
+
+The purpose of :class:`NiftiSpheresMasker` is to compute signals from
+seeds containing voxels in spheres. It makes it easy to get these signals once
+you have a list of coordinates.
+A single seed is a sphere defined by the radius (in millimeters) and the
+coordinates (typically MNI or TAL) of its center.
+
+Using :class:`NiftiSpheresMasker` requires defining a list of coordinates:
+the `seeds` argument takes a list of 3D coordinates (tuples) of the sphere
+centers, which should be in the same space as the images.
+Seeds can overlap spatially and are represented in a binary present/nonpresent
+coding (no weighting).
+Below is an example of a coordinates list of four seeds from the default mode network::
+
+ >>> dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (0, 50, -5)]
+
+`radius` is an optional argument that takes a real value in millimeters.
+If no value is given for the `radius` argument, the single voxel at the given
+seed position is used.
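+
+A minimal usage sketch building on the coordinates above (the radius and the
+file name are stand-ins)::
+
+    >>> from nilearn.input_data import NiftiSpheresMasker  # doctest: +SKIP
+    >>> masker = NiftiSpheresMasker(seeds=dmn_coords, radius=8.)  # doctest: +SKIP
+    >>> time_series = masker.fit_transform('func.nii.gz')  # time points x seeds  # doctest: +SKIP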
+
+.. topic:: **Examples**
+
+ * :ref:`sphx_glr_auto_examples_03_connectivity_plot_adhd_spheres.py`
diff --git a/doc/manipulating_visualizing/data_preparation.rst b/doc/manipulating_visualizing/data_preparation.rst
deleted file mode 100644
index 7ea3dc1081..0000000000
--- a/doc/manipulating_visualizing/data_preparation.rst
+++ /dev/null
@@ -1,463 +0,0 @@
-.. _extracting_data:
-
-=========================================================
-Data preparation: loading and basic signal extraction
-=========================================================
-
-.. contents:: **Contents**
- :local:
- :depth: 1
-
-|
-
-.. topic:: **File names as arguments**
-
- Nilearn functions and objects accept file names as arguments::
-
- >>> from nilearn import image
- >>> smoothed_img = image.smooth_img('/home/user/t_map001.nii') # doctest: +SKIP
-
- Nilearn can operate on either file names or `NiftiImage objects
- `_. The later represent
- the specified nifti files loaded in memory.
-
- In nilearn, we often use the term 'niimg' as abbreviation that denotes
- either a file name or a NiftiImage object. In the example above, the
- function smooth_img returns a NiftiImage object, which can then be
- readily passed to any other nilearn function that accepts niimg
- arguments.
-
- Niimgs can be 3D or 4D, and a 4D niimg can be a list of file names, or
- even a *wildcard* matching patterns. The '~' symbol is also expanded to the
- user home folder.For instance, to retrieve a 4D volume of
- all t maps smoothed::
-
- >>> smoothed_imgs = image.smooth_img('~/t_map*.nii') # doctest: +SKIP
-
-
-|
-
-The concept of "masker" objects
-=================================
-
-In any analysis, the first step is to load the data.
-It is often convenient to apply some basic data
-transformations and to turn the data in a 2D (samples x features) matrix,
-where the samples could be different time points, and the features derived
-from different voxels (e.g., restrict analysis to the ventral visual stream),
-regions of interest (e.g., extract local signals from spheres/cubes), or
-prespecified networks (e.g., look at data from all voxels of a set of
-network nodes). Think of masker objects as swiss army knifes for shaping
-the raw neuroimaging data in 3D space into the units of observation
-relevant for the research questions at hand.
-
-
-.. |niimgs| image:: ../images/niimgs.jpg
- :scale: 50%
-
-.. |arrays| image:: ../images/feature_array.jpg
- :scale: 35%
-
-.. |arrow| raw:: html
-
- →
-
-.. centered:: |niimgs| |arrow| |arrays|
-
-
-
-"masker" objects (found in modules :mod:`nilearn.input_data`) aim at
-simplifying these "data folding" steps that often preceed the actual
-statistical analysis.
-
-On an advanced note,
-the underlying philosophy of these classes is similar to `scikit-learn
-`_\ 's
-transformers. First, objects are initialized with some parameters guiding
-the transformation (unrelated to the data). Then the fit() method
-should be called, possibly specifying some data-related
-information (such as number of images to process), to perform some
-initial computation (e.g., fitting a mask based on the data). Finally,
-transform() can be called, with the data as argument, to perform some
-computation on data themselves (e.g. extracting time series from images).
-
-Note that the masker objects may not cover all the image transformations
-for specific tasks. Users who want to make some specific processing may
-have to call low-level functions (see e.g. :mod:`nilearn.signal`,
-:mod:`nilearn.masking`).
-
-.. currentmodule:: nilearn.input_data
-
-.. _nifti_masker:
-
-:class:`NiftiMasker`: loading, masking and filtering
-=========================================================
-
-This section details how to use the :class:`NiftiMasker` class.
-:class:`NiftiMasker` is a
-powerful tool to load images and extract voxel signals in the area
-defined by the mask. It is designed to apply some basic preprocessing
-steps by default with commonly used parameters as defaults. But it is
-*very important* to look at your data to see the effects of the
-preprocessings and validate them.
-
-In particular, :class:`NiftiMasker` is a `scikit-learn
-`_ compliant
-transformer so that you can directly plug it into a `scikit-learn
-pipeline `_.
-
-Custom data loading
---------------------
-
-Sometimes, some custom preprocessing of data is necessary. For instance
-we can restrict a dataset to the first 100 frames. Below, we load
-a resting-state dataset with :func:`fetch_fetch_nyu_rest()
-`, restrict it to 100 frames and
-build a brand new Nifti-like object to give it to the masker. Although
-possible, there is no need to save your data to a file to pass it to a
-:class:`NiftiMasker`. Simply use `nibabel
-`_ to create a :ref:`Niimg `
-in memory:
-
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py
- :start-after: Load NYU resting-state dataset
- :end-before: # To display the background
-
-Controlling how the mask is computed from the data
------------------------------------------------------
-
-In this tutorial, we show how the masker object can compute a mask
-automatically for subsequent statistical analysis.
-On some datasets, the default algorithm may however perform poorly.
-This is why it is very important to
-**always look at your data** before and after feature
-engineering using masker objects.
-
-Computing the mask
-...................
-
-.. note::
-
- The full example described in this section can be found here:
- :doc:`plot_mask_computation.py <../auto_examples/manipulating_visualizing/plot_mask_computation>`.
- It is also related to this example:
- :doc:`plot_nifti_simple.py <../auto_examples/manipulating_visualizing/plot_nifti_simple>`.
-
-If a mask is not specified as an argument,
-:class:`NiftiMasker` will try to compute
-one from the provided neuroimaging data.
-It is *very important* to verify the quality of the generated mask by
-visualization. This allows to see whether it
-is suitable for your data and intended analyses.
-Alternatively, the mask computation parameters can still be modified. See the
-:class:`NiftiMasker` documentation for a complete list of mask computation
-parameters.
-
-As a first example, we will now automatically build a mask from a dataset.
-We will here use the Haxby dataset because it provides the original mask that
-we can compare the data-derived mask against.
-
-The first step is to generate a mask with default parameters and visualize it.
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py
- :start-after: # Simple mask extraction from EPI images
- :end-before: # Generate mask with strong opening
-
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_mask_computation_002.png
- :target: ../auto_examples/plot_mask_computation.html
- :scale: 50%
-
-
-We can then fine-tune the outline of the mask by increasing the number of
-opening steps (*opening=10*) using the `mask_args` argument of the
-:class:`NiftiMasker`. This effectively performs erosion and dilation operations
-on the outer voxel layers of the mask, which can for example remove remaining
-skull parts in the image.
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py
- :start-after: # Generate mask with strong opening
- :end-before: # Generate mask with a high lower cutoff
-
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_mask_computation_003.png
- :target: ../auto_examples/plot_mask_computation.html
- :scale: 50%
-
-
-Looking at the :func:`nilearn.masking.compute_epi_mask` called by the
-:class:`NiftiMasker` object, we see two interesting parameters:
-*lower_cutoff* and *upper_cutoff*. These set the grey-value bounds in
-which the masking algorithm will search for its threshold
-(0 being the minimum of the image and 1 the maximum). We will here increase
-the lower cutoff to enforce selection of those
-voxels that appear as bright in the EPI image.
-
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py
- :start-after: # Generate mask with a high lower cutoff
- :end-before: ################################################################################
-
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_mask_computation_004.png
- :target: ../auto_examples/plot_mask_computation.html
- :scale: 50%
-
-
-
-
-Common data preparation steps: resampling, smoothing, filtering
------------------------------------------------------------------
-
-.. seealso::
-
- If you do not want to use the :class:`NiftiMasker` to perform these
- simple operations on data, note that they can also be manually
- accessed in nilearn such as in
- :ref:`corresponding functions `.
-
-.. _resampling:
-
-Resampling
-..........
-
-:class:`NiftiMasker` and many similar classes enable resampling
-(recasting of images into different resolutions and transformations of
-brain voxel data). The resampling procedure takes as input the
-*target_affine* to resample (resize, rotate...) images in order to match
-the spatial configuration defined by the new affine (i.e., matrix
-transforming from voxel space into world space). Additionally, a
-*target_shape* can be used to resize images (i.e., cropping or padding
-with zeros) to match an expected data image dimensions (shape composed of
-x, y, and z).
-
-As a common use case, resampling can be a viable means to
-downsample image quality on purpose to increase processing speed
-and lower memory consumption of an analysis pipeline.
-In fact, certain image viewers (e.g., FSLView) also require images to be
-resampled to display overlays.
-
-On an advanced note,
-automatic computation of offset and bounding box can be performed by
-specifying a 3x3 matrix instead of the 4x4 affine.
-In this case, nilearn
-computes automatically the translation part of the transformation
-matrix (i.e., affine).
-
-.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_affine_transformation_002.png
- :target: ../auto_examples/plot_affine_transformation.html
- :scale: 33%
-.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_affine_transformation_004.png
- :target: ../auto_examples/plot_affine_transformation.html
- :scale: 33%
-.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_affine_transformation_003.png
- :target: ../auto_examples/plot_affine_transformation.html
- :scale: 33%
-
-
-.. topic:: **Special case: resampling to a given voxel size**
-
- Specifying a 3x3 matrix that is diagonal as a target_affine fixes the
- voxel size. For instance to resample to 3x3x3 mm voxels::
-
- >>> import numpy as np
- >>> target_affine = np.diag((3, 3, 3))
-
-|
-
-.. seealso::
-
- :func:`nilearn.image.resample_img`
-
-
-Smoothing
-.........
-
-:class:`NiftiMasker` can further be used for local spatial filtering of
-the neuroimaging data to make the data more homogeneous and thus account
-for inter-individual differences in neuroanatomy.
-It is achieved by passing the full-width
-half maximum (FWHM; in millimeter scale)
-along the x, y, and z image axes by specifying the `smoothing_fwhm` parameter.
-For an isotropic filtering, passing a scalar is also possible. The underlying
-function handles properly the tricky case of non-cubic voxels by scaling the
-given widths appropriately.
-
-.. seealso::
-
- :func:`nilearn.image.smooth_img`
-
-
-.. _temporal_filtering:
-
-Temporal Filtering
-..................
-
-Rather than optimizing spatial properties of the neuroimaging data,
-the user may want to improve aspects of temporal data properties,
-before conversion to voxel signals.
-:class:`NiftiMasker` can also process voxel signals. Here are the possibilities:
-
-- Confound removal. Two ways of removing confounds are provided. Any linear
- trend can be removed by activating the `detrend` option.
- This accounts for slow (as opposed to abrupt or transient) changes
- in voxel values along a series of brain images that are unrelated to the
- signal of interest (e.g., the neural correlates of cognitive tasks).
- It is not activated
- by default in :class:`NiftiMasker` but is recommended in almost all scenarios.
- More complex confounds can
- be removed by passing them to :meth:`NiftiMasker.transform`. If the
- dataset provides a confounds file, just pass its path to the masker.
-
-- Linear filtering. Low-pass and high-pass filters can be used to remove artifacts.
- It simply removes all voxel values lower or higher than the specified
- parameters, respectively.
- Care has been taken to automatically
- apply this processing to confounds if it appears necessary.
-
-- Normalization. Signals can be normalized (scaled to unit variance) before
- returning them. This is performed by default.
-
-.. topic:: **Exercise**
-
- You can, more as a training than as an exercise, try to play with
- the parameters in :ref:`sphx_glr_auto_examples_plot_haxby_simple.py`. Try to enable detrending
- and run the script: does it have a big impact on the result?
-
-
-.. seealso::
-
- :func:`nilearn.signal.clean`
-
-
-Inverse transform: unmasking data
-----------------------------------
-
-Once voxel signals have been processed, the result can be visualized as
-images after unmasking (masked-reduced data transformed back into
-the original whole-brain space). This step is present in almost all
-the :ref:`examples ` provided in nilearn. Below you will find
-an excerpt of :ref:`the example performing Anova-SVM on the Haxby data
-`):
-
-.. literalinclude:: ../../examples/decoding/plot_haxby_anova_svm.py
- :start-after: # Look at the SVC's discriminating weights
- :end-before: # Create the figure
-
-|
-
-.. topic:: **Examples to better understand the NiftiMasker**
-
- * :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_nifti_simple.py`
-
- * :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_mask_computation.py`
-
-
-.. _region:
-
-Extraction of signals from regions:\ :class:`NiftiLabelsMasker`, :class:`NiftiMapsMasker`.
-===========================================================================================
-
-The purpose of :class:`NiftiLabelsMasker` and :class:`NiftiMapsMasker` is to
-compute signals from regions containing many voxels. They make it easy to get
-these signals once you have an atlas or a parcellation into brain regions.
-
-Regions definition
-------------------
-
-Nilearn understands two different ways of defining regions, which are called
-labels and maps, handled by :class:`NiftiLabelsMasker` and
-:class:`NiftiMapsMasker`, respectively.
-
-- labels: a single region is defined as the set of all the voxels that have a
- common label (e.g., anatomical brain region definitions as integers)
- in the region definition array. The set of
- regions is defined by a single 3D array, containing a voxel-wise
- dictionary of label numbers that denote what
- region a given voxel belongs to. This technique has a big advantage: the
- required memory load is independent of the number of regions, allowing
- for a large number of regions. On the other hand, there are
- several disadvantages: regions cannot spatially overlap
- and are represented in a binary present-nonpresent coding (no weighting).
-- maps: a single region is defined as the set of all the voxels that have a
- non-zero weight. A set of regions is thus defined by a set of 3D images (or a
- single 4D image), one 3D image per region (as opposed to all regions in a
- single 3D image such as for labels, cf. above).
- While these defined weighted regions can exhibit spatial
- overlap (as opposed to labels), storage cost scales linearly with the
- number of regions. Handling a large number (e.g., thousands)
- of regions will prove
- difficult with this data transformation of whole-brain voxel data
- into weighted region-wise data.
-
-.. note::
-
- These usage are illustrated in the section :ref:`functional_connectomes`
-
-:class:`NiftiLabelsMasker` Usage
----------------------------------
-
-Usage of :class:`NiftiLabelsMasker` is similar to that of
-:class:`NiftiMapsMasker`. The main difference is that it requires a labels image
-instead of a set of maps as input.
-
-The `background_label` keyword of :class:`NiftiLabelsMasker` deserves
-some explanation. The voxels that correspond to the brain or a region
-of interest in an fMRI image do not fill the entire
-image. Consequently, in the labels image, there must be a label value that
-corresponds to "outside" the brain (for which no signal should be
-extracted). By default, this label is set to zero in nilearn
-(refered to as "background").
-Should some non-zero value encoding be necessary, it is
-possible to change the background value with the `background_label`
-keyword.
-
-.. topic:: **Examples**
-
- * :ref:`sphx_glr_auto_examples_connectivity_plot_signal_extraction.py`
-
-:class:`NiftiMapsMasker` Usage
-------------------------------
-
-This atlas defines its regions using maps. The path to the corresponding
-file is given in the "maps_img" argument.
-
-One important thing that happens transparently during the execution of
-:meth:`NiftiMasker.fit_transform` is resampling. Initially, the images
-and the atlas do typically not have the same shape nor the same affine. Casting
-them into the same format is required for successful signal extraction
-The keyword argument `resampling_target` specifies which format (i.e.,
-dimensions and affine) the data should be resampled to.
-See the reference documentation for :class:`NiftiMapsMasker` for every
-possible option.
-
-.. topic:: **Examples**
-
- * :ref:`sphx_glr_auto_examples_connectivity_plot_probabilistic_atlas_extraction.py`
-
-Extraction of signals from seeds:\ :class:`NiftiSpheresMasker`.
-==================================================================
-
-The purpose of :class:`NiftiSpheresMasker` is to compute signals from
-seeds containing voxels in spheres. It makes it easy to get these signals once
-you have a list of coordinates.
-A single seed is a sphere defined by the radius (in millimeters) and the
-coordinates (typically MNI or TAL) of its center.
-
-Using :class:`NiftiSpheresMasker` needs to define a list of coordinates.
-"seeds" argument takes a list of 3D coordinates (tuples) of the spheres centers,
-they should be in the same space as the images.
-Seeds can overlap spatially and are represented in a binary present-nonpresent
-coding (no weighting).
-Below is an example of a coordinates list of four seeds from the default mode network::
-
- >>> dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (0, 50, -5)]
-
-"radius" is an optional argument that takes a real value in millimeters.
-If no value is given for the "radius" argument, the single voxel at the given
-seed position is used.
-
-.. topic:: **Examples**
-
- * :ref:`sphx_glr_auto_examples_connectivity_plot_adhd_spheres.py`
diff --git a/doc/manipulating_visualizing/index.rst b/doc/manipulating_visualizing/index.rst
deleted file mode 100644
index e06891257e..0000000000
--- a/doc/manipulating_visualizing/index.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-.. include:: ../tune_toc.rst
-
-
-.. _manipulation_visualization:
-
-============================================
-Image manipulation and visualization
-============================================
-
-In this section, we detail the general tools to manipulation and
-visualize neuroimaging volume with nilearn.
-
-|
-
-.. include:: ../includes/big_toc_css.rst
-
-
-.. toctree::
-
- plotting.rst
- data_preparation.rst
- manipulating_images.rst
-
diff --git a/doc/manipulating_visualizing/manipulating_images.rst b/doc/manipulating_visualizing/manipulating_images.rst
deleted file mode 100644
index 476c6b5b28..0000000000
--- a/doc/manipulating_visualizing/manipulating_images.rst
+++ /dev/null
@@ -1,453 +0,0 @@
-.. _data_manipulation:
-
-=====================================================================
-Manipulating brain volume: input/output, masking, ROIs, smoothing...
-=====================================================================
-
-This chapter introduces the data structure of brain images and tools to
-manipulation these.
-
-
-.. contents:: **Chapters contents**
- :local:
- :depth: 1
-
-
-
-.. _loading_data:
-
-Loading data
-============
-
-.. currentmodule:: nilearn.datasets
-
-.. _datasets:
-
-Fetching open datasets
-----------------------
-
-The nilearn package provides a dataset fetching utility that
-automatically downloads reference
-datasets and atlases. Dataset fetching functions can be imported from
-:mod:`nilearn.datasets`::
-
- >>> from nilearn import datasets
- >>> haxby_files = datasets.fetch_haxby(n_subjects=1) # doctest: +SKIP
-
-They return a data structure that contains different pieces of
-information on the retrieved dataset, including the
-file names on hard disk::
-
- >>> # The different files
- >>> print(sorted(list(haxby_files.keys()))) # doctest: +SKIP
- ['anat', 'description', 'func', 'mask_face', 'mask_face_little', 'mask_house', 'mask_house_little', 'mask_vt', 'session_target']
- >>> # Path to first functional file
- >>> print(haxby_files.func[0]) # doctest: +ELLIPSIS +SKIP
- /.../nilearn_data/haxby2001/subj1/bold.nii.gz
- >>> # Provide information on the dataset
- >>> print(haxby_files.description) # doctest: +ELLIPSIS +SKIP
- Haxby 2001 results
-
-
- Notes
- -----
- Results from a classical fMRI study that...
-
-|
-
-Explanation and further resources of the dataset at hand can be
-retrieved as follows:
-
- >>> print haxby_dataset['description'] # doctest: +SKIP
-
-For a list of all the data fetching functions in nilearn, see :ref:`datasets_ref`.
-
-Besides convenient downloading of openly accessible reference datasets
-including important meta-data (e.g., stimulus characteristics and
-participant information for confound removal), the fetching functions
-perform data downloads only once and return the locally saved data upon
-any later function calls.
-The locally stored data can be found in one of the
-following directories (in order of priority, if present):
-
- * default system paths used by third party software that may already
- provide the data (e.g., the Harvard-Oxford atlas
- is provided by the FSL software suite)
- * the folder specified by `data_dir` parameter in the fetching function
- * the global environment variable `NILEARN_SHARED_DATA`
- * the user environment variable `NILEARN_DATA`
- * the `nilearn_data` folder in the user home folder
-
-Two different environment variables are provided to distinguish a global dataset
-repository that may be read-only at the user-level.
-Note that you can copy that folder to another user's computers to avoid
-the initial dataset download on the first fetching call.
-
-
-Loading your own data
----------------------
-
-Using your own data images in nilearn is as simple as creating a list of
-file name strings ::
-
- # dataset folder contains subject1.nii and subject2.nii
- my_data = ['dataset/subject1.nii', 'dataset/subject2.nii']
-
-Nilearn also provides a "wildcard" pattern to list many files with one
-expression:
-
-::
-
- >>> # dataset folder contains subject_01.nii to subject_03.nii
- >>> # dataset/subject_*.nii is a glob expression matching all filenames.
- >>> # Example with a smoothing process:
- >>> from nilearn.image import smooth_img
- >>> result_img = smooth_img("dataset/subject_*") # doctest: +SKIP
-
-.. topic:: **Python globbing**
-
- For more complicated use cases, Python also provides functions to work
- with file paths, in particular, :func:`glob.glob`.
-
- .. warning::
-
- Unlike nilearn's path expansion, the result of :func:`glob.glob` is
- not sorted and depending on the computer you are running they
- might not be in alphabetic order. We advise you to rely on
- nilearn's path expansion.
-
-Understanding neuroimaging data
-===============================
-
-Nifti and Analyze files
------------------------
-
-.. topic:: **NIfTI and Analyze file structures**
-
- `NifTi `_ files (or Analyze files) are
- the standard way of sharing data in neuroimaging research.
- Three main components are:
-
- :data:
- raw scans in form of a numpy array: ``data = img.get_data()``
- :affine:
- returns the transformation matrix that maps
- from voxel indices of the numpy array to actual real-world
- locations of the brain:
- ``affine = img.get_affine()``
- :header:
- low-level informations about the data (slice duration, etc.):
- ``header = img.get_header()``
-
-
-Neuroimaging data can be loaded in a simple way thanks to nibabel_.
-A Nifti file on disk can be loaded with a single line.
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_visualization.py
- :start-after: # Fetch data
- :end-before: # Visualization
-
-.. topic:: **Dataset formatting: data shape**
-
- It is important to appreciate two main representations for
- storing and accessing more than one Nifti images, that is sets
- of MRI scans:
-
- - a big 4D matrix representing (3D MRI + 1D for time), stored in a single
- Nifti file.
- `FSL `_ users tend to
- prefer this format.
- - several 3D matrices representing each time point (single 3D volume) of the
- session, stored in set of 3D Nifti or analyse files.
- `SPM `_ users tend
- to prefer this format.
-
-.. _niimg:
-
-Niimg-like objects
--------------------
-
-As a baseline, nilearn functions take as input argument what we call
-"Niimg-like objects":
-
-**Niimg:** A Niimg-like object can be one of the following:
-
- * A string variable with a file path to a Nifti or Analyse image
- * Any object exposing ``get_data()`` and ``get_affine()`` methods, typically
- a ``Nifti1Image`` from nibabel_.
-
-**Niimg-4D:** Similarly, some functions require 4D Nifti-like
-data, which we call Niimgs or Niimg-4D. Accepted input arguments are:
-
- * A path to a 4D Nifti image
- * List of paths to 3D Nifti images
- * 4D Nifti-like object
- * List of 3D Nifti-like objects
-
-.. note:: **Image affines**
-
- If you provide a sequence of Nifti images, all of them must have the same
- affine.
-
-Text files: phenotype or behavior
-----------------------------------
-
-Phenotypic or behavioral data are often provided as text or CSV
-(Comma Separated Values) file. They
-can be loaded with `numpy.genfromtxt` but you may have to specify some options
-(typically `skip_header` ignores column titles if needed).
-
-For the Haxby datasets, we can load the categories of the images
-presented to the subject::
-
- >>> from nilearn import datasets
- >>> haxby_dataset = datasets.fetch_haxby(n_subjects=1) # doctest: +SKIP
- >>> import numpy as np
- >>> labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ") # doctest: +SKIP
- >>> stimuli = labels['labels'] # doctest: +SKIP
- >>> print(np.unique(stimuli)) # doctest: +SKIP
- ['bottle' 'cat' 'chair' 'face' 'house' 'rest' 'scissors' 'scrambledpix'
- 'shoe']
-
-|
-
-Masking data manually
-=====================
-
-Extracting a brain mask
-------------------------
-
-If we do not have a spatial mask of the target regions, a brain mask
-can be easily extracted from the fMRI data by the
-:func:`nilearn.masking.compute_epi_mask` function:
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_visualization_002.png
- :target: ../auto_examples/manipulating_visualizing/plot_visualization.html
- :align: right
- :scale: 50%
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_visualization.py
- :start-after: # Extracting a brain mask
- :end-before: # Applying the mask to extract the corresponding time series
-
-
-.. _mask_4d_2_3d:
-
-From 4D Nifti images to 2D data arrays
---------------------------------------
-
-fMRI data is usually represented as a 4D block of data: 3 spatial
-dimensions and one time dimension. In practice, we are usually
-interested in working on the voxel time-series in the
-brain. It is thus convenient to apply a brain mask in order to convert the
-4D brain images representation into a restructured 2D data representation,
-`voxel` **x** `time`, as depicted below:
-
-.. image:: ../images/masking.jpg
- :align: center
- :width: 100%
-
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_visualization.py
- :start-after: # Applying the mask to extract the corresponding time series
- :end-before: # Find voxels of interest
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_visualization_003.png
- :target: ../auto_examples/manipulating_visualizing/plot_visualization.html
- :align: center
- :scale: 50
-
-.. _preprocessing_functions:
-
-Functions for data preparation steps
-=====================================
-
-.. currentmodule:: nilearn.input_data
-
-The :class:`NiftiMasker` can automatically perform important data preparation
-steps. These steps are also available as independent functions if you want to
-set up your own data preparation procedure:
-
-.. currentmodule:: nilearn
-
-* Resampling: :func:`nilearn.image.resample_img`. See the example
- :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_affine_transformation.py` to
- see the effect of affine transforms on data and bounding boxes.
-* Computing the mean of images (along the time/4th dimension):
- :func:`nilearn.image.mean_img`
-* Swapping voxels of both hemisphere (e.g., useful to homogenize masks
- inter-hemispherically):
- :func:`nilearn.image.swap_img_hemispheres`
-* Smoothing: :func:`nilearn.image.smooth_img`
-* Masking:
-
- * compute from EPI images: :func:`nilearn.masking.compute_epi_mask`
- * compute from images with a flat background:
- :func:`nilearn.masking.compute_background_mask`
- * compute for multiple sessions/subjects:
- :func:`nilearn.masking.compute_multi_epi_mask`
- :func:`nilearn.masking.compute_multi_background_mask`
- * apply: :func:`nilearn.masking.apply_mask`
- * intersect several masks (useful for multi sessions/subjects): :func:`nilearn.masking.intersect_masks`
- * unmasking: :func:`nilearn.masking.unmask`
-
-* Cleaning signals (e.g., linear detrending, standardization,
- confound removal, low/high pass filtering): :func:`nilearn.signal.clean`
-
-
-Image operations: creating a ROI mask manually
-===============================================
-
-This section shows manual steps to create and further modify a ROI
-(region of interest) spatial mask. They represent a means for "data folding",
-that is, extracting and later analyzing data from a subset of voxels rather
-than the entire brain images. As a convenient side effect, this can help
-alleviate the curse of dimensionality (i.e., statistical problems that
-arise in the context of high-dimensional input variables).
-
-Smoothing
----------
-
-Functional MRI data have a low signal-to-noise ratio (yet much better
-than EEG or MEG measurements).
-When using simple methods
-that are not robust to noise, it is useful to apply a spatial filtering
-kernel on the data. Such data smoothing is
-usually applied using a Gaussian function with 4mm to 12mm full-width at
-half-maximum (this is where the FWHM comes from).
-The function :func:`nilearn.image.smooth_img` accounts for potential
-anisotropy in the image affine (i.e., non-identical voxel size in all
-the three dimensions). Analogous to the majority of nilearn functions,
-it can also use file names as input parameters.
-
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py
- :start-after: # Smooth the data
- :end-before: # Run a T-test for face and houses
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_001.png
- :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html
- :align: center
- :scale: 50%
-
-Selecting features
-------------------
-
-Functional MRI data can be considered "high dimensional" given the
-p-versus-n ratio (e.g., p=~50,000-200,000 voxels for n=1000 samples).
-In this setting, machine-learning
-algorithms can perform poorly (i.e., curse-of-dimensionality problem).
-However, simple means from the realms of classical statistics can help
-reducing the number of voxels.
-
-The Student's t-test (:func:`scipy.stats.ttest_ind`) is an established
-method to determine whether two
-distributions are statistically different. It can be used to compare voxel
-time-series from two different experimental conditions
-(e.g., when houses or faces are shown to individuals during brain scanning).
-If the time-series distribution is similar in the two conditions, then the
-voxel is not very interesting to discriminate the condition.
-
-This test returns p-values that represent probabilities that the two
-time-series had been drawn from the same distribution. The lower is the p-value, the
-more discriminative is the voxel in distinguishing the two conditions.
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py
- :start-after: # Run a T-test for face and houses
- :end-before: # Build a mask from this statistical map
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_002.png
- :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html
- :align: center
- :scale: 50%
-
-This feature selection method is available in the scikit-learn Python
-package, where it has been
-extended to several classes, using the
-:func:`sklearn.feature_selection.f_classif` function.
-
-Thresholding
-------------
-
-Voxels with better p-values are kept as voxels of interest.
-Applying a threshold to an array
-is easy thanks to numpy indexing à la Matlab.
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py
- :start-after: # Thresholding
- :end-before: # Binarization and intersection with VT mask
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_003.png
- :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html
- :align: center
- :scale: 50%
-
-Mask intersection
------------------
-
-We now want to restrict our investigation to the ventral temporal area. The
-corresponding spatial mask is provided in `haxby.mask_vt`.
-We want to compute the
-intersection of this provided mask with our self-computed mask.
-The first step is to load it with
-nibabel's **nibabel.load**. We can then use a logical "and" operation
--- **numpy.logical_and** -- to keep only voxels
-that have been selected in both masks. In neuroimaging jargon, this is
-called an "AND conjunction."
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py
- :start-after: # Binarization and intersection with VT mask
- :end-before: # Dilation
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_004.png
- :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html
- :align: center
- :scale: 50%
-
-Mask dilation
--------------
-
-Tresholded functional brain images often contain scattered voxels
-across the brain.
-To consolidate such brain images towards more
-compact shapes, we use a `morphological dilation `_. This is a common step to be sure
-not to forget voxels located on the edge of a ROI.
-Put differently, such operations can fill "holes" in masked voxel
-representations.
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py
- :start-after: # Dilation
- :end-before: # Identification of connected components
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_005.png
- :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html
- :align: center
- :scale: 50%
-
-Extracting connected components
--------------------------------
-
-The function **scipy.ndimage.label** from the scipy Python library
-identifies immediately neighboring
-voxels in our voxels mask. It assigns a separate integer label to each
-one of them.
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py
- :start-after: # Identification of connected components
- :end-before: # Use the new ROIs to extract data maps in both ROIs
-
-.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_006.png
- :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html
- :align: center
- :scale: 50%
-
-Saving the result
------------------
-
-The final voxel mask is saved using nibabel for further inspection
-with a software such as FSLView.
-
-.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py
- :start-after: # save the ROI 'atlas' to a single output Nifti
-
-.. _nibabel: http://nipy.sourceforge.net/nibabel/
diff --git a/doc/manipulating_visualizing/plotting.rst b/doc/manipulating_visualizing/plotting.rst
deleted file mode 100644
index 09227e452e..0000000000
--- a/doc/manipulating_visualizing/plotting.rst
+++ /dev/null
@@ -1,267 +0,0 @@
-.. _plotting:
-
-======================
-Plotting brain images
-======================
-
-Nilearn comes with plotting function to display brain maps coming from
-Nifti-like images, in the :mod:`nilearn.plotting` module.
-
-.. currentmodule:: nilearn.plotting
-
-Different plotting functions
-=============================
-
-Nilearn has a set of plotting functions to plot brain volumes that are
-fined tuned to specific applications. Amongst other things, they use
-different heuristics to find cutting coordinates.
-
-.. |plot_stat_map| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_001.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html
- :scale: 50
-
-.. |plot_glass_brain| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_glass_brain_extensive_001.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_glass_brain_extensive.html
- :scale: 50
-
-.. |plot_connectome| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png
- :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html
- :scale: 50
-
-.. |plot_anat| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_003.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html
- :scale: 50
-
-.. |plot_roi| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_004.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html
- :scale: 50
-
-.. |plot_epi| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_005.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html
- :scale: 50
-
-.. |plot_prob_atlas| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_prob_atlas_003.png
- :target: ../auto_examples/manipulating_visualizing/plot_prob_atlas.html
- :scale: 50
-
-.. A temporary hack to avoid a sphinx bug
-.. |hack| raw:: html
-
-
-
-
-=================== =========================================================
-=================== =========================================================
-|plot_anat| :func:`plot_anat`
- |hack|
- Plotting an anatomical image
-
-|plot_epi| :func:`plot_epi`
- |hack|
- Plotting an EPI, or T2* image
-
-|plot_glass_brain| :func:`plot_glass_brain`
- |hack|
- Glass brain visualization. By default plots maximum
- intensity projection of the absolute values. To plot
- positive and negative values set plot_abs parameter to
- False.
-
-|plot_stat_map| :func:`plot_stat_map`
- |hack|
- Plotting a statistical map, like a T-map, a Z-map, or
- an ICA, with an optional background
-
-|plot_roi| :func:`plot_roi`
- |hack|
- Plotting ROIs, or a mask, with an optional background
-
-|plot_connectome| :func:`plot_connectome`
- |hack|
- Plotting a connectome
-
-|plot_prob_atlas| :func:`plot_prob_atlas`
- |hack|
- Plotting 4D probabilistic atlas maps
-
-**plot_img** :func:`plot_img`
- |hack|
- General-purpose function, with no specific presets
-=================== =========================================================
-
-
-.. warning:: **Opening too many figures without closing**
-
- Each call to a plotting function creates a new figure by default. When
- used in non-interactive settings, such as a script or a program, these
- are not displayed, but still accumulate and eventually lead to slowing
- the execution and running out of memory.
-
- To avoid this, you must close the plot as follow::
-
- >>> from nilearn import plotting
- >>> display = plotting.plot_stat_map(img) # doctest: +SKIP
- >>> display.close() # doctest: +SKIP
-
-.. seealso::
-
- :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_dim_plotting.py`
-
-Different display modes
-========================
-
-.. |plot_ortho| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_001.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html
- :scale: 50
-
-.. |plot_z_many| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_002.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html
- :scale: 30
-
-.. |plot_x| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_003.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html
- :scale: 50
-
-.. |plot_x_small| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_004.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html
- :scale: 50
-
-.. |plot_z_small| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_005.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html
- :scale: 50
-
-.. |plot_xz| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_006.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html
- :scale: 50
-
-.. |plot_yx| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_007.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html
- :scale: 50
-
-.. |plot_yz| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_008.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html
- :scale: 50
-
-
-================= =========================================================
-================= =========================================================
-|plot_ortho| `display_mode='ortho', cut_coords=(36, -27, 60)`
- |hack|
- Ortho slicer: 3 cuts along the x, y, z directions
-
-|plot_z_many| `display_mode='z', cut_coords=5`
- |hack|
- Cutting in the z direction, specifying the number of
- cuts
-
-|plot_x| `display_mode='x', cut_coords=(-36, 36)`
- |hack|
- Cutting in the x direction, specifying the exact
- cuts
-
-|plot_x_small| `display_mode='x', cut_coords=1`
- |hack|
- Cutting in the x direction, with only 1 cut, that is
- automatically positionned
-
-|plot_z_small| `display_mode='z', cut_coords=1, colorbar=False`
- |hack|
- Cutting in the z direction, with only 1 cut, that is
- automatically positionned
-
-|plot_xz| `display_mode='xz', cut_coords=(36, 60)`
- |hack|
- Cutting in the x and z direction, with cuts manually
- positionned
-
-|plot_yx| `display_mode='yx', cut_coords=(-27, 36)`
- |hack|
- Cutting in the y and x direction, with cuts manually
- positionned
-
-|plot_yz| `display_mode='yz', cut_coords=(-27, 60)`
- |hack|
- Cutting in the y and z direction, with cuts manually
- positionned
-
-
-================= =========================================================
-
-Adding overlays, edges and contours
-====================================
-
-To add overlays, contours, or edges, use the return value of the plotting
-functions. Indeed, these return a display object, such as the
-:class:`nilearn.plotting.displays.OrthoSlicer`. This object represents the
-plot, and has methods to add overlays, contours or edge maps::
-
- display = plotting.plot_epi(...)
-
-.. |plot_edges| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_009.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html
- :scale: 50
-
-.. |plot_contours| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_010.png
- :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html
- :scale: 50
-
-================= =========================================================
-================= =========================================================
-|plot_edges| `display.add_edges(img)`
- |hack|
- Add a plot of the edges of `img`, where edges are
- extracted using a Canny edge-detection routine. This
- is typically useful to check registration. Note that
- `img` should have some visible sharp edges. Typically
- an EPI img does not, but a T1 does.
-
-|plot_contours| `display.add_contours(img, levels=[.5], colors='r')`
- |hack|
- Add a plot of the contours of `img`, where contours
- are computed for constant values, specified in
- 'levels'. This is typically useful to outline a mask,
- or ROI on top of another map.
- |hack|
- **Example:** :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_haxby_masks.py`
-
-
-**add_overlay** `display.add_overlay(img, cmap=plotting.cm.purple_green, threshold=3)`
- |hack|
- Add a new overlay on the existing figure
- |hack|
- **Example:** :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_overlay.py`
-
-
-================= =========================================================
-
-Displaying or saving to an image file
-=====================================
-
-To display the figure when running a script, you need to call
-:func:`nilearn.plotting.show`: (this is just an alias to
-:func:`matplotlib.pyplot.show`)::
-
- >>> from nilearn import plotting
- >>> plotting.show() # doctest: +SKIP
-
-The simplest way to output an image file from the plotting functions is
-to specify the `output_file` argument::
-
- >>> from nilearn import plotting
- >>> plotting.plot_stat_map(img, output_file='pretty_brain.png') # doctest: +SKIP
-
-In this case, the display is closed automatically and the plotting
-function returns None.
-
-|
-
-The display object returned by the plotting function has a savefig method
-that can be used to save the plot to an image file::
-
- >>> from nilearn import plotting
- >>> display = plotting.plot_stat_map(img) # doctest: +SKIP
- >>> display.savefig('pretty_brain.png') # doctest: +SKIP
- # Don't forget to close the display
- >>> display.close() # doctest: +SKIP
-
-
diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst
index 336d21e00c..a3b0f3856e 100644
--- a/doc/modules/reference.rst
+++ b/doc/modules/reference.rst
@@ -27,7 +27,7 @@ uses.
.. autosummary::
:toctree: generated/
:template: class.rst
-
+
ConnectivityMeasure
GroupSparseCovariance
GroupSparseCovarianceCV
@@ -41,7 +41,11 @@ uses.
:template: function.rst
sym_to_vec
+ sym_matrix_to_vec
+ vec_to_sym_matrix
group_sparse_covariance
+ cov_to_corr
+ prec_to_partial
.. _datasets_ref:
@@ -66,20 +70,38 @@ uses.
fetch_atlas_destrieux_2009
fetch_atlas_harvard_oxford
fetch_atlas_msdl
- fetch_atlas_power_2011
+ fetch_coords_power_2011
fetch_atlas_smith_2009
fetch_atlas_yeo_2011
fetch_atlas_aal
+ fetch_atlas_basc_multiscale_2015
+ fetch_atlas_allen_2011
+ fetch_atlas_pauli_2017
+ fetch_coords_dosenbach_2010
fetch_abide_pcp
fetch_adhd
fetch_haxby
- fetch_haxby_simple
fetch_icbm152_2009
+ fetch_icbm152_brain_gm_mask
+ fetch_localizer_button_task
fetch_localizer_contrasts
fetch_localizer_calculation_task
fetch_miyawaki2008
fetch_nyu_rest
+ fetch_surf_nki_enhanced
+ fetch_surf_fsaverage
+ fetch_atlas_surf_destrieux
+ fetch_atlas_talairach
fetch_oasis_vbm
+ fetch_megatrawls_netmats
+ fetch_cobre
+ fetch_neurovault
+ fetch_neurovault_ids
+ fetch_neurovault_auditory_computation_task
+ fetch_neurovault_motor_task
+ get_data_dirs
+ load_mni152_template
+ load_mni152_brain_mask
.. _decoding_ref:
@@ -139,14 +161,21 @@ uses.
:toctree: generated/
:template: function.rst
- crop_img
+ clean_img
+ concat_imgs
+ coord_transform
copy_img
+ crop_img
+ high_variance_confounds
index_img
iter_img
- high_variance_confounds
+ largest_connected_component_img
+ load_img
+ math_img
mean_img
new_img_like
resample_img
+ resample_to_img
reorder_img
smooth_img
swap_img_hemispheres
@@ -198,6 +227,8 @@ uses.
compute_epi_mask
compute_multi_epi_mask
+ compute_gray_matter_mask
+ compute_multi_gray_matter_mask
compute_background_mask
compute_multi_background_mask
intersect_masks
@@ -220,6 +251,7 @@ uses.
:template: function.rst
connected_regions
+ connected_label_regions
img_to_signals_labels
signals_to_img_labels
img_to_signals_maps
@@ -233,7 +265,8 @@ uses.
:toctree: generated/
:template: class.rst
- RegionExtractor
+ RegionExtractor
+ Parcellations
:mod:`nilearn.mass_univariate`: Mass-univariate analysis
@@ -257,6 +290,7 @@ uses.
.. _plotting_ref:
+
:mod:`nilearn.plotting`: Plotting brain data
================================================
@@ -276,14 +310,25 @@ uses.
find_cut_slices
find_xyz_cut_coords
+ find_parcellation_cut_coords
+ find_probabilistic_atlas_cut_coords
plot_anat
plot_img
plot_epi
+ plot_matrix
plot_roi
plot_stat_map
plot_glass_brain
plot_connectome
plot_prob_atlas
+ plot_surf
+ plot_surf_roi
+ plot_surf_stat_map
+ view_surf
+ view_img_on_surf
+ view_connectome
+ view_markers
+ view_stat_map
show
**Classes**:
@@ -320,4 +365,23 @@ uses.
high_variance_confounds
+:mod:`nilearn.surface`: Manipulating surface data
+===================================================
+
+.. automodule:: nilearn.surface
+ :no-members:
+ :no-inherited-members:
+
+.. No relevant user manual section yet.
+
+**Functions**:
+
+.. currentmodule:: nilearn.surface
+
+.. autosummary::
+ :toctree: generated/
+ :template: function.rst
+ load_surf_data
+ load_surf_mesh
+ vol_to_surf
diff --git a/doc/plotting/index.rst b/doc/plotting/index.rst
new file mode 100644
index 0000000000..3684fa9ad6
--- /dev/null
+++ b/doc/plotting/index.rst
@@ -0,0 +1,506 @@
+.. _plotting:
+
+======================
+Plotting brain images
+======================
+
+In this section, we detail the general tools to visualize
+neuroimaging volumes with nilearn.
+
+Nilearn comes with plotting functions to display brain maps from
+Nifti-like images, in the :mod:`nilearn.plotting` module.
+
+.. contents:: **Contents**
+ :local:
+ :depth: 1
+
+.. topic:: **Code examples**
+
+ Nilearn has a whole section of the example gallery on plotting.
+
+ A small tour of the plotting functions can be found in the example
+ :ref:`sphx_glr_auto_examples_01_plotting_plot_demo_plotting.py`.
+
+ Finally, note that, as always in the nilearn documentation, clicking
+ on a figure will take you to the code that generates it.
+
+.. currentmodule:: nilearn.plotting
+
+Different plotting functions
+=============================
+
+Nilearn has a set of plotting functions to plot brain volumes that are
+fine-tuned to specific applications. Amongst other things, they use
+different heuristics to find cutting coordinates.
+
+.. |plot_stat_map| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_001.png
+ :target: ../auto_examples/01_plotting/plot_demo_plotting.html
+ :scale: 50
+
+.. |plot_glass_brain| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_extensive_001.png
+ :target: ../auto_examples/01_plotting/plot_demo_glass_brain_extensive.html
+ :scale: 50
+
+.. |plot_connectome| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_adhd_spheres_003.png
+ :target: ../auto_examples/03_connectivity/plot_adhd_spheres.html
+ :scale: 50
+
+.. |plot_anat| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_003.png
+ :target: ../auto_examples/01_plotting/plot_demo_plotting.html
+ :scale: 50
+
+.. |plot_roi| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_004.png
+ :target: ../auto_examples/01_plotting/plot_demo_plotting.html
+ :scale: 50
+
+.. |plot_epi| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_005.png
+ :target: ../auto_examples/01_plotting/plot_demo_plotting.html
+ :scale: 50
+
+.. |plot_prob_atlas| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_prob_atlas_003.png
+ :target: ../auto_examples/01_plotting/plot_prob_atlas.html
+ :scale: 50
+
+.. A temporary hack to avoid a sphinx bug
+.. |hack| raw:: html
+
+   <br>
+
+
+=================== =========================================================
+=================== =========================================================
+|plot_anat| :func:`plot_anat`
+ |hack|
+ Plotting an anatomical image
+
+|plot_epi| :func:`plot_epi`
+ |hack|
+ Plotting an EPI, or T2* image
+
+|plot_glass_brain| :func:`plot_glass_brain`
+ |hack|
+                    Glass brain visualization. By default, it plots the
+                    maximum intensity projection of the absolute values. To
+                    plot positive and negative values, set the `plot_abs`
+                    parameter to False.
+
+|plot_stat_map| :func:`plot_stat_map`
+ |hack|
+ Plotting a statistical map, like a T-map, a Z-map, or
+ an ICA, with an optional background
+
+|plot_roi| :func:`plot_roi`
+ |hack|
+ Plotting ROIs, or a mask, with an optional background
+
+|plot_connectome| :func:`plot_connectome`
+ |hack|
+ Plotting a connectome
+
+                    Functions that automatically extract coordinates from
+                    brain parcellations, useful for :func:`plot_connectome`,
+                    are demonstrated in
+                    **Example:** :ref:`sphx_glr_auto_examples_03_connectivity_plot_atlas_comparison.py`
+
+|plot_prob_atlas| :func:`plot_prob_atlas`
+ |hack|
+ Plotting 4D probabilistic atlas maps
+
+**plot_img** :func:`plot_img`
+ |hack|
+ General-purpose function, with no specific presets
+=================== =========================================================
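+
+For instance, a minimal call could look like this (a sketch: ``img`` stands
+for any Nifti image or filename, and the threshold and title values are
+purely illustrative)::
+
+    >>> from nilearn import plotting
+    >>> plotting.plot_stat_map(img, threshold=3, title='t-map') # doctest: +SKIP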
+
+
+.. warning:: **Opening too many figures without closing**
+
+   Each call to a plotting function creates a new figure by default. When
+   used in non-interactive settings, such as a script or a program, these
+   figures are not displayed, but they still accumulate, eventually slowing
+   execution and exhausting memory.
+
+   To avoid this, you must close the plot as follows::
+
+ >>> from nilearn import plotting
+ >>> display = plotting.plot_stat_map(img) # doctest: +SKIP
+ >>> display.close() # doctest: +SKIP
+
+|
+
+.. seealso::
+
+ :ref:`sphx_glr_auto_examples_01_plotting_plot_dim_plotting.py`
+
+Different display modes
+========================
+
+.. |plot_ortho| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_001.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 50
+
+.. |plot_z_many| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_002.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 30
+
+.. |plot_x| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_003.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 50
+
+.. |plot_y_small| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_004.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 50
+
+.. |plot_z_small| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_005.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 50
+
+.. |plot_xz| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_006.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 50
+
+.. |plot_yx| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_007.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 50
+
+.. |plot_yz| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_008.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 50
+
+.. |plot_lzr| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_extensive_006.png
+ :target: ../auto_examples/01_plotting/plot_demo_glass_brain_extensive.html
+ :scale: 50
+
+.. |plot_lyrz| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_extensive_007.png
+ :target: ../auto_examples/01_plotting/plot_demo_glass_brain_extensive.html
+ :scale: 50
+
+
+================= =========================================================
+================= =========================================================
+|plot_ortho| `display_mode='ortho', cut_coords=[36, -27, 60]`
+ |hack|
+ Ortho slicer: 3 cuts along the x, y, z directions
+
+|plot_z_many| `display_mode='z', cut_coords=5`
+ |hack|
+ Cutting in the z direction, specifying the number of
+ cuts
+
+|plot_x| `display_mode='x', cut_coords=[-36, 36]`
+ |hack|
+ Cutting in the x direction, specifying the exact
+ cuts
+
+|plot_y_small| `display_mode='y', cut_coords=1`
+ |hack|
+                  Cutting in the y direction, with only 1 cut, which is
+                  automatically positioned
+
+|plot_z_small| `display_mode='z', cut_coords=1, colorbar=False`
+ |hack|
+                  Cutting in the z direction, with only 1 cut, which is
+                  automatically positioned
+
+|plot_xz| `display_mode='xz', cut_coords=[36, 60]`
+ |hack|
+                  Cutting in the x and z directions, with cuts manually
+                  positioned
+
+|plot_yx| `display_mode='yx', cut_coords=[-27, 36]`
+ |hack|
+                  Cutting in the y and x directions, with cuts manually
+                  positioned
+
+|plot_yz| `display_mode='yz', cut_coords=[-27, 60]`
+ |hack|
+                  Cutting in the y and z directions, with cuts manually
+                  positioned
+
+|plot_lzr| `Glass brain display_mode='lzr'`
+ |hack|
+ Glass brain and Connectome provide additional display modes
+ due to the possibility of doing hemispheric projections.
+ Check out: 'l', 'r', 'lr', 'lzr', 'lyr', 'lzry', 'lyrz'.
+
+|plot_lyrz| `Glass brain display_mode='lyrz'`
+ |hack|
+ Glass brain and Connectome provide additional display modes
+ due to the possibility of doing hemispheric projections.
+ Check out: 'l', 'r', 'lr', 'lzr', 'lyr', 'lzry', 'lyrz'.
+
+
+================= =========================================================
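+
+For instance, to ask for five axial cuts at automatically chosen positions
+(a sketch: ``img`` is a placeholder statistical image)::
+
+    >>> from nilearn import plotting
+    >>> plotting.plot_stat_map(img, display_mode='z', cut_coords=5) # doctest: +SKIP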
+
+Available Colormaps
+===================
+
+The Nilearn plotting library ships with a set of extra colormaps, as shown
+in the image below.
+
+.. image:: ../auto_examples/01_plotting/images/sphx_glr_plot_colormaps_001.png
+ :target: ../auto_examples/01_plotting/plot_colormaps.html
+ :scale: 50
+
+These colormaps can be used like any other matplotlib colormap.
+
+.. image:: ../auto_examples/01_plotting/images/sphx_glr_plot_colormaps_002.png
+ :target: ../auto_examples/01_plotting/plot_colormaps.html
+ :scale: 50
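+
+For instance, one of these colormaps can be passed to any plotting function
+via the `cmap` argument (a sketch: ``img`` is a placeholder image)::
+
+    >>> from nilearn import plotting
+    >>> plotting.plot_stat_map(img, cmap=plotting.cm.purple_green) # doctest: +SKIP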
+
+
+.. _display_modules:
+
+Adding overlays, edges, contours, contour fillings and markers
+==============================================================
+
+To add overlays, contours, or edges, use the return value of the plotting
+functions. Indeed, these return a display object, such as the
+:class:`nilearn.plotting.displays.OrthoSlicer`. This object represents the
+plot, and has methods to add overlays, contours or edge maps::
+
+ display = plotting.plot_epi(...)
+
+.. |plot_edges| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_009.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 50
+
+.. |plot_contours| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_010.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 50
+
+.. |plot_fill| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_011.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 50
+
+.. |plot_markers| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_012.png
+ :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html
+ :scale: 50
+
+.. |plot_overlay| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_overlay_002.png
+ :target: ../auto_examples/01_plotting/plot_overlay.html
+ :scale: 50
+
+================= =========================================================
+================= =========================================================
+|plot_edges| `display.add_edges(img)`
+ |hack|
+ Add a plot of the edges of `img`, where edges are
+ extracted using a Canny edge-detection routine. This
+ is typically useful to check registration. Note that
+ `img` should have some visible sharp edges. Typically
+ an EPI img does not, but a T1 does.
+
+|plot_contours| `display.add_contours(img, levels=[.5], colors='r')`
+ |hack|
+ Add a plot of the contours of `img`, where contours
+ are computed for constant values, specified in
+ 'levels'. This is typically useful to outline a mask,
+ or ROI on top of another map.
+ |hack|
+ **Example:** :ref:`sphx_glr_auto_examples_01_plotting_plot_haxby_masks.py`
+
+|plot_fill| `display.add_contours(img, filled=True, alpha=0.7, levels=[0.5], colors='b')`
+ |hack|
+ Add a plot of `img` with contours filled with colors
+
+|plot_overlay| `display.add_overlay(img, cmap=plotting.cm.purple_green, threshold=3)`
+ |hack|
+ Add a new overlay on the existing figure
+ |hack|
+ **Example:** :ref:`sphx_glr_auto_examples_01_plotting_plot_overlay.py`
+
+|plot_markers| `display.add_markers(coords, marker_color='y', marker_size=100)`
+ |hack|
+                  Add seed-based MNI coordinates as spheres on top of a
+                  statistical or EPI image. This is useful for interpreting
+                  brain images with respect to seed regions.
+ |hack|
+ **Example:** :ref:`sphx_glr_auto_examples_03_connectivity_plot_seed_to_voxel_correlation.py`
+
+================= =========================================================
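+
+Putting this together, a typical pattern is to create a display, decorate
+it, and close it (a sketch: ``anat_img`` and ``mask_img`` are placeholder
+images)::
+
+    >>> from nilearn import plotting
+    >>> display = plotting.plot_anat(anat_img) # doctest: +SKIP
+    >>> display.add_contours(mask_img, levels=[.5], colors='r') # doctest: +SKIP
+    >>> display.close() # doctest: +SKIP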
+
+Displaying or saving to an image file
+=====================================
+
+To display the figure when running a script, you need to call
+:func:`nilearn.plotting.show` (this is just an alias for
+:func:`matplotlib.pyplot.show`)::
+
+ >>> from nilearn import plotting
+ >>> plotting.show() # doctest: +SKIP
+
+The simplest way to output an image file from the plotting functions is
+to specify the `output_file` argument::
+
+ >>> from nilearn import plotting
+ >>> plotting.plot_stat_map(img, output_file='pretty_brain.png') # doctest: +SKIP
+
+In this case, the display is closed automatically and the plotting
+function returns None.
+
+|
+
+The display object returned by the plotting function has a ``savefig`` method
+that can be used to save the plot to an image file::
+
+ >>> from nilearn import plotting
+ >>> display = plotting.plot_stat_map(img) # doctest: +SKIP
+ >>> display.savefig('pretty_brain.png') # doctest: +SKIP
+ # Don't forget to close the display
+ >>> display.close() # doctest: +SKIP
+
+.. _surface-plotting:
+
+Surface plotting
+================
+
+Nilearn also provides plotting functions to display surface data or
+statistical maps on a brain surface.
+
+.. versionadded:: 0.3
+
+.. |plot_surf_roi| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_surf_atlas_001.png
+ :target: ../auto_examples/01_plotting/plot_surf_atlas.html
+ :scale: 50
+
+.. |plot_surf_stat_map| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_surf_stat_map_001.png
+ :target: ../auto_examples/01_plotting/plot_surf_stat_map.html
+ :scale: 50
+
+===================== ===================================================================
+===================== ===================================================================
+|plot_surf_roi| :func:`plot_surf_roi`
+ |hack|
+ Plotting surface atlases on a brain surface
+ |hack|
+ **Example:**
+ :ref:`sphx_glr_auto_examples_01_plotting_plot_surf_atlas.py`
+
+|plot_surf_stat_map| :func:`plot_surf_stat_map`
+ |hack|
+ Plotting statistical maps onto a brain surface
+ |hack|
+ **Example:**
+ :ref:`sphx_glr_auto_examples_01_plotting_plot_surf_stat_map.py`
+
+===================== ===================================================================
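+
+As a minimal sketch (assuming ``stat_map`` holds per-vertex values for the
+left hemisphere), such a map could be shown on the inflated fsaverage mesh::
+
+    >>> from nilearn import plotting, datasets # doctest: +SKIP
+    >>> fsaverage = datasets.fetch_surf_fsaverage() # doctest: +SKIP
+    >>> plotting.plot_surf_stat_map(fsaverage['infl_left'], stat_map, # doctest: +SKIP
+    ...                             hemi='left') # doctest: +SKIP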
+
+
+.. _interactive-plotting:
+
+Interactive plots
+=================
+
+Nilearn also has functions for making interactive plots that can be
+seen in a web browser.
+
+.. versionadded:: 0.5
+
+ Interactive plotting is new in nilearn 0.5
+
+For 3D surface plots of statistical maps or surface atlases, use
+:func:`view_img_on_surf` and :func:`view_surf`. Both produce a 3D plot on the
+cortical surface. The difference is that :func:`view_surf` takes as input a
+surface map and a cortical mesh, whereas :func:`view_img_on_surf` takes as input
+a volume statistical map, and projects it on the cortical surface before making
+the plot.
+
+For 3D plots of a connectome, use :func:`view_connectome`. To see only markers,
+use :func:`view_markers`.
+
+
+.. _interactive-surface-plotting:
+
+3D Plots of statistical maps or atlases on the cortical surface
+---------------------------------------------------------------
+
+:func:`view_img_on_surf`: Surface plot using a 3D statistical map::
+
+ >>> from nilearn import plotting, datasets # doctest: +SKIP
+ >>> img = datasets.fetch_localizer_button_task()['tmaps'][0] # doctest: +SKIP
+ >>> view = plotting.view_img_on_surf(img, threshold='90%', surf_mesh='fsaverage') # doctest: +SKIP
+
+If you are running a notebook, displaying ``view`` will embed an interactive
+plot (this is the case for all interactive plots produced by nilearn's "view"
+functions):
+
+.. image:: ../images/plotly_surface_plot_notebook_screenshot.png
+
+If you are not using a notebook, you can open the plot in a browser like this::
+
+ >>> view.open_in_browser() # doctest: +SKIP
+
+This will open the 3D plot in your web browser:
+
+.. image:: ../images/plotly_surface_plot.png
+
+
+Or you can save it to an html file::
+
+ >>> view.save_as_html("surface_plot.html") # doctest: +SKIP
+
+
+:func:`view_surf`: Surface plot using a surface map and a cortical mesh::
+
+ >>> from nilearn import plotting, datasets # doctest: +SKIP
+ >>> destrieux = datasets.fetch_atlas_surf_destrieux() # doctest: +SKIP
+ >>> fsaverage = datasets.fetch_surf_fsaverage() # doctest: +SKIP
+ >>> view = plotting.view_surf(fsaverage['infl_left'], destrieux['map_left'], # doctest: +SKIP
+ ... cmap='gist_ncar', symmetric_cmap=False) # doctest: +SKIP
+ ...
+ >>> view.open_in_browser() # doctest: +SKIP
+
+
+.. image:: ../images/plotly_surface_atlas_plot.png
+
+.. _interactive-connectome-plotting:
+
+3D Plots of connectomes
+-----------------------
+
+:func:`view_connectome`: 3D plot of a connectome::
+
+ >>> view = plotting.view_connectome(correlation_matrix, coords, threshold='90%') # doctest: +SKIP
+ >>> view.open_in_browser() # doctest: +SKIP
+
+
+.. image:: ../images/plotly_connectome_plot.png
+
+
+.. _interactive-markers-plotting:
+
+3D Plots of markers
+-------------------
+
+:func:`view_markers`: showing markers (e.g. seed locations) in 3D::
+
+ >>> from nilearn import plotting # doctest: +SKIP
+ >>> dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)] # doctest: +SKIP
+    >>> view = plotting.view_markers( # doctest: +SKIP
+    ...     dmn_coords, ['red', 'cyan', 'magenta', 'orange'], marker_size=10) # doctest: +SKIP
+ >>> view.open_in_browser() # doctest: +SKIP
+
+
+
+.. image:: ../images/plotly_markers_plot.png
+
+
+.. _interactive-stat-map-plotting:
+
+Interactive visualization of statistical map slices
+---------------------------------------------------
+
+:func:`view_stat_map`: open stat map in a Papaya viewer (https://github.com/rii-mango/Papaya)::
+
+ >>> from nilearn import plotting, datasets # doctest: +SKIP
+ >>> img = datasets.fetch_localizer_button_task()['tmaps'][0] # doctest: +SKIP
+ >>> view = plotting.view_stat_map(img, threshold=2, vmax=4) # doctest: +SKIP
+
+In a Jupyter notebook, you can view the image like this:
+
+.. image:: ../images/papaya_stat_map_plot_screenshot_notebook.png
+
+Or, if you are not in a notebook, you can open the viewer in your web
+browser::
+
+ >>> view.open_in_browser() # doctest: +SKIP
diff --git a/doc/sphinxext/numpydoc/__init__.py b/doc/sphinxext/numpydoc/__init__.py
new file mode 100644
index 0000000000..0fce2cf747
--- /dev/null
+++ b/doc/sphinxext/numpydoc/__init__.py
@@ -0,0 +1,3 @@
+from __future__ import division, absolute_import, print_function
+
+from .numpydoc import setup
diff --git a/doc/sphinxext/numpydoc/comment_eater.py b/doc/sphinxext/numpydoc/comment_eater.py
new file mode 100644
index 0000000000..8cddd3305f
--- /dev/null
+++ b/doc/sphinxext/numpydoc/comment_eater.py
@@ -0,0 +1,169 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+    # Python 2: io.StringIO only accepts unicode, while the tokenizer here
+    # feeds it byte strings; use StringIO.StringIO, as compiler_unparse.py does.
+    from StringIO import StringIO
+
+import compiler
+import inspect
+import textwrap
+import tokenize
+
+from .compiler_unparse import unparse
+
+
+class Comment(object):
+ """ A comment block.
+ """
+ is_comment = True
+ def __init__(self, start_lineno, end_lineno, text):
+ # int : The first line number in the block. 1-indexed.
+ self.start_lineno = start_lineno
+ # int : The last line number. Inclusive!
+ self.end_lineno = end_lineno
+ # str : The text block including '#' character but not any leading spaces.
+ self.text = text
+
+ def add(self, string, start, end, line):
+ """ Add a new comment line.
+ """
+ self.start_lineno = min(self.start_lineno, start[0])
+ self.end_lineno = max(self.end_lineno, end[0])
+ self.text += string
+
+ def __repr__(self):
+ return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
+ self.end_lineno, self.text)
+
+
+class NonComment(object):
+ """ A non-comment block of code.
+ """
+ is_comment = False
+ def __init__(self, start_lineno, end_lineno):
+ self.start_lineno = start_lineno
+ self.end_lineno = end_lineno
+
+ def add(self, string, start, end, line):
+ """ Add lines to the block.
+ """
+ if string.strip():
+ # Only add if not entirely whitespace.
+ self.start_lineno = min(self.start_lineno, start[0])
+ self.end_lineno = max(self.end_lineno, end[0])
+
+ def __repr__(self):
+ return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
+ self.end_lineno)
+
+
+class CommentBlocker(object):
+ """ Pull out contiguous comment blocks.
+ """
+ def __init__(self):
+ # Start with a dummy.
+ self.current_block = NonComment(0, 0)
+
+ # All of the blocks seen so far.
+ self.blocks = []
+
+ # The index mapping lines of code to their associated comment blocks.
+ self.index = {}
+
+ def process_file(self, file):
+ """ Process a file object.
+ """
+ if sys.version_info[0] >= 3:
+ nxt = file.__next__
+ else:
+ nxt = file.next
+ for token in tokenize.generate_tokens(nxt):
+ self.process_token(*token)
+ self.make_index()
+
+ def process_token(self, kind, string, start, end, line):
+ """ Process a single token.
+ """
+ if self.current_block.is_comment:
+ if kind == tokenize.COMMENT:
+ self.current_block.add(string, start, end, line)
+ else:
+ self.new_noncomment(start[0], end[0])
+ else:
+ if kind == tokenize.COMMENT:
+ self.new_comment(string, start, end, line)
+ else:
+ self.current_block.add(string, start, end, line)
+
+ def new_noncomment(self, start_lineno, end_lineno):
+ """ We are transitioning from a noncomment to a comment.
+ """
+ block = NonComment(start_lineno, end_lineno)
+ self.blocks.append(block)
+ self.current_block = block
+
+ def new_comment(self, string, start, end, line):
+ """ Possibly add a new comment.
+
+ Only adds a new comment if this comment is the only thing on the line.
+ Otherwise, it extends the noncomment block.
+ """
+ prefix = line[:start[1]]
+ if prefix.strip():
+ # Oops! Trailing comment, not a comment block.
+ self.current_block.add(string, start, end, line)
+ else:
+ # A comment block.
+ block = Comment(start[0], end[0], string)
+ self.blocks.append(block)
+ self.current_block = block
+
+ def make_index(self):
+ """ Make the index mapping lines of actual code to their associated
+ prefix comments.
+ """
+ for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
+ if not block.is_comment:
+ self.index[block.start_lineno] = prev
+
+ def search_for_comment(self, lineno, default=None):
+ """ Find the comment block just before the given line number.
+
+ Returns None (or the specified default) if there is no such block.
+ """
+ if not self.index:
+ self.make_index()
+ block = self.index.get(lineno, None)
+ text = getattr(block, 'text', default)
+ return text
+
+
+def strip_comment_marker(text):
+ """ Strip # markers at the front of a block of comment text.
+ """
+ lines = []
+ for line in text.splitlines():
+ lines.append(line.lstrip('#'))
+ text = textwrap.dedent('\n'.join(lines))
+ return text
+
+
+def get_class_traits(klass):
+ """ Yield all of the documentation for trait definitions on a class object.
+ """
+ # FIXME: gracefully handle errors here or in the caller?
+ source = inspect.getsource(klass)
+ cb = CommentBlocker()
+ cb.process_file(StringIO(source))
+ mod_ast = compiler.parse(source)
+ class_ast = mod_ast.node.nodes[0]
+ for node in class_ast.code.nodes:
+ # FIXME: handle other kinds of assignments?
+ if isinstance(node, compiler.ast.Assign):
+ name = node.nodes[0].name
+ rhs = unparse(node.expr).strip()
+ doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
+ yield name, rhs, doc
+
diff --git a/doc/sphinxext/numpydoc/compiler_unparse.py b/doc/sphinxext/numpydoc/compiler_unparse.py
new file mode 100644
index 0000000000..8933a83db3
--- /dev/null
+++ b/doc/sphinxext/numpydoc/compiler_unparse.py
@@ -0,0 +1,865 @@
+""" Turn compiler.ast structures back into executable python code.
+
+ The unparse method takes a compiler.ast tree and transforms it back into
+ valid python code. It is incomplete and currently only works for
+ import statements, function calls, function definitions, assignments, and
+ basic expressions.
+
+ Inspired by python-2.5-svn/Demo/parser/unparse.py
+
+ fixme: We may want to move to using _ast trees because the compiler for
+ them is about 6 times faster than compiler.compile.
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
+
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+def unparse(ast, single_line_functions=False):
+ s = StringIO()
+ UnparseCompilerAst(ast, s, single_line_functions)
+ return s.getvalue().lstrip()
+
+op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
+ 'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
+
+class UnparseCompilerAst:
+ """ Methods in this class recursively traverse an AST and
+ output source code for the abstract syntax; original formatting
+        is disregarded.
+ """
+
+ #########################################################################
+ # object interface.
+ #########################################################################
+
+ def __init__(self, tree, file = sys.stdout, single_line_functions=False):
+ """ Unparser(tree, file=sys.stdout) -> None.
+
+ Print the source for tree to file.
+ """
+ self.f = file
+ self._single_func = single_line_functions
+ self._do_indent = True
+ self._indent = 0
+ self._dispatch(tree)
+ self._write("\n")
+ self.f.flush()
+
+ #########################################################################
+ # Unparser private interface.
+ #########################################################################
+
+ ### format, output, and dispatch methods ################################
+
+ def _fill(self, text = ""):
+ "Indent a piece of text, according to the current indentation level"
+ if self._do_indent:
+ self._write("\n"+" "*self._indent + text)
+ else:
+ self._write(text)
+
+ def _write(self, text):
+ "Append a piece of text to the current line."
+ self.f.write(text)
+
+ def _enter(self):
+ "Print ':', and increase the indentation."
+ self._write(": ")
+ self._indent += 1
+
+ def _leave(self):
+ "Decrease the indentation level."
+ self._indent -= 1
+
+ def _dispatch(self, tree):
+ "_dispatcher function, _dispatching tree type T to method _T."
+ if isinstance(tree, list):
+ for t in tree:
+ self._dispatch(t)
+ return
+ meth = getattr(self, "_"+tree.__class__.__name__)
+ if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
+ return
+ meth(tree)
+
+
+ #########################################################################
+ # compiler.ast unparsing methods.
+ #
+ # There should be one method per concrete grammar type. They are
+ # organized in alphabetical order.
+ #########################################################################
+
+ def _Add(self, t):
+ self.__binary_op(t, '+')
+
+ def _And(self, t):
+ self._write(" (")
+ for i, node in enumerate(t.nodes):
+ self._dispatch(node)
+ if i != len(t.nodes)-1:
+ self._write(") and (")
+ self._write(")")
+
+ def _AssAttr(self, t):
+ """ Handle assigning an attribute of an object
+ """
+ self._dispatch(t.expr)
+ self._write('.'+t.attrname)
+
+ def _Assign(self, t):
+ """ Expression Assignment such as "a = 1".
+
+ This only handles assignment in expressions. Keyword assignment
+ is handled separately.
+ """
+ self._fill()
+ for target in t.nodes:
+ self._dispatch(target)
+ self._write(" = ")
+ self._dispatch(t.expr)
+ if not self._do_indent:
+ self._write('; ')
+
+ def _AssName(self, t):
+ """ Name on left hand side of expression.
+
+ Treat just like a name on the right side of an expression.
+ """
+ self._Name(t)
+
+ def _AssTuple(self, t):
+ """ Tuple on left hand side of an expression.
+ """
+
+ # _write each elements, separated by a comma.
+ for element in t.nodes[:-1]:
+ self._dispatch(element)
+ self._write(", ")
+
+ # Handle the last one without writing comma
+ last_element = t.nodes[-1]
+ self._dispatch(last_element)
+
+ def _AugAssign(self, t):
+ """ +=,-=,*=,/=,**=, etc. operations
+ """
+
+ self._fill()
+ self._dispatch(t.node)
+ self._write(' '+t.op+' ')
+ self._dispatch(t.expr)
+ if not self._do_indent:
+ self._write(';')
+
+ def _Bitand(self, t):
+ """ Bit and operation.
+ """
+
+ for i, node in enumerate(t.nodes):
+ self._write("(")
+ self._dispatch(node)
+ self._write(")")
+ if i != len(t.nodes)-1:
+ self._write(" & ")
+
+ def _Bitor(self, t):
+ """ Bit or operation
+ """
+
+ for i, node in enumerate(t.nodes):
+ self._write("(")
+ self._dispatch(node)
+ self._write(")")
+ if i != len(t.nodes)-1:
+ self._write(" | ")
+
+ def _CallFunc(self, t):
+ """ Function call.
+ """
+ self._dispatch(t.node)
+ self._write("(")
+ comma = False
+ for e in t.args:
+ if comma: self._write(", ")
+ else: comma = True
+ self._dispatch(e)
+ if t.star_args:
+ if comma: self._write(", ")
+ else: comma = True
+ self._write("*")
+ self._dispatch(t.star_args)
+ if t.dstar_args:
+ if comma: self._write(", ")
+ else: comma = True
+ self._write("**")
+ self._dispatch(t.dstar_args)
+ self._write(")")
+
+ def _Compare(self, t):
+ self._dispatch(t.expr)
+ for op, expr in t.ops:
+ self._write(" " + op + " ")
+ self._dispatch(expr)
+
+ def _Const(self, t):
+ """ A constant value such as an integer value, 3, or a string, "hello".
+ """
+ self._dispatch(t.value)
+
+ def _Decorators(self, t):
+ """ Handle function decorators (eg. @has_units)
+ """
+ for node in t.nodes:
+ self._dispatch(node)
+
+ def _Dict(self, t):
+ self._write("{")
+ for i, (k, v) in enumerate(t.items):
+ self._dispatch(k)
+ self._write(": ")
+ self._dispatch(v)
+ if i < len(t.items)-1:
+ self._write(", ")
+ self._write("}")
+
+ def _Discard(self, t):
+ """ Node for when return value is ignored such as in "foo(a)".
+ """
+ self._fill()
+ self._dispatch(t.expr)
+
+ def _Div(self, t):
+ self.__binary_op(t, '/')
+
+ def _Ellipsis(self, t):
+ self._write("...")
+
+ def _From(self, t):
+ """ Handle "from xyz import foo, bar as baz".
+ """
+ # fixme: Are From and ImportFrom handled differently?
+ self._fill("from ")
+ self._write(t.modname)
+ self._write(" import ")
+ for i, (name,asname) in enumerate(t.names):
+ if i != 0:
+ self._write(", ")
+ self._write(name)
+ if asname is not None:
+ self._write(" as "+asname)
+
+ def _Function(self, t):
+ """ Handle function definitions
+ """
+ if t.decorators is not None:
+ self._fill("@")
+ self._dispatch(t.decorators)
+ self._fill("def "+t.name + "(")
+ defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
+ for i, arg in enumerate(zip(t.argnames, defaults)):
+ self._write(arg[0])
+ if arg[1] is not None:
+ self._write('=')
+ self._dispatch(arg[1])
+ if i < len(t.argnames)-1:
+ self._write(', ')
+ self._write(")")
+ if self._single_func:
+ self._do_indent = False
+ self._enter()
+ self._dispatch(t.code)
+ self._leave()
+ self._do_indent = True
+
+ def _Getattr(self, t):
+ """ Handle getting an attribute of an object
+ """
+ if isinstance(t.expr, (Div, Mul, Sub, Add)):
+ self._write('(')
+ self._dispatch(t.expr)
+ self._write(')')
+ else:
+ self._dispatch(t.expr)
+
+ self._write('.'+t.attrname)
+
+ def _If(self, t):
+ self._fill()
+
+ for i, (compare,code) in enumerate(t.tests):
+ if i == 0:
+ self._write("if ")
+ else:
+ self._write("elif ")
+ self._dispatch(compare)
+ self._enter()
+ self._fill()
+ self._dispatch(code)
+ self._leave()
+ self._write("\n")
+
+ if t.else_ is not None:
+ self._write("else")
+ self._enter()
+ self._fill()
+ self._dispatch(t.else_)
+ self._leave()
+ self._write("\n")
+
+ def _IfExp(self, t):
+ self._dispatch(t.then)
+ self._write(" if ")
+ self._dispatch(t.test)
+
+ if t.else_ is not None:
+ self._write(" else (")
+ self._dispatch(t.else_)
+ self._write(")")
+
+ def _Import(self, t):
+ """ Handle "import xyz.foo".
+ """
+ self._fill("import ")
+
+ for i, (name,asname) in enumerate(t.names):
+ if i != 0:
+ self._write(", ")
+ self._write(name)
+ if asname is not None:
+ self._write(" as "+asname)
+
+ def _Keyword(self, t):
+ """ Keyword value assignment within function calls and definitions.
+ """
+ self._write(t.name)
+ self._write("=")
+ self._dispatch(t.expr)
+
+ def _List(self, t):
+ self._write("[")
+ for i,node in enumerate(t.nodes):
+ self._dispatch(node)
+ if i < len(t.nodes)-1:
+ self._write(", ")
+ self._write("]")
+
+ def _Module(self, t):
+ if t.doc is not None:
+ self._dispatch(t.doc)
+ self._dispatch(t.node)
+
+ def _Mul(self, t):
+ self.__binary_op(t, '*')
+
+ def _Name(self, t):
+ self._write(t.name)
+
+ def _NoneType(self, t):
+ self._write("None")
+
+ def _Not(self, t):
+ self._write('not (')
+ self._dispatch(t.expr)
+ self._write(')')
+
+ def _Or(self, t):
+ self._write(" (")
+ for i, node in enumerate(t.nodes):
+ self._dispatch(node)
+ if i != len(t.nodes)-1:
+ self._write(") or (")
+ self._write(")")
+
+ def _Pass(self, t):
+ self._write("pass\n")
+
+ def _Printnl(self, t):
+ self._fill("print ")
+ if t.dest:
+ self._write(">> ")
+ self._dispatch(t.dest)
+ self._write(", ")
+ comma = False
+ for node in t.nodes:
+ if comma: self._write(', ')
+ else: comma = True
+ self._dispatch(node)
+
+ def _Power(self, t):
+ self.__binary_op(t, '**')
+
+ def _Return(self, t):
+ self._fill("return ")
+ if t.value:
+ if isinstance(t.value, Tuple):
+ text = ', '.join([ name.name for name in t.value.asList() ])
+ self._write(text)
+ else:
+ self._dispatch(t.value)
+ if not self._do_indent:
+ self._write('; ')
+
+ def _Slice(self, t):
+ self._dispatch(t.expr)
+ self._write("[")
+ if t.lower:
+ self._dispatch(t.lower)
+ self._write(":")
+ if t.upper:
+ self._dispatch(t.upper)
+ #if t.step:
+ # self._write(":")
+ # self._dispatch(t.step)
+ self._write("]")
+
+ def _Sliceobj(self, t):
+ for i, node in enumerate(t.nodes):
+ if i != 0:
+ self._write(":")
+ if not (isinstance(node, Const) and node.value is None):
+ self._dispatch(node)
+
+ def _Stmt(self, tree):
+ for node in tree.nodes:
+ self._dispatch(node)
+
+ def _Sub(self, t):
+ self.__binary_op(t, '-')
+
+ def _Subscript(self, t):
+ self._dispatch(t.expr)
+ self._write("[")
+ for i, value in enumerate(t.subs):
+ if i != 0:
+ self._write(",")
+ self._dispatch(value)
+ self._write("]")
+
+ def _TryExcept(self, t):
+ self._fill("try")
+ self._enter()
+ self._dispatch(t.body)
+ self._leave()
+
+ for handler in t.handlers:
+ self._fill('except ')
+ self._dispatch(handler[0])
+ if handler[1] is not None:
+ self._write(', ')
+ self._dispatch(handler[1])
+ self._enter()
+ self._dispatch(handler[2])
+ self._leave()
+
+ if t.else_:
+ self._fill("else")
+ self._enter()
+ self._dispatch(t.else_)
+ self._leave()
+
+ def _Tuple(self, t):
+
+ if not t.nodes:
+ # Empty tuple.
+ self._write("()")
+ else:
+ self._write("(")
+
+ # _write each elements, separated by a comma.
+ for element in t.nodes[:-1]:
+ self._dispatch(element)
+ self._write(", ")
+
+ # Handle the last one without writing comma
+ last_element = t.nodes[-1]
+ self._dispatch(last_element)
+
+ self._write(")")
+
+ def _UnaryAdd(self, t):
+ self._write("+")
+ self._dispatch(t.expr)
+
+ def _UnarySub(self, t):
+ self._write("-")
+ self._dispatch(t.expr)
+
+ def _With(self, t):
+ self._fill('with ')
+ self._dispatch(t.expr)
+ if t.vars:
+ self._write(' as ')
+ self._dispatch(t.vars.name)
+ self._enter()
+ self._dispatch(t.body)
+ self._leave()
+ self._write('\n')
+
+ def _int(self, t):
+ self._write(repr(t))
+
+ def __binary_op(self, t, symbol):
+ # Check if parenthesis are needed on left side and then dispatch
+ has_paren = False
+ left_class = str(t.left.__class__)
+ if (left_class in op_precedence.keys() and
+ op_precedence[left_class] < op_precedence[str(t.__class__)]):
+ has_paren = True
+ if has_paren:
+ self._write('(')
+ self._dispatch(t.left)
+ if has_paren:
+ self._write(')')
+ # Write the appropriate symbol for operator
+ self._write(symbol)
+ # Check if parenthesis are needed on the right side and then dispatch
+ has_paren = False
+ right_class = str(t.right.__class__)
+ if (right_class in op_precedence.keys() and
+ op_precedence[right_class] < op_precedence[str(t.__class__)]):
+ has_paren = True
+ if has_paren:
+ self._write('(')
+ self._dispatch(t.right)
+ if has_paren:
+ self._write(')')
+
+ def _float(self, t):
+ # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
+ # We prefer str here.
+ self._write(str(t))
+
+ def _str(self, t):
+ self._write(repr(t))
+
+ def _tuple(self, t):
+ self._write(str(t))
+
+ #########################################################################
+ # These are the methods from the _ast modules unparse.
+ #
+ # As our needs to handle more advanced code increase, we may want to
+ # modify some of the methods below so that they work for compiler.ast.
+ #########################################################################
+
+# # stmt
+# def _Expr(self, tree):
+# self._fill()
+# self._dispatch(tree.value)
+#
+# def _Import(self, t):
+# self._fill("import ")
+# first = True
+# for a in t.names:
+# if first:
+# first = False
+# else:
+# self._write(", ")
+# self._write(a.name)
+# if a.asname:
+# self._write(" as "+a.asname)
+#
+## def _ImportFrom(self, t):
+## self._fill("from ")
+## self._write(t.module)
+## self._write(" import ")
+## for i, a in enumerate(t.names):
+## if i == 0:
+## self._write(", ")
+## self._write(a.name)
+## if a.asname:
+## self._write(" as "+a.asname)
+## # XXX(jpe) what is level for?
+##
+#
+# def _Break(self, t):
+# self._fill("break")
+#
+# def _Continue(self, t):
+# self._fill("continue")
+#
+# def _Delete(self, t):
+# self._fill("del ")
+# self._dispatch(t.targets)
+#
+# def _Assert(self, t):
+# self._fill("assert ")
+# self._dispatch(t.test)
+# if t.msg:
+# self._write(", ")
+# self._dispatch(t.msg)
+#
+# def _Exec(self, t):
+# self._fill("exec ")
+# self._dispatch(t.body)
+# if t.globals:
+# self._write(" in ")
+# self._dispatch(t.globals)
+# if t.locals:
+# self._write(", ")
+# self._dispatch(t.locals)
+#
+# def _Print(self, t):
+# self._fill("print ")
+# do_comma = False
+# if t.dest:
+# self._write(">>")
+# self._dispatch(t.dest)
+# do_comma = True
+# for e in t.values:
+# if do_comma:self._write(", ")
+# else:do_comma=True
+# self._dispatch(e)
+# if not t.nl:
+# self._write(",")
+#
+# def _Global(self, t):
+# self._fill("global")
+# for i, n in enumerate(t.names):
+# if i != 0:
+# self._write(",")
+# self._write(" " + n)
+#
+# def _Yield(self, t):
+# self._fill("yield")
+# if t.value:
+# self._write(" (")
+# self._dispatch(t.value)
+# self._write(")")
+#
+# def _Raise(self, t):
+# self._fill('raise ')
+# if t.type:
+# self._dispatch(t.type)
+# if t.inst:
+# self._write(", ")
+# self._dispatch(t.inst)
+# if t.tback:
+# self._write(", ")
+# self._dispatch(t.tback)
+#
+#
+# def _TryFinally(self, t):
+# self._fill("try")
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# self._fill("finally")
+# self._enter()
+# self._dispatch(t.finalbody)
+# self._leave()
+#
+# def _excepthandler(self, t):
+# self._fill("except ")
+# if t.type:
+# self._dispatch(t.type)
+# if t.name:
+# self._write(", ")
+# self._dispatch(t.name)
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# def _ClassDef(self, t):
+# self._write("\n")
+# self._fill("class "+t.name)
+# if t.bases:
+# self._write("(")
+# for a in t.bases:
+# self._dispatch(a)
+# self._write(", ")
+# self._write(")")
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# def _FunctionDef(self, t):
+# self._write("\n")
+# for deco in t.decorators:
+# self._fill("@")
+# self._dispatch(deco)
+# self._fill("def "+t.name + "(")
+# self._dispatch(t.args)
+# self._write(")")
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# def _For(self, t):
+# self._fill("for ")
+# self._dispatch(t.target)
+# self._write(" in ")
+# self._dispatch(t.iter)
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+# if t.orelse:
+# self._fill("else")
+# self._enter()
+# self._dispatch(t.orelse)
+# self._leave
+#
+# def _While(self, t):
+# self._fill("while ")
+# self._dispatch(t.test)
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+# if t.orelse:
+# self._fill("else")
+# self._enter()
+# self._dispatch(t.orelse)
+# self._leave
+#
+# # expr
+# def _Str(self, tree):
+# self._write(repr(tree.s))
+##
+# def _Repr(self, t):
+# self._write("`")
+# self._dispatch(t.value)
+# self._write("`")
+#
+# def _Num(self, t):
+# self._write(repr(t.n))
+#
+# def _ListComp(self, t):
+# self._write("[")
+# self._dispatch(t.elt)
+# for gen in t.generators:
+# self._dispatch(gen)
+# self._write("]")
+#
+# def _GeneratorExp(self, t):
+# self._write("(")
+# self._dispatch(t.elt)
+# for gen in t.generators:
+# self._dispatch(gen)
+# self._write(")")
+#
+# def _comprehension(self, t):
+# self._write(" for ")
+# self._dispatch(t.target)
+# self._write(" in ")
+# self._dispatch(t.iter)
+# for if_clause in t.ifs:
+# self._write(" if ")
+# self._dispatch(if_clause)
+#
+# def _IfExp(self, t):
+# self._dispatch(t.body)
+# self._write(" if ")
+# self._dispatch(t.test)
+# if t.orelse:
+# self._write(" else ")
+# self._dispatch(t.orelse)
+#
+# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
+# def _UnaryOp(self, t):
+# self._write(self.unop[t.op.__class__.__name__])
+# self._write("(")
+# self._dispatch(t.operand)
+# self._write(")")
+#
+# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
+# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
+# "FloorDiv":"//", "Pow": "**"}
+# def _BinOp(self, t):
+# self._write("(")
+# self._dispatch(t.left)
+# self._write(")" + self.binop[t.op.__class__.__name__] + "(")
+# self._dispatch(t.right)
+# self._write(")")
+#
+# boolops = {_ast.And: 'and', _ast.Or: 'or'}
+# def _BoolOp(self, t):
+# self._write("(")
+# self._dispatch(t.values[0])
+# for v in t.values[1:]:
+# self._write(" %s " % self.boolops[t.op.__class__])
+# self._dispatch(v)
+# self._write(")")
+#
+# def _Attribute(self,t):
+# self._dispatch(t.value)
+# self._write(".")
+# self._write(t.attr)
+#
+## def _Call(self, t):
+## self._dispatch(t.func)
+## self._write("(")
+## comma = False
+## for e in t.args:
+## if comma: self._write(", ")
+## else: comma = True
+## self._dispatch(e)
+## for e in t.keywords:
+## if comma: self._write(", ")
+## else: comma = True
+## self._dispatch(e)
+## if t.starargs:
+## if comma: self._write(", ")
+## else: comma = True
+## self._write("*")
+## self._dispatch(t.starargs)
+## if t.kwargs:
+## if comma: self._write(", ")
+## else: comma = True
+## self._write("**")
+## self._dispatch(t.kwargs)
+## self._write(")")
+#
+# # slice
+# def _Index(self, t):
+# self._dispatch(t.value)
+#
+# def _ExtSlice(self, t):
+# for i, d in enumerate(t.dims):
+# if i != 0:
+# self._write(': ')
+# self._dispatch(d)
+#
+# # others
+# def _arguments(self, t):
+# first = True
+# nonDef = len(t.args)-len(t.defaults)
+# for a in t.args[0:nonDef]:
+# if first:first = False
+# else: self._write(", ")
+# self._dispatch(a)
+# for a,d in zip(t.args[nonDef:], t.defaults):
+# if first:first = False
+# else: self._write(", ")
+# self._dispatch(a),
+# self._write("=")
+# self._dispatch(d)
+# if t.vararg:
+# if first:first = False
+# else: self._write(", ")
+# self._write("*"+t.vararg)
+# if t.kwarg:
+# if first:first = False
+# else: self._write(", ")
+# self._write("**"+t.kwarg)
+#
+## def _keyword(self, t):
+## self._write(t.arg)
+## self._write("=")
+## self._dispatch(t.value)
+#
+# def _Lambda(self, t):
+# self._write("lambda ")
+# self._dispatch(t.args)
+# self._write(": ")
+# self._dispatch(t.body)
+
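
The commented-out block above preserves an old AST unparser whose dispatch
tables follow the same scheme as CPython's Tools/unparse.py; it is carried
along verbatim rather than executed. For reference only (not part of this
patch), a minimal sketch of the same dispatch idea against the modern stdlib
``ast`` module::

    import ast

    tree = ast.parse("for x in seq:\n    total = total + x")
    print(ast.dump(tree.body[0]))  # the For node a _For visitor would walk
    # Python 3.9+ ships the full round trip as ast.unparse(tree)
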
+
+
diff --git a/doc/sphinxext/numpy_ext/docscrape.py b/doc/sphinxext/numpydoc/docscrape.py
similarity index 73%
rename from doc/sphinxext/numpy_ext/docscrape.py
rename to doc/sphinxext/numpydoc/docscrape.py
index e9670c05f5..2b1719db5c 100644
--- a/doc/sphinxext/numpy_ext/docscrape.py
+++ b/doc/sphinxext/numpydoc/docscrape.py
@@ -1,13 +1,15 @@
"""Extract reference documentation from the NumPy source tree.
"""
+from __future__ import division, absolute_import, print_function
import inspect
import textwrap
import re
import pydoc
-from StringIO import StringIO
from warnings import warn
+import collections
+import sys
class Reader(object):
@@ -22,10 +24,10 @@ def __init__(self, data):
String with lines separated by '\n'.
"""
- if isinstance(data, list):
+ if isinstance(data,list):
self._str = data
else:
- self._str = data.split('\n') # store string as list of lines
+ self._str = data.split('\n') # store string as list of lines
self.reset()
@@ -33,7 +35,7 @@ def __getitem__(self, n):
return self._str[n]
def reset(self):
- self._l = 0 # current line nr
+ self._l = 0 # current line nr
def read(self):
if not self.eof():
@@ -60,12 +62,11 @@ def read_to_condition(self, condition_func):
return self[start:self._l]
self._l += 1
if self.eof():
- return self[start:self._l + 1]
+ return self[start:self._l+1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
-
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
@@ -75,7 +76,7 @@ def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
- def peek(self, n=0):
+ def peek(self,n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
@@ -111,11 +112,11 @@ def __init__(self, docstring, config={}):
self._parse()
- def __getitem__(self, key):
+ def __getitem__(self,key):
return self._parsed_data[key]
- def __setitem__(self, key, val):
- if not self._parsed_data.has_key(key):
+ def __setitem__(self,key,val):
+ if key not in self._parsed_data:
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
@@ -131,27 +132,25 @@ def _is_at_section(self):
if l1.startswith('.. index::'):
return True
- l2 = self._doc.peek(1).strip() # ---------- or ==========
- return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1))
+ l2 = self._doc.peek(1).strip() # ---------- or ==========
+ return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
- def _strip(self, doc):
+ def _strip(self,doc):
i = 0
j = 0
- for i, line in enumerate(doc):
- if line.strip():
- break
+ for i,line in enumerate(doc):
+ if line.strip(): break
- for j, line in enumerate(doc[::-1]):
- if line.strip():
- break
+ for j,line in enumerate(doc[::-1]):
+ if line.strip(): break
- return doc[i:len(doc) - j]
+ return doc[i:len(doc)-j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
- if not self._doc.peek(-1).strip(): # previous line was empty
+ if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
@@ -163,14 +162,14 @@ def _read_sections(self):
data = self._read_to_next_section()
name = data[0].strip()
- if name.startswith('..'): # index section
+ if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
- def _parse_param_list(self, content):
+ def _parse_param_list(self,content):
r = Reader(content)
params = []
while not r.eof():
@@ -183,13 +182,13 @@ def _parse_param_list(self, content):
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
- params.append((arg_name, arg_type, desc))
+ params.append((arg_name,arg_type,desc))
return params
+
    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
-
def _parse_see_also(self, content):
"""
func_name : Descriptive text
@@ -222,8 +221,7 @@ def push_item(name, rest):
rest = []
for line in content:
- if not line.strip():
- continue
+ if not line.strip(): continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
@@ -237,7 +235,8 @@ def push_item(name, rest):
current_func = None
if ',' in line:
for func in line.split(','):
- push_item(func, [])
+ if func.strip():
+ push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
@@ -269,13 +268,17 @@ def _parse_summary(self):
if self._is_at_section():
return
- summary = self._doc.read_to_next_empty_line()
- summary_str = " ".join([s.strip() for s in summary]).strip()
- if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
- self['Signature'] = summary_str
- if not self._is_at_section():
- self['Summary'] = self._doc.read_to_next_empty_line()
- else:
+        # If several signatures are present, take the last one
+ while True:
+ summary = self._doc.read_to_next_empty_line()
+ summary_str = " ".join([s.strip() for s in summary]).strip()
+ if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
+ self['Signature'] = summary_str
+ if not self._is_at_section():
+ continue
+ break
+
+ if summary is not None:
self['Summary'] = summary
if not self._is_at_section():
@@ -285,12 +288,11 @@ def _parse(self):
self._doc.reset()
self._parse_summary()
- for (section, content) in self._read_sections():
+ for (section,content) in self._read_sections():
if not section.startswith('..'):
- section = ' '.join([s.capitalize()
- for s in section.split(' ')])
- if section in ('Parameters', 'Attributes', 'Methods',
- 'Returns', 'Raises', 'Warns'):
+ section = ' '.join([s.capitalize() for s in section.split(' ')])
+ if section in ('Parameters', 'Returns', 'Raises', 'Warns',
+ 'Other Parameters', 'Attributes', 'Methods'):
self[section] = self._parse_param_list(content)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
@@ -302,17 +304,17 @@ def _parse(self):
# string conversion routines
def _str_header(self, name, symbol='-'):
- return [name, len(name) * symbol]
+ return [name, len(name)*symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
- out += [' ' * indent + line]
+ out += [' '*indent + line]
return out
def _str_signature(self):
if self['Signature']:
- return [self['Signature'].replace('*', '\*')] + ['']
+ return [self['Signature'].replace('*','\*')] + ['']
else:
return ['']
@@ -332,8 +334,11 @@ def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
- for param, param_type, desc in self[name]:
- out += ['%s : %s' % (param, param_type)]
+ for param,param_type,desc in self[name]:
+ if param_type:
+ out += ['%s : %s' % (param, param_type)]
+ else:
+ out += [param]
out += self._str_indent(desc)
out += ['']
return out
@@ -347,8 +352,7 @@ def _str_section(self, name):
return out
def _str_see_also(self, func_role):
- if not self['See Also']:
- return []
+ if not self['See Also']: return []
out = []
out += self._str_header("See Also")
last_had_desc = True
@@ -375,8 +379,8 @@ def _str_see_also(self, func_role):
def _str_index(self):
idx = self['index']
out = []
- out += ['.. index:: %s' % idx.get('default', '')]
- for section, references in idx.iteritems():
+ out += ['.. index:: %s' % idx.get('default','')]
+ for section, references in idx.items():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
@@ -387,11 +391,12 @@ def __str__(self, func_role=''):
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
- for param_list in ('Parameters', 'Returns', 'Raises'):
+ for param_list in ('Parameters', 'Returns', 'Other Parameters',
+ 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
out += self._str_see_also(func_role)
- for s in ('Notes', 'References', 'Examples'):
+ for s in ('Notes','References','Examples'):
out += self._str_section(s)
for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
@@ -399,27 +404,25 @@ def __str__(self, func_role=''):
return '\n'.join(out)
-def indent(str, indent=4):
- indent_str = ' ' * indent
+def indent(str,indent=4):
+ indent_str = ' '*indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
-
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
-
def header(text, style='-'):
- return text + '\n' + style * len(text) + '\n'
+ return text + '\n' + style*len(text) + '\n'
class FunctionDoc(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
- self._role = role # e.g. "func" or "meth"
+ self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
@@ -431,11 +434,14 @@ def __init__(self, func, role='func', doc=None, config={}):
func, func_name = self.get_func()
try:
# try to read signature
- argspec = inspect.getargspec(func)
+ if sys.version_info[0] >= 3:
+ argspec = inspect.getfullargspec(func)
+ else:
+ argspec = inspect.getargspec(func)
argspec = inspect.formatargspec(*argspec)
- argspec = argspec.replace('*', '\*')
+ argspec = argspec.replace('*','\*')
signature = '%s%s' % (func_name, argspec)
- except TypeError, e:
+ except TypeError as e:
signature = '%s()' % func_name
self['Signature'] = signature
@@ -457,9 +463,9 @@ def __str__(self):
'meth': 'method'}
if self._role:
- if not roles.has_key(self._role):
- print "Warning: invalid role %s" % self._role
- out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''),
+ if self._role not in roles:
+ print("Warning: invalid role %s" % self._role)
+ out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
func_name)
out += super(FunctionDoc, self).__str__(func_role=self._role)
@@ -467,8 +473,11 @@ def __str__(self):
class ClassDoc(NumpyDocString):
+
+ extra_public_methods = ['__call__']
+
def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
- config=None):
+ config={}):
if not inspect.isclass(cls) and cls is not None:
raise ValueError("Expected a class or None, but got %r" % cls)
self._cls = cls
@@ -484,24 +493,39 @@ def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
NumpyDocString.__init__(self, doc)
- if config is not None and config.get('show_class_members', True):
- if not self['Methods']:
- self['Methods'] = [(name, '', '')
- for name in sorted(self.methods)]
- if not self['Attributes']:
- self['Attributes'] = [(name, '', '')
- for name in sorted(self.properties)]
+ if config.get('show_class_members', True):
+ def splitlines_x(s):
+ if not s:
+ return []
+ else:
+ return s.splitlines()
+
+ for field, items in [('Methods', self.methods),
+ ('Attributes', self.properties)]:
+ if not self[field]:
+ doc_list = []
+ for name in sorted(items):
+ try:
+ doc_item = pydoc.getdoc(getattr(self._cls, name))
+ doc_list.append((name, '', splitlines_x(doc_item)))
+ except AttributeError:
+ pass # method doesn't exist
+ self[field] = doc_list
@property
def methods(self):
if self._cls is None:
return []
- return [name for name, func in inspect.getmembers(self._cls)
- if not name.startswith('_') and callable(func)]
+ return [name for name,func in inspect.getmembers(self._cls)
+ if ((not name.startswith('_')
+ or name in self.extra_public_methods)
+ and isinstance(func, collections.Callable))]
@property
def properties(self):
if self._cls is None:
return []
- return [name for name, func in inspect.getmembers(self._cls)
- if not name.startswith('_') and func is None]
+ return [name for name,func in inspect.getmembers(self._cls)
+ if not name.startswith('_') and
+ (func is None or isinstance(func, property) or
+ inspect.isgetsetdescriptor(func))]
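
Taken together, docscrape.py turns a numpydoc-formatted docstring into a
dict-like object keyed by section name. A minimal sketch of the parser in
isolation, assuming doc/sphinxext is on sys.path so the renamed package
imports as ``numpydoc``::

    from numpydoc.docscrape import NumpyDocString

    doc = NumpyDocString("Add two arrays.\n"
                         "\n"
                         "Parameters\n"
                         "----------\n"
                         "a, b : ndarray\n"
                         "    Input arrays.\n")
    print(doc['Summary'])     # ['Add two arrays.']
    print(doc['Parameters'])  # [('a, b', 'ndarray', ['Input arrays.'])]
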
diff --git a/doc/sphinxext/numpy_ext/docscrape_sphinx.py b/doc/sphinxext/numpydoc/docscrape_sphinx.py
similarity index 65%
rename from doc/sphinxext/numpy_ext/docscrape_sphinx.py
rename to doc/sphinxext/numpydoc/docscrape_sphinx.py
index bcf7e70731..cdc2a37d17 100644
--- a/doc/sphinxext/numpy_ext/docscrape_sphinx.py
+++ b/doc/sphinxext/numpydoc/docscrape_sphinx.py
@@ -1,18 +1,24 @@
-import re
-import inspect
-import textwrap
-import pydoc
+from __future__ import division, absolute_import, print_function
+
+import sys, re, inspect, textwrap, pydoc
import sphinx
-from docscrape import NumpyDocString
-from docscrape import FunctionDoc
-from docscrape import ClassDoc
+import collections
+from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
+
+if sys.version_info[0] >= 3:
+ sixu = lambda s: s
+else:
+ sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
- def __init__(self, docstring, config=None):
- config = {} if config is None else config
- self.use_plots = config.get('use_plots', False)
+ def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
+ self.load_config(config)
+
+ def load_config(self, config):
+ self.use_plots = config.get('use_plots', False)
+ self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
@@ -24,7 +30,7 @@ def _str_field_list(self, name):
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
- out += [' ' * indent + line]
+ out += [' '*indent + line]
return out
def _str_signature(self):
@@ -40,16 +46,37 @@ def _str_summary(self):
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
+ def _str_returns(self):
+ out = []
+ if self['Returns']:
+ out += self._str_field_list('Returns')
+ out += ['']
+ for param, param_type, desc in self['Returns']:
+ if param_type:
+ out += self._str_indent(['**%s** : %s' % (param.strip(),
+ param_type)])
+ else:
+ out += self._str_indent([param.strip()])
+ if desc:
+ out += ['']
+ out += self._str_indent(desc, 8)
+ out += ['']
+ return out
+
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
- out += self._str_indent(['**%s** : %s' % (param.strip(),
- param_type)])
- out += ['']
- out += self._str_indent(desc, 8)
+ if param_type:
+ out += self._str_indent(['**%s** : %s' % (param.strip(),
+ param_type)])
+ else:
+ out += self._str_indent(['**%s**' % param.strip()])
+ if desc:
+ out += ['']
+ out += self._str_indent(desc, 8)
out += ['']
return out
@@ -79,28 +106,36 @@ def _str_member_list(self, name):
others = []
for param, param_type, desc in self[name]:
param = param.strip()
- if not self._obj or hasattr(self._obj, param):
+
+ # Check if the referenced member can have a docstring or not
+ param_obj = getattr(self._obj, param, None)
+ if not (callable(param_obj)
+ or isinstance(param_obj, property)
+ or inspect.isgetsetdescriptor(param_obj)):
+ param_obj = None
+
+ if param_obj and (pydoc.getdoc(param_obj) or not desc):
+ # Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
- # GAEL: Toctree commented out below because it creates
- # hundreds of sphinx warnings
- # out += ['.. autosummary::', ' :toctree:', '']
- out += ['.. autosummary::', '']
- out += autosum
+ out += ['.. autosummary::']
+ if self.class_members_toctree:
+ out += [' :toctree:']
+ out += [''] + autosum
if others:
- maxlen_0 = max([len(x[0]) for x in others])
- maxlen_1 = max([len(x[1]) for x in others])
- hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
- fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
- n_indent = maxlen_0 + maxlen_1 + 4
- out += [hdr]
+ maxlen_0 = max(3, max([len(x[0]) for x in others]))
+ hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
+ fmt = sixu('%%%ds %%s ') % (maxlen_0,)
+ out += ['', hdr]
for param, param_type, desc in others:
- out += [fmt % (param.strip(), param_type)]
- out += self._str_indent(desc, n_indent)
+ desc = sixu(" ").join(x.strip() for x in desc).strip()
+ if param_type:
+ desc = "(%s) %s" % (param_type, desc)
+ out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
@@ -136,8 +171,8 @@ def _str_index(self):
if len(idx) == 0:
return out
- out += ['.. index:: %s' % idx.get('default', '')]
- for section, references in idx.iteritems():
+ out += ['.. index:: %s' % idx.get('default','')]
+ for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
@@ -157,9 +192,9 @@ def _str_references(self):
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
- out += ['.. only:: latex', '']
+ out += ['.. only:: latex','']
else:
- out += ['.. latexonly::', '']
+ out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
@@ -188,7 +223,9 @@ def __str__(self, indent=0, func_role="obj"):
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
- for param_list in ('Parameters', 'Returns', 'Raises'):
+ out += self._str_param_list('Parameters')
+ out += self._str_returns()
+ for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
@@ -197,35 +234,32 @@ def __str__(self, indent=0, func_role="obj"):
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
- out = self._str_indent(out, indent)
+ out = self._str_indent(out,indent)
return '\n'.join(out)
-
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
- self.use_plots = config.get('use_plots', False)
+ self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
-
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
- self.use_plots = config.get('use_plots', False)
+ self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
-
class SphinxObjDoc(SphinxDocString):
- def __init__(self, obj, doc=None, config=None):
+ def __init__(self, obj, doc=None, config={}):
self._f = obj
+ self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
-
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
- elif callable(obj):
+ elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
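
The net effect of these changes is easiest to check by rendering one
docstring by hand: ``get_doc_object`` picks the matching wrapper class and
``str()`` yields the mangled reST. A rough sketch, assuming sphinx is
importable and doc/sphinxext is on sys.path::

    from numpydoc.docscrape_sphinx import get_doc_object

    def scale(x, y=1.0):
        """Scale x by y.

        Parameters
        ----------
        x : float
            Value to scale.
        """
        return x * y

    print(get_doc_object(scale))  # reST with a '**x** : float' field list
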
diff --git a/doc/sphinxext/numpydoc/linkcode.py b/doc/sphinxext/numpydoc/linkcode.py
new file mode 100644
index 0000000000..1ad3ab82cb
--- /dev/null
+++ b/doc/sphinxext/numpydoc/linkcode.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+"""
+ linkcode
+ ~~~~~~~~
+
+ Add external links to module code in Python object descriptions.
+
+ :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import warnings
+import collections
+
+warnings.warn("This extension has been accepted to Sphinx upstream. "
+ "Use the version from there (Sphinx >= 1.2) "
+ "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode",
+ FutureWarning, stacklevel=1)
+
+
+from docutils import nodes
+
+from sphinx import addnodes
+from sphinx.locale import _
+from sphinx.errors import SphinxError
+
+class LinkcodeError(SphinxError):
+ category = "linkcode error"
+
+def doctree_read(app, doctree):
+ env = app.builder.env
+
+ resolve_target = getattr(env.config, 'linkcode_resolve', None)
+ if not isinstance(env.config.linkcode_resolve, collections.Callable):
+ raise LinkcodeError(
+ "Function `linkcode_resolve` is not given in conf.py")
+
+ domain_keys = dict(
+ py=['module', 'fullname'],
+ c=['names'],
+ cpp=['names'],
+ js=['object', 'fullname'],
+ )
+
+ for objnode in doctree.traverse(addnodes.desc):
+ domain = objnode.get('domain')
+ uris = set()
+ for signode in objnode:
+ if not isinstance(signode, addnodes.desc_signature):
+ continue
+
+ # Convert signode to a specified format
+ info = {}
+ for key in domain_keys.get(domain, []):
+ value = signode.get(key)
+ if not value:
+ value = ''
+ info[key] = value
+ if not info:
+ continue
+
+ # Call user code to resolve the link
+ uri = resolve_target(domain, info)
+ if not uri:
+ # no source
+ continue
+
+ if uri in uris or not uri:
+ # only one link per name, please
+ continue
+ uris.add(uri)
+
+ onlynode = addnodes.only(expr='html')
+ onlynode += nodes.reference('', '', internal=False, refuri=uri)
+ onlynode[0] += nodes.inline('', _('[source]'),
+ classes=['viewcode-link'])
+ signode += onlynode
+
+def setup(app):
+ app.connect('doctree-read', doctree_read)
+ app.add_config_value('linkcode_resolve', None, '')
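
The only hook the extension needs from conf.py is a resolver returning a URL
(or None) per documented object. A hypothetical sketch; the repository URL
below is illustrative, not part of the patch::

    # conf.py
    extensions = ['numpydoc.linkcode']  # with doc/sphinxext on sys.path

    def linkcode_resolve(domain, info):
        # info carries the keys from domain_keys above ('module', 'fullname')
        if domain != 'py' or not info.get('module'):
            return None  # emit no [source] link for this object
        path = info['module'].replace('.', '/')
        return 'https://github.com/nilearn/nilearn/blob/master/%s.py' % path
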
diff --git a/doc/sphinxext/numpy_ext/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc.py
similarity index 63%
rename from doc/sphinxext/numpy_ext/numpydoc.py
rename to doc/sphinxext/numpydoc/numpydoc.py
index 62adb56ae7..4861aa90ed 100644
--- a/doc/sphinxext/numpy_ext/numpydoc.py
+++ b/doc/sphinxext/numpydoc/numpydoc.py
@@ -10,52 +10,65 @@
- Convert Parameters etc. sections to field lists.
- Convert See Also section to a See also entry.
- Renumber references.
-- Extract the signature from the docstring, if it can't be determined
- otherwise.
+- Extract the signature from the docstring, if it can't be determined otherwise.
-.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
+.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
"""
+from __future__ import division, absolute_import, print_function
-import os
-import re
-import pydoc
-from docscrape_sphinx import get_doc_object
-from docscrape_sphinx import SphinxDocString
-from sphinx.util.compat import Directive
+import os, sys, re, pydoc
+import sphinx
import inspect
+import collections
+
+if sphinx.__version__ < '1.0.1':
+ raise RuntimeError("Sphinx 1.0.1 or newer is required")
+
+from .docscrape_sphinx import get_doc_object, SphinxDocString
+
+if sys.version_info[0] >= 3:
+ sixu = lambda s: s
+else:
+ sixu = lambda s: unicode(s, 'unicode_escape')
def mangle_docstrings(app, what, name, obj, options, lines,
reference_offset=[0]):
cfg = dict(use_plots=app.config.numpydoc_use_plots,
- show_class_members=app.config.numpydoc_show_class_members)
+ show_class_members=app.config.numpydoc_show_class_members,
+ class_members_toctree=app.config.numpydoc_class_members_toctree,
+ )
if what == 'module':
# Strip top title
- title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
- re.I | re.S)
- lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
+ title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'),
+ re.I|re.S)
+ lines[:] = title_re.sub(sixu(''), sixu("\n").join(lines)).split(sixu("\n"))
else:
- doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
- lines[:] = unicode(doc).split(u"\n")
+ doc = get_doc_object(obj, what, sixu("\n").join(lines), config=cfg)
+ if sys.version_info[0] >= 3:
+ doc = str(doc)
+ else:
+ doc = unicode(doc)
+ lines[:] = doc.split(sixu("\n"))
if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
obj.__name__:
if hasattr(obj, '__module__'):
- v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
+ v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__))
else:
v = dict(full_name=obj.__name__)
- lines += [u'', u'.. htmlonly::', '']
- lines += [u' %s' % x for x in
+ lines += [sixu(''), sixu('.. htmlonly::'), sixu('')]
+ lines += [sixu(' %s') % x for x in
(app.config.numpydoc_edit_link % v).split("\n")]
# replace reference numbers so that there are no duplicates
references = []
for line in lines:
line = line.strip()
- m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I)
+ m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I)
if m:
references.append(m.group(1))
@@ -64,38 +77,36 @@ def mangle_docstrings(app, what, name, obj, options, lines,
if references:
for i, line in enumerate(lines):
for r in references:
- if re.match(ur'^\d+$', r):
- new_r = u"R%d" % (reference_offset[0] + int(r))
+ if re.match(sixu('^\\d+$'), r):
+ new_r = sixu("R%d") % (reference_offset[0] + int(r))
else:
- new_r = u"%s%d" % (r, reference_offset[0])
- lines[i] = lines[i].replace(u'[%s]_' % r,
- u'[%s]_' % new_r)
- lines[i] = lines[i].replace(u'.. [%s]' % r,
- u'.. [%s]' % new_r)
+ new_r = sixu("%s%d") % (r, reference_offset[0])
+ lines[i] = lines[i].replace(sixu('[%s]_') % r,
+ sixu('[%s]_') % new_r)
+ lines[i] = lines[i].replace(sixu('.. [%s]') % r,
+ sixu('.. [%s]') % new_r)
reference_offset[0] += len(references)
-
-def mangle_signature(app, what, name, obj,
- options, sig, retann):
+def mangle_signature(app, what, name, obj, options, sig, retann):
# Do not try to inspect classes that don't define `__init__`
if (inspect.isclass(obj) and
(not hasattr(obj, '__init__') or
'initializes x; see ' in pydoc.getdoc(obj.__init__))):
return '', ''
- if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')):
- return
- if not hasattr(obj, '__doc__'):
- return
+ if not (isinstance(obj, collections.Callable) or hasattr(obj, '__argspec_is_invalid_')): return
+ if not hasattr(obj, '__doc__'): return
doc = SphinxDocString(pydoc.getdoc(obj))
if doc['Signature']:
- sig = re.sub(u"^[^(]*", u"", doc['Signature'])
- return sig, u''
-
+ sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature'])
+ return sig, sixu('')
def setup(app, get_doc_object_=get_doc_object):
+ if not hasattr(app, 'add_config_value'):
+ return # probably called by nose, better bail out
+
global get_doc_object
get_doc_object = get_doc_object_
@@ -104,20 +115,20 @@ def setup(app, get_doc_object_=get_doc_object):
app.add_config_value('numpydoc_edit_link', None, False)
app.add_config_value('numpydoc_use_plots', None, False)
app.add_config_value('numpydoc_show_class_members', True, True)
+ app.add_config_value('numpydoc_class_members_toctree', True, True)
# Extra mangling domains
app.add_domain(NumpyPythonDomain)
app.add_domain(NumpyCDomain)
-#-----------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# Docstring-mangling domains
-#-----------------------------------------------------------------------------
+#------------------------------------------------------------------------------
from docutils.statemachine import ViewList
from sphinx.domains.c import CDomain
from sphinx.domains.python import PythonDomain
-
class ManglingDomainBase(object):
directive_mangling_map = {}
@@ -126,11 +137,10 @@ def __init__(self, *a, **kw):
self.wrap_mangling_directives()
def wrap_mangling_directives(self):
- for name, objtype in self.directive_mangling_map.items():
+ for name, objtype in list(self.directive_mangling_map.items()):
self.directives[name] = wrap_mangling_directive(
self.directives[name], objtype)
-
class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
name = 'np'
directive_mangling_map = {
@@ -142,7 +152,7 @@ class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
'staticmethod': 'function',
'attribute': 'attribute',
}
-
+ indices = []
class NumpyCDomain(ManglingDomainBase, CDomain):
name = 'np-c'
@@ -154,7 +164,6 @@ class NumpyCDomain(ManglingDomainBase, CDomain):
'var': 'object',
}
-
def wrap_mangling_directive(base_directive, objtype):
class directive(base_directive):
def run(self):
diff --git a/doc/sphinxext/numpydoc/phantom_import.py b/doc/sphinxext/numpydoc/phantom_import.py
new file mode 100644
index 0000000000..9a60b4a35b
--- /dev/null
+++ b/doc/sphinxext/numpydoc/phantom_import.py
@@ -0,0 +1,167 @@
+"""
+==============
+phantom_import
+==============
+
+Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar
+extensions use docstrings loaded from an XML file.
+
+This extension loads an XML file in the Pydocweb format [1] and
+creates a dummy module that contains the specified docstrings. This
+can be used to get the current docstrings from a Pydocweb instance
+without needing to rebuild the documented module.
+
+.. [1] http://code.google.com/p/pydocweb
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import imp, sys, types, os, inspect, re
+
+def setup(app):
+ app.connect('builder-inited', initialize)
+ app.add_config_value('phantom_import_file', None, True)
+
+def initialize(app):
+ fn = app.config.phantom_import_file
+ if (fn and os.path.isfile(fn)):
+ print("[numpydoc] Phantom importing modules from", fn, "...")
+ import_phantom_module(fn)
+
+#------------------------------------------------------------------------------
+# Creating 'phantom' modules from an XML description
+#------------------------------------------------------------------------------
+def import_phantom_module(xml_file):
+ """
+ Insert a fake Python module to sys.modules, based on a XML file.
+
+ The XML file is expected to conform to Pydocweb DTD. The fake
+ module will contain dummy objects, which guarantee the following:
+
+ - Docstrings are correct.
+ - Class inheritance relationships are correct (if present in XML).
+ - Function argspec is *NOT* correct (even if present in XML).
+ Instead, the function signature is prepended to the function docstring.
+ - Class attributes are *NOT* correct; instead, they are dummy objects.
+
+ Parameters
+ ----------
+ xml_file : str
+ Name of an XML file to read
+
+ """
+ import lxml.etree as etree
+
+ object_cache = {}
+
+ tree = etree.parse(xml_file)
+ root = tree.getroot()
+
+ # Sort items so that
+ # - Base classes come before classes inherited from them
+ # - Modules come before their contents
+ all_nodes = dict([(n.attrib['id'], n) for n in root])
+
+ def _get_bases(node, recurse=False):
+ bases = [x.attrib['ref'] for x in node.findall('base')]
+ if recurse:
+ j = 0
+ while True:
+ try:
+ b = bases[j]
+ except IndexError: break
+ if b in all_nodes:
+ bases.extend(_get_bases(all_nodes[b]))
+ j += 1
+ return bases
+
+ type_index = ['module', 'class', 'callable', 'object']
+
+ def base_cmp(a, b):
+ x = cmp(type_index.index(a.tag), type_index.index(b.tag))
+ if x != 0: return x
+
+ if a.tag == 'class' and b.tag == 'class':
+ a_bases = _get_bases(a, recurse=True)
+ b_bases = _get_bases(b, recurse=True)
+ x = cmp(len(a_bases), len(b_bases))
+ if x != 0: return x
+ if a.attrib['id'] in b_bases: return -1
+ if b.attrib['id'] in a_bases: return 1
+
+ return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))
+
+ nodes = root.getchildren()
+ nodes.sort(base_cmp)
+
+ # Create phantom items
+ for node in nodes:
+ name = node.attrib['id']
+ doc = (node.text or '').decode('string-escape') + "\n"
+ if doc == "\n": doc = ""
+
+ # create parent, if missing
+ parent = name
+ while True:
+ parent = '.'.join(parent.split('.')[:-1])
+ if not parent: break
+ if parent in object_cache: break
+ obj = imp.new_module(parent)
+ object_cache[parent] = obj
+ sys.modules[parent] = obj
+
+ # create object
+ if node.tag == 'module':
+ obj = imp.new_module(name)
+ obj.__doc__ = doc
+ sys.modules[name] = obj
+ elif node.tag == 'class':
+ bases = [object_cache[b] for b in _get_bases(node)
+ if b in object_cache]
+ bases.append(object)
+ init = lambda self: None
+ init.__doc__ = doc
+ obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
+ obj.__name__ = name.split('.')[-1]
+ elif node.tag == 'callable':
+ funcname = node.attrib['id'].split('.')[-1]
+ argspec = node.attrib.get('argspec')
+ if argspec:
+ argspec = re.sub('^[^(]*', '', argspec)
+ doc = "%s%s\n\n%s" % (funcname, argspec, doc)
+ obj = lambda: 0
+ obj.__argspec_is_invalid_ = True
+ if sys.version_info[0] >= 3:
+ obj.__name__ = funcname
+ else:
+ obj.func_name = funcname
+ obj.__name__ = name
+ obj.__doc__ = doc
+ if inspect.isclass(object_cache[parent]):
+ obj.__objclass__ = object_cache[parent]
+ else:
+ class Dummy(object): pass
+ obj = Dummy()
+ obj.__name__ = name
+ obj.__doc__ = doc
+ if inspect.isclass(object_cache[parent]):
+ obj.__get__ = lambda: None
+ object_cache[name] = obj
+
+ if parent:
+ if inspect.ismodule(object_cache[parent]):
+ obj.__module__ = parent
+ setattr(object_cache[parent], name.split('.')[-1], obj)
+
+ # Populate items
+ for node in root:
+ obj = object_cache.get(node.attrib['id'])
+ if obj is None: continue
+ for ref in node.findall('ref'):
+ if node.tag == 'class':
+ if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
+ setattr(obj, ref.attrib['name'],
+ object_cache.get(ref.attrib['ref']))
+ else:
+ setattr(obj, ref.attrib['name'],
+ object_cache.get(ref.attrib['ref']))
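
Wiring it up takes two conf.py lines; the dump file name here is hypothetical::

    # conf.py
    extensions = ['numpydoc.phantom_import']
    phantom_import_file = 'pydocweb-dump.xml'  # XML exported from Pydocweb
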
diff --git a/doc/sphinxext/numpydoc/plot_directive.py b/doc/sphinxext/numpydoc/plot_directive.py
new file mode 100644
index 0000000000..2014f85707
--- /dev/null
+++ b/doc/sphinxext/numpydoc/plot_directive.py
@@ -0,0 +1,642 @@
+"""
+A special directive for generating a matplotlib plot.
+
+.. warning::
+
+ This is a hacked version of plot_directive.py from Matplotlib.
+ It's very much subject to change!
+
+
+Usage
+-----
+
+Can be used like this::
+
+ .. plot:: examples/example.py
+
+ .. plot::
+
+ import matplotlib.pyplot as plt
+ plt.plot([1,2,3], [4,5,6])
+
+ .. plot::
+
+ A plotting example:
+
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot([1,2,3], [4,5,6])
+
+The content is interpreted as doctest formatted if it has a line starting
+with ``>>>``.
+
+The ``plot`` directive supports the options
+
+ format : {'python', 'doctest'}
+ Specify the format of the input
+
+ include-source : bool
+ Whether to display the source code. Default can be changed in conf.py
+
+and the ``image`` directive options ``alt``, ``height``, ``width``,
+``scale``, ``align``, ``class``.
+
+Configuration options
+---------------------
+
+The plot directive has the following configuration options:
+
+ plot_include_source
+ Default value for the include-source option
+
+ plot_pre_code
+ Code that should be executed before each plot.
+
+ plot_basedir
+        Base directory, to which plot:: file names are relative.
+        (If None or empty, file names are relative to the directory where
+        the file containing the directive is.)
+
+ plot_formats
+ File formats to generate. List of tuples or strings::
+
+ [(suffix, dpi), suffix, ...]
+
+ that determine the file format and the DPI. For entries whose
+        DPI is omitted, sensible defaults are chosen.
+
+ plot_html_show_formats
+ Whether to show links to the files in HTML.
+
+TODO
+----
+
+* Refactor Latex output; now it's plain images, but it would be nice
+ to make them appear side-by-side, or in floats.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
+import sphinx
+
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+    from cStringIO import StringIO
+
+warnings.warn("A plot_directive module is also available under "
+ "matplotlib.sphinxext; expect this numpydoc.plot_directive "
+ "module to be deprecated after relevant features have been "
+ "integrated there.",
+ FutureWarning, stacklevel=2)
+
+
+#------------------------------------------------------------------------------
+# Registration hook
+#------------------------------------------------------------------------------
+
+def setup(app):
+ setup.app = app
+ setup.config = app.config
+ setup.confdir = app.confdir
+
+ app.add_config_value('plot_pre_code', '', True)
+ app.add_config_value('plot_include_source', False, True)
+ app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
+ app.add_config_value('plot_basedir', None, True)
+ app.add_config_value('plot_html_show_formats', True, True)
+
+ app.add_directive('plot', plot_directive, True, (0, 1, False),
+ **plot_directive_options)
+
+#------------------------------------------------------------------------------
+# plot:: directive
+#------------------------------------------------------------------------------
+from docutils.parsers.rst import directives
+from docutils import nodes
+
+def plot_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ return run(arguments, content, options, state_machine, state, lineno)
+plot_directive.__doc__ = __doc__
+
+def _option_boolean(arg):
+ if not arg or not arg.strip():
+ # no argument given, assume used as a flag
+ return True
+ elif arg.strip().lower() in ('no', '0', 'false'):
+ return False
+ elif arg.strip().lower() in ('yes', '1', 'true'):
+ return True
+ else:
+ raise ValueError('"%s" unknown boolean' % arg)
+
+def _option_format(arg):
+    return directives.choice(arg, ('python', 'doctest'))
+
+def _option_align(arg):
+ return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
+ "right"))
+
+plot_directive_options = {'alt': directives.unchanged,
+ 'height': directives.length_or_unitless,
+ 'width': directives.length_or_percentage_or_unitless,
+ 'scale': directives.nonnegative_int,
+ 'align': _option_align,
+ 'class': directives.class_option,
+ 'include-source': _option_boolean,
+ 'format': _option_format,
+ }
+
+#------------------------------------------------------------------------------
+# Generating output
+#------------------------------------------------------------------------------
+
+from docutils import nodes, utils
+
+try:
+ # Sphinx depends on either Jinja or Jinja2
+ import jinja2
+ def format_template(template, **kw):
+ return jinja2.Template(template).render(**kw)
+except ImportError:
+ import jinja
+ def format_template(template, **kw):
+ return jinja.from_string(template, **kw)
+
+TEMPLATE = """
+{{ source_code }}
+
+{{ only_html }}
+
+ {% if source_link or (html_show_formats and not multi_image) %}
+ (
+ {%- if source_link -%}
+ `Source code <{{ source_link }}>`__
+ {%- endif -%}
+ {%- if html_show_formats and not multi_image -%}
+ {%- for img in images -%}
+ {%- for fmt in img.formats -%}
+ {%- if source_link or not loop.first -%}, {% endif -%}
+ `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
+ {%- endfor -%}
+ {%- endfor -%}
+ {%- endif -%}
+ )
+ {% endif %}
+
+ {% for img in images %}
+ .. figure:: {{ build_dir }}/{{ img.basename }}.png
+ {%- for option in options %}
+ {{ option }}
+ {% endfor %}
+
+ {% if html_show_formats and multi_image -%}
+ (
+ {%- for fmt in img.formats -%}
+ {%- if not loop.first -%}, {% endif -%}
+ `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
+ {%- endfor -%}
+ )
+ {%- endif -%}
+ {% endfor %}
+
+{{ only_latex }}
+
+ {% for img in images %}
+ .. image:: {{ build_dir }}/{{ img.basename }}.pdf
+ {% endfor %}
+
+"""
+
+class ImageFile(object):
+ def __init__(self, basename, dirname):
+ self.basename = basename
+ self.dirname = dirname
+ self.formats = []
+
+ def filename(self, format):
+ return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
+
+ def filenames(self):
+ return [self.filename(fmt) for fmt in self.formats]
+
+def run(arguments, content, options, state_machine, state, lineno):
+ if arguments and content:
+ raise RuntimeError("plot:: directive can't have both args and content")
+
+ document = state_machine.document
+ config = document.settings.env.config
+
+ options.setdefault('include-source', config.plot_include_source)
+
+ # determine input
+ rst_file = document.attributes['source']
+ rst_dir = os.path.dirname(rst_file)
+
+ if arguments:
+ if not config.plot_basedir:
+ source_file_name = os.path.join(rst_dir,
+ directives.uri(arguments[0]))
+ else:
+ source_file_name = os.path.join(setup.confdir, config.plot_basedir,
+ directives.uri(arguments[0]))
+ code = open(source_file_name, 'r').read()
+ output_base = os.path.basename(source_file_name)
+ else:
+ source_file_name = rst_file
+ code = textwrap.dedent("\n".join(map(str, content)))
+ counter = document.attributes.get('_plot_counter', 0) + 1
+ document.attributes['_plot_counter'] = counter
+ base, ext = os.path.splitext(os.path.basename(source_file_name))
+ output_base = '%s-%d.py' % (base, counter)
+
+ base, source_ext = os.path.splitext(output_base)
+ if source_ext in ('.py', '.rst', '.txt'):
+ output_base = base
+ else:
+ source_ext = ''
+
+ # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
+ output_base = output_base.replace('.', '-')
+
+ # is it in doctest format?
+ is_doctest = contains_doctest(code)
+ if 'format' in options:
+ if options['format'] == 'python':
+ is_doctest = False
+ else:
+ is_doctest = True
+
+ # determine output directory name fragment
+ source_rel_name = relpath(source_file_name, setup.confdir)
+ source_rel_dir = os.path.dirname(source_rel_name)
+ while source_rel_dir.startswith(os.path.sep):
+ source_rel_dir = source_rel_dir[1:]
+
+ # build_dir: where to place output files (temporarily)
+ build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
+ 'plot_directive',
+ source_rel_dir)
+ if not os.path.exists(build_dir):
+ os.makedirs(build_dir)
+
+ # output_dir: final location in the builder's directory
+ dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
+ source_rel_dir))
+
+ # how to link to files from the RST file
+ dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
+ source_rel_dir).replace(os.path.sep, '/')
+ build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
+ source_link = dest_dir_link + '/' + output_base + source_ext
+
+ # make figures
+ try:
+ results = makefig(code, source_file_name, build_dir, output_base,
+ config)
+ errors = []
+ except PlotError as err:
+ reporter = state.memo.reporter
+ sm = reporter.system_message(
+ 2, "Exception occurred in plotting %s: %s" % (output_base, err),
+ line=lineno)
+ results = [(code, [])]
+ errors = [sm]
+
+ # generate output restructuredtext
+ total_lines = []
+ for j, (code_piece, images) in enumerate(results):
+ if options['include-source']:
+ if is_doctest:
+ lines = ['']
+ lines += [row.rstrip() for row in code_piece.split('\n')]
+ else:
+ lines = ['.. code-block:: python', '']
+ lines += [' %s' % row.rstrip()
+ for row in code_piece.split('\n')]
+ source_code = "\n".join(lines)
+ else:
+ source_code = ""
+
+ opts = [':%s: %s' % (key, val) for key, val in list(options.items())
+ if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
+
+ only_html = ".. only:: html"
+ only_latex = ".. only:: latex"
+
+ if j == 0:
+ src_link = source_link
+ else:
+ src_link = None
+
+ result = format_template(
+ TEMPLATE,
+ dest_dir=dest_dir_link,
+ build_dir=build_dir_link,
+ source_link=src_link,
+ multi_image=len(images) > 1,
+ only_html=only_html,
+ only_latex=only_latex,
+ options=opts,
+ images=images,
+ source_code=source_code,
+ html_show_formats=config.plot_html_show_formats)
+
+ total_lines.extend(result.split("\n"))
+ total_lines.extend("\n")
+
+ if total_lines:
+ state_machine.insert_input(total_lines, source=source_file_name)
+
+ # copy image files to builder's output directory
+ if not os.path.exists(dest_dir):
+ os.makedirs(dest_dir)
+
+ for code_piece, images in results:
+ for img in images:
+ for fn in img.filenames():
+ shutil.copyfile(fn, os.path.join(dest_dir,
+ os.path.basename(fn)))
+
+ # copy script (if necessary)
+ if source_file_name == rst_file:
+ target_name = os.path.join(dest_dir, output_base + source_ext)
+ f = open(target_name, 'w')
+ f.write(unescape_doctest(code))
+ f.close()
+
+ return errors
+
+
+#------------------------------------------------------------------------------
+# Run code and capture figures
+#------------------------------------------------------------------------------
+
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+import matplotlib.image as image
+from matplotlib import _pylab_helpers
+
+
+def contains_doctest(text):
+ try:
+ # check if it's valid Python as-is
+ compile(text, '', 'exec')
+ return False
+ except SyntaxError:
+ pass
+ r = re.compile(r'^\s*>>>', re.M)
+ m = r.search(text)
+ return bool(m)
+
+def unescape_doctest(text):
+ """
+ Extract code from a piece of text, which contains either Python code
+ or doctests.
+
+ """
+ if not contains_doctest(text):
+ return text
+
+ code = ""
+ for line in text.split("\n"):
+ m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
+ if m:
+ code += m.group(2) + "\n"
+ elif line.strip():
+ code += "# " + line.strip() + "\n"
+ else:
+ code += "\n"
+ return code
+
+def split_code_at_show(text):
+ """
+ Split code at plt.show()
+
+ """
+
+ parts = []
+ is_doctest = contains_doctest(text)
+
+ part = []
+ for line in text.split("\n"):
+ if (not is_doctest and line.strip() == 'plt.show()') or \
+ (is_doctest and line.strip() == '>>> plt.show()'):
+ part.append(line)
+ parts.append("\n".join(part))
+ part = []
+ else:
+ part.append(line)
+ if "\n".join(part).strip():
+ parts.append("\n".join(part))
+ return parts
+
+class PlotError(RuntimeError):
+ pass
+
+def run_code(code, code_path, ns=None):
+ # Change the working directory to the directory of the example, so
+ # it can get at its data files, if any.
+ pwd = os.getcwd()
+ old_sys_path = list(sys.path)
+ if code_path is not None:
+ dirname = os.path.abspath(os.path.dirname(code_path))
+ os.chdir(dirname)
+ sys.path.insert(0, dirname)
+
+ # Redirect stdout
+ stdout = sys.stdout
+ sys.stdout = StringIO()
+
+ # Reset sys.argv
+ old_sys_argv = sys.argv
+ sys.argv = [code_path]
+
+ try:
+ try:
+ code = unescape_doctest(code)
+ if ns is None:
+ ns = {}
+ if not ns:
+ exec(setup.config.plot_pre_code, ns)
+ exec(code, ns)
+ except (Exception, SystemExit) as err:
+ raise PlotError(traceback.format_exc())
+ finally:
+ os.chdir(pwd)
+ sys.argv = old_sys_argv
+ sys.path[:] = old_sys_path
+ sys.stdout = stdout
+ return ns
+
+
+#------------------------------------------------------------------------------
+# Generating figures
+#------------------------------------------------------------------------------
+
+def out_of_date(original, derived):
+ """
+ Returns True if derivative is out-of-date wrt original,
+ both of which are full file paths.
+ """
+ return (not os.path.exists(derived)
+ or os.stat(derived).st_mtime < os.stat(original).st_mtime)
+
+
+def makefig(code, code_path, output_dir, output_base, config):
+ """
+ Run a pyplot script *code* and save the images under *output_dir*
+ with file names derived from *output_base*
+
+ """
+
+ # -- Parse format list
+ default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
+ formats = []
+ for fmt in config.plot_formats:
+ if isinstance(fmt, str):
+ formats.append((fmt, default_dpi.get(fmt, 80)))
+ elif type(fmt) in (tuple, list) and len(fmt)==2:
+ formats.append((str(fmt[0]), int(fmt[1])))
+ else:
+ raise PlotError('invalid image format "%r" in plot_formats' % fmt)
+
+ # -- Try to determine if all images already exist
+
+ code_pieces = split_code_at_show(code)
+
+ # Look for single-figure output files first
+ all_exists = True
+ img = ImageFile(output_base, output_dir)
+ for format, dpi in formats:
+ if out_of_date(code_path, img.filename(format)):
+ all_exists = False
+ break
+ img.formats.append(format)
+
+ if all_exists:
+ return [(code, [img])]
+
+ # Then look for multi-figure output files
+ results = []
+ all_exists = True
+ for i, code_piece in enumerate(code_pieces):
+ images = []
+ for j in range(1000):
+ img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
+ for format, dpi in formats:
+ if out_of_date(code_path, img.filename(format)):
+ all_exists = False
+ break
+ img.formats.append(format)
+
+ # assume that if we have one, we have them all
+ if not all_exists:
+ all_exists = (j > 0)
+ break
+ images.append(img)
+ if not all_exists:
+ break
+ results.append((code_piece, images))
+
+ if all_exists:
+ return results
+
+ # -- We didn't find the files, so build them
+
+ results = []
+ ns = {}
+
+ for i, code_piece in enumerate(code_pieces):
+ # Clear between runs
+ plt.close('all')
+
+ # Run code
+ run_code(code_piece, code_path, ns)
+
+ # Collect images
+ images = []
+ fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
+ for j, figman in enumerate(fig_managers):
+ if len(fig_managers) == 1 and len(code_pieces) == 1:
+ img = ImageFile(output_base, output_dir)
+ else:
+ img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
+ output_dir)
+ images.append(img)
+ for format, dpi in formats:
+ try:
+ figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
+            except BaseException as err:
+ raise PlotError(traceback.format_exc())
+ img.formats.append(format)
+
+ # Results
+ results.append((code_piece, images))
+
+ return results
+
+
+#------------------------------------------------------------------------------
+# Relative pathnames
+#------------------------------------------------------------------------------
+
+try:
+ from os.path import relpath
+except ImportError:
+ # Copied from Python 2.7
+ if 'posix' in sys.builtin_module_names:
+ def relpath(path, start=os.path.curdir):
+ """Return a relative version of a path"""
+ from os.path import sep, curdir, join, abspath, commonprefix, \
+ pardir
+
+ if not path:
+ raise ValueError("no path specified")
+
+ start_list = abspath(start).split(sep)
+ path_list = abspath(path).split(sep)
+
+ # Work out how much of the filepath is shared by start and path.
+ i = len(commonprefix([start_list, path_list]))
+
+ rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return curdir
+ return join(*rel_list)
+ elif 'nt' in sys.builtin_module_names:
+ def relpath(path, start=os.path.curdir):
+ """Return a relative version of a path"""
+ from os.path import sep, curdir, join, abspath, commonprefix, \
+ pardir, splitunc
+
+ if not path:
+ raise ValueError("no path specified")
+ start_list = abspath(start).split(sep)
+ path_list = abspath(path).split(sep)
+ if start_list[0].lower() != path_list[0].lower():
+ unc_path, rest = splitunc(path)
+ unc_start, rest = splitunc(start)
+ if bool(unc_path) ^ bool(unc_start):
+ raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
+ % (path, start))
+ else:
+ raise ValueError("path is on drive %s, start on drive %s"
+ % (path_list[0], start_list[0]))
+ # Work out how much of the filepath is shared by start and path.
+ for i in range(min(len(start_list), len(path_list))):
+ if start_list[i].lower() != path_list[i].lower():
+ break
+ else:
+ i += 1
+
+ rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return curdir
+ return join(*rel_list)
+ else:
+ raise RuntimeError("Unsupported platform (no relpath available!)")
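
The configuration options documented in the module docstring map onto conf.py
entries such as these (values illustrative)::

    # conf.py
    extensions = ['numpydoc.plot_directive']

    plot_include_source = True                 # default for :include-source:
    plot_pre_code = 'import numpy as np'       # executed before each snippet
    plot_formats = [('png', 80), 'hires.png', ('pdf', 50)]  # (suffix, dpi)
    plot_html_show_formats = True              # link png/pdf variants in HTML
    plot_basedir = None                        # resolve plot:: paths per-file
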
diff --git a/doc/sphinxext/numpydoc/traitsdoc.py b/doc/sphinxext/numpydoc/traitsdoc.py
new file mode 100644
index 0000000000..596c54eb38
--- /dev/null
+++ b/doc/sphinxext/numpydoc/traitsdoc.py
@@ -0,0 +1,142 @@
+"""
+=========
+traitsdoc
+=========
+
+Sphinx extension that handles docstrings in the Numpy standard format, [1]
+and supports Traits [2].
+
+This extension can be used as a replacement for ``numpydoc`` when support
+for Traits is required.
+
+.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
+.. [2] http://code.enthought.com/projects/traits/
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import inspect
+import os
+import pydoc
+import collections
+
+from . import docscrape
+from . import docscrape_sphinx
+from .docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString
+
+from . import numpydoc
+
+from . import comment_eater
+
+class SphinxTraitsDoc(SphinxClassDoc):
+    def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc,
+                 config={}):
+        if not inspect.isclass(cls):
+            raise ValueError("Initialise using a class. Got %r" % cls)
+        self._cls = cls
+        self.load_config(config)
+
+ if modulename and not modulename.endswith('.'):
+ modulename += '.'
+ self._mod = modulename
+ self._name = cls.__name__
+ self._func_doc = func_doc
+
+ docstring = pydoc.getdoc(cls)
+ docstring = docstring.split('\n')
+
+ # De-indent paragraph
+ try:
+ indent = min(len(s) - len(s.lstrip()) for s in docstring
+ if s.strip())
+ except ValueError:
+ indent = 0
+
+ for n,line in enumerate(docstring):
+ docstring[n] = docstring[n][indent:]
+
+ self._doc = docscrape.Reader(docstring)
+ self._parsed_data = {
+ 'Signature': '',
+ 'Summary': '',
+ 'Description': [],
+ 'Extended Summary': [],
+ 'Parameters': [],
+ 'Returns': [],
+ 'Raises': [],
+ 'Warns': [],
+ 'Other Parameters': [],
+ 'Traits': [],
+ 'Methods': [],
+ 'See Also': [],
+ 'Notes': [],
+ 'References': '',
+ 'Example': '',
+ 'Examples': '',
+ 'index': {}
+ }
+
+ self._parse()
+
+ def _str_summary(self):
+ return self['Summary'] + ['']
+
+ def _str_extended_summary(self):
+ return self['Description'] + self['Extended Summary'] + ['']
+
+ def __str__(self, indent=0, func_role="func"):
+ out = []
+ out += self._str_signature()
+ out += self._str_index() + ['']
+ out += self._str_summary()
+ out += self._str_extended_summary()
+ for param_list in ('Parameters', 'Traits', 'Methods',
+ 'Returns','Raises'):
+ out += self._str_param_list(param_list)
+ out += self._str_see_also("obj")
+ out += self._str_section('Notes')
+ out += self._str_references()
+ out += self._str_section('Example')
+ out += self._str_section('Examples')
+ out = self._str_indent(out,indent)
+ return '\n'.join(out)
+
+def looks_like_issubclass(obj, classname):
+ """ Return True if the object has a class or superclass with the given class
+ name.
+
+ Ignores old-style classes.
+ """
+ t = obj
+ if t.__name__ == classname:
+ return True
+ for klass in t.__mro__:
+ if klass.__name__ == classname:
+ return True
+ return False
+
+def get_doc_object(obj, what=None, config=None):
+    if config is None:
+        config = {}
+ if what is None:
+ if inspect.isclass(obj):
+ what = 'class'
+ elif inspect.ismodule(obj):
+ what = 'module'
+ elif isinstance(obj, collections.Callable):
+ what = 'function'
+ else:
+ what = 'object'
+ if what == 'class':
+ doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config)
+ if looks_like_issubclass(obj, 'HasTraits'):
+ for name, trait, comment in comment_eater.get_class_traits(obj):
+ # Exclude private traits.
+ if not name.startswith('_'):
+ doc['Traits'].append((name, trait, comment.splitlines()))
+ return doc
+ elif what in ('function', 'method'):
+ return SphinxFunctionDoc(obj, '', config=config)
+ else:
+ return SphinxDocString(pydoc.getdoc(obj), config=config)
+
+def setup(app):
+ # init numpydoc
+ numpydoc.setup(app, get_doc_object)
+
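
Because ``setup()`` just forwards the Traits-aware ``get_doc_object`` to
``numpydoc.setup()``, enabling it is a drop-in swap::

    # conf.py -- use traitsdoc in place of plain numpydoc
    extensions = ['numpydoc.traitsdoc']
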
diff --git a/doc/sphinxext/sphinx_gallery/__init__.py b/doc/sphinxext/sphinx_gallery/__init__.py
index 247d21aebe..e113f97d2a 100644
--- a/doc/sphinxext/sphinx_gallery/__init__.py
+++ b/doc/sphinxext/sphinx_gallery/__init__.py
@@ -1,11 +1,10 @@
"""
-==============
Sphinx Gallery
==============
"""
import os
-__version__ = '0.0.11'
+__version__ = '0.1.11'
def glr_path_static():
diff --git a/doc/sphinxext/sphinx_gallery/_static/broken_example.png b/doc/sphinxext/sphinx_gallery/_static/broken_example.png
new file mode 100644
index 0000000000..4fea24e7df
Binary files /dev/null and b/doc/sphinxext/sphinx_gallery/_static/broken_example.png differ
diff --git a/doc/sphinxext/sphinx_gallery/_static/gallery.css b/doc/sphinxext/sphinx_gallery/_static/gallery.css
index 623003ee25..37047a9b91 100644
--- a/doc/sphinxext/sphinx_gallery/_static/gallery.css
+++ b/doc/sphinxext/sphinx_gallery/_static/gallery.css
@@ -1,106 +1,192 @@
-div.sphx-glr-thumbContainer {
+/*
+Sphinx-Gallery provides CSS overrides compatible with the default Sphinx themes.
+Tested for Sphinx 1.3.1 for all themes: default, alabaster, sphinxdoc,
+scrolls, agogo, traditional, nature, haiku, pyramid
+Tested for Read the Docs theme 0.1.7 */
+.sphx-glr-thumbcontainer {
+ background: #fff;
+ border: solid #fff 1px;
+ -moz-border-radius: 5px;
+ -webkit-border-radius: 5px;
+ border-radius: 5px;
box-shadow: none;
- background: #FFF;
+ float: left;
margin: 5px;
- padding-top: 5px;
min-height: 230px;
- border: solid white 1px;
+ padding-top: 5px;
+ position: relative;
+}
+.sphx-glr-thumbcontainer:hover {
+ border: solid #b4ddfc 1px;
+ box-shadow: 0 0 15px rgba(142, 176, 202, 0.5);
+}
+.sphx-glr-thumbcontainer a.internal {
+ bottom: 0;
+ display: block;
+ left: 0;
+ padding: 150px 10px 0;
+ position: absolute;
+ right: 0;
+ top: 0;
+}
+/* The next rule keeps the Sphinx traditional theme from covering the
+whole thumbnail with its default link background color */
+.sphx-glr-thumbcontainer a.internal:hover {
+ background-color: transparent;
+}
+
+.sphx-glr-thumbcontainer p {
+ margin: 0 0 .1em 0;
+}
+.sphx-glr-thumbcontainer .figure {
+ margin: 10px;
+ width: 160px;
+}
+.sphx-glr-thumbcontainer img {
+ display: inline;
+ max-height: 160px;
+ width: 160px;
+}
+.sphx-glr-thumbcontainer[tooltip]:hover:after {
+ background: rgba(0, 0, 0, 0.8);
-webkit-border-radius: 5px;
-moz-border-radius: 5px;
border-radius: 5px;
- float: left;
- position: relative; }
- div.sphx-glr-thumbContainer:hover {
- box-shadow: 0 0 15px rgba(142, 176, 202, 0.5);
- border: solid #B4DDFC 1px; }
- div.sphx-glr-thumbContainer a.internal {
- display: block;
- position: absolute;
- padding: 150px 10px 0px 10px;
- top: 0px;
- right: 0px;
- bottom: 0px;
- left: 0px; }
- div.sphx-glr-thumbContainer p {
- margin: 0 0 .1em 0; }
- div.sphx-glr-thumbContainer .figure {
- margin: 10px;
- width: 160px; }
- div.sphx-glr-thumbContainer img {
- max-width: 100%;
- max-height: 160px;
- display: inline; }
- div.sphx-glr-thumbContainer[tooltip]:hover:after {
- background: rgba(0, 0, 0, 0.8);
- -webkit-border-radius: 5px;
- -moz-border-radius: 5px;
- border-radius: 5px;
- color: white;
- content: attr(tooltip);
- left: 95%;
- padding: 5px 15px;
- position: absolute;
- z-index: 98;
- width: 220px;
- bottom: 52%; }
- div.sphx-glr-thumbContainer[tooltip]:hover:before {
- content: "";
- position: absolute;
- z-index: 99;
- border: solid;
- border-color: #333 transparent;
- border-width: 18px 0px 0px 20px;
- left: 85%;
- bottom: 58%; }
-
-div.sphx-glr-script-out div.highlight {
- background-color: transparent;
+ color: #fff;
+ content: attr(tooltip);
+ left: 95%;
+ padding: 5px 15px;
+ position: absolute;
+ z-index: 98;
+ width: 220px;
+ bottom: 52%;
+}
+.sphx-glr-thumbcontainer[tooltip]:hover:before {
+ border: solid;
+ border-color: #333 transparent;
+ border-width: 18px 0 0 20px;
+ bottom: 58%;
+ content: '';
+ left: 85%;
+ position: absolute;
+ z-index: 99;
}
-p.sphx-glr-script-out {
- margin: -.9ex 0ex;
- color: #888;
+.highlight-pytb pre {
+ background-color: #ffe4e4;
+ border: 1px solid #f66;
+ margin-top: 10px;
+ padding: 7px;
}
-.sphx-glr-script-out pre {
- overflow: auto;
- word-break: break-word;
+.sphx-glr-script-out {
+ color: #888;
+ margin: 0;
+}
+.sphx-glr-script-out .highlight {
+ background-color: transparent;
+ margin-left: 2.5em;
+ margin-top: -1.4em;
+}
+.sphx-glr-script-out .highlight pre {
+ background-color: #fafae2;
+ border: 0;
max-height: 30em;
- background-color: #FAFAE2;
- border: none;
- margin-left: 1ex;
- margin-top: 0px;
+ overflow: auto;
padding-left: 1ex;
+ margin: 0px;
+ word-break: break-word;
}
-
-p.sphx-glr-horizontal {
- margin-bottom: 0px;
+.sphx-glr-script-out + p {
+ margin-top: 1.8em;
+}
+blockquote.sphx-glr-script-out {
+ margin-left: 0pt;
}
-/* Paragraph following an output are a bit more indented */
-blockquote.sphx-glr-script-out+p {
- margin-top: 1.8em;
+div.sphx-glr-footer {
+ text-align: center;
}
div.sphx-glr-download {
- padding: 1ex;
- margin: 1em auto 1ex auto;
- border-radius: 4px;
- max-width: 45ex;
- background-color: #ffc;
- border: 1px solid #C2C22D;
+ display: inline-block;
+ margin: 1em auto 1ex 2ex;
+ vertical-align: middle;
}
div.sphx-glr-download a {
- color: #4B4600;
+ background-color: #ffc;
+ background-image: linear-gradient(to bottom, #FFC, #d5d57e);
+ border-radius: 4px;
+ border: 1px solid #c2c22d;
+ color: #000;
+ display: inline-block;
+ /* Not valid in old browser, hence we keep the line above to override */
+ display: table-caption;
+ font-weight: bold;
+ padding: 1ex;
+ text-align: center;
+}
+
+/* The last child of a download button is the file name */
+div.sphx-glr-download a span:last-child {
+ font-size: smaller;
+}
+
+@media (min-width: 20em) {
+ div.sphx-glr-download a {
+ min-width: 10em;
+ }
+}
+
+@media (min-width: 30em) {
+ div.sphx-glr-download a {
+ min-width: 13em;
+ }
+}
+
+@media (min-width: 40em) {
+ div.sphx-glr-download a {
+ min-width: 16em;
+ }
+}
+
+
+div.sphx-glr-download code.download {
+ display: inline-block;
+ white-space: normal;
+ word-break: normal;
+ overflow-wrap: break-word;
+ /* border and background are given by the enclosing 'a' */
+ border: none;
+ background: none;
+}
+
+div.sphx-glr-download a:hover {
+ box-shadow: inset 0 1px 0 rgba(255,255,255,.1), 0 1px 5px rgba(0,0,0,.25);
+ text-decoration: none;
+ background-image: none;
+ background-color: #d5d57e;
}
ul.sphx-glr-horizontal {
- padding: 0px;
- list-style: none; }
- ul.sphx-glr-horizontal li {
- display: inline; }
- ul.sphx-glr-horizontal img {
- height: auto !important; }
+ list-style: none;
+ padding: 0;
+}
+ul.sphx-glr-horizontal li {
+ display: inline;
+}
+ul.sphx-glr-horizontal img {
+ height: auto !important;
+}
-/*# sourceMappingURL=gallery.css.map */
+p.sphx-glr-signature a.reference.external {
+ -moz-border-radius: 5px;
+ -webkit-border-radius: 5px;
+ border-radius: 5px;
+ padding: 3px;
+ font-size: 75%;
+ text-align: right;
+ margin-left: auto;
+ display: table;
+}
diff --git a/doc/sphinxext/sphinx_gallery/backreferences.py b/doc/sphinxext/sphinx_gallery/backreferences.py
index 4df5d3df61..32e4dd913f 100644
--- a/doc/sphinxext/sphinx_gallery/backreferences.py
+++ b/doc/sphinxext/sphinx_gallery/backreferences.py
@@ -2,11 +2,10 @@
# Author: Óscar Nájera
# License: 3-clause BSD
"""
-========================
Backreferences Generator
========================
-Reviews generated example files in order to keep track of used modules
+Parses example file code in order to keep track of used functions
"""
from __future__ import print_function
@@ -75,7 +74,7 @@ def get_short_module_name(module_name, obj_name):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
- except ImportError:
+ except Exception: # libraries can throw all sorts of exceptions...
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
@@ -97,13 +96,22 @@ def identify_names(code):
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
- finder.visit(ast.parse(code))
+ try:
+ finder.visit(ast.parse(code))
+ except SyntaxError:
+ return {}
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
- module, attribute = full_name.rsplit('.', 1)
+ splitted = full_name.rsplit('.', 1)
+ if len(splitted) == 1:
+ # module without attribute. This is not useful for
+ # backreferences
+ continue
+
+ module, attribute = splitted
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
@@ -130,7 +138,7 @@ def scan_used_functions(example_file, gallery_conf):
THUMBNAIL_TEMPLATE = """
.. raw:: html
-    <div class="sphx-glr-thumbContainer" tooltip="{snippet}">
+    <div class="sphx-glr-thumbcontainer" tooltip="{snippet}">
.. only:: html
@@ -154,6 +162,10 @@ def _thumbnail_div(full_dir, fname, snippet, is_backref=False):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb',
'sphx_glr_%s_thumb.png' % fname[:-3])
+
+ # Inside rst files forward slash defines paths
+ thumb = thumb.replace(os.sep, "/")
+
ref_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
template = BACKREF_THUMBNAIL_TEMPLATE if is_backref else THUMBNAIL_TEMPLATE
@@ -164,10 +176,15 @@ def write_backreferences(seen_backrefs, gallery_conf,
target_dir, fname, snippet):
"""Writes down back reference files, which include a thumbnail list
of examples using a certain module"""
+ if gallery_conf['backreferences_dir'] is None:
+ return
+
example_file = os.path.join(target_dir, fname)
+ build_target_dir = os.path.relpath(target_dir, gallery_conf['src_dir'])
backrefs = scan_used_functions(example_file, gallery_conf)
for backref in backrefs:
- include_path = os.path.join(gallery_conf['mod_example_dir'],
+ include_path = os.path.join(gallery_conf['src_dir'],
+ gallery_conf['backreferences_dir'],
'%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
@@ -175,6 +192,6 @@ def write_backreferences(seen_backrefs, gallery_conf,
heading = '\n\nExamples using ``%s``' % backref
ex_file.write(heading + '\n')
ex_file.write('^' * len(heading) + '\n')
- ex_file.write(_thumbnail_div(target_dir, fname, snippet,
+ ex_file.write(_thumbnail_div(build_target_dir, fname, snippet,
is_backref=True))
seen_backrefs.add(backref)
diff --git a/doc/sphinxext/sphinx_gallery/docs_resolv.py b/doc/sphinxext/sphinx_gallery/docs_resolv.py
index fb596fdb1f..762298cbe2 100644
--- a/doc/sphinxext/sphinx_gallery/docs_resolv.py
+++ b/doc/sphinxext/sphinx_gallery/docs_resolv.py
@@ -1,8 +1,10 @@
# -*- coding: utf-8 -*-
# Author: Óscar Nájera
# License: 3-clause BSD
-###############################################################################
-# Documentation link resolver objects
+"""
+Link resolver objects
+=====================
+"""
from __future__ import print_function
import gzip
import os
@@ -10,21 +12,25 @@
import re
import shelve
import sys
+from distutils.version import LooseVersion
+
+import sphinx
+from sphinx.util.console import fuchsia
# Try Python 2 first, otherwise load from Python 3
try:
- from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
- from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
+from io import StringIO
+
def _get_data(url):
"""Helper function to get data over http or from a local file"""
@@ -232,22 +238,34 @@ def _get_link(self, cobj):
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
- fname = self._searchindex['filenames'][fname_idx] + '.html'
-
- if self._is_windows:
- fname = fname.replace('/', '\\')
- link = os.path.join(self.doc_url, fname)
- else:
- link = posixpath.join(self.doc_url, fname)
-
- if hasattr(link, 'decode'):
- link = link.decode('utf-8', 'replace')
-
- if link in self._page_cache:
- html = self._page_cache[link]
+ fname = self._searchindex['filenames'][fname_idx]
+ # In 1.5+ Sphinx seems to have changed from .rst.html to only
+ # .html extension in converted files. But URLs could be
+ # built with < 1.5 or >= 1.5 regardless of what we're currently
+ # building with, so let's just check both :(
+ fnames = [fname + '.html', os.path.splitext(fname)[0] + '.html']
+ for fname in fnames:
+ try:
+ if self._is_windows:
+ fname = fname.replace('/', '\\')
+ link = os.path.join(self.doc_url, fname)
+ else:
+ link = posixpath.join(self.doc_url, fname)
+
+ if hasattr(link, 'decode'):
+ link = link.decode('utf-8', 'replace')
+
+ if link in self._page_cache:
+ html = self._page_cache[link]
+ else:
+ html = get_data(link, self.gallery_dir)
+ self._page_cache[link] = html
+ except (HTTPError, URLError, IOError):
+ pass
+ else:
+ break
else:
- html = get_data(link, self.gallery_dir)
- self._page_cache[link] = html
+ raise
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
@@ -279,7 +297,7 @@ def resolve(self, cobj, this_url):
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
- cobi['name'] : function or class name (str)
+ cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
@@ -319,16 +337,17 @@ def _embed_code_links(app, gallery_conf, gallery_dir):
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
+ src_gallery_dir = os.path.join(app.builder.srcdir, gallery_dir)
for this_module, url in gallery_conf['reference_url'].items():
try:
if url is None:
doc_resolvers[this_module] = SphinxDocLinkResolver(
app.builder.outdir,
- gallery_dir,
+ src_gallery_dir,
relative=True)
else:
doc_resolvers[this_module] = SphinxDocLinkResolver(url,
- gallery_dir)
+ src_gallery_dir)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
@@ -345,64 +364,82 @@ def _embed_code_links(app, gallery_conf, gallery_dir):
gallery_dir))
# patterns for replacement
-    link_pattern = '<a href="%s">%s</a>'
+    link_pattern = ('<a href="%s" title="%s" class="sphx-glr-backref">%s</a>')
    orig_pattern = '<span class="n">%s</span>'
period = '.'
- for dirpath, _, filenames in os.walk(html_gallery_dir):
- for fname in filenames:
- print('\tprocessing: %s' % fname)
- full_fname = os.path.join(html_gallery_dir, dirpath, fname)
- subpath = dirpath[len(html_gallery_dir) + 1:]
- pickle_fname = os.path.join(gallery_dir, subpath,
- fname[:-5] + '_codeobj.pickle')
-
- if os.path.exists(pickle_fname):
- # we have a pickle file with the objects to embed links for
- with open(pickle_fname, 'rb') as fid:
- example_code_obj = pickle.load(fid)
- fid.close()
- str_repl = {}
- # generate replacement strings with the links
- for name, cobj in example_code_obj.items():
- this_module = cobj['module'].split('.')[0]
-
- if this_module not in doc_resolvers:
- continue
-
- try:
- link = doc_resolvers[this_module].resolve(cobj,
- full_fname)
- except (HTTPError, URLError) as e:
- print("The following error has occurred:\n")
- print(repr(e))
- continue
-
- if link is not None:
- parts = name.split('.')
- name_html = period.join(orig_pattern % part
- for part in parts)
- str_repl[name_html] = link_pattern % (link, name_html)
- # do the replacement in the html file
-
- # ensure greediness
- names = sorted(str_repl, key=len, reverse=True)
-            expr = re.compile(r'(?<!\.)\b' +
-                              '|'.join(re.escape(name)
-                                       for name in names))
-
-            def substitute_link(match):
-                return str_repl[match.group()]
-
-            if len(str_repl) > 0:
- with open(full_fname, 'rb') as fid:
- lines_in = fid.readlines()
- with open(full_fname, 'wb') as fid:
- for line in lines_in:
- line = line.decode('utf-8')
- line = expr.sub(substitute_link, line)
- fid.write(line.encode('utf-8'))
- print('[done]')
+ # This could be turned into a generator if necessary, but should be okay
+ flat = [[dirpath, filename]
+ for dirpath, _, filenames in os.walk(html_gallery_dir)
+ for filename in filenames]
+ if LooseVersion(sphinx.__version__) >= LooseVersion('1.6'):
+ # It will be removed once upgraded to new sphinx-gallery version
+ from sphinx.util import status_iterator
+ iterator = status_iterator(
+ flat, os.path.basename(html_gallery_dir), color='fuchsia',
+ length=len(flat), stringify_func=lambda x: os.path.basename(x[1]))
+ else:
+ iterator = app.status_iterator(
+ flat, os.path.basename(html_gallery_dir), colorfunc=fuchsia,
+ length=len(flat), stringify_func=lambda x: os.path.basename(x[1]))
+
+ for dirpath, fname in iterator:
+ full_fname = os.path.join(html_gallery_dir, dirpath, fname)
+ subpath = dirpath[len(html_gallery_dir) + 1:]
+ pickle_fname = os.path.join(src_gallery_dir, subpath,
+ fname[:-5] + '_codeobj.pickle')
+
+ if os.path.exists(pickle_fname):
+ # we have a pickle file with the objects to embed links for
+ with open(pickle_fname, 'rb') as fid:
+ example_code_obj = pickle.load(fid)
+ fid.close()
+ str_repl = {}
+ # generate replacement strings with the links
+ for name, cobj in example_code_obj.items():
+ this_module = cobj['module'].split('.')[0]
+
+ if this_module not in doc_resolvers:
+ continue
+
+ try:
+ link = doc_resolvers[this_module].resolve(cobj,
+ full_fname)
+ except (HTTPError, URLError) as e:
+ if isinstance(e, HTTPError):
+ extra = e.code
+ else:
+ extra = e.reason
+ print("\n\t\tError resolving %s.%s: %r (%s)"
+ % (cobj['module'], cobj['name'], e, extra))
+ continue
+
+ if link is not None:
+ parts = name.split('.')
+ name_html = period.join(orig_pattern % part
+ for part in parts)
+ full_function_name = '%s.%s' % (
+ cobj['module'], cobj['name'])
+ str_repl[name_html] = link_pattern % (
+ link, full_function_name, name_html)
+ # do the replacement in the html file
+
+ # ensure greediness
+ names = sorted(str_repl, key=len, reverse=True)
+ regex_str = '|'.join(re.escape(name) for name in names)
+ regex = re.compile(regex_str)
+
+ def substitute_link(match):
+ return str_repl[match.group()]
+
+ if len(str_repl) > 0:
+ with open(full_fname, 'rb') as fid:
+ lines_in = fid.readlines()
+ with open(full_fname, 'wb') as fid:
+ for line in lines_in:
+ line = line.decode('utf-8')
+ line = regex.sub(substitute_link, line)
+ fid.write(line.encode('utf-8'))
def embed_code_links(app, exception):
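The loop added above tries both filename conventions because Sphinx changed its generated page names around 1.5. A small sketch of the candidate list being built (the searchindex entry is illustrative):

    import os

    fname = 'auto_examples/plot_demo.rst'  # hypothetical searchindex entry
    fnames = [fname + '.html', os.path.splitext(fname)[0] + '.html']
    print(fnames)
    # ['auto_examples/plot_demo.rst.html', 'auto_examples/plot_demo.html']
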
diff --git a/doc/sphinxext/sphinx_gallery/downloads.py b/doc/sphinxext/sphinx_gallery/downloads.py
new file mode 100644
index 0000000000..6b5b3df17f
--- /dev/null
+++ b/doc/sphinxext/sphinx_gallery/downloads.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+r"""
+Utilities for downloadable items
+================================
+
+"""
+# Author: Óscar Nájera
+# License: 3-clause BSD
+
+from __future__ import absolute_import, division, print_function
+
+import os
+import zipfile
+
+CODE_DOWNLOAD = """
+\n.. container:: sphx-glr-footer
+
+\n .. container:: sphx-glr-download
+
+ :download:`Download Python source code: {0} <{0}>`\n
+
+\n .. container:: sphx-glr-download
+
+ :download:`Download Jupyter notebook: {1} <{1}>`\n"""
+
+CODE_ZIP_DOWNLOAD = """
+\n.. container:: sphx-glr-footer
+
+\n .. container:: sphx-glr-download
+
+    :download:`Download all examples in Python source code: {0} <{1}>`\n
+
+\n .. container:: sphx-glr-download
+
+    :download:`Download all examples in Jupyter notebooks: {2} <{3}>`\n"""
+
+
+def python_zip(file_list, gallery_path, extension='.py'):
+    """Stores all files in file_list into a zip file
+
+ Parameters
+ ----------
+ file_list : list of strings
+ Holds all the file names to be included in zip file
+ gallery_path : string
+ path to where the zipfile is stored
+ extension : str
+        '.py' or '.ipynb'. In order to deal with downloads of both Python
+        sources and Jupyter notebooks, the file extension of each file in
+        file_list will be removed and replaced with the value of this
+        variable while generating the zip file
+ Returns
+ -------
+ zipname : string
+ zip file name, written as `target_dir_{python,jupyter}.zip`
+ depending on the extension
+ """
+ zipname = os.path.basename(gallery_path)
+ zipname += '_python' if extension == '.py' else '_jupyter'
+ zipname = os.path.join(gallery_path, zipname + '.zip')
+
+ zipf = zipfile.ZipFile(zipname, mode='w')
+ for fname in file_list:
+ file_src = os.path.splitext(fname)[0] + extension
+ zipf.write(file_src, os.path.relpath(file_src, gallery_path))
+ zipf.close()
+
+ return zipname
+
+
+def list_downloadable_sources(target_dir):
+    """Returns a list of Python source files in target_dir
+
+ Parameters
+ ----------
+ target_dir : string
+        path to the directory where the Python source files are
+ Returns
+ -------
+ list
+ list of paths to all Python source files in `target_dir`
+ """
+ return [os.path.join(target_dir, fname)
+ for fname in os.listdir(target_dir)
+ if fname.endswith('.py')]
+
+
+def generate_zipfiles(gallery_dir):
+ """
+ Collects all Python source files and Jupyter notebooks in
+ gallery_dir and makes zipfiles of them
+
+ Parameters
+ ----------
+ gallery_dir : string
+ path of the gallery to collect downloadable sources
+
+    Returns
+    -------
+    download_rst : string
+        reStructuredText with download buttons linking to the generated files
+ """
+
+ listdir = list_downloadable_sources(gallery_dir)
+ for directory in sorted(os.listdir(gallery_dir)):
+ if os.path.isdir(os.path.join(gallery_dir, directory)):
+ target_dir = os.path.join(gallery_dir, directory)
+ listdir.extend(list_downloadable_sources(target_dir))
+
+ py_zipfile = python_zip(listdir, gallery_dir)
+ jy_zipfile = python_zip(listdir, gallery_dir, ".ipynb")
+
+ def rst_path(filepath):
+ return filepath.replace(os.sep, '/')
+
+ dw_rst = CODE_ZIP_DOWNLOAD.format(os.path.basename(py_zipfile),
+ rst_path(py_zipfile),
+ os.path.basename(jy_zipfile),
+ rst_path(jy_zipfile))
+ return dw_rst
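A usage sketch for the new module, assuming the vendored package is importable as `sphinx_gallery` and that `auto_examples` already holds the generated `.py`/`.ipynb` pairs:

    from sphinx_gallery.downloads import generate_zipfiles

    # Builds auto_examples_python.zip and auto_examples_jupyter.zip and
    # returns the RST download block referencing them.
    download_rst = generate_zipfiles('auto_examples')
    print(download_rst)
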
diff --git a/doc/sphinxext/sphinx_gallery/gen_gallery.py b/doc/sphinxext/sphinx_gallery/gen_gallery.py
index 8e58b62362..3cfb028576 100644
--- a/doc/sphinxext/sphinx_gallery/gen_gallery.py
+++ b/doc/sphinxext/sphinx_gallery/gen_gallery.py
@@ -2,7 +2,6 @@
# Author: Óscar Nájera
# License: 3-clause BSD
"""
-========================
Sphinx-Gallery Generator
========================
@@ -12,13 +11,39 @@
from __future__ import division, print_function, absolute_import
+import copy
+import re
import os
+
from . import glr_path_static
-from .gen_rst import generate_dir_rst
+from .gen_rst import generate_dir_rst, SPHX_GLR_SIG
from .docs_resolv import embed_code_links
+from .downloads import generate_zipfiles
+
+try:
+ FileNotFoundError
+except NameError:
+ # Python2
+ FileNotFoundError = IOError
+
+DEFAULT_GALLERY_CONF = {
+ 'filename_pattern': re.escape(os.sep) + 'plot',
+ 'examples_dirs': os.path.join('..', 'examples'),
+ 'gallery_dirs': 'auto_examples',
+ 'backreferences_dir': None,
+ 'doc_module': (),
+ 'reference_url': {},
+ # build options
+ 'plot_gallery': True,
+ 'download_all_examples': True,
+ 'abort_on_example_error': False,
+ 'failing_examples': {},
+ 'expected_failing_examples': set(),
+}
def clean_gallery_out(build_dir):
+ """Deletes images under the sphx_glr namespace in the build directory"""
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
@@ -29,10 +54,11 @@ def clean_gallery_out(build_dir):
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
- # image build directory each time the docs are built. If sphinx
- # changes their layout between versions, this will not work (though
- # it should probably not cause a crash). Tested successfully
- # on Sphinx 1.0.7
+ # image build directory from gallery images each time the docs are built.
+ # If sphinx changes their layout between versions, this will not
+ # work (though it should probably not cause a crash).
+ # Tested successfully on Sphinx 1.0.7
+
build_image_dir = os.path.join(build_dir, '_images')
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
@@ -41,28 +67,66 @@ def clean_gallery_out(build_dir):
os.remove(os.path.join(build_image_dir, filename))
-def generate_gallery_rst(app):
- """Generate the Main examples gallery reStructuredText
-
- Start the sphinx-gallery configuration and recursively scan the examples
- directories in order to populate the examples gallery
- """
+def parse_config(app):
+ """Process the Sphinx Gallery configuration"""
+ # TODO: Test this behavior.
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
+ gallery_conf = copy.deepcopy(DEFAULT_GALLERY_CONF)
gallery_conf.update(app.config.sphinx_gallery_conf)
+ gallery_conf.update(plot_gallery=plot_gallery)
+ gallery_conf.update(
+ abort_on_example_error=app.builder.config.abort_on_example_error)
+ gallery_conf['src_dir'] = app.builder.srcdir
+
+ backreferences_warning = """\n========
+Sphinx-Gallery now requires you to set the configuration variable
+'backreferences_dir' in your config to activate the
+backreferences. That is, mini-galleries clustered by the functions used
+in the example scripts. Have a look at them in sphinx-gallery
+
+https://sphinx-gallery.readthedocs.io/en/stable/index.html#examples-using-numpy-linspace
+"""
+
+ if gallery_conf.get("mod_example_dir", False):
+        update_msg = """\nFor a quick fix, try replacing 'mod_example_dir'
+with 'backreferences_dir' in your conf.py file. If that does not solve the
+present issue, read carefully how to update in the online documentation
+
+https://sphinx-gallery.readthedocs.io/en/latest/advanced_configuration.html#references-to-examples"""
+
+ gallery_conf['backreferences_dir'] = gallery_conf['mod_example_dir']
+ app.warn("Old configuration for backreferences detected \n"
+ "using the configuration variable `mod_example_dir`\n"
+ + backreferences_warning
+ + update_msg, prefix="DeprecationWarning: ")
+
+ elif gallery_conf['backreferences_dir'] is None:
+ no_care_msg = """
+If you don't care about this feature, set in your conf.py
+'backreferences_dir': False\n"""
+
+ app.warn(backreferences_warning + no_care_msg)
+
+ gallery_conf['backreferences_dir'] = os.path.join(
+ 'modules', 'generated')
+ app.warn("using old default 'backreferences_dir':'{}'.\n"
+ " This will be disabled in future releases\n".format(
+ gallery_conf['backreferences_dir']),
+ prefix="DeprecationWarning: ")
# this assures I can call the config in other places
app.config.sphinx_gallery_conf = gallery_conf
app.config.html_static_path.append(glr_path_static())
- if not plot_gallery:
- return
+ return gallery_conf
- clean_gallery_out(app.builder.outdir)
+def _prepare_sphx_glr_dirs(gallery_conf, srcdir):
+    """Creates necessary folders for sphinx_gallery files"""
examples_dirs = gallery_conf['examples_dirs']
gallery_dirs = gallery_conf['gallery_dirs']
@@ -71,56 +135,171 @@ def generate_gallery_rst(app):
if not isinstance(gallery_dirs, list):
gallery_dirs = [gallery_dirs]
- mod_examples_dir = os.path.relpath(gallery_conf['mod_example_dir'],
- app.builder.srcdir)
+ if bool(gallery_conf['backreferences_dir']):
+ backreferences_dir = os.path.join(
+ srcdir, gallery_conf['backreferences_dir'])
+ if not os.path.exists(backreferences_dir):
+ os.makedirs(backreferences_dir)
+
+ return examples_dirs, gallery_dirs
+
+
+def generate_gallery_rst(app):
+ """Generate the Main examples gallery reStructuredText
+
+ Start the sphinx-gallery configuration and recursively scan the examples
+ directories in order to populate the examples gallery
+ """
+ print('Generating gallery')
+ gallery_conf = parse_config(app)
+
+ clean_gallery_out(app.builder.outdir)
+
seen_backrefs = set()
+ computation_times = []
+ examples_dirs, gallery_dirs = _prepare_sphx_glr_dirs(gallery_conf,
+ app.builder.srcdir)
+
for examples_dir, gallery_dir in zip(examples_dirs, gallery_dirs):
- examples_dir = os.path.relpath(examples_dir,
- app.builder.srcdir)
- gallery_dir = os.path.relpath(gallery_dir,
- app.builder.srcdir)
+ examples_dir = os.path.join(app.builder.srcdir, examples_dir)
+ gallery_dir = os.path.join(app.builder.srcdir, gallery_dir)
- for workdir in [examples_dir, gallery_dir, mod_examples_dir]:
+ for workdir in [examples_dir, gallery_dir]:
if not os.path.exists(workdir):
os.makedirs(workdir)
+ # Here we don't use an os.walk, but we recurse only twice: flat is
+ # better than nested.
+ this_fhindex, this_computation_times = generate_dir_rst(
+ examples_dir, gallery_dir, gallery_conf, seen_backrefs)
+ if this_fhindex == "":
+ raise FileNotFoundError("Main example directory {0} does not "
+ "have a README.txt file. Please write "
+ "one to introduce your gallery."
+ .format(examples_dir))
+
+ computation_times += this_computation_times
# we create an index.rst with all examples
fhindex = open(os.path.join(gallery_dir, 'index.rst'), 'w')
- # Here we don't use an os.walk, but we recurse only twice: flat is
- # better than nested.
- fhindex.write(generate_dir_rst(examples_dir, gallery_dir, gallery_conf,
- seen_backrefs))
+ # :orphan: to suppress "not included in TOCTREE" sphinx warnings
+ fhindex.write(":orphan:\n\n" + this_fhindex)
for directory in sorted(os.listdir(examples_dir)):
if os.path.isdir(os.path.join(examples_dir, directory)):
src_dir = os.path.join(examples_dir, directory)
target_dir = os.path.join(gallery_dir, directory)
- fhindex.write(generate_dir_rst(src_dir, target_dir,
- gallery_conf,
- seen_backrefs))
+ this_fhindex, this_computation_times = generate_dir_rst(src_dir, target_dir, gallery_conf,
+ seen_backrefs)
+ fhindex.write(this_fhindex)
+ computation_times += this_computation_times
+
+ if gallery_conf['download_all_examples']:
+ download_fhindex = generate_zipfiles(gallery_dir)
+ fhindex.write(download_fhindex)
+
+ fhindex.write(SPHX_GLR_SIG)
fhindex.flush()
+ if gallery_conf['plot_gallery']:
+ print("Computation time summary:")
+ for time_elapsed, fname in sorted(computation_times)[::-1]:
+ if time_elapsed is not None:
+ print("\t- %s : %.2g sec" % (fname, time_elapsed))
+ else:
+ print("\t- %s : not run" % fname)
-gallery_conf = {
- 'examples_dirs': '../examples',
- 'gallery_dirs': 'auto_examples',
- 'mod_example_dir': 'modules/generated',
- 'doc_module': (),
- 'reference_url': {},
-}
+
+def touch_empty_backreferences(app, what, name, obj, options, lines):
+ """Generate empty back-reference example files
+
+ This avoids inclusion errors/warnings if there are no gallery
+ examples for a class / module that is being parsed by autodoc"""
+
+ if not bool(app.config.sphinx_gallery_conf['backreferences_dir']):
+ return
+
+ examples_path = os.path.join(app.srcdir,
+ app.config.sphinx_gallery_conf[
+ "backreferences_dir"],
+ "%s.examples" % name)
+
+ if not os.path.exists(examples_path):
+ # touch file
+ open(examples_path, 'w').close()
+
+
+def summarize_failing_examples(app, exception):
+    """Collects the list of failing examples during the build and prints
+    them with their tracebacks
+
+    Raises ValueError if there were failing examples
+ """
+ if exception is not None:
+ return
+
+    # Under no-plot, examples are not run, so there is nothing to summarize
+ if not app.config.sphinx_gallery_conf['plot_gallery']:
+ return
+
+ gallery_conf = app.config.sphinx_gallery_conf
+ failing_examples = set(gallery_conf['failing_examples'].keys())
+ expected_failing_examples = set([os.path.normpath(os.path.join(app.srcdir, path))
+ for path in
+ gallery_conf['expected_failing_examples']])
+
+ examples_expected_to_fail = failing_examples.intersection(
+ expected_failing_examples)
+ expected_fail_msg = []
+ if examples_expected_to_fail:
+ expected_fail_msg.append("\n\nExamples failing as expected:")
+ for fail_example in examples_expected_to_fail:
+ expected_fail_msg.append(fail_example + ' failed leaving traceback:\n' +
+ gallery_conf['failing_examples'][fail_example] + '\n')
+ print("\n".join(expected_fail_msg))
+
+ examples_not_expected_to_fail = failing_examples.difference(
+ expected_failing_examples)
+ fail_msgs = []
+ if examples_not_expected_to_fail:
+ fail_msgs.append("Unexpected failing examples:")
+ for fail_example in examples_not_expected_to_fail:
+ fail_msgs.append(fail_example + ' failed leaving traceback:\n' +
+ gallery_conf['failing_examples'][fail_example] + '\n')
+
+ examples_not_expected_to_pass = expected_failing_examples.difference(
+ failing_examples)
+ if examples_not_expected_to_pass:
+        fail_msgs.append("Examples expected to fail, but not failing:\n" +
+                         "Please remove these examples from\n" +
+                         "sphinx_gallery_conf['expected_failing_examples']\n" +
+                         "in your conf.py file:\n" +
+                         "\n".join(examples_not_expected_to_pass))
+
+ if fail_msgs:
+ raise ValueError("Here is a summary of the problems encountered when "
+ "running the examples\n\n" + "\n".join(fail_msgs) +
+ "\n" + "-" * 79)
+
+
+def get_default_config_value(key):
+ def default_getter(conf):
+ return conf['sphinx_gallery_conf'].get(key, DEFAULT_GALLERY_CONF[key])
+ return default_getter
def setup(app):
"""Setup sphinx-gallery sphinx extension"""
- app.add_config_value('plot_gallery', True, 'html')
- app.add_config_value('sphinx_gallery_conf', gallery_conf, 'html')
+ app.add_config_value('sphinx_gallery_conf', DEFAULT_GALLERY_CONF, 'html')
+ for key in ['plot_gallery', 'abort_on_example_error']:
+ app.add_config_value(key, get_default_config_value(key), 'html')
+
app.add_stylesheet('gallery.css')
+ # Sphinx < 1.6 calls it `_extensions`, >= 1.6 is `extensions`.
+ extensions_attr = '_extensions' if hasattr(app, '_extensions') else 'extensions'
+ if 'sphinx.ext.autodoc' in getattr(app, extensions_attr):
+ app.connect('autodoc-process-docstring', touch_empty_backreferences)
+
app.connect('builder-inited', generate_gallery_rst)
+    app.connect('build-finished', summarize_failing_examples)
app.connect('build-finished', embed_code_links)
-
-
-def setup_module():
- # HACK: Stop nosetests running setup() above
- pass
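To avoid the deprecation warnings introduced above, a project's conf.py would set `backreferences_dir` instead of `mod_example_dir`. A hedged sketch with illustrative values (the `doc_module` and `reference_url` entries are examples, not prescribed by this diff):

    import os

    sphinx_gallery_conf = {
        'examples_dirs': '../examples',
        'gallery_dirs': 'auto_examples',
        # replaces the old 'mod_example_dir' key:
        'backreferences_dir': os.path.join('modules', 'generated'),
        'doc_module': ('nilearn',),          # illustrative value
        'reference_url': {'nilearn': None},  # illustrative value
        'download_all_examples': True,
    }
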
diff --git a/doc/sphinxext/sphinx_gallery/gen_rst.py b/doc/sphinxext/sphinx_gallery/gen_rst.py
index 48a04d1e82..c2a0b95545 100644
--- a/doc/sphinxext/sphinx_gallery/gen_rst.py
+++ b/doc/sphinxext/sphinx_gallery/gen_rst.py
@@ -2,7 +2,6 @@
# Author: Óscar Nájera
# License: 3-clause BSD
"""
-==================
RST file generator
==================
@@ -12,44 +11,22 @@
Files that generate images should start with 'plot'
"""
+# Don't use unicode_literals here (be explicit with u"..." instead) otherwise
+# tricky errors come up with exec(code_blocks, ...) calls
from __future__ import division, print_function, absolute_import
from time import time
-import ast
+import codecs
+import hashlib
import os
import re
import shutil
-import traceback
-import sys
import subprocess
+import sys
+import traceback
import warnings
-from textwrap import dedent
-from . import glr_path_static
-from .backreferences import write_backreferences, _thumbnail_div
# Try Python 2 first, otherwise load from Python 3
-try:
- from StringIO import StringIO
-except ImportError:
- from io import StringIO
-
-try:
- basestring
-except NameError:
- basestring = str
-
-try:
- # make sure that the Agg backend is set before importing any
- # matplotlib
- import matplotlib
- matplotlib.use('Agg')
- import matplotlib.pyplot as plt
-except ImportError:
- # this script can be imported by nosetest to find tests to run: we should
- # not impose the matplotlib requirement in that case.
- pass
-
-
try:
# textwrap indent only exists in python 3
from textwrap import indent
@@ -71,6 +48,44 @@ def prefixed_lines():
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
+from io import StringIO
+
+# make sure that the Agg backend is set before importing any
+# matplotlib
+import matplotlib
+matplotlib.use('agg')
+matplotlib_backend = matplotlib.get_backend()
+
+if matplotlib_backend != 'agg':
+ mpl_backend_msg = (
+ "Sphinx-Gallery relies on the matplotlib 'agg' backend to "
+ "render figures and write them to files. You are "
+ "currently using the {} backend. Sphinx-Gallery will "
+ "terminate the build now, because changing backends is "
+ "not well supported by matplotlib. We advise you to move "
+ "sphinx_gallery imports before any matplotlib-dependent "
+ "import. Moving sphinx_gallery imports at the top of "
+ "your conf.py file should fix this issue")
+
+ raise ValueError(mpl_backend_msg.format(matplotlib_backend))
+
+import matplotlib.pyplot as plt
+
+from . import glr_path_static
+from .backreferences import write_backreferences, _thumbnail_div
+from .downloads import CODE_DOWNLOAD
+from .py_source_parser import (get_docstring_and_rest,
+ split_code_and_text_blocks)
+
+from .notebook import jupyter_notebook, save_notebook
+
+try:
+ basestring
+except NameError:
+ basestring = str
+ unicode = str
+
+
###############################################################################
@@ -89,14 +104,21 @@ def flush(self):
self.file1.flush()
self.file2.flush()
+    # When called from a local terminal, seaborn needs this in Python 3
+    def isatty(self):
+        return self.file1.isatty()
-###############################################################################
-CODE_DOWNLOAD = """**Total running time of the script:**
-({0:.0f} minutes {1:.3f} seconds)\n\n
-\n.. container:: sphx-glr-download
- **Download Python source code:** :download:`{2} <{2}>`\n"""
+class MixedEncodingStringIO(StringIO):
+ """Helper when both ASCII and unicode strings will be written"""
+
+ def write(self, data):
+ if not isinstance(data, unicode):
+ data = data.decode('utf-8')
+ StringIO.write(self, data)
+
+###############################################################################
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
@@ -117,98 +139,43 @@ def flush(self):
:align: center
"""
-CODE_OUTPUT = """.. rst-class:: sphx-glr-script-out
- **Output**:\n
+# This one could contain unicode
+CODE_OUTPUT = u""".. rst-class:: sphx-glr-script-out
- ::
+ Out::
{0}\n"""
-def get_docstring_and_rest(filename):
- """Separate `filename` content between docstring and the rest
-
- Strongly inspired from ast.get_docstring.
-
- Returns
- -------
- docstring: str
- docstring of `filename`
- rest: str
- `filename` content without the docstring
- """
- with open(filename) as f:
- content = f.read()
-
- node = ast.parse(content)
- if not isinstance(node, ast.Module):
- raise TypeError("This function only supports modules. "
- "You provided {0}".format(node.__class__.__name__))
- if node.body and isinstance(node.body[0], ast.Expr) and \
- isinstance(node.body[0].value, ast.Str):
- docstring_node = node.body[0]
- docstring = docstring_node.value.s
- # This get the content of the file after the docstring last line
- # Note: 'maxsplit' argument is not a keyword argument in python2
- rest = content.split('\n', docstring_node.lineno)[-1]
- return docstring, rest
- else:
- raise ValueError(('Could not find docstring in file "{0}". '
- 'A docstring is required by sphinx-gallery')
- .format(filename))
-
-
-def split_code_and_text_blocks(source_file):
- """Return list with source file separated into code and text blocks.
-
- Returns
- -------
- blocks : list of (label, content)
- List where each element is a tuple with the label ('text' or 'code'),
- and content string of block.
- """
- docstring, rest_of_content = get_docstring_and_rest(source_file)
-
- blocks = [('text', docstring)]
-
- pattern = re.compile(
- r'(?P^#{20,}.*)\s(?P(?:^#.*\s)*)',
- flags=re.M)
+SPHX_GLR_SIG = """\n.. rst-class:: sphx-glr-signature
- pos_so_far = 0
- for match in re.finditer(pattern, rest_of_content):
- match_start_pos, match_end_pos = match.span()
- code_block_content = rest_of_content[pos_so_far:match_start_pos]
- text_content = match.group('text_content')
- sub_pat = re.compile('^#', flags=re.M)
- text_block_content = dedent(re.sub(sub_pat, '', text_content))
- if code_block_content.strip():
- blocks.append(('code', code_block_content))
- if text_block_content.strip():
- blocks.append(('text', text_block_content))
- pos_so_far = match_end_pos
+    `Generated by Sphinx-Gallery <https://sphinx-gallery.readthedocs.io>`_\n"""
- remaining_content = rest_of_content[pos_so_far:]
- if remaining_content.strip():
- blocks.append(('code', remaining_content))
- return blocks
-
-
-def codestr2rst(codestr):
+def codestr2rst(codestr, lang='python'):
"""Return reStructuredText code block from code string"""
- code_directive = "\n.. code-block:: python\n\n"
+ code_directive = "\n.. code-block:: {0}\n\n".format(lang)
indented_block = indent(codestr, ' ' * 4)
return code_directive + indented_block
-def text2string(content):
- """Returns a string without the extra triple quotes"""
- try:
- return ast.literal_eval(content) + '\n'
- except Exception:
- return content
+def extract_thumbnail_number(text):
+ """ Pull out the thumbnail image number specified in the docstring. """
+
+ # check whether the user has specified a specific thumbnail image
+ pattr = re.compile(
+ r"^\s*#\s*sphinx_gallery_thumbnail_number\s*=\s*([0-9]+)\s*$",
+ flags=re.MULTILINE)
+ match = pattr.search(text)
+
+ if match is None:
+ # by default, use the first figure created
+ thumbnail_number = 1
+ else:
+ thumbnail_number = int(match.groups()[0])
+
+ return thumbnail_number
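An example script opts into a specific thumbnail with a structured comment that `extract_thumbnail_number` searches for. A self-contained sketch using the exact regex above (the script content is illustrative):

    import re

    docstring_rest = "# sphinx_gallery_thumbnail_number = 2\nplt.plot(x)"
    pattr = re.compile(
        r"^\s*#\s*sphinx_gallery_thumbnail_number\s*=\s*([0-9]+)\s*$",
        flags=re.MULTILINE)
    match = pattr.search(docstring_rest)
    print(int(match.groups()[0]) if match else 1)  # 2
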
def extract_intro(filename):
@@ -231,14 +198,29 @@ def extract_intro(filename):
return first_paragraph
-def _plots_are_current(src_file, image_file):
- """Test existence of image file and later touch time to source script"""
+def get_md5sum(src_file):
+ """Returns md5sum of file"""
+
+ with open(src_file, 'rb') as src_data:
+ src_content = src_data.read()
+
+ src_md5 = hashlib.md5(src_content).hexdigest()
+ return src_md5
+
+
+def md5sum_is_current(src_file):
+ """Checks whether src_file has the same md5 hash as the one on disk"""
+
+ src_md5 = get_md5sum(src_file)
+
+ src_md5_file = src_file + '.md5'
+ if os.path.exists(src_md5_file):
+ with open(src_md5_file, 'r') as file_checksum:
+ ref_md5 = file_checksum.read()
+
+ return src_md5 == ref_md5
- first_image_file = image_file.format(1)
- needs_replot = (
- not os.path.exists(first_image_file) or
- os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime)
- return not needs_replot
+ return False
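The md5 sidecar replaces the old mtime comparison. A runnable sketch of the round trip (demo.py is created here purely for illustration):

    import hashlib

    with open('demo.py', 'w') as f:
        f.write('print("hello gallery")\n')

    def get_md5sum(src_file):
        with open(src_file, 'rb') as src_data:
            return hashlib.md5(src_data.read()).hexdigest()

    # gen_rst writes this sidecar after a successful run...
    with open('demo.py.md5', 'w') as checksum:
        checksum.write(get_md5sum('demo.py'))

    # ...and a later build compares against it to skip re-execution.
    with open('demo.py.md5') as checksum:
        print(get_md5sum('demo.py') == checksum.read())  # True
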
def save_figures(image_path, fig_count, gallery_conf):
@@ -250,18 +232,22 @@ def save_figures(image_path, fig_count, gallery_conf):
Path where plots are saved (format string which accepts figure number)
fig_count : int
Previous figure number count. Figure number add from this number
+ gallery_conf : dict
+ Contains the configuration of Sphinx-Gallery
Returns
-------
- list of strings containing the full path to each figure
+ images_rst : str
+ rst code to embed the images in the document
+ fig_num : int
+ number of figures saved
"""
figure_list = []
- fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
- for fig_mngr in fig_managers:
+ for fig_num in plt.get_fignums():
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
- fig = plt.figure(fig_mngr.num)
+ fig = plt.figure(fig_num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
@@ -270,16 +256,16 @@ def save_figures(image_path, fig_count, gallery_conf):
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
- current_fig = image_path.format(fig_count + fig_mngr.num)
+ current_fig = image_path.format(fig_count + fig_num)
fig.savefig(current_fig, **kwargs)
figure_list.append(current_fig)
if gallery_conf.get('find_mayavi_figures', False):
from mayavi import mlab
e = mlab.get_engine()
- last_matplotlib_fig_num = len(figure_list)
+ last_matplotlib_fig_num = fig_count + len(figure_list)
total_fig_num = last_matplotlib_fig_num + len(e.scenes)
- mayavi_fig_nums = range(last_matplotlib_fig_num, total_fig_num)
+ mayavi_fig_nums = range(last_matplotlib_fig_num + 1, total_fig_num + 1)
for scene, mayavi_fig_num in zip(e.scenes, mayavi_fig_nums):
current_fig = image_path.format(mayavi_fig_num)
@@ -289,7 +275,43 @@ def save_figures(image_path, fig_count, gallery_conf):
figure_list.append(current_fig)
mlab.close(all=True)
- return figure_list
+ return figure_rst(figure_list, gallery_conf['src_dir'])
+
+
+def figure_rst(figure_list, sources_dir):
+ """Given a list of paths to figures generate the corresponding rst
+
+ Depending on whether we have one or more figures, we use a
+ single rst call to 'image' or a horizontal list.
+
+ Parameters
+ ----------
+ figure_list : list of str
+ Strings are the figures' absolute paths
+ sources_dir : str
+ absolute path of Sphinx documentation sources
+
+ Returns
+ -------
+ images_rst : str
+ rst code to embed the images in the document
+ fig_num : int
+ number of figures saved
+ """
+
+ figure_paths = [os.path.relpath(figure_path, sources_dir)
+ .replace(os.sep, '/').lstrip('/')
+ for figure_path in figure_list]
+ images_rst = ""
+ if len(figure_paths) == 1:
+ figure_name = figure_paths[0]
+ images_rst = SINGLE_IMAGE % figure_name
+ elif len(figure_paths) > 1:
+ images_rst = HLIST_HEADER
+ for figure_name in figure_paths:
+ images_rst += HLIST_IMAGE_TEMPLATE % figure_name
+
+ return images_rst, len(figure_list)
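The path normalization inside `figure_rst` can be seen in isolation; with an illustrative absolute figure path and sources dir (POSIX paths assumed):

    import os

    figure_list = ['/src/auto_examples/images/sphx_glr_demo_001.png']
    sources_dir = '/src'
    figure_paths = [os.path.relpath(p, sources_dir)
                    .replace(os.sep, '/').lstrip('/')
                    for p in figure_list]
    print(figure_paths)  # ['auto_examples/images/sphx_glr_demo_001.png']
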
def scale_image(in_fname, out_fname, max_width, max_height):
@@ -337,18 +359,28 @@ def scale_image(in_fname, out_fname, max_width, max_height):
generated images')
-def save_thumbnail(image_path, base_image_name, gallery_conf):
+def save_thumbnail(image_path_template, src_file, gallery_conf):
"""Save the thumbnail image"""
- first_image_file = image_path.format(1)
- thumb_dir = os.path.join(os.path.dirname(first_image_file), 'thumb')
+ # read specification of the figure to display as thumbnail from main text
+ _, content = get_docstring_and_rest(src_file)
+ thumbnail_number = extract_thumbnail_number(content)
+ thumbnail_image_path = image_path_template.format(thumbnail_number)
+
+ thumb_dir = os.path.join(os.path.dirname(thumbnail_image_path), 'thumb')
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
+ base_image_name = os.path.splitext(os.path.basename(src_file))[0]
thumb_file = os.path.join(thumb_dir,
'sphx_glr_%s_thumb.png' % base_image_name)
- if os.path.exists(first_image_file):
- scale_image(first_image_file, thumb_file, 400, 280)
+ if src_file in gallery_conf['failing_examples']:
+ broken_img = os.path.join(glr_path_static(), 'broken_example.png')
+ scale_image(broken_img, thumb_file, 200, 140)
+
+ elif os.path.exists(thumbnail_image_path):
+ scale_image(thumbnail_image_path, thumb_file, 400, 280)
+
elif not os.path.exists(thumb_file):
# create something to replace the thumbnail
default_thumb_file = os.path.join(glr_path_static(), 'no_image.png')
@@ -365,29 +397,38 @@ def generate_dir_rst(src_dir, target_dir, gallery_conf, seen_backrefs):
src_dir)
print('Skipping this directory')
print(80 * '_')
- return "" # because string is an expected return type
+        return "", []  # a string and a list are the expected return types
+
+ with open(os.path.join(src_dir, 'README.txt')) as fid:
+ fhindex = fid.read()
+ # Add empty lines to avoid bug in issue #165
+ fhindex += "\n\n"
- fhindex = open(os.path.join(src_dir, 'README.txt')).read()
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = [fname for fname in sorted(os.listdir(src_dir))
if fname.endswith('.py')]
entries_text = []
+ computation_times = []
+ build_target_dir = os.path.relpath(target_dir, gallery_conf['src_dir'])
for fname in sorted_listdir:
- amount_of_code = generate_file_rst(fname, target_dir, src_dir,
- gallery_conf)
+ amount_of_code, time_elapsed = \
+ generate_file_rst(fname, target_dir, src_dir, gallery_conf)
+ computation_times.append((time_elapsed, fname))
new_fname = os.path.join(src_dir, fname)
intro = extract_intro(new_fname)
- write_backreferences(seen_backrefs, gallery_conf,
- target_dir, fname, intro)
- this_entry = _thumbnail_div(target_dir, fname, intro) + """
+ this_entry = _thumbnail_div(build_target_dir, fname, intro) + """
.. toctree::
:hidden:
- /%s/%s\n""" % (target_dir, fname[:-3])
+ /%s\n""" % os.path.join(build_target_dir, fname[:-3]).replace(os.sep, '/')
entries_text.append((amount_of_code, this_entry))
+ if gallery_conf['backreferences_dir']:
+ write_backreferences(seen_backrefs, gallery_conf,
+ target_dir, fname, intro)
+
# sort to have the smallest entries in the beginning
entries_text.sort()
@@ -398,80 +439,118 @@ def generate_dir_rst(src_dir, target_dir, gallery_conf, seen_backrefs):
fhindex += """.. raw:: html\n
    <div style='clear:both'></div>\n\n"""
- return fhindex
+ return fhindex, computation_times
-def execute_script(code_block, example_globals, image_path, fig_count,
- src_file, gallery_conf):
+def execute_code_block(code_block, example_globals,
+ block_vars, gallery_conf):
"""Executes the code block of the example file"""
time_elapsed = 0
stdout = ''
- # We need to execute the code
- print('plotting code blocks in %s' % src_file)
+ # If example is not suitable to run, skip executing its blocks
+ if not block_vars['execute_script']:
+ return stdout, time_elapsed
plt.close('all')
cwd = os.getcwd()
# Redirect output to stdout and
orig_stdout = sys.stdout
+ src_file = block_vars['src_file']
try:
# First cd in the original example dir, so that any file
# created by the example get created in this directory
os.chdir(os.path.dirname(src_file))
- my_buffer = StringIO()
+ my_buffer = MixedEncodingStringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
t_start = time()
+ # don't use unicode_literals at the top of this file or you get
+ # nasty errors here on Py2.7
exec(code_block, example_globals)
time_elapsed = time() - t_start
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue().strip().expandtabs()
+ # raise RuntimeError
if my_stdout:
- stdout = CODE_OUTPUT.format(indent(my_stdout, ' ' * 4))
+ stdout = CODE_OUTPUT.format(indent(my_stdout, u' ' * 4))
os.chdir(cwd)
- figure_list = save_figures(image_path, fig_count, gallery_conf)
-
- # Depending on whether we have one or more figures, we're using a
- # horizontal list or a single rst call to 'image'.
- if len(figure_list) == 1:
- figure_name = figure_list[0]
- image_list = SINGLE_IMAGE % figure_name.lstrip('/')
- else:
- image_list = HLIST_HEADER
- for figure_name in figure_list:
- image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
+ images_rst, fig_num = save_figures(block_vars['image_path'],
+ block_vars['fig_count'], gallery_conf)
except Exception:
- figure_list = []
- image_list = '%s is not compiling:' % src_file
- print(80 * '_')
- print(image_list)
- traceback.print_exc()
- print(80 * '_')
+ formatted_exception = traceback.format_exc()
+
+ fail_example_warning = 80 * '_' + '\n' + \
+ '%s failed to execute correctly:' % src_file + \
+ formatted_exception + 80 * '_' + '\n'
+ warnings.warn(fail_example_warning)
+
+ fig_num = 0
+ images_rst = codestr2rst(formatted_exception, lang='pytb')
+
+ # Breaks build on first example error
+ # XXX This check can break during testing e.g. if you uncomment the
+ # `raise RuntimeError` by the `my_stdout` call, maybe use `.get()`?
+ if gallery_conf['abort_on_example_error']:
+ raise
+ # Stores failing file
+ gallery_conf['failing_examples'][src_file] = formatted_exception
+ block_vars['execute_script'] = False
+
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
- print(" - time elapsed : %.2g sec" % time_elapsed)
- code_output = "\n{0}\n\n{1}\n\n".format(image_list, stdout)
+ code_output = u"\n{0}\n\n{1}\n\n".format(images_rst, stdout)
+ block_vars['fig_count'] += fig_num
+
+ return code_output, time_elapsed
+
+
+def clean_modules():
+    """Remove, i.e. "unload", seaborn from the namespace
+
+    After a script is executed it can load a variety of settings that one
+    does not want to influence other examples in the gallery."""
+
+    # Horrible code to 'unload' seaborn, so that it resets
+    # its defaults the next time it is loaded
+ # Python does not support unloading of modules
+ # https://bugs.python.org/issue9072
+ for module in list(sys.modules.keys()):
+ if 'seaborn' in module:
+ del sys.modules[module]
- return code_output, time_elapsed, fig_count + len(figure_list)
+ # Reset Matplotlib to default
+ plt.rcdefaults()
def generate_file_rst(fname, target_dir, src_dir, gallery_conf):
- """ Generate the rst file for a given example.
+ """Generate the rst file for a given example.
- Returns the amout of code (in characters) of the corresponding
- files.
+ Returns
+ -------
+ amount_of_code : int
+ character count of the corresponding python script in file
+        character count of the code in the corresponding Python script
+ seconds required to run the script
"""
- src_file = os.path.join(src_dir, fname)
+ src_file = os.path.normpath(os.path.join(src_dir, fname))
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
+ script_blocks = split_code_and_text_blocks(src_file)
+ amount_of_code = sum([len(bcontent)
+ for blabel, bcontent in script_blocks
+ if blabel == 'code'])
+
+ if md5sum_is_current(example_file):
+ return amount_of_code, 0
image_dir = os.path.join(target_dir, 'images')
if not os.path.exists(image_dir):
@@ -479,66 +558,84 @@ def generate_file_rst(fname, target_dir, src_dir, gallery_conf):
base_image_name = os.path.splitext(fname)[0]
image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png'
- image_path = os.path.join(image_dir, image_fname)
-
- script_blocks = split_code_and_text_blocks(example_file)
+ build_image_dir = os.path.relpath(image_dir, gallery_conf['src_dir'])
+ image_path_template = os.path.join(image_dir, image_fname)
- if _plots_are_current(src_file, image_path):
- amount_of_code = sum([len(bcontent)
- for blabel, bcontent in script_blocks
- if blabel == 'code'])
- return amount_of_code
-
- time_elapsed = 0
-
- ref_fname = example_file.replace(os.path.sep, '_')
+ ref_fname = os.path.relpath(example_file, gallery_conf['src_dir'])
+ ref_fname = ref_fname.replace(os.path.sep, '_')
example_rst = """\n\n.. _sphx_glr_{0}:\n\n""".format(ref_fname)
- if not fname.startswith('plot'):
- convert_func = dict(code=codestr2rst, text=text2string)
- for blabel, bcontent in script_blocks:
- example_rst += convert_func[blabel](bcontent) + '\n'
- else:
+ filename_pattern = gallery_conf.get('filename_pattern')
+ execute_script = re.search(filename_pattern, src_file) and gallery_conf[
+ 'plot_gallery']
+ example_globals = {
# A lot of examples contain 'print(__doc__)' for example in
# scikit-learn so that running the example prints some useful
# information. Because the docstring has been separated from
# the code blocks in sphinx-gallery, __doc__ is actually
# __builtin__.__doc__ in the execution context and we do not
# want to print it
- example_globals = {'__doc__': ''}
- fig_count = 0
- # A simple example has two blocks: one for the
- # example introduction/explanation and one for the code
- is_example_notebook_like = len(script_blocks) > 2
- for blabel, bcontent in script_blocks:
- if blabel == 'code':
- code_output, rtime, fig_count = execute_script(bcontent,
- example_globals,
- image_path,
- fig_count,
- src_file,
- gallery_conf)
-
- time_elapsed += rtime
-
- if is_example_notebook_like:
- example_rst += codestr2rst(bcontent) + '\n'
- example_rst += code_output
- else:
- example_rst += code_output
- example_rst += codestr2rst(bcontent) + '\n'
-
+ '__doc__': '',
+ # Examples may contain if __name__ == '__main__' guards
+        # for example in scikit-learn when the example uses multiprocessing
+ '__name__': '__main__',
+ # Don't ever support __file__: Issues #166 #212
+ }
+
+ # A simple example has two blocks: one for the
+ # example introduction/explanation and one for the code
+ is_example_notebook_like = len(script_blocks) > 2
+ time_elapsed = 0
+ block_vars = {'execute_script': execute_script, 'fig_count': 0,
+ 'image_path': image_path_template, 'src_file': src_file}
+ if block_vars['execute_script']:
+ print('Executing file %s' % src_file)
+ for blabel, bcontent in script_blocks:
+ if blabel == 'code':
+ code_output, rtime = execute_code_block(bcontent,
+ example_globals,
+ block_vars,
+ gallery_conf)
+
+ time_elapsed += rtime
+
+ if is_example_notebook_like:
+ example_rst += codestr2rst(bcontent) + '\n'
+ example_rst += code_output
else:
- example_rst += text2string(bcontent) + '\n'
+ example_rst += code_output
+ if 'sphx-glr-script-out' in code_output:
+ # Add some vertical space after output
+ example_rst += "\n\n|\n\n"
+ example_rst += codestr2rst(bcontent) + '\n'
- amount_of_code = sum([len(bcontent)
- for blabel, bcontent in script_blocks
- if blabel == 'code'])
+ else:
+ example_rst += bcontent + '\n\n'
+
+ clean_modules()
- save_thumbnail(image_path, base_image_name, gallery_conf)
+    # Write the md5 checksum if the example was built correctly, i.e. it
+    # did not fail and was meant to run (no-plot shall not cache the md5sum)
+ if block_vars['execute_script']:
+ with open(example_file + '.md5', 'w') as file_checksum:
+ file_checksum.write(get_md5sum(example_file))
+
+ save_thumbnail(image_path_template, src_file, gallery_conf)
time_m, time_s = divmod(time_elapsed, 60)
- with open(os.path.join(target_dir, base_image_name + '.rst'), 'w') as f:
- example_rst += CODE_DOWNLOAD.format(time_m, time_s, fname)
+ example_nb = jupyter_notebook(script_blocks)
+ save_notebook(example_nb, example_file.replace('.py', '.ipynb'))
+ with codecs.open(os.path.join(target_dir, base_image_name + '.rst'),
+ mode='w', encoding='utf-8') as f:
+ example_rst += "**Total running time of the script:**" \
+ " ({0: .0f} minutes {1: .3f} seconds)\n\n".format(
+ time_m, time_s)
+ example_rst += CODE_DOWNLOAD.format(fname,
+ fname.replace('.py', '.ipynb'))
+ example_rst += SPHX_GLR_SIG
f.write(example_rst)
- return amount_of_code
+
+ if block_vars['execute_script']:
+ print("{0} ran in : {1:.2g} seconds\n".format(src_file, time_elapsed))
+
+ return amount_of_code, time_elapsed
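The `filename_pattern` gate above decides which scripts actually execute; its default, set in gen_gallery.py, matches files whose path contains a separator followed by 'plot'. A minimal sketch:

    import os
    import re

    filename_pattern = re.escape(os.sep) + 'plot'  # the default pattern
    for src_file in [os.path.join('examples', 'plot_brain.py'),
                     os.path.join('examples', 'compute_stats.py')]:
        print(src_file, bool(re.search(filename_pattern, src_file)))
    # examples/plot_brain.py True
    # examples/compute_stats.py False
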
diff --git a/doc/sphinxext/sphinx_gallery/notebook.py b/doc/sphinxext/sphinx_gallery/notebook.py
new file mode 100644
index 0000000000..a0cfdbd788
--- /dev/null
+++ b/doc/sphinxext/sphinx_gallery/notebook.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+r"""
+Parser for Jupyter notebooks
+============================
+
+Class that holds the Jupyter notebook information
+
+"""
+# Author: Óscar Nájera
+# License: 3-clause BSD
+
+from __future__ import division, absolute_import, print_function
+from functools import partial
+import argparse
+import json
+import re
+import sys
+from .py_source_parser import split_code_and_text_blocks
+
+
+def jupyter_notebook_skeleton():
+ """Returns a dictionary with the elements of a Jupyter notebook"""
+ py_version = sys.version_info
+ notebook_skeleton = {
+ "cells": [],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python " + str(py_version[0]),
+ "language": "python",
+ "name": "python" + str(py_version[0])
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": py_version[0]
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython" + str(py_version[0]),
+ "version": '{0}.{1}.{2}'.format(*sys.version_info[:3])
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+ }
+ return notebook_skeleton
+
+
+def directive_fun(match, directive):
+ """Helper to fill in directives"""
+ directive_to_alert = dict(note="info", warning="danger")
+    return ('<div class="alert alert-{0}"><h4>{1}</h4><p>{2}</p></div>'
+ .format(directive_to_alert[directive], directive.capitalize(),
+ match.group(1).strip()))
+
+
+def rst2md(text):
+    """Converts the RST text from the examples docstrings and comments
+ into markdown text for the Jupyter notebooks"""
+
+ top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
+ text = re.sub(top_heading, r'# \1', text)
+
+ math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
+ text = re.sub(math_eq,
+ lambda match: r'\begin{{align}}{0}\end{{align}}'.format(
+ match.group(1).strip()),
+ text)
+ inline_math = re.compile(r':math:`(.+?)`', re.DOTALL)
+ text = re.sub(inline_math, r'$\1$', text)
+
+ directives = ('warning', 'note')
+ for directive in directives:
+ directive_re = re.compile(r'^\.\. %s::((?:.+)?(?:\n+^ .+)*)'
+ % directive, flags=re.M)
+ text = re.sub(directive_re,
+ partial(directive_fun, directive=directive), text)
+
+ links = re.compile(r'^ *\.\. _.*:.*$\n', flags=re.M)
+ text = re.sub(links, '', text)
+
+ refs = re.compile(r':ref:`')
+ text = re.sub(refs, '`', text)
+
+ contents = re.compile(r'^\s*\.\. contents::.*$(\n +:\S+: *$)*\n',
+ flags=re.M)
+ text = re.sub(contents, '', text)
+
+ images = re.compile(
+ r'^\.\. image::(.*$)(?:\n *:alt:(.*$)\n)?(?: +:\S+:.*$\n)*',
+ flags=re.M)
+ text = re.sub(
+ images, lambda match: '![{1}]({0})\n'.format(
+ match.group(1).strip(), (match.group(2) or '').strip()), text)
+
+ return text
+
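To make the substitutions above concrete, here is a small hypothetical round trip (assuming the module is importable as `sphinx_gallery.notebook`); the output shown in the comments is approximate:

from sphinx_gallery.notebook import rst2md

rst = ("========\n"
       "My title\n"
       "========\n\n"
       "See :ref:`plotting` and the inline math :math:`a^2 + b^2`.\n\n"
       ".. note:: This text becomes an HTML alert box.\n")
print(rst2md(rst))
# # My title
#
# See `plotting` and the inline math $a^2 + b^2$.
#
# <div class="alert alert-info"><h4>Note</h4><p>This text becomes an
# HTML alert box.</p></div>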
+
+def jupyter_notebook(script_blocks):
+ """Generate a Jupyter notebook file cell-by-cell
+
+ Parameters
+ ----------
+ script_blocks: list
+ script execution cells
+ """
+
+ work_notebook = jupyter_notebook_skeleton()
+ add_code_cell(work_notebook, "%matplotlib inline")
+ fill_notebook(work_notebook, script_blocks)
+
+ return work_notebook
+
+
+def add_code_cell(work_notebook, code):
+ """Add a code cell to the notebook
+
+ Parameters
+ ----------
+ code : str
+ Cell content
+ """
+
+ code_cell = {
+ "cell_type": "code",
+ "execution_count": None,
+ "metadata": {"collapsed": False},
+ "outputs": [],
+ "source": [code.strip()]
+ }
+ work_notebook["cells"].append(code_cell)
+
+
+def add_markdown_cell(work_notebook, text):
+ """Add a markdown cell to the notebook
+
+ Parameters
+ ----------
+ text : str
+ Cell content
+ """
+ markdown_cell = {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [rst2md(text)]
+ }
+ work_notebook["cells"].append(markdown_cell)
+
+
+def fill_notebook(work_notebook, script_blocks):
+ """Writes the Jupyter notebook cells
+
+ Parameters
+ ----------
+ script_blocks : list of tuples
+ """
+
+ for blabel, bcontent in script_blocks:
+ if blabel == 'code':
+ add_code_cell(work_notebook, bcontent)
+ else:
+ add_markdown_cell(work_notebook, bcontent + '\n')
+
+
+def save_notebook(work_notebook, write_file):
+ """Saves the Jupyter work_notebook to write_file"""
+ with open(write_file, 'w') as out_nb:
+ json.dump(work_notebook, out_nb, indent=2)
+
+
+###############################################################################
+# Notebook shell utility
+
+def python_to_jupyter_cli(args=None, namespace=None):
+ """Exposes the jupyter notebook renderer to the command line
+
+ Takes the same arguments as ArgumentParser.parse_args
+ """
+ parser = argparse.ArgumentParser(
+ description='Sphinx-Gallery Notebook converter')
+ parser.add_argument('python_src_file', nargs='+',
+ help='Input Python file script to convert. '
+ 'Supports multiple files and shell wildcards'
+ ' (e.g. *.py)')
+ args = parser.parse_args(args, namespace)
+
+ for src_file in args.python_src_file:
+ blocks = split_code_and_text_blocks(src_file)
+ print('Converting {0}'.format(src_file))
+ example_nb = jupyter_notebook(blocks)
+ save_notebook(example_nb, src_file.replace('.py', '.ipynb'))
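Putting this module's pieces together, converting a gallery script to a notebook could look like the following sketch (the file name is hypothetical; `sphinx_gallery` is assumed to be on the path):

from sphinx_gallery.notebook import (jupyter_notebook, save_notebook,
                                     python_to_jupyter_cli)
from sphinx_gallery.py_source_parser import split_code_and_text_blocks

# Programmatic use: parse, build the cell dict, write the .ipynb
blocks = split_code_and_text_blocks('plot_example.py')
example_nb = jupyter_notebook(blocks)
save_notebook(example_nb, 'plot_example.ipynb')

# Equivalent call through the CLI wrapper defined above
python_to_jupyter_cli(['plot_example.py'])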
diff --git a/doc/sphinxext/sphinx_gallery/py_source_parser.py b/doc/sphinxext/sphinx_gallery/py_source_parser.py
new file mode 100644
index 0000000000..d397087f99
--- /dev/null
+++ b/doc/sphinxext/sphinx_gallery/py_source_parser.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+r"""
+Parser for python source files
+==============================
+"""
+# Created Sun Nov 27 14:03:07 2016
+# Author: Óscar Nájera
+
+from __future__ import division, absolute_import, print_function
+import ast
+import re
+from textwrap import dedent
+
+SYNTAX_ERROR_DOCSTRING = """
+SyntaxError
+===========
+
+Example script with invalid Python syntax
+"""
+
+
+def get_docstring_and_rest(filename):
+ """Separate `filename` content between docstring and the rest
+
+ Strongly inspired from ast.get_docstring.
+
+ Returns
+ -------
+ docstring: str
+ docstring of `filename`
+ rest: str
+ `filename` content without the docstring
+ """
+ # can't use codecs.open(filename, 'r', 'utf-8') here b/c ast doesn't
+ # seem to work with unicode strings in Python2.7
+ # "SyntaxError: encoding declaration in Unicode string"
+ with open(filename, 'rb') as fid:
+ content = fid.read()
+ # change from Windows format to UNIX for uniformity
+ content = content.replace(b'\r\n', b'\n')
+
+ try:
+ node = ast.parse(content)
+ except SyntaxError:
+ return SYNTAX_ERROR_DOCSTRING, content.decode('utf-8')
+
+ if not isinstance(node, ast.Module):
+ raise TypeError("This function only supports modules. "
+ "You provided {0}".format(node.__class__.__name__))
+ if node.body and isinstance(node.body[0], ast.Expr) and \
+ isinstance(node.body[0].value, ast.Str):
+ docstring_node = node.body[0]
+ docstring = docstring_node.value.s
+ if hasattr(docstring, 'decode'): # python2.7
+ docstring = docstring.decode('utf-8')
+ # This gets the content of the file after the docstring's last line
+ # Note: 'maxsplit' argument is not a keyword argument in python2
+ rest = content.decode('utf-8').split('\n', docstring_node.lineno)[-1]
+ return docstring, rest
+ else:
+ raise ValueError(('Could not find docstring in file "{0}". '
+ 'A docstring is required by sphinx-gallery')
+ .format(filename))
+
+
+def split_code_and_text_blocks(source_file):
+ """Return list with source file separated into code and text blocks.
+
+ Returns
+ -------
+ blocks : list of (label, content)
+ List where each element is a tuple with the label ('text' or 'code'),
+ and content string of block.
+ """
+ docstring, rest_of_content = get_docstring_and_rest(source_file)
+ blocks = [('text', docstring)]
+
+ pattern = re.compile(
+ r'(?P<header_line>^#{20,}.*)\s(?P<text_content>(?:^#.*\s)*)',
+ flags=re.M)
+
+ pos_so_far = 0
+ for match in re.finditer(pattern, rest_of_content):
+ match_start_pos, match_end_pos = match.span()
+ code_block_content = rest_of_content[pos_so_far:match_start_pos]
+ text_content = match.group('text_content')
+ sub_pat = re.compile('^#', flags=re.M)
+ text_block_content = dedent(re.sub(sub_pat, '', text_content)).lstrip()
+ if code_block_content.strip():
+ blocks.append(('code', code_block_content))
+ if text_block_content.strip():
+ blocks.append(('text', text_block_content))
+ pos_so_far = match_end_pos
+
+ remaining_content = rest_of_content[pos_so_far:]
+ if remaining_content.strip():
+ blocks.append(('code', remaining_content))
+
+ return blocks
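As a quick illustration of the 20-`#` separator convention the parser relies on, a toy file (hypothetical content) splits as follows:

from sphinx_gallery.py_source_parser import split_code_and_text_blocks

source = (
    '"""\n'
    'Example\n'
    '=======\n'
    '\n'
    'Docstring text.\n'
    '"""\n'
    'import numpy as np\n'
    '\n'
    + '#' * 25 + '\n'
    '# A text block before the final code block.\n'
    '\n'
    'print(np.arange(3))\n'
)

with open('/tmp/tiny_example.py', 'w') as f:
    f.write(source)

for label, content in split_code_and_text_blocks('/tmp/tiny_example.py'):
    print(label)
# text   (the module docstring)
# code   (the import)
# text   (the commented block after the '#' separator)
# code   (the final print)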
diff --git a/doc/themes/nilearn/layout.html b/doc/themes/nilearn/layout.html
index d4755755fc..d4c42eb9ff 100644
--- a/doc/themes/nilearn/layout.html
+++ b/doc/themes/nilearn/layout.html
@@ -22,7 +22,7 @@
"],_default:[0,"",""]};ge.optgroup=ge.option,ge.tbody=ge.tfoot=ge.colgroup=ge.caption=ge.thead,ge.th=ge.td;function ye(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&N(e,t)?w.merge([e],n):n}function ve(e,t){for(var n=0,r=e.length;n-1)i&&i.push(o);else if(l=w.contains(o.ownerDocument,o),a=ye(f.appendChild(o),"script"),l&&ve(a),n){c=0;while(o=a[c++])he.test(o.type||"")&&n.push(o)}return f}!function(){var e=r.createDocumentFragment().appendChild(r.createElement("div")),t=r.createElement("input");t.setAttribute("type","radio"),t.setAttribute("checked","checked"),t.setAttribute("name","t"),e.appendChild(t),h.checkClone=e.cloneNode(!0).cloneNode(!0).lastChild.checked,e.innerHTML="",h.noCloneChecked=!!e.cloneNode(!0).lastChild.defaultValue}();var be=r.documentElement,we=/^key/,Te=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ce=/^([^.]*)(?:\.(.+)|)/;function Ee(){return!0}function ke(){return!1}function Se(){try{return r.activeElement}catch(e){}}function De(e,t,n,r,i,o){var a,s;if("object"==typeof t){"string"!=typeof n&&(r=r||n,n=void 0);for(s in t)De(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=ke;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return w().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=w.guid++)),e.each(function(){w.event.add(this,t,i,r,n)})}w.event={global:{},add:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,y=J.get(e);if(y){n.handler&&(n=(o=n).handler,i=o.selector),i&&w.find.matchesSelector(be,i),n.guid||(n.guid=w.guid++),(u=y.events)||(u=y.events={}),(a=y.handle)||(a=y.handle=function(t){return"undefined"!=typeof w&&w.event.triggered!==t.type?w.event.dispatch.apply(e,arguments):void 0}),l=(t=(t||"").match(M)||[""]).length;while(l--)d=g=(s=Ce.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=w.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=w.event.special[d]||{},c=w.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&w.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(e,r,h,a)||e.addEventListener&&e.addEventListener(d,a)),f.add&&(f.add.call(e,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),w.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,y=J.hasData(e)&&J.get(e);if(y&&(u=y.events)){l=(t=(t||"").match(M)||[""]).length;while(l--)if(s=Ce.exec(t[l])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){f=w.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,y.handle)||w.removeEvent(e,d,y.handle),delete u[d])}else for(d in u)w.event.remove(e,d+t[l],n,r,!0);w.isEmptyObject(u)&&J.remove(e,"handle events")}},dispatch:function(e){var t=w.event.fix(e),n,r,i,o,a,s,u=new 
Array(arguments.length),l=(J.get(this,"events")||{})[t.type]||[],c=w.event.special[t.type]||{};for(u[0]=t,n=1;n=1))for(;l!==this;l=l.parentNode||this)if(1===l.nodeType&&("click"!==e.type||!0!==l.disabled)){for(o=[],a={},n=0;n-1:w.find(i,this,null,[l]).length),a[i]&&o.push(r);o.length&&s.push({elem:l,handlers:o})}return l=this,u\x20\t\r\n\f]*)[^>]*)\/>/gi,Ae=/
diff --git a/nilearn/plotting/js_plotting_utils.py b/nilearn/plotting/js_plotting_utils.py
new file mode 100644
--- /dev/null
+++ b/nilearn/plotting/js_plotting_utils.py
+"""
+Helpers for views, i.e. interactive plots from html_surface and
+html_connectome.
+"""
+import os
+import base64
+import cgi
+import subprocess
+import tempfile
+import warnings
+import weakref
+import webbrowser
+
+import numpy as np
+import matplotlib as mpl
+from matplotlib import cm as mpl_cm
+
+from .. import surface
+from .._utils.extmath import fast_abs_percentile
+from .._utils.param_validation import check_threshold
+
+
+def add_js_lib(html, embed_js=True):
+    """Add the javascript libraries to the html template.
+
+    If embed_js is True, jquery and plotly are embedded in the resulting
+    page; otherwise they are loaded from CDNs.
+    """
+    js_dir = os.path.join(os.path.dirname(__file__), 'data', 'js')
+    with open(os.path.join(js_dir, 'surface-plot-utils.js')) as f:
+        js_utils = f.read()
+    if not embed_js:
+        js_lib = """
+        <script
+        src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js">
+        </script>
+        <script src="https://cdn.plot.ly/plotly-gl3d-latest.min.js"></script>
+        <script>
+        {}
+        </script>
+        """.format(js_utils)
+ else:
+ with open(os.path.join(js_dir, 'jquery.min.js')) as f:
+ jquery = f.read()
+ with open(os.path.join(js_dir, 'plotly-gl3d-latest.min.js')) as f:
+ plotly = f.read()
+ js_lib = """
+
+
+
+ """.format(jquery, plotly, js_utils)
+ return html.replace('INSERT_JS_LIBRARIES_HERE', js_lib)
+
+
+def get_html_template(template_name):
+ """Get an HTML file from package data"""
+ template_path = os.path.join(
+ os.path.dirname(__file__), 'data', 'html', template_name)
+ with open(template_path, 'rb') as f:
+ return f.read().decode('utf-8')
+
+
+def _remove_after_n_seconds(file_name, n_seconds):
+ script = os.path.join(os.path.dirname(__file__), 'rm_file.py')
+ subprocess.Popen(['python', script, file_name, str(n_seconds)])
+
+
+class HTMLDocument(object):
+ """
+ Embeds a plot in a web page.
+
+ If you are running a Jupyter notebook, the plot will be displayed
+ inline if this object is the output of a cell.
+ Otherwise, use open_in_browser() to open it in a web browser (or
+ save_as_html("filename.html") to save it as an html file).
+
+ Use str(document) or document.html to get the content of the web page,
+ and document.get_iframe() to have it wrapped in an iframe.
+
+ """
+ _all_open_html_repr = weakref.WeakSet()
+
+ def __init__(self, html, width=600, height=400):
+ self.html = html
+ self.width = width
+ self.height = height
+ self._temp_file = None
+ self._check_n_open()
+
+ def _check_n_open(self):
+ HTMLDocument._all_open_html_repr.add(self)
+ if len(HTMLDocument._all_open_html_repr) > 9:
+ warnings.warn('It seems you have created more than 10 '
+ 'nilearn views. As each view uses dozens '
+ 'of megabytes of RAM, you might want to '
+ 'delete some of them.')
+
+ def resize(self, width, height):
+ """Resize the plot displayed in a Jupyter notebook."""
+ self.width, self.height = width, height
+ return self
+
+ def get_iframe(self, width=None, height=None):
+ """
+ Get the document wrapped in an inline frame.
+
+ For inserting in another HTML page or for display in a Jupyter
+ notebook.
+
+ """
+ if width is None:
+ width = self.width
+ if height is None:
+ height = self.height
+ escaped = cgi.escape(self.html, quote=True)
+        wrapped = ('<iframe scrolling="no" frameBorder="0" '
+                   'srcdoc="{}" width="{}" height="{}"></iframe>'
+                   ).format(escaped, width, height)
+ return wrapped
+
+ def get_standalone(self):
+ """ Get the plot in an HTML page."""
+ return self.html
+
+ def _repr_html_(self):
+ """
+ Used by the Jupyter notebook.
+
+ Users normally won't call this method explicitly.
+ """
+ return self.get_iframe()
+
+ def __str__(self):
+ return self.html
+
+ def save_as_html(self, file_name):
+ """
+ Save the plot in an HTML file, that can later be opened in a browser.
+ """
+ with open(file_name, 'wb') as f:
+ f.write(self.html.encode('utf-8'))
+
+ def open_in_browser(self, file_name=None, temp_file_lifetime=30):
+ """
+ Save the plot to a temporary HTML file and open it in a browser.
+
+ Parameters
+ ----------
+
+ file_name : str, optional
+ .html file to use as temporary file
+
+ temp_file_lifetime : float, optional (default=30.)
+ Time, in seconds, after which the temporary file is removed.
+ If None, it is never removed.
+
+ """
+ if file_name is None:
+ fd, file_name = tempfile.mkstemp('.html', 'nilearn_surface_plot_')
+ os.close(fd)
+ self.save_as_html(file_name)
+ self._temp_file = file_name
+ file_size = os.path.getsize(file_name) / 1e6
+ if temp_file_lifetime is None:
+ print(("Saved HTML in temporary file: {}\n"
+ "file size is {:.1f}M, delete it when you're done, "
+ "for example by calling this.remove_temp_file").format(
+ file_name, file_size))
+ else:
+ _remove_after_n_seconds(self._temp_file, temp_file_lifetime)
+ webbrowser.open('file://{}'.format(file_name))
+
+ def remove_temp_file(self):
+ """
+ Remove the temporary file created by `open_in_browser`, if necessary.
+ """
+ if self._temp_file is None:
+ return
+ if not os.path.isfile(self._temp_file):
+ return
+ os.remove(self._temp_file)
+ print('removed {}'.format(self._temp_file))
+ self._temp_file = None
+
+
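A minimal usage sketch for this class, not part of the patch (the HTML payload is a stand-in; real documents come from the plotting functions built on top of it):

doc = HTMLDocument('<html><body><p>hello</p></body></html>',
                   width=300, height=200)
doc.save_as_html('/tmp/hello.html')   # persistent copy on disk
snippet = doc.get_iframe(width=400)   # escaped, embeddable <iframe> markup
# doc.open_in_browser(temp_file_lifetime=30)  # temp file, removed after 30 s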
+def colorscale(cmap, values, threshold=None, symmetric_cmap=True, vmax=None):
+ """Normalize a cmap, put it in plotly format, get threshold and range"""
+ cmap = mpl_cm.get_cmap(cmap)
+ abs_values = np.abs(values)
+ if not symmetric_cmap and (values.min() < 0):
+ warnings.warn('you have specified symmetric_cmap=False '
+ 'but the map contains negative values; '
+ 'setting symmetric_cmap to True')
+ symmetric_cmap = True
+ if vmax is None:
+ if symmetric_cmap:
+ vmax = abs_values.max()
+ vmin = - vmax
+ else:
+ vmin, vmax = values.min(), values.max()
+ else:
+ vmin = -vmax if symmetric_cmap else 0
+ norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
+ cmaplist = [cmap(i) for i in range(cmap.N)]
+ abs_threshold = None
+ if threshold is not None:
+ abs_threshold = check_threshold(threshold, values, fast_abs_percentile)
+ istart = int(norm(-abs_threshold, clip=True) * (cmap.N - 1))
+ istop = int(norm(abs_threshold, clip=True) * (cmap.N - 1))
+ for i in range(istart, istop):
+ cmaplist[i] = (0.5, 0.5, 0.5, 1.) # just an average gray color
+ our_cmap = mpl.colors.LinearSegmentedColormap.from_list(
+ 'Custom cmap', cmaplist, cmap.N)
+ x = np.linspace(0, 1, 100)
+ rgb = our_cmap(x, bytes=True)[:, :3]
+ rgb = np.array(rgb, dtype=int)
+ colors = []
+ for i, col in zip(x, rgb):
+ colors.append([np.round(i, 3), "rgb({}, {}, {})".format(*col)])
+ return {
+ 'colors': colors, 'vmin': vmin, 'vmax': vmax, 'cmap': our_cmap,
+ 'norm': norm, 'abs_threshold': abs_threshold,
+ 'symmetric_cmap': symmetric_cmap
+ }
+
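For instance, normalizing a signed map for plotly with a percentile threshold (a sketch with a synthetic array; 'cold_hot' is registered by nilearn's colormap module, and `check_threshold` accepts percentile strings):

import numpy as np
from nilearn.plotting.js_plotting_utils import colorscale

values = np.linspace(-3, 3, 100)
cs = colorscale('cold_hot', values, threshold='90%')
print(cs['vmin'], cs['vmax'])   # symmetric range, here -3.0 and 3.0
print(cs['abs_threshold'])      # absolute cutoff derived from the percentile
print(cs['colors'][0])          # first plotly color stop, e.g. [0.0, 'rgb(..)']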
+
+def encode(a):
+ """Base64 encode a numpy array"""
+ try:
+ data = a.tobytes()
+ except AttributeError:
+ # np < 1.9
+ data = a.tostring()
+ return base64.b64encode(data).decode('utf-8')
+
+
+def decode(b, dtype):
+ """Decode a numpy array encoded as Base64"""
+ return np.frombuffer(base64.b64decode(b.encode('utf-8')), dtype)
+
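The pair above is a plain base64 round trip over the raw buffer; the dtype, including endianness (e.g. '<f4'), is not stored and must be supplied again on decode:

import numpy as np
from nilearn.plotting.js_plotting_utils import encode, decode

arr = np.arange(4, dtype='<f4')
b64 = encode(arr)                 # str, safe to embed in JSON or HTML
restored = decode(b64, dtype='<f4')
assert (restored == arr).all()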
+
+def mesh_to_plotly(mesh):
+ mesh = surface.load_surf_mesh(mesh)
+    x, y, z = map(encode, np.asarray(mesh[0].T, dtype='<f4'))
+    i, j, k = map(encode, np.asarray(mesh[1].T, dtype='<i4'))
+    info = {"_x": x, "_y": y, "_z": z, "_i": i, "_j": j, "_k": k}
+    return info

[... lost in extraction: the remainder of js_plotting_utils.py and the beginning of the plot_matrix diff; the plot_matrix docstring resumes below ...]

diff --git a/nilearn/plotting/matrix_plotting.py b/nilearn/plotting/matrix_plotting.py
+    reorder : boolean or {'single', 'complete', 'average'}, optional
+        If not False, reorders the matrix into blocks of clusters.
+        Accepted linkage options for the clustering are 'single',
+        'complete', and 'average'. True defaults to 'average'.
+        Requires scipy >= 1.0.0.
+
+ .. versionadded:: 0.4.1
+
+ kwargs : extra keyword arguments
+ Extra keyword arguments are sent to pylab.imshow
+
+ Returns
+ -------
+ display : instance of matplotlib AxesImage
+ The image of the plotted matrix.
+ """
+ if reorder:
+ if labels is None or labels is False:
+ raise ValueError("Labels are needed to show the reordering.")
+ try:
+ from scipy.cluster.hierarchy import (linkage, optimal_leaf_ordering,
+ leaves_list)
+ except ImportError:
+ raise ImportError("A scipy version of at least 1.0 is needed "
+ "for ordering the matrix with "
+ "optimal_leaf_ordering.")
+ valid_reorder_args = [True, 'single', 'complete', 'average']
+ if reorder not in valid_reorder_args:
+ raise ValueError("Parameter reorder needs to be "
+ "one of {}.".format(valid_reorder_args))
+ if reorder is True:
+ reorder = 'average'
+ linkage_matrix = linkage(mat, method=reorder)
+ ordered_linkage = optimal_leaf_ordering(linkage_matrix, mat)
+ index = leaves_list(ordered_linkage)
+ # make sure labels is an ndarray and copy it
+ labels = np.array(labels).copy()
+ mat = mat.copy()
+ # and reorder labels and matrix
+ labels = labels[index]
+ mat = mat[index, :][:, index]
+
+ if tri == 'lower':
+ mask = np.tri(mat.shape[0], k=-1, dtype=np.bool) ^ True
+ mat = np.ma.masked_array(mat, mask)
+ elif tri == 'diag':
+ mask = np.tri(mat.shape[0], dtype=np.bool) ^ True
+ mat = np.ma.masked_array(mat, mask)
+ if axes is not None and figure is not None:
+ raise ValueError("Parameters figure and axes cannot be specified "
+ "together. You gave 'figure=%s, axes=%s'"
+ % (figure, axes))
+ if figure is not None:
+ if isinstance(figure, plt.Figure):
+ fig = figure
+ else:
+ fig = plt.figure(figsize=figure)
+ axes = plt.gca()
+ own_fig = True
+ else:
+ if axes is None:
+ fig, axes = plt.subplots(1, 1, figsize=(7, 5))
+ own_fig = True
+ else:
+ fig = axes.figure
+ own_fig = False
+ display = axes.imshow(mat, aspect='equal', interpolation='nearest',
+ cmap=cmap, **kwargs)
+ axes.set_autoscale_on(False)
+ ymin, ymax = axes.get_ylim()
+ if labels is False:
+ axes.xaxis.set_major_formatter(plt.NullFormatter())
+ axes.yaxis.set_major_formatter(plt.NullFormatter())
+ elif labels is not None:
+ axes.set_xticks(np.arange(len(labels)))
+ axes.set_xticklabels(labels, size='x-small')
+ for label in axes.get_xticklabels():
+ label.set_ha('right')
+ label.set_rotation(50)
+ axes.set_yticks(np.arange(len(labels)))
+ axes.set_yticklabels(labels, size='x-small')
+ for label in axes.get_yticklabels():
+ label.set_ha('right')
+ label.set_va('top')
+ label.set_rotation(10)
+
+ if grid is not False:
+ size = len(mat)
+ # Different grids for different layouts
+ if tri == 'lower':
+ for i in range(size):
+ # Correct for weird mis-sizing
+ i = 1.001 * i
+ axes.plot([i + 0.5, i + 0.5], [size - 0.5, i + 0.5],
+ color='grey')
+ axes.plot([i + 0.5, -0.5], [i + 0.5, i + 0.5],
+ color='grey')
+ elif tri == 'diag':
+ for i in range(size):
+ # Correct for weird mis-sizing
+ i = 1.001 * i
+ axes.plot([i + 0.5, i + 0.5], [size - 0.5, i - 0.5],
+ color='grey')
+ axes.plot([i + 0.5, -0.5], [i - 0.5, i - 0.5], color='grey')
+ else:
+ for i in range(size):
+ # Correct for weird mis-sizing
+ i = 1.001 * i
+ axes.plot([i + 0.5, i + 0.5], [size - 0.5, -0.5], color='grey')
+ axes.plot([size - 0.5, -0.5], [i + 0.5, i + 0.5], color='grey')
+
+ axes.set_ylim(ymin, ymax)
+
+ if auto_fit:
+ if labels is not None and labels is not False:
+ fit_axes(axes)
+ elif own_fig:
+ plt.tight_layout(pad=.1,
+ rect=((0, 0, .95, 1) if colorbar
+ else (0, 0, 1, 1)))
+
+ if colorbar:
+ cax, kw = make_axes(axes, location='right', fraction=0.05, shrink=0.8,
+ pad=.0)
+ fig.colorbar(mappable=display, cax=cax)
+ # make some room
+ fig.subplots_adjust(right=0.8)
+ # change current axis back to matrix
+ plt.sca(axes)
+
+ if title is not None:
+ # Adjust the size
+ text_len = np.max([len(t) for t in title.split('\n')])
+ size = axes.bbox.size[0] / text_len
+ axes.text(0.95, 0.95, title,
+ horizontalalignment='right',
+ verticalalignment='top',
+ transform=axes.transAxes,
+ size=size)
+
+ return display
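A short usage sketch for the function above (assuming `plot_matrix` is exported from nilearn.plotting as in the released package; the data are synthetic):

import numpy as np
import matplotlib.pyplot as plt
from nilearn.plotting import plot_matrix

rng = np.random.RandomState(0)
corr = np.corrcoef(rng.randn(5, 100))          # 5x5 synthetic correlations
labels = ['roi-%d' % i for i in range(5)]

plot_matrix(corr, labels=labels, tri='lower', colorbar=True)
plot_matrix(corr, labels=labels, reorder='average')  # needs scipy >= 1.0.0
plt.show()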
diff --git a/nilearn/plotting/rm_file.py b/nilearn/plotting/rm_file.py
new file mode 100644
index 0000000000..08d04894a2
--- /dev/null
+++ b/nilearn/plotting/rm_file.py
@@ -0,0 +1,23 @@
+"""
+Remove a file after a certain time. This is run in a subprocess
+by nilearn.plotting.html_surface.SurfaceView to remove the temporary
+file it uses to open a plot in a web browser.
+
+"""
+import os
+import time
+import warnings
+import argparse
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('file_name', type=str)
+ parser.add_argument('n_seconds', type=float)
+ args = parser.parse_args()
+
+ time.sleep(args.n_seconds)
+ if os.path.isfile(args.file_name):
+ try:
+ os.remove(args.file_name)
+ except Exception as e:
+ warnings.warn('failed to remove {}:\n{}'.format(args.file_name, e))
diff --git a/nilearn/plotting/surf_plotting.py b/nilearn/plotting/surf_plotting.py
new file mode 100644
index 0000000000..befac5c6e4
--- /dev/null
+++ b/nilearn/plotting/surf_plotting.py
@@ -0,0 +1,541 @@
+"""
+Functions for surface visualization.
+Only matplotlib is required.
+"""
+import numpy as np
+
+import matplotlib.pyplot as plt
+
+from mpl_toolkits.mplot3d import Axes3D
+
+from matplotlib.colorbar import make_axes
+from matplotlib.cm import ScalarMappable, get_cmap
+from matplotlib.colors import Normalize, LinearSegmentedColormap
+
+from ..surface import load_surf_data, load_surf_mesh
+from .._utils.compat import _basestring
+from .img_plotting import _get_colorbar_and_data_ranges, _crop_colorbar
+
+
+def plot_surf(surf_mesh, surf_map=None, bg_map=None,
+ hemi='left', view='lateral', cmap=None, colorbar=False,
+ avg_method='mean', threshold=None, alpha='auto',
+ bg_on_data=False, darkness=1, vmin=None, vmax=None,
+ cbar_vmin=None, cbar_vmax=None,
+ title=None, output_file=None, axes=None, figure=None, **kwargs):
+ """ Plotting of surfaces with optional background and data
+
+ .. versionadded:: 0.3
+
+ Parameters
+ ----------
+ surf_mesh: str or list of two numpy.ndarray
+ Surface mesh geometry, can be a file (valid formats are
+ .gii or Freesurfer specific files such as .orig, .pial,
+ .sphere, .white, .inflated) or
+ a list of two Numpy arrays, the first containing the x-y-z coordinates
+ of the mesh vertices, the second containing the indices
+ (into coords) of the mesh faces.
+
+ surf_map: str or numpy.ndarray, optional.
+ Data to be displayed on the surface mesh. Can be a file (valid formats
+ are .gii, .mgz, .nii, .nii.gz, or Freesurfer specific files such as
+ .thickness, .curv, .sulc, .annot, .label) or
+ a Numpy array
+
+ bg_map: Surface data object (to be defined), optional,
+ Background image to be plotted on the mesh underneath the
+ surf_data in greyscale, most likely a sulcal depth map for
+ realistic shading.
+
+ hemi : {'left', 'right'}, default is 'left'
+ Hemisphere to display.
+
+ view: {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'}, default is 'lateral'
+ View of the surface that is rendered.
+
+ cmap: matplotlib colormap, str or colormap object, default is None
+ To use for plotting of the stat_map. Either a string
+ which is a name of a matplotlib colormap, or a matplotlib
+ colormap object. If None, the matplotlib default will be chosen.
+
+ colorbar : bool, optional, default is False
+ If True, a colorbar of surf_map is displayed.
+
+ avg_method: {'mean', 'median'}, default is 'mean'
+ How to average vertex values to derive the face value, mean results
+ in smooth, median in sharp boundaries.
+
+ threshold : a number, None, or 'auto', default is None.
+ If None is given, the image is not thresholded.
+ If a number is given, it is used to threshold the image, values
+ below the threshold (in absolute value) are plotted as transparent.
+
+ alpha: float, alpha level of the mesh (not surf_data), default 'auto'
+ If 'auto' is chosen, alpha will default to .5 when no bg_map
+ is passed and to 1 if a bg_map is passed.
+
+ bg_on_data: bool, default is False
+ If True, and a bg_map is specified, the surf_data data is multiplied
+ by the background image, so that e.g. sulcal depth is visible beneath
+ the surf_data.
+ NOTE: that this non-uniformly changes the surf_data values according
+ to e.g the sulcal depth.
+
+ darkness: float, between 0 and 1, default is 1
+ Specifying the darkness of the background image.
+ 1 indicates that the original values of the background are used.
+ .5 indicates the background values are reduced by half before being
+ applied.
+
+ vmin, vmax: lower / upper bound to plot surf_data values
+ If None , the values will be set to min/max of the data
+
+ title : str, optional
+ Figure title.
+
+ output_file: str, or None, optional
+ The name of an image file to export plot to. Valid extensions
+ are .png, .pdf, .svg. If output_file is not None, the plot
+ is saved to a file, and the display is closed.
+
+ axes: instance of matplotlib axes, None, optional
+ The axes instance to plot to. The projection must be '3d', e.g. an
+ axes obtained with `figure, axes = plt.subplots(subplot_kw={'projection':
+ '3d'})` and passed here.
+ If None, a new axes is created.
+
+ figure: instance of matplotlib figure, None, optional
+ The figure instance to plot to. If None, a new figure is created.
+
+ See Also
+ --------
+ nilearn.datasets.fetch_surf_fsaverage : For surface data object to be
+ used as background map for this plotting function.
+
+ nilearn.plotting.plot_surf_roi : For plotting statistical maps on brain
+ surfaces.
+
+ nilearn.plotting.plot_surf_stat_map : For plotting statistical maps on
+ brain surfaces.
+ """
+
+ # load mesh and derive axes limits
+ mesh = load_surf_mesh(surf_mesh)
+ coords, faces = mesh[0], mesh[1]
+ limits = [coords.min(), coords.max()]
+
+ # set view
+ if hemi == 'right':
+ if view == 'lateral':
+ elev, azim = 0, 0
+ elif view == 'medial':
+ elev, azim = 0, 180
+ elif view == 'dorsal':
+ elev, azim = 90, 0
+ elif view == 'ventral':
+ elev, azim = 270, 0
+ elif view == 'anterior':
+ elev, azim = 0, 90
+ elif view == 'posterior':
+ elev, azim = 0, 270
+ else:
+ raise ValueError('view must be one of lateral, medial, '
+ 'dorsal, ventral, anterior, or posterior')
+ elif hemi == 'left':
+ if view == 'medial':
+ elev, azim = 0, 0
+ elif view == 'lateral':
+ elev, azim = 0, 180
+ elif view == 'dorsal':
+ elev, azim = 90, 0
+ elif view == 'ventral':
+ elev, azim = 270, 0
+ elif view == 'anterior':
+ elev, azim = 0, 90
+ elif view == 'posterior':
+ elev, azim = 0, 270
+ else:
+ raise ValueError('view must be one of lateral, medial, '
+ 'dorsal, ventral, anterior, or posterior')
+ else:
+ raise ValueError('hemi must be one of right or left')
+
+ # set alpha if in auto mode
+ if alpha == 'auto':
+ if bg_map is None:
+ alpha = .5
+ else:
+ alpha = 1
+
+ # if no cmap is given, set to matplotlib default
+ if cmap is None:
+ cmap = plt.cm.get_cmap(plt.rcParamsDefault['image.cmap'])
+ else:
+ # if cmap is given as string, translate to matplotlib cmap
+ if isinstance(cmap, _basestring):
+ cmap = plt.cm.get_cmap(cmap)
+
+ # initiate figure and 3d axes
+ if axes is None:
+ if figure is None:
+ figure = plt.figure()
+ axes = Axes3D(figure, rect=[0, 0, 1, 1],
+ xlim=limits, ylim=limits)
+ else:
+ if figure is None:
+ figure = axes.get_figure()
+ axes.set_xlim(*limits)
+ axes.set_ylim(*limits)
+ axes.set_aspect(.74)
+ axes.view_init(elev=elev, azim=azim)
+ axes.set_axis_off()
+
+ # plot mesh without data
+ p3dcollec = axes.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],
+ triangles=faces, linewidth=0.,
+ antialiased=False,
+ color='white')
+
+ # reduce viewing distance to remove space around mesh
+ axes.dist = 8
+
+ # If depth_map and/or surf_map are provided, map these onto the surface
+ # set_facecolors function of Poly3DCollection is used as passing the
+ # facecolors argument to plot_trisurf does not seem to work
+ if bg_map is not None or surf_map is not None:
+
+ face_colors = np.ones((faces.shape[0], 4))
+ # face_colors[:, :3] = .5*face_colors[:, :3] # why this?
+
+ if bg_map is not None:
+ bg_data = load_surf_data(bg_map)
+ if bg_data.shape[0] != coords.shape[0]:
+ raise ValueError('The bg_map does not have the same number '
+ 'of vertices as the mesh.')
+ bg_faces = np.mean(bg_data[faces], axis=1)
+ bg_faces = bg_faces - bg_faces.min()
+ bg_faces = bg_faces / bg_faces.max()
+ # control background darkness
+ bg_faces *= darkness
+ face_colors = plt.cm.gray_r(bg_faces)
+
+ # modify alpha values of background
+ face_colors[:, 3] = alpha * face_colors[:, 3]
+ # should it be possible to modify alpha of surf data as well?
+
+ if surf_map is not None:
+ surf_map_data = load_surf_data(surf_map)
+ if len(surf_map_data.shape) != 1:
+ raise ValueError('surf_map can only have one dimension but has'
+ '%i dimensions' % len(surf_map_data.shape))
+ if surf_map_data.shape[0] != coords.shape[0]:
+ raise ValueError('The surf_map does not have the same number '
+ 'of vertices as the mesh.')
+
+ # create face values from vertex values by selected avg methods
+ if avg_method == 'mean':
+ surf_map_faces = np.mean(surf_map_data[faces], axis=1)
+ elif avg_method == 'median':
+ surf_map_faces = np.median(surf_map_data[faces], axis=1)
+
+ # if no vmin/vmax are passed figure them out from data
+ if vmin is None:
+ vmin = np.nanmin(surf_map_faces)
+ if vmax is None:
+ vmax = np.nanmax(surf_map_faces)
+
+ # threshold if indicated
+ if threshold is None:
+ kept_indices = np.where(surf_map_faces)[0]
+ else:
+ kept_indices = np.where(np.abs(surf_map_faces) >= threshold)[0]
+
+ surf_map_faces = surf_map_faces - vmin
+ surf_map_faces = surf_map_faces / (vmax - vmin)
+
+ # multiply data with background if indicated
+ if bg_on_data:
+ face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])\
+ * face_colors[kept_indices]
+ else:
+ face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])
+
+ if colorbar:
+ our_cmap = get_cmap(cmap)
+ norm = Normalize(vmin=vmin, vmax=vmax)
+
+ nb_ticks = 5
+ ticks = np.linspace(vmin, vmax, nb_ticks)
+ bounds = np.linspace(vmin, vmax, our_cmap.N)
+
+ if threshold is not None:
+ cmaplist = [our_cmap(i) for i in range(our_cmap.N)]
+ # set colors to grey for absolute values < threshold
+ istart = int(norm(-threshold, clip=True) *
+ (our_cmap.N - 1))
+ istop = int(norm(threshold, clip=True) *
+ (our_cmap.N - 1))
+ for i in range(istart, istop):
+ cmaplist[i] = (0.5, 0.5, 0.5, 1.)
+ our_cmap = LinearSegmentedColormap.from_list(
+ 'Custom cmap', cmaplist, our_cmap.N)
+
+ # we need to create a proxy mappable
+ proxy_mappable = ScalarMappable(cmap=our_cmap, norm=norm)
+ proxy_mappable.set_array(surf_map_faces)
+ cax, kw = make_axes(axes, location='right', fraction=.1,
+ shrink=.6, pad=.0)
+ cbar = figure.colorbar(
+ proxy_mappable, cax=cax, ticks=ticks,
+ boundaries=bounds, spacing='proportional',
+ format='%.2g', orientation='vertical')
+ _crop_colorbar(cbar, cbar_vmin, cbar_vmax)
+
+ p3dcollec.set_facecolors(face_colors)
+
+ if title is not None:
+ axes.set_title(title, position=(.5, .95))
+
+ # save figure if output file is given
+ if output_file is not None:
+ figure.savefig(output_file)
+ plt.close(figure)
+ else:
+ return figure
+
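A hedged usage sketch, not part of the patch (the fsaverage attribute names are assumed from the bunch returned by nilearn.datasets.fetch_surf_fsaverage, which the See Also section points to; the texture is random):

import numpy as np
from nilearn import datasets
from nilearn.plotting import plot_surf

fsaverage = datasets.fetch_surf_fsaverage()
texture = np.random.RandomState(0).randn(10242)  # fsaverage5: 10242 vertices

plot_surf(fsaverage.infl_left, surf_map=texture,
          bg_map=fsaverage.sulc_left, hemi='left', view='lateral',
          bg_on_data=True, darkness=.8, colorbar=True)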
+
+def plot_surf_stat_map(surf_mesh, stat_map, bg_map=None,
+ hemi='left', view='lateral', threshold=None,
+ alpha='auto', vmax=None, cmap='cold_hot',
+ colorbar=True, symmetric_cbar="auto", bg_on_data=False,
+ darkness=1, title=None, output_file=None, axes=None,
+ figure=None, **kwargs):
+ """ Plotting a stats map on a surface mesh with optional background
+
+ .. versionadded:: 0.3
+
+ Parameters
+ ----------
+ surf_mesh : str or list of two numpy.ndarray
+ Surface mesh geometry, can be a file (valid formats are
+ .gii or Freesurfer specific files such as .orig, .pial,
+ .sphere, .white, .inflated) or
+ a list of two Numpy arrays, the first containing the x-y-z
+ coordinates of the mesh vertices, the second containing the
+ indices (into coords) of the mesh faces
+
+ stat_map : str or numpy.ndarray
+ Statistical map to be displayed on the surface mesh, can
+ be a file (valid formats are .gii, .mgz, .nii, .nii.gz, or
+ Freesurfer specific files such as .thickness, .curv, .sulc, .annot,
+ .label) or
+ a Numpy array
+
+ bg_map : Surface data object (to be defined), optional,
+ Background image to be plotted on the mesh underneath the
+ stat_map in greyscale, most likely a sulcal depth map for
+ realistic shading.
+
+ hemi : {'left', 'right'}, default is 'left'
+ Hemisphere to display.
+
+ view: {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'}, default is 'lateral'
+ View of the surface that is rendered.
+
+ threshold : a number, None, or 'auto', default is None
+ If None is given, the image is not thresholded.
+ If a number is given, it is used to threshold the image,
+ values below the threshold (in absolute value) are plotted
+ as transparent.
+
+ cmap : matplotlib colormap, str or colormap object, default 'cold_hot'
+ To use for plotting of the stat_map. Either a string
+ which is a name of a matplotlib colormap, or a matplotlib
+ colormap object.
+
+ colorbar : bool, optional, default is False
+ If True, a symmetric colorbar of the statistical map is displayed.
+
+ alpha : float, alpha level of the mesh (not the stat_map), default 'auto'
+ If 'auto' is chosen, alpha will default to .5 when no bg_map is
+ passed and to 1 if a bg_map is passed.
+
+ vmax : upper bound for plotting of stat_map values.
+
+ symmetric_cbar : bool or 'auto', optional, default 'auto'
+ Specifies whether the colorbar should range from -vmax to vmax
+ or from vmin to vmax. Setting to 'auto' will select the latter
+ if the range of the whole image is either positive or negative.
+ Note: The colormap will always range from -vmax to vmax.
+
+ bg_on_data : bool, default is False
+ If True, and a bg_map is specified, the stat_map data is multiplied
+ by the background image, so that e.g. sulcal depth is visible beneath
+ the stat_map.
+ NOTE: that this non-uniformly changes the stat_map values according
+ to e.g the sulcal depth.
+
+ darkness: float, between 0 and 1, default 1
+ Specifying the darkness of the background image. 1 indicates that the
+ original values of the background are used. .5 indicates the
+ background values are reduced by half before being applied.
+
+ title : str, optional
+ Figure title.
+
+ output_file: str, or None, optional
+ The name of an image file to export plot to. Valid extensions
+ are .png, .pdf, .svg. If output_file is not None, the plot
+ is saved to a file, and the display is closed.
+
+ axes: instance of matplotlib axes, None, optional
+ The axes instance to plot to. The projection must be '3d', e.g. an
+ axes obtained with `figure, axes = plt.subplots(subplot_kw={'projection':
+ '3d'})` and passed here.
+ If None, a new axes is created.
+
+ figure: instance of matplotlib figure, None, optional
+ The figure instance to plot to. If None, a new figure is created.
+
+ See Also
+ --------
+ nilearn.datasets.fetch_surf_fsaverage : For surface data object to be
+ used as background map for this plotting function.
+
+ nilearn.plotting.plot_surf : For brain surface visualization.
+ """
+
+ # Call _get_colorbar_and_data_ranges to derive symmetric vmin, vmax
+ # And colorbar limits depending on symmetric_cbar settings
+ cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
+ stat_map, vmax, symmetric_cbar, kwargs)
+
+ display = plot_surf(
+ surf_mesh, surf_map=stat_map, bg_map=bg_map, hemi=hemi, view=view,
+ avg_method='mean', threshold=threshold, cmap=cmap, colorbar=colorbar,
+ alpha=alpha, bg_on_data=bg_on_data, darkness=darkness, vmax=vmax, vmin=vmin,
+ title=title, output_file=output_file, axes=axes, figure=figure,
+ cbar_vmin=cbar_vmin, cbar_vmax=cbar_vmax, **kwargs)
+
+ return display
+
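And for the statistical-map wrapper, a similar sketch (same assumptions about the fsaverage bunch; the map is a random stand-in):

import numpy as np
from nilearn import datasets
from nilearn.plotting import plot_surf_stat_map

fsaverage = datasets.fetch_surf_fsaverage()
stat = np.random.RandomState(42).randn(10242)

plot_surf_stat_map(fsaverage.infl_left, stat, hemi='left',
                   bg_map=fsaverage.sulc_left, threshold=1.,
                   colorbar=True, title='random map')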
+
+def plot_surf_roi(surf_mesh, roi_map, bg_map=None,
+ hemi='left', view='lateral', alpha='auto',
+ vmin=None, vmax=None, cmap='gist_ncar',
+ bg_on_data=False, darkness=1, title=None,
+ output_file=None, axes=None, figure=None, **kwargs):
+ """ Plotting ROI on a surface mesh with optional background
+
+ .. versionadded:: 0.3
+
+ Parameters
+ ----------
+ surf_mesh : str or list of two numpy.ndarray
+ Surface mesh geometry, can be a file (valid formats are
+ .gii or Freesurfer specific files such as .orig, .pial,
+ .sphere, .white, .inflated) or
+ a list of two Numpy arrays, the first containing the x-y-z
+ coordinates of the mesh vertices, the second containing the indices
+ (into coords) of the mesh faces
+
+ roi_map : str or numpy.ndarray or list of numpy.ndarray
+ ROI map to be displayed on the surface mesh, can be a file
+ (valid formats are .gii, .mgz, .nii, .nii.gz, or Freesurfer specific
+ files such as .annot or .label), or
+ a Numpy array containing a value for each vertex, or
+ a list of Numpy arrays, one array per ROI which contains indices
+ of all vertices included in that ROI.
+
+ hemi : {'left', 'right'}, default is 'left'
+ Hemisphere to display.
+
+ bg_map : Surface data object (to be defined), optional,
+ Background image to be plotted on the mesh underneath the
+ stat_map in greyscale, most likely a sulcal depth map for
+ realistic shading.
+
+ view: {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'}, default is 'lateral'
+ View of the surface that is rendered.
+
+ cmap : matplotlib colormap str or colormap object, default 'gist_ncar'
+ To use for plotting of the rois. Either a string which is a name
+ of a matplotlib colormap, or a matplotlib colormap object.
+
+ alpha : float, default is 'auto'
+ Alpha level of the mesh (not the stat_map). If default,
+ alpha will default to .5 when no bg_map is passed
+ and to 1 if a bg_map is passed.
+
+ bg_on_data : bool, default is False
+ If True, and a bg_map is specified, the stat_map data is multiplied
+ by the background image, so that e.g. sulcal depth is visible beneath
+ the stat_map. Beware that this non-uniformly changes the stat_map
+ values according to e.g the sulcal depth.
+
+ darkness : float, between 0 and 1, default is 1
+ Specifying the darkness of the background image. 1 indicates that the
+ original values of the background are used. .5 indicates the background
+ values are reduced by half before being applied.
+
+ title : str, optional
+ Figure title.
+
+ output_file: str, or None, optional
+ The name of an image file to export plot to. Valid extensions
+ are .png, .pdf, .svg. If output_file is not None, the plot
+ is saved to a file, and the display is closed.
+
+ axes: Axes instance | None
+ The axes instance to plot to. The projection must be '3d' (e.g.,
+ `plt.subplots(subplot_kw={'projection': '3d'})`).
+ If None, a new axes is created.
+
+ figure: Figure instance | None
+ The figure to plot to. If None, a new figure is created.
+
+ See Also
+ --------
+ nilearn.datasets.fetch_surf_fsaverage: For surface data object to be
+ used as background map for this plotting function.
+
+ nilearn.plotting.plot_surf: For brain surface visualization.
+ """
+
+ v, _ = load_surf_mesh(surf_mesh)
+
+ # if roi_map is a list of arrays with indices for different rois
+ if isinstance(roi_map, list):
+ roi_list = roi_map[:]
+ roi_map = np.zeros(v.shape[0])
+ idx = 1
+ for arr in roi_list:
+ roi_map[arr] = idx
+ idx += 1
+
+ elif isinstance(roi_map, np.ndarray):
+ # if roi_map is an array with values for all surface nodes
+ roi_data = load_surf_data(roi_map)
+ # or a single array with indices for a single roi
+ if roi_data.shape[0] != v.shape[0]:
+ roi_map = np.zeros(v.shape[0], dtype=int)
+ roi_map[roi_data] = 1
+
+ else:
+ raise ValueError('Invalid input for roi_map. Input can be a file '
+ '(valid formats are .gii, .mgz, .nii, '
+ '.nii.gz, or Freesurfer specific files such as '
+ '.annot or .label), or a Numpy array containing a '
+ 'value for each vertex, or a list of Numpy arrays, '
+ 'one array per ROI which contains indices of all '
+ 'vertices included in that ROI')
+ vmin, vmax = np.min(roi_map), 1 + np.max(roi_map)
+ display = plot_surf(surf_mesh, surf_map=roi_map, bg_map=bg_map,
+ hemi=hemi, view=view, avg_method='median',
+ cmap=cmap, alpha=alpha, bg_on_data=bg_on_data,
+ darkness=darkness, vmin=vmin, vmax=vmax,
+ title=title, output_file=output_file,
+ axes=axes, figure=figure, **kwargs)
+
+ return display
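Finally, the ROI variant with the list-of-arrays input the docstring describes (toy vertex indices; fsaverage assumptions as in the earlier sketches):

import numpy as np
from nilearn import datasets
from nilearn.plotting import plot_surf_roi

fsaverage = datasets.fetch_surf_fsaverage()
# Two toy ROIs given as vertex-index arrays, as the docstring allows
roi_idx_a = np.arange(0, 2000)
roi_idx_b = np.arange(4000, 6000)

plot_surf_roi(fsaverage.infl_left, roi_map=[roi_idx_a, roi_idx_b],
              bg_map=fsaverage.sulc_left, hemi='left', view='medial')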
diff --git a/nilearn/plotting/tests/test_cm.py b/nilearn/plotting/tests/test_cm.py
index 92554bbd06..36e44058eb 100644
--- a/nilearn/plotting/tests/test_cm.py
+++ b/nilearn/plotting/tests/test_cm.py
@@ -19,3 +19,7 @@ def test_replace_inside():
if hasattr(plt.cm, 'gnuplot'):
# gnuplot is only in recent version of MPL
replace_inside(plt.cm.gnuplot, plt.cm.gnuplot2, .2, .8)
+
+
+def test_cm_preload():
+ plt.imshow([list(range(10))], cmap="cold_hot")
diff --git a/nilearn/plotting/tests/test_displays.py b/nilearn/plotting/tests/test_displays.py
index e19fbf0c77..7d376b2da4 100644
--- a/nilearn/plotting/tests/test_displays.py
+++ b/nilearn/plotting/tests/test_displays.py
@@ -2,17 +2,18 @@
# vi: set ft=python sts=4 ts=4 sw=4 et:
import tempfile
-import numpy as np
-
import matplotlib.pyplot as plt
+import nibabel
+import numpy as np
from nilearn.plotting.displays import OrthoSlicer, XSlicer, OrthoProjector
+from nilearn.plotting.displays import LZRYProjector
from nilearn.datasets import load_mni152_template
-
##############################################################################
# Some smoke testing for graphics-related code
+
def test_demo_ortho_slicer():
# This is only a smoke test
oslicer = OrthoSlicer(cut_coords=(0, 0, 0))
@@ -40,3 +41,74 @@ def test_demo_ortho_projector():
with tempfile.TemporaryFile() as fp:
oprojector.savefig(fp)
oprojector.close()
+
+
+def test_contour_fillings_levels_in_add_contours():
+ oslicer = OrthoSlicer(cut_coords=(0, 0, 0))
+ img = load_mni152_template()
+ # levels should be at least 2
+ # If single levels are passed then we force upper level to be inf
+ oslicer.add_contours(img, filled=True, colors='r',
+ alpha=0.2, levels=[0.])
+
+ # If two levels are passed, it should be increasing from zero index
+ # In this case, we simply omit appending inf
+ oslicer.add_contours(img, filled=True, colors='b',
+ alpha=0.1, levels=[0., 0.2])
+
+ # without passing colors and alpha. In this case, default values are
+ # chosen from matplotlib
+ oslicer.add_contours(img, filled=True, levels=[0., 0.2])
+
+ # levels with only one value
+ oslicer.add_contours(img, filled=True, levels=[0.])
+
+ # without passing levels, should work with default levels from
+ # matplotlib
+ oslicer.add_contours(img, filled=True)
+
+
+def test_user_given_cmap_with_colorbar():
+ img = load_mni152_template()
+ oslicer = OrthoSlicer(cut_coords=(0, 0, 0))
+
+ # Test with cmap given as a string
+ oslicer.add_overlay(img, cmap='Paired', colorbar=True)
+ oslicer.close()
+
+
+def test_data_complete_mask():
+ """This special case test is due to matplotlib 2.1.0.
+
+ When the data is completely masked, then we have plotting issues
+ See similar issue #9280 reported in matplotlib. This function
+ tests the patch added for this particular issue.
+ """
+ # data is completely masked
+ data = np.zeros((10, 20, 30))
+ affine = np.eye(4)
+
+ img = nibabel.Nifti1Image(data, affine)
+ oslicer = OrthoSlicer(cut_coords=(0, 0, 0))
+ oslicer.add_overlay(img)
+ oslicer.close()
+
+
+def test_add_markers_cut_coords_is_none():
+ # A special case test for add_markers when cut_coords are None. This
+ # case is used when coords are placed on glass brain
+ orthoslicer = OrthoSlicer(cut_coords=(None, None, None))
+ orthoslicer.add_markers([(0, 0, 2)])
+ orthoslicer.close()
+
+
+def test_add_graph_with_node_color_as_string():
+ lzry_projector = LZRYProjector(cut_coords=(0, 0, 0, 0))
+ matrix = np.array([[0, 3], [3, 0]])
+ node_coords = [[-53.60, -62.80, 36.64], [23.87, 0.31, 69.42]]
+ # node_color as string
+ lzry_projector.add_graph(matrix, node_coords, node_color='red')
+ lzry_projector.close()
+ # node_color as sequence of string
+ lzry_projector.add_graph(matrix, node_coords, node_color=['red', 'blue'])
+ lzry_projector.close()
diff --git a/nilearn/plotting/tests/test_edge_detect.py b/nilearn/plotting/tests/test_edge_detect.py
index 9da180678f..ad9f00e476 100644
--- a/nilearn/plotting/tests/test_edge_detect.py
+++ b/nilearn/plotting/tests/test_edge_detect.py
@@ -1,10 +1,19 @@
import numpy as np
from nilearn.plotting.edge_detect import _edge_detect
-
+from nose.tools import assert_true
def test_edge_detect():
img = np.zeros((10, 10))
img[:5] = 1
_, edge_mask = _edge_detect(img)
np.testing.assert_almost_equal(img[4], 1)
+
+
+def test_edge_nan():
+ img = np.zeros((10, 10))
+ img[:5] = 1
+ img[0] = np.NaN
+ grad_mag, edge_mask = _edge_detect(img)
+ np.testing.assert_almost_equal(img[4], 1)
+ assert_true((grad_mag[0] > 2).all())
diff --git a/nilearn/plotting/tests/test_find_cuts.py b/nilearn/plotting/tests/test_find_cuts.py
index a587fef832..394a4f7d93 100644
--- a/nilearn/plotting/tests/test_find_cuts.py
+++ b/nilearn/plotting/tests/test_find_cuts.py
@@ -1,10 +1,12 @@
import numpy as np
-from nose.tools import assert_equal, assert_true
+from nose.tools import assert_equal, assert_true, assert_not_equal
import nibabel
from nilearn.plotting.find_cuts import (find_xyz_cut_coords, find_cut_slices,
- _transform_cut_coords)
+ _transform_cut_coords,
+ find_parcellation_cut_coords,
+ find_probabilistic_atlas_cut_coords)
from nilearn._utils.testing import assert_raises_regex, assert_warns
-from nilearn.plotting.find_cuts import find_xyz_cut_coords
+from nilearn.masking import compute_epi_mask
def test_find_cut_coords():
@@ -15,7 +17,10 @@ def test_find_cut_coords():
# identity affine
affine = np.eye(4)
img = nibabel.Nifti1Image(data, affine)
- x, y, z = find_xyz_cut_coords(img, mask=np.ones(data.shape, np.bool))
+ mask_img = compute_epi_mask(img)
+ x, y, z = find_xyz_cut_coords(img,
+ mask_img=mask_img)
+
np.testing.assert_allclose((x, y, z),
(x_map, y_map, z_map),
# Need such a high tolerance for the test to
@@ -25,7 +30,8 @@ def test_find_cut_coords():
# non-trivial affine
affine = np.diag([1. / 2, 1 / 3., 1 / 4., 1.])
img = nibabel.Nifti1Image(data, affine)
- x, y, z = find_xyz_cut_coords(img, mask=np.ones(data.shape, np.bool))
+ mask_img = compute_epi_mask(img)
+ x, y, z = find_xyz_cut_coords(img, mask_img=mask_img)
np.testing.assert_allclose((x, y, z),
(x_map / 2., y_map / 3., z_map / 4.),
# Need such a high tolerance for the test to
@@ -42,6 +48,25 @@ def test_find_cut_coords():
np.array([x, y, z]),
0.5 * np.array(data.shape).astype(np.float))
+ # regression test (cf. #922)
+ # pseudo-4D images as input (i.e., X, Y, Z, 1)
+ # previously raised "ValueError: too many values to unpack"
+ rng = np.random.RandomState(42)
+ data_3d = rng.randn(10, 10, 10)
+ data_4d = data_3d[..., np.newaxis]
+ affine = np.eye(4)
+ img_3d = nibabel.Nifti1Image(data_3d, affine)
+ img_4d = nibabel.Nifti1Image(data_4d, affine)
+ assert_equal(find_xyz_cut_coords(img_3d), find_xyz_cut_coords(img_4d))
+
+ # test passing empty image returns coordinates pointing to AC-PC line
+ data = np.zeros((20, 30, 40))
+ affine = np.eye(4)
+ img = nibabel.Nifti1Image(data, affine)
+ cut_coords = find_xyz_cut_coords(img)
+ assert_equal(cut_coords, [0.0, 0.0, 0.0])
+ cut_coords = assert_warns(UserWarning, find_xyz_cut_coords, img)
+
def test_find_cut_slices():
data = np.zeros((50, 50, 50))
@@ -61,9 +86,9 @@ def test_find_cut_slices():
# of the data
for cut in cuts:
if direction == 'x':
- cut_value = data[cut]
+ cut_value = data[int(cut)]
elif direction == 'z':
- cut_value = data[..., cut]
+ cut_value = data[..., int(cut)]
assert_equal(cut_value.max(), 1)
# Now ask more cuts than it is possible to have with a given spacing
@@ -73,6 +98,30 @@ def test_find_cut_slices():
cuts = find_cut_slices(img, direction=direction,
n_cuts=n_cuts, spacing=2)
+ # non-diagonal affines
+ affine = np.array([[-1., 0., 0., 123.46980286],
+ [0., 0., 1., -94.11079407],
+ [0., -1., 0., 160.694],
+ [0., 0., 0., 1.]])
+ img = nibabel.Nifti1Image(data, affine)
+ cuts = find_cut_slices(img, direction='z')
+ assert_not_equal(np.diff(cuts).min(), 0.)
+ affine = np.array([[-2., 0., 0., 123.46980286],
+ [0., 0., 2., -94.11079407],
+ [0., -2., 0., 160.694],
+ [0., 0., 0., 1.]])
+ img = nibabel.Nifti1Image(data, affine)
+ cuts = find_cut_slices(img, direction='z')
+ assert_not_equal(np.diff(cuts).min(), 0.)
+ # Rotate it slightly
+ angle = np.pi / 180 * 15
+ rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)],
+ [np.sin(angle), np.cos(angle)]])
+ affine[:2, :2] = rotation_matrix * 2.0
+ img = nibabel.Nifti1Image(data, affine)
+ cuts = find_cut_slices(img, direction='z')
+ assert_not_equal(np.diff(cuts).min(), 0.)
+
def test_validity_of_ncuts_error_in_find_cut_slices():
data = np.zeros((50, 50, 50))
@@ -130,9 +179,9 @@ def test_tranform_cut_coords():
def test_find_cuts_empty_mask_no_crash():
img = nibabel.Nifti1Image(np.ones((2, 2, 2)), np.eye(4))
- mask = np.zeros((2, 2, 2)).astype(np.bool)
+ mask_img = compute_epi_mask(img)
cut_coords = assert_warns(UserWarning, find_xyz_cut_coords, img,
- mask=mask)
+ mask_img=mask_img)
np.testing.assert_array_equal(cut_coords, [.5, .5, .5])
@@ -141,3 +190,108 @@ def test_fast_abs_percentile_no_index_error_find_cuts():
data = np.array([[[1., 2.], [3., 4.]], [[0., 0.], [0., 0.]]])
img = nibabel.Nifti1Image(data, np.eye(4))
assert_equal(len(find_xyz_cut_coords(img)), 3)
+
+
+def test_find_parcellation_cut_coords():
+ data = np.zeros((100, 100, 100))
+ x_map_a, y_map_a, z_map_a = (10, 10, 10)
+ x_map_b, y_map_b, z_map_b = (30, 30, 30)
+ x_map_c, y_map_c, z_map_c = (50, 50, 50)
+ # Defining 3 parcellations
+ data[x_map_a - 10:x_map_a + 10, y_map_a - 10:y_map_a + 10, z_map_a - 10: z_map_a + 10] = 1
+ data[x_map_b - 10:x_map_b + 10, y_map_b - 10:y_map_b + 10, z_map_b - 10: z_map_b + 10] = 2
+ data[x_map_c - 10:x_map_c + 10, y_map_c - 10:y_map_c + 10, z_map_c - 10: z_map_c + 10] = 3
+
+ # Number of labels
+ labels = np.unique(data)
+ labels = labels[labels != 0]
+ n_labels = len(labels)
+
+ # identity affine
+ affine = np.eye(4)
+ img = nibabel.Nifti1Image(data, affine)
+ # find coordinates with return label names is True
+ coords, labels_list = find_parcellation_cut_coords(img,
+ return_label_names=True)
+ # Check outputs
+ assert_equal((n_labels, 3), coords.shape)
+ # number of labels in data should equal number of labels list returned
+ assert_equal(n_labels, len(labels_list))
+ # Labels numbered should match the numbers in returned labels list
+ assert_equal(list(labels), labels_list)
+
+ # Match with the number of non-overlapping labels
+ np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]),
+ (x_map_a, y_map_a, z_map_a), rtol=6e-2)
+ np.testing.assert_allclose((coords[1][0], coords[1][1], coords[1][2]),
+ (x_map_b, y_map_b, z_map_b), rtol=6e-2)
+ np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]),
+ (x_map_c, y_map_c, z_map_c), rtol=6e-2)
+
+ # non-trivial affine
+ affine = np.diag([1 / 2., 1 / 3., 1 / 4., 1.])
+ img = nibabel.Nifti1Image(data, affine)
+ coords = find_parcellation_cut_coords(img)
+ assert_equal((n_labels, 3), coords.shape)
+ np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]),
+ (x_map_a / 2., y_map_a / 3., z_map_a / 4.),
+ rtol=6e-2)
+ np.testing.assert_allclose((coords[1][0], coords[1][1], coords[1][2]),
+ (x_map_b / 2., y_map_b / 3., z_map_b / 4.),
+ rtol=6e-2)
+ np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]),
+ (x_map_c / 2., y_map_c / 3., z_map_c / 4.),
+ rtol=6e-2)
+ # test raises an error with wrong label_hemisphere name with 'lft'
+ error_msg = ("Invalid label_hemisphere name:lft. Should be one of "
+ "these 'left' or 'right'.")
+ assert_raises_regex(ValueError, error_msg, find_parcellation_cut_coords,
+ labels_img=img, label_hemisphere='lft')
+
+
+def test_find_probabilistic_atlas_cut_coords():
+ # make data
+ arr1 = np.zeros((100, 100, 100))
+ x_map_a, y_map_a, z_map_a = 30, 40, 50
+ arr1[x_map_a - 10:x_map_a + 10, y_map_a - 20:y_map_a + 20, z_map_a - 30: z_map_a + 30] = 1
+
+ arr2 = np.zeros((100, 100, 100))
+ x_map_b, y_map_b, z_map_b = 40, 50, 60
+ arr2[x_map_b - 10:x_map_b + 10, y_map_b - 20:y_map_b + 20, z_map_b - 30: z_map_b + 30] = 1
+
+ # make data with empty in between non-empty maps to make sure that
+ # code does not crash
+ arr3 = np.zeros((100, 100, 100))
+
+ data = np.concatenate((arr1[..., np.newaxis], arr3[..., np.newaxis],
+ arr2[..., np.newaxis]), axis=3)
+
+ # Number of maps in time dimension
+ n_maps = data.shape[-1]
+
+ # run test on img with identity affine
+ affine = np.eye(4)
+ img = nibabel.Nifti1Image(data, affine)
+ coords = find_probabilistic_atlas_cut_coords(img)
+
+ # Check outputs
+ assert_equal((n_maps, 3), coords.shape)
+
+ np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]),
+ (x_map_a, y_map_a, z_map_a), rtol=6e-2)
+ np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]),
+ (x_map_b - 0.5, y_map_b - 0.5, z_map_b - 0.5),
+ rtol=6e-2)
+
+ # non-trivial affine
+ affine = np.diag([1 / 2., 1 / 3., 1 / 4., 1.])
+ img = nibabel.Nifti1Image(data, affine)
+ coords = find_probabilistic_atlas_cut_coords(img)
+ # Check outputs
+ assert_equal((n_maps, 3), coords.shape)
+ np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]),
+ (x_map_a / 2., y_map_a / 3., z_map_a / 4.),
+ rtol=6e-2)
+ np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]),
+ (x_map_b / 2., y_map_b / 3., z_map_b / 4.),
+ rtol=6e-2)
diff --git a/nilearn/plotting/tests/test_html_connectome.py b/nilearn/plotting/tests/test_html_connectome.py
new file mode 100644
index 0000000000..43c47a1d16
--- /dev/null
+++ b/nilearn/plotting/tests/test_html_connectome.py
@@ -0,0 +1,79 @@
+import numpy as np
+
+from nilearn.plotting.js_plotting_utils import decode
+from nilearn.plotting import html_connectome
+
+from .test_js_plotting_utils import check_html
+
+
+def test_prepare_line():
+ e = np.asarray([0, 1, 2, 3], dtype=int)
+ n = np.asarray([[0, 1], [0, 2], [2, 3], [8, 9]], dtype=int)
+ pe, pn = html_connectome._prepare_line(e, n)
+ assert (pn == [0, 1, 0, 0, 2, 0, 2, 3, 0, 8, 9, 0]).all()
+ assert (pe == [0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 3, 0]).all()
+
+
+def _make_connectome():
+ adj = np.diag([1.5, .3, 2.5], 2)
+ adj += adj.T
+ adj += np.eye(5)
+
+ coord = np.arange(5)
+ coord = np.asarray([coord * 10, -coord, coord[::-1]]).T
+ return adj, coord
+
+
+def test_get_connectome():
+ adj, coord = _make_connectome()
+ connectome = html_connectome._get_connectome(adj, coord)
+    con_x = decode(connectome['_con_x'], '<f4')

[... lost in extraction: the rest of test_html_connectome.py and the beginning of the test_img_plotting.py diff; the hunk resumes below ...]

diff --git a/nilearn/plotting/tests/test_img_plotting.py b/nilearn/plotting/tests/test_img_plotting.py
 # axis orientation seem to be flipped, hence (0, 0) -> (-1, 0)
assert_true(plotted_array.mask[-1, 0])
+ # Save execution time and memory
+ plt.close()
+
def test_plot_glass_brain_threshold_for_uint8():
# mask was applied in [-threshold, threshold] which is problematic
@@ -213,6 +239,9 @@ def test_plot_glass_brain_threshold_for_uint8():
# axis orientation seem to be flipped, hence (0, 0) -> (-1, 0)
assert_true(plotted_array.mask[-1, 0])
+ # Save execution time and memory
+ plt.close()
+
def test_save_plot():
img = _generate_img()
@@ -234,6 +263,9 @@ def test_save_plot():
finally:
os.remove(filename)
+ # Save execution time and memory
+ plt.close()
+
def test_display_methods():
img = _generate_img()
@@ -253,6 +285,9 @@ def test_plot_with_axes_or_figure():
ax = plt.subplot(111)
plot_img(img, axes=ax)
+ # Save execution time and memory
+ plt.close()
+
def test_plot_stat_map_colorbar_variations():
# This is only a smoke test
@@ -283,6 +318,9 @@ def test_plot_empty_slice():
img = nibabel.Nifti1Image(data, mni_affine)
plot_img(img, display_mode='y', threshold=1)
+ # Save execution time and memory
+ plt.close()
+
def test_plot_img_invalid():
# Check that we get a meaningful error message when we give a wrong
@@ -299,6 +337,9 @@ def test_plot_img_with_auto_cut_coords():
plot_img(img, cut_coords=None, display_mode=display_mode,
black_bg=True)
+ # Save execution time and memory
+ plt.close()
+
def test_plot_img_with_resampling():
data = _generate_img().get_data()
@@ -313,6 +354,9 @@ def test_plot_img_with_resampling():
colors=['limegreen', 'yellow'])
display.add_edges(img, color='c')
+ # Save execution time and memory
+ plt.close()
+
def test_plot_noncurrent_axes():
"""Regression test for Issue #450"""
@@ -331,6 +375,9 @@ def test_plot_noncurrent_axes():
ax_fh = niax.ax.get_figure()
assert_equal(ax_fh, fh1, 'New axis %s should be in fh1.' % ax_name)
+ # Save execution time and memory
+ plt.close()
+
def test_plot_connectome():
node_color = ['green', 'blue', 'k', 'cyan']
@@ -346,6 +393,7 @@ def test_plot_connectome():
title='threshold=0.38',
node_size=10, node_color=node_color)
plot_connectome(*args, **kwargs)
+ plt.close()
# used to speed-up tests for the next plots
kwargs['display_mode'] = 'x'
@@ -363,6 +411,7 @@ def test_plot_connectome():
os.path.getsize(filename) > 0)
finally:
os.remove(filename)
+ plt.close()
# with node_kwargs, edge_kwargs and edge_cmap arguments
plot_connectome(*args,
@@ -370,21 +419,25 @@ def test_plot_connectome():
node_size=[10, 20, 30, 40],
node_color=np.zeros((4, 3)),
edge_cmap='RdBu',
+ colorbar=True,
node_kwargs={
'marker': 'v'},
edge_kwargs={
'linewidth': 4})
+ plt.close()
# masked array support
masked_adjacency_matrix = np.ma.masked_array(
adjacency_matrix, np.abs(adjacency_matrix) < 0.5)
plot_connectome(masked_adjacency_matrix, node_coords,
**kwargs)
+ plt.close()
# sparse matrix support
sparse_adjacency_matrix = sparse.coo_matrix(adjacency_matrix)
plot_connectome(sparse_adjacency_matrix, node_coords,
**kwargs)
+ plt.close()
# NaN matrix support
nan_adjacency_matrix = np.array([[1., np.nan, 0.],
@@ -392,10 +445,26 @@ def test_plot_connectome():
[np.nan, 2., 1.]])
nan_node_coords = np.arange(3 * 3).reshape(3, 3)
plot_connectome(nan_adjacency_matrix, nan_node_coords, **kwargs)
+ plt.close()
# smoke-test where there is no edge to draw, e.g. when
# edge_threshold is too high
plot_connectome(*args, edge_threshold=1e12)
+ plt.close()
+
+ # with colorbar=True
+ plot_connectome(*args, colorbar=True)
+ plt.close()
+
+ # smoke-test with hemispheric sagittal cuts
+ plot_connectome(*args, display_mode='lzry')
+ plt.close()
+
+ # test node_color as a string with display_mode='lzry'
+ plot_connectome(*args, node_color='red', display_mode='lzry')
+ plt.close()
+ plot_connectome(*args, node_color=['red'], display_mode='lzry')
+ plt.close()
def test_plot_connectome_exceptions():
@@ -482,6 +551,7 @@ def test_singleton_ax_dim():
shape[axis] = 1
img = nibabel.Nifti1Image(np.ones(shape), np.eye(4))
plot_stat_map(img, None, display_mode=direction)
+ plt.close()
def test_plot_prob_atlas():
@@ -492,34 +562,39 @@ def test_plot_prob_atlas():
img = nibabel.Nifti1Image(data_rng, affine)
# Testing the 4D plot prob atlas with contours
plot_prob_atlas(img, view_type='contours')
+ plt.close()
# Testing the 4D plot prob atlas with contours
plot_prob_atlas(img, view_type='filled_contours',
threshold=0.2)
+ plt.close()
# Testing the 4D plot prob atlas with contours
plot_prob_atlas(img, view_type='continuous')
+ plt.close()
+ # Testing the 4D plot prob atlas with colorbar
+ plot_prob_atlas(img, view_type='filled_contours', colorbar=True)
+ plt.close()
+ # threshold=None
+ plot_prob_atlas(img, threshold=None)
+ plt.close()
def test_get_colorbar_and_data_ranges_with_vmin():
- affine = np.eye(4)
data = np.array([[-.5, 1., np.nan],
[0., np.nan, -.2],
[1.5, 2.5, 3.]])
- img = nibabel.Nifti1Image(data, affine)
assert_raises_regex(ValueError,
'does not accept a "vmin" argument',
_get_colorbar_and_data_ranges,
- img, vmax=None,
+ data, vmax=None,
symmetric_cbar=True, kwargs={'vmin': 1.})
def test_get_colorbar_and_data_ranges_pos_neg():
# data with positive and negative range
- affine = np.eye(4)
data = np.array([[-.5, 1., np.nan],
[0., np.nan, -.2],
[1.5, 2.5, 3.]])
- img = nibabel.Nifti1Image(data, affine)
# Reasonable additional arguments that would end up being passed
# to imshow in a real plotting use case
@@ -527,7 +602,7 @@ def test_get_colorbar_and_data_ranges_pos_neg():
# symmetric_cbar set to True
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=None,
+ data, vmax=None,
symmetric_cbar=True,
kwargs=kwargs)
assert_equal(vmin, -np.nanmax(data))
@@ -536,7 +611,7 @@ def test_get_colorbar_and_data_ranges_pos_neg():
assert_equal(cbar_vmax, None)
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=2,
+ data, vmax=2,
symmetric_cbar=True,
kwargs=kwargs)
assert_equal(vmin, -2)
@@ -546,7 +621,7 @@ def test_get_colorbar_and_data_ranges_pos_neg():
# symmetric_cbar is set to False
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=None,
+ data, vmax=None,
symmetric_cbar=False,
kwargs=kwargs)
assert_equal(vmin, -np.nanmax(data))
@@ -555,7 +630,7 @@ def test_get_colorbar_and_data_ranges_pos_neg():
assert_equal(cbar_vmax, np.nanmax(data))
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=2,
+ data, vmax=2,
symmetric_cbar=False,
kwargs=kwargs)
assert_equal(vmin, -2)
@@ -565,7 +640,7 @@ def test_get_colorbar_and_data_ranges_pos_neg():
# symmetric_cbar is set to 'auto', same behaviours as True for this case
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=None,
+ data, vmax=None,
symmetric_cbar='auto',
kwargs=kwargs)
assert_equal(vmin, -np.nanmax(data))
@@ -574,7 +649,7 @@ def test_get_colorbar_and_data_ranges_pos_neg():
assert_equal(cbar_vmax, None)
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=2,
+ data, vmax=2,
symmetric_cbar='auto',
kwargs=kwargs)
assert_equal(vmin, -2)
@@ -585,15 +660,13 @@ def test_get_colorbar_and_data_ranges_pos_neg():
def test_get_colorbar_and_data_ranges_pos():
# data with positive range
- affine = np.eye(4)
data_pos = np.array([[0, 1., np.nan],
[0., np.nan, 0],
[1.5, 2.5, 3.]])
- img_pos = nibabel.Nifti1Image(data_pos, affine)
# symmetric_cbar set to True
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_pos, vmax=None,
+ data_pos, vmax=None,
symmetric_cbar=True,
kwargs={})
assert_equal(vmin, -np.nanmax(data_pos))
@@ -602,7 +675,7 @@ def test_get_colorbar_and_data_ranges_pos():
assert_equal(cbar_vmax, None)
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_pos, vmax=2,
+ data_pos, vmax=2,
symmetric_cbar=True,
kwargs={})
assert_equal(vmin, -2)
@@ -612,7 +685,7 @@ def test_get_colorbar_and_data_ranges_pos():
# symmetric_cbar is set to False
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_pos, vmax=None,
+ data_pos, vmax=None,
symmetric_cbar=False,
kwargs={})
assert_equal(vmin, -np.nanmax(data_pos))
@@ -621,7 +694,7 @@ def test_get_colorbar_and_data_ranges_pos():
assert_equal(cbar_vmax, None)
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_pos, vmax=2,
+ data_pos, vmax=2,
symmetric_cbar=False,
kwargs={})
assert_equal(vmin, -2)
@@ -631,7 +704,7 @@ def test_get_colorbar_and_data_ranges_pos():
# symmetric_cbar is set to 'auto', same behaviour as false in this case
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_pos, vmax=None,
+ data_pos, vmax=None,
symmetric_cbar='auto',
kwargs={})
assert_equal(vmin, -np.nanmax(data_pos))
@@ -640,7 +713,7 @@ def test_get_colorbar_and_data_ranges_pos():
assert_equal(cbar_vmax, None)
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_pos, vmax=2,
+ data_pos, vmax=2,
symmetric_cbar='auto',
kwargs={})
assert_equal(vmin, -2)
@@ -651,15 +724,13 @@ def test_get_colorbar_and_data_ranges_pos():
def test_get_colorbar_and_data_ranges_neg():
# data with negative range
- affine = np.eye(4)
data_neg = np.array([[-.5, 0, np.nan],
[0., np.nan, -.2],
[0, 0, 0]])
- img_neg = nibabel.Nifti1Image(data_neg, affine)
# symmetric_cbar set to True
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_neg, vmax=None,
+ data_neg, vmax=None,
symmetric_cbar=True,
kwargs={})
assert_equal(vmin, np.nanmin(data_neg))
@@ -668,7 +739,7 @@ def test_get_colorbar_and_data_ranges_neg():
assert_equal(cbar_vmax, None)
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_neg, vmax=2,
+ data_neg, vmax=2,
symmetric_cbar=True,
kwargs={})
assert_equal(vmin, -2)
@@ -678,7 +749,7 @@ def test_get_colorbar_and_data_ranges_neg():
# symmetric_cbar is set to False
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_neg, vmax=None,
+ data_neg, vmax=None,
symmetric_cbar=False,
kwargs={})
assert_equal(vmin, np.nanmin(data_neg))
@@ -687,7 +758,7 @@ def test_get_colorbar_and_data_ranges_neg():
assert_equal(cbar_vmax, 0)
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_neg, vmax=2,
+ data_neg, vmax=2,
symmetric_cbar=False,
kwargs={})
assert_equal(vmin, -2)
@@ -697,7 +768,7 @@ def test_get_colorbar_and_data_ranges_neg():
# symmetric_cbar is set to 'auto', same behaviour as False in this case
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_neg, vmax=None,
+ data_neg, vmax=None,
symmetric_cbar='auto',
kwargs={})
assert_equal(vmin, np.nanmin(data_neg))
@@ -706,7 +777,7 @@ def test_get_colorbar_and_data_ranges_neg():
assert_equal(cbar_vmax, 0)
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img_neg, vmax=2,
+ data_neg, vmax=2,
symmetric_cbar='auto',
kwargs={})
assert_equal(vmin, -2)
@@ -717,7 +788,6 @@ def test_get_colorbar_and_data_ranges_neg():
def test_get_colorbar_and_data_ranges_masked_array():
# data with positive and negative range
- affine = np.eye(4)
data = np.array([[-.5, 1., np.nan],
[0., np.nan, -.2],
[1.5, 2.5, 3.]])
@@ -725,15 +795,13 @@ def test_get_colorbar_and_data_ranges_masked_array():
# Easier to fill masked values with NaN to test against later on
filled_data = masked_data.filled(np.nan)
- img = nibabel.Nifti1Image(masked_data, affine)
-
# Reasonable additional arguments that would end up being passed
# to imshow in a real plotting use case
kwargs = {'aspect': 'auto', 'alpha': 0.9}
# symmetric_cbar set to True
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=None,
+ masked_data, vmax=None,
symmetric_cbar=True,
kwargs=kwargs)
assert_equal(vmin, -np.nanmax(filled_data))
@@ -742,7 +810,7 @@ def test_get_colorbar_and_data_ranges_masked_array():
assert_equal(cbar_vmax, None)
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=2,
+ masked_data, vmax=2,
symmetric_cbar=True,
kwargs=kwargs)
assert_equal(vmin, -2)
@@ -752,7 +820,7 @@ def test_get_colorbar_and_data_ranges_masked_array():
# symmetric_cbar is set to False
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=None,
+ masked_data, vmax=None,
symmetric_cbar=False,
kwargs=kwargs)
assert_equal(vmin, -np.nanmax(filled_data))
@@ -761,7 +829,7 @@ def test_get_colorbar_and_data_ranges_masked_array():
assert_equal(cbar_vmax, np.nanmax(filled_data))
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=2,
+ masked_data, vmax=2,
symmetric_cbar=False,
kwargs=kwargs)
assert_equal(vmin, -2)
@@ -771,7 +839,7 @@ def test_get_colorbar_and_data_ranges_masked_array():
# symmetric_cbar is set to 'auto', same behaviours as True for this case
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=None,
+ masked_data, vmax=None,
symmetric_cbar='auto',
kwargs=kwargs)
assert_equal(vmin, -np.nanmax(filled_data))
@@ -780,10 +848,110 @@ def test_get_colorbar_and_data_ranges_masked_array():
assert_equal(cbar_vmax, None)
# same case if vmax has been set
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
- img, vmax=2,
+ masked_data, vmax=2,
symmetric_cbar='auto',
kwargs=kwargs)
assert_equal(vmin, -2)
assert_equal(vmax, 2)
assert_equal(cbar_vmin, None)
assert_equal(cbar_vmax, None)
+
+
+def test_invalid_in_display_mode_cut_coords_all_plots():
+ img = _generate_img()
+
+ for plot_func in [plot_img, plot_anat, plot_roi, plot_epi,
+ plot_stat_map, plot_prob_atlas, plot_glass_brain]:
+ assert_raises_regex(ValueError,
+ "The input given for display_mode='ortho' needs to "
+ "be a list of 3d world coordinates.",
+ plot_func,
+ img, display_mode='ortho', cut_coords=2)
+
+
+def test_outlier_cut_coords():
+ """ Test to plot a subset of a large set of cuts found for a small area."""
+ bg_img = load_mni152_template()
+
+ data = np.zeros((79, 95, 79))
+ affine = np.array([[ -2., 0., 0., 78.],
+ [ 0., 2., 0., -112.],
+ [ 0., 0., 2., -70.],
+ [ 0., 0., 0., 1.]])
+
+ # Color a cube around a corner area:
+ x, y, z = 20, 22, 60
+ x_map, y_map, z_map = coord_transform(x, y, z,
+ np.linalg.inv(affine))
+
+ data[int(x_map) - 1:int(x_map) + 1,
+ int(y_map) - 1:int(y_map) + 1,
+ int(z_map) - 1:int(z_map) + 1] = 1
+ img = nibabel.Nifti1Image(data, affine)
+ cuts = find_cut_slices(img, n_cuts=20, direction='z')
+
+ plot_stat_map(img, display_mode='z', cut_coords=cuts[-4:],
+ bg_img=bg_img)
+
+
+def test_plot_stat_map_with_nans():
+ img = _generate_img()
+ data = img.get_data()
+
+ data[6, 5, 1] = np.nan
+ data[1, 5, 2] = np.nan
+ data[1, 3, 2] = np.nan
+ data[6, 5, 2] = np.inf
+
+ img = nibabel.Nifti1Image(data, mni_affine)
+ plot_epi(img)
+ plot_stat_map(img)
+ plot_glass_brain(img)
+
+
+def test_plotting_functions_with_cmaps():
+ img = load_mni152_template()
+ cmaps = ['Paired', 'Set1', 'Set2', 'Set3']
+ for cmap in cmaps:
+ plot_roi(img, cmap=cmap, colorbar=True)
+ plot_stat_map(img, cmap=cmap, colorbar=True)
+ plot_glass_brain(img, cmap=cmap, colorbar=True)
+
+ if LooseVersion(matplotlib.__version__) >= LooseVersion('2.0.0'):
+ plot_stat_map(img, cmap='viridis', colorbar=True)
+
+ plt.close()
+
+
+def test_plotting_functions_with_nans_in_bg_img():
+ bg_img = _generate_img()
+ bg_data = bg_img.get_data()
+
+ bg_data[6, 5, 1] = np.nan
+ bg_data[1, 5, 2] = np.nan
+ bg_data[1, 3, 2] = np.nan
+ bg_data[6, 5, 2] = np.inf
+
+ bg_img = nibabel.Nifti1Image(bg_data, mni_affine)
+ plot_anat(bg_img)
+ # test with plot_roi passing background image which contains nans values
+ # in it
+ roi_img = _generate_img()
+ plot_roi(roi_img=roi_img, bg_img=bg_img)
+ stat_map_img = _generate_img()
+ plot_stat_map(stat_map_img=stat_map_img, bg_img=bg_img)
+
+ plt.close()
+
+
+def test_plotting_functions_with_dim_invalid_input():
+ # Test that a ValueError is raised for an invalid dim input
+ img = _generate_img()
+ assert_raises(ValueError, plot_stat_map, img, dim='-10')
+
+
+def test_add_markers_using_plot_glass_brain():
+ fig = plot_glass_brain(None)
+ coords = [(-34, -39, -9)]
+ fig.add_markers(coords)
+ fig.close()
diff --git a/nilearn/plotting/tests/test_js_plotting_utils.py b/nilearn/plotting/tests/test_js_plotting_utils.py
new file mode 100644
index 0000000000..41fed4a598
--- /dev/null
+++ b/nilearn/plotting/tests/test_js_plotting_utils.py
@@ -0,0 +1,331 @@
+import os
+import re
+import base64
+import webbrowser
+import time
+import tempfile
+from distutils.version import LooseVersion
+
+import numpy as np
+import matplotlib
+from numpy.testing import assert_warns, assert_no_warnings
+try:
+ from lxml import etree
+ LXML_INSTALLED = True
+except ImportError:
+ LXML_INSTALLED = False
+
+from nilearn.plotting import js_plotting_utils
+from nilearn import surface
+from nilearn.datasets import fetch_surf_fsaverage
+
+
+# Note: html output by nilearn view_* functions
+# should validate as html5 using https://validator.w3.org/nu/ with no
+# warnings
+
+
+def _normalize_ws(text):
+ return re.sub(r'\s+', ' ', text)
+
+
+def test_add_js_lib():
+ html = js_plotting_utils.get_html_template('surface_plot_template.html')
+ cdn = js_plotting_utils.add_js_lib(html, embed_js=False)
+ assert "decodeBase64" in cdn
+ assert _normalize_ws("""
+
+ """) in _normalize_ws(cdn)
+ inline = _normalize_ws(js_plotting_utils.add_js_lib(html, embed_js=True))
+ assert _normalize_ws("""/*! jQuery v3.3.1 | (c) JS Foundation and other
+ contributors | jquery.org/license */""") in inline
+ assert _normalize_ws("""/**
+ * plotly.js (gl3d - minified) v1.38.3
+ * Copyright 2012-2018, Plotly, Inc.
+ * All rights reserved.
+ * Licensed under the MIT license
+ */ """) in inline
+ assert "decodeBase64" in inline
+
+
+def check_colors(colors):
+ assert len(colors) == 100
+ val, cstring = zip(*colors)
+ assert np.allclose(np.linspace(0, 1, 100), val, atol=1e-3)
+ assert val[0] == 0
+ assert val[-1] == 1
+ for cs in cstring:
+ assert re.match(r'rgb\(\d+, \d+, \d+\)', cs)
+ return val, cstring
+
+
+def test_colorscale_no_threshold():
+ cmap = 'jet'
+ values = np.linspace(-13, -1.5, 20)
+ threshold = None
+ colors = js_plotting_utils.colorscale(cmap, values, threshold)
+ check_colors(colors['colors'])
+ assert (colors['vmin'], colors['vmax']) == (-13, 13)
+ assert colors['cmap'].N == 256
+ assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13)
+ assert colors['abs_threshold'] is None
+
+
+def test_colorscale_threshold_0():
+ cmap = 'jet'
+ values = np.linspace(-13, -1.5, 20)
+ threshold = '0%'
+ colors = js_plotting_utils.colorscale(cmap, values, threshold)
+ check_colors(colors['colors'])
+ assert (colors['vmin'], colors['vmax']) == (-13, 13)
+ assert colors['cmap'].N == 256
+ assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13)
+ assert colors['abs_threshold'] == 1.5
+ assert colors['symmetric_cmap']
+
+
+def test_colorscale_threshold_99():
+ cmap = 'jet'
+ values = np.linspace(-13, -1.5, 20)
+ threshold = '99%'
+ colors = js_plotting_utils.colorscale(cmap, values, threshold)
+ check_colors(colors['colors'])
+ assert (colors['vmin'], colors['vmax']) == (-13, 13)
+ assert colors['cmap'].N == 256
+ assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13)
+ assert colors['abs_threshold'] == 13
+ assert colors['symmetric_cmap']
+
+
+def test_colorscale_threshold_50():
+ cmap = 'jet'
+ values = np.linspace(-13, -1.5, 20)
+ threshold = '50%'
+ colors = js_plotting_utils.colorscale(cmap, values, threshold)
+ val, cstring = check_colors(colors['colors'])
+ assert cstring[50] == 'rgb(127, 127, 127)'
+ assert (colors['vmin'], colors['vmax']) == (-13, 13)
+ assert colors['cmap'].N == 256
+ assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13)
+ assert np.allclose(colors['abs_threshold'], 7.55, rtol=2)
+ assert colors['symmetric_cmap']
+
+
+def test_colorscale_absolute_threshold():
+ cmap = 'jet'
+ values = np.linspace(-13, -1.5, 20)
+ threshold = 7.25
+ colors = js_plotting_utils.colorscale(cmap, values, threshold)
+ val, cstring = check_colors(colors['colors'])
+ assert cstring[50] == 'rgb(127, 127, 127)'
+ assert (colors['vmin'], colors['vmax']) == (-13, 13)
+ assert colors['cmap'].N == 256
+ assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13)
+ assert np.allclose(colors['abs_threshold'], 7.25)
+ assert colors['symmetric_cmap']
+
+
+def test_colorscale_asymmetric_cmap():
+ cmap = 'jet'
+ values = np.arange(15)
+ colors = js_plotting_utils.colorscale(cmap, values, symmetric_cmap=False)
+ assert (colors['vmin'], colors['vmax']) == (0, 14)
+ assert colors['cmap'].N == 256
+ assert (colors['norm'].vmax, colors['norm'].vmin) == (14, 0)
+ assert not colors['symmetric_cmap']
+
+
+def test_colorscale_vmax():
+ cmap = 'jet'
+ values = np.arange(15)
+ colors = js_plotting_utils.colorscale(cmap, values, vmax=7)
+ assert (colors['vmin'], colors['vmax']) == (-7, 7)
+ assert colors['cmap'].N == 256
+ assert (colors['norm'].vmax, colors['norm'].vmin) == (7, -7)
+ assert colors['symmetric_cmap']
+
+
+def test_colorscale_asymmetric_cmap_vmax():
+ cmap = 'jet'
+ values = np.arange(15)
+ colors = js_plotting_utils.colorscale(cmap, values, vmax=7,
+ symmetric_cmap=False)
+ assert (colors['vmin'], colors['vmax']) == (0, 7)
+ assert colors['cmap'].N == 256
+ assert (colors['norm'].vmax, colors['norm'].vmin) == (7, 0)
+ assert not colors['symmetric_cmap']
+
+
+def test_colorscale_asymmetric_cmap_negative_values():
+ cmap = 'jet'
+ values = np.linspace(-15, 4)
+ assert_warns(UserWarning, js_plotting_utils.colorscale, cmap,
+ values, symmetric_cmap=False)
+
+ colors = js_plotting_utils.colorscale(cmap, values, vmax=7,
+ symmetric_cmap=False)
+ assert (colors['vmin'], colors['vmax']) == (-7, 7)
+ assert colors['cmap'].N == 256
+ assert (colors['norm'].vmax, colors['norm'].vmin) == (7, -7)
+ assert colors['symmetric_cmap']
+
+
+def test_encode():
+ for dtype in ['f4', '>i4']:
+ a = np.arange(10, dtype=dtype)
+ encoded = js_plotting_utils.encode(a)
+ decoded = base64.b64decode(encoded.encode('utf-8'))
+ b = np.frombuffer(decoded, dtype=dtype)
+ assert np.allclose(js_plotting_utils.decode(encoded, dtype=dtype), b)
+ assert np.allclose(a, b)
+
+
+def test_mesh_to_plotly():
+ fsaverage = fetch_surf_fsaverage()
+ coord, triangles = surface.load_surf_mesh(fsaverage['pial_left'])
+ plotly = js_plotting_utils.mesh_to_plotly(fsaverage['pial_left'])
+ for i, key in enumerate(['_x', '_y', '_z']):
+ assert np.allclose(
+ js_plotting_utils.decode(plotly[key], '<f4'), coord[:, i])
+
+
+def check_html(html, check_selects=True, plot_div_id='surface-plot'):
+ assert '</html>' in str(html)
+ _check_open_in_browser(html)
+ resized = html.resize(3, 17)
+ assert resized is html
+ assert (html.width, html.height) == (3, 17)
+ assert "width=3 height=17" in html.get_iframe()
+ assert "width=33 height=37" in html.get_iframe(33, 37)
+ if not LXML_INSTALLED:
+ return
+ root = etree.HTML(html.html.encode('utf-8'),
+ parser=etree.HTMLParser(huge_tree=True))
+ head = root.find('head')
+ assert len(head.findall('script')) == 5
+ body = root.find('body')
+ div = body.find('div')
+ assert ('id', plot_div_id) in div.items()
+ if not check_selects:
+ return
+ selects = body.findall('select')
+ assert len(selects) == 3
+ hemi = selects[0]
+ assert ('id', 'select-hemisphere') in hemi.items()
+ assert len(hemi.findall('option')) == 2
+ kind = selects[1]
+ assert ('id', 'select-kind') in kind.items()
+ assert len(kind.findall('option')) == 2
+ view = selects[2]
+ assert ('id', 'select-view') in view.items()
+ assert len(view.findall('option')) == 7
+
+
+def _open_mock(f):
+ print('opened {}'.format(f))
+
+
+def _check_open_in_browser(html):
+ wb_open = webbrowser.open
+ webbrowser.open = _open_mock
+ try:
+ html.open_in_browser(temp_file_lifetime=None)
+ temp_file = html._temp_file
+ assert html._temp_file is not None
+ assert os.path.isfile(temp_file)
+ html.remove_temp_file()
+ assert html._temp_file is None
+ assert not os.path.isfile(temp_file)
+ html.remove_temp_file()
+ html._temp_file = 'aaaaaaaaaaaaaaaaaaaaaa'
+ html.remove_temp_file()
+ finally:
+ webbrowser.open = wb_open
+ try:
+ os.remove(temp_file)
+ except Exception:
+ pass
+
+
+def test_temp_file_removing():
+ html = js_plotting_utils.HTMLDocument('hello')
+ wb_open = webbrowser.open
+ webbrowser.open = _open_mock
+ try:
+ html.open_in_browser(temp_file_lifetime=.5)
+ assert os.path.isfile(html._temp_file)
+ time.sleep(1.5)
+ assert not os.path.isfile(html._temp_file)
+ html.open_in_browser(temp_file_lifetime=None)
+ assert os.path.isfile(html._temp_file)
+ time.sleep(1.5)
+ assert os.path.isfile(html._temp_file)
+ finally:
+ webbrowser.open = wb_open
+ try:
+ os.remove(html._temp_file)
+ except Exception:
+ pass
+
+
+def _open_views():
+ return [js_plotting_utils.HTMLDocument('') for i in range(12)]
+
+
+def _open_one_view():
+ for i in range(12):
+ v = js_plotting_utils.HTMLDocument('')
+ return v
+
+
+def test_open_view_warning():
+ # opening many views (without deleting the SurfaceView objects)
+ # should raise a warning about memory usage
+ assert_warns(UserWarning, _open_views)
+ assert_no_warnings(_open_one_view)
+
+
+def test_to_color_strings():
+ colors = [[0, 0, 1], [1, 0, 0], [.5, .5, .5]]
+ as_str = js_plotting_utils.to_color_strings(colors)
+ assert as_str == ['#0000ff', '#ff0000', '#7f7f7f']
+
+ colors = [[0, 0, 1, 1], [1, 0, 0, 1], [.5, .5, .5, 0]]
+ as_str = js_plotting_utils.to_color_strings(colors)
+ assert as_str == ['#0000ff', '#ff0000', '#7f7f7f']
+
+ colors = ['#0000ff', '#ff0000', '#7f7f7f']
+ as_str = js_plotting_utils.to_color_strings(colors)
+ assert as_str == ['#0000ff', '#ff0000', '#7f7f7f']
+
+ colors = [[0, 0, 1, 1], [1, 0, 0, 1], [.5, .5, .5, 0]]
+ as_str = js_plotting_utils.to_color_strings(colors)
+ assert as_str == ['#0000ff', '#ff0000', '#7f7f7f']
+
+ colors = ['r', 'green', 'black', 'white']
+ as_str = js_plotting_utils.to_color_strings(colors)
+ assert as_str == ['#ff0000', '#008000', '#000000', '#ffffff']
+
+ if LooseVersion(matplotlib.__version__) < LooseVersion('2'):
+ return
+
+ colors = ['#0000ffff', '#ff0000ab', '#7f7f7f00']
+ as_str = js_plotting_utils.to_color_strings(colors)
+ assert as_str == ['#0000ff', '#ff0000', '#7f7f7f']
diff --git a/nilearn/plotting/tests/test_matrix_plotting.py b/nilearn/plotting/tests/test_matrix_plotting.py
new file mode 100644
index 0000000000..848089c295
--- /dev/null
+++ b/nilearn/plotting/tests/test_matrix_plotting.py
@@ -0,0 +1,46 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+
+import matplotlib.pyplot as plt
+from nose.tools import assert_true, assert_equal, assert_raises
+from nilearn.plotting.matrix_plotting import plot_matrix
+
+##############################################################################
+# Some smoke testing for graphics-related code
+
+
+def test_matrix_plotting():
+ from numpy import zeros
+ from distutils.version import LooseVersion
+ mat = zeros((10, 10))
+ labels = [str(i) for i in range(10)]
+ ax = plot_matrix(mat, labels=labels, title='foo')
+ plt.close()
+ # test if plotting lower triangle works
+ ax = plot_matrix(mat, labels=labels, tri='lower')
+ # test if it returns an AxesImage
+ ax.axes.set_title('Title')
+ plt.close()
+ import scipy
+ if LooseVersion(scipy.__version__) >= LooseVersion('1.0.0'):
+ # test if a ValueError is raised when reorder=True without labels
+ assert_raises(ValueError, plot_matrix, mat, labels=None, reorder=True)
+ # test if a ValueError is raised when reorder argument is wrong
+ assert_raises(ValueError, plot_matrix, mat, labels=labels, reorder=' ')
+ # test if reordering with default linkage works
+ idx = [2, 3, 5]
+ from itertools import permutations
+ # make symmetric matrix of similarities so we can get a block
+ for perm in permutations(idx, 2):
+ mat[perm] = 1
+ ax = plot_matrix(mat, labels=labels, reorder=True)
+ assert_equal(len(labels), len(ax.axes.get_xticklabels()))
+ reordered_labels = [int(lbl.get_text())
+ for lbl in ax.axes.get_xticklabels()]
+ # block order does not matter
+ assert_true(reordered_labels[:3] == idx or reordered_labels[-3:] == idx,
+ 'Clustering does not find block structure.')
+ plt.close()
+ # test if reordering with specific linkage works
+ ax = plot_matrix(mat, labels=labels, reorder='complete')
+ plt.close()
diff --git a/nilearn/plotting/tests/test_surf_plotting.py b/nilearn/plotting/tests/test_surf_plotting.py
new file mode 100644
index 0000000000..55d10725a3
--- /dev/null
+++ b/nilearn/plotting/tests/test_surf_plotting.py
@@ -0,0 +1,198 @@
+# Tests for functions in surf_plotting.py
+
+import tempfile
+
+from distutils.version import LooseVersion
+from nose import SkipTest
+from nilearn._utils.testing import assert_raises_regex
+
+import numpy as np
+import matplotlib
+import matplotlib.pyplot as plt
+
+from nilearn.plotting.surf_plotting import (plot_surf, plot_surf_stat_map,
+ plot_surf_roi)
+from nilearn.surface.tests.test_surface import _generate_surf
+
+
+def test_plot_surf():
+ mesh = _generate_surf()
+ rng = np.random.RandomState(0)
+ bg = rng.randn(mesh[0].shape[0], )
+
+ # Plot mesh only
+ plot_surf(mesh)
+
+ # Plot mesh with background
+ plot_surf(mesh, bg_map=bg)
+ plot_surf(mesh, bg_map=bg, darkness=0.5)
+ plot_surf(mesh, bg_map=bg, alpha=0.5)
+
+ # Plot different views
+ plot_surf(mesh, bg_map=bg, hemi='right')
+ plot_surf(mesh, bg_map=bg, view='medial')
+ plot_surf(mesh, bg_map=bg, hemi='right', view='medial')
+
+ # Plot with colorbar
+ plot_surf(mesh, bg_map=bg, colorbar=True)
+
+ # Save execution time and memory
+ plt.close()
+
+
+def test_plot_surf_error():
+ mesh = _generate_surf()
+ rng = np.random.RandomState(0)
+
+ # Wrong inputs for view or hemi
+ assert_raises_regex(ValueError, 'view must be one of',
+ plot_surf, mesh, view='middle')
+ assert_raises_regex(ValueError, 'hemi must be one of',
+ plot_surf, mesh, hemi='lft')
+
+ # Wrong size of background image
+ assert_raises_regex(ValueError,
+ 'bg_map does not have the same number of vertices',
+ plot_surf, mesh,
+ bg_map=rng.randn(mesh[0].shape[0] - 1, ))
+
+ # Wrong size of surface data
+ assert_raises_regex(ValueError,
+ 'surf_map does not have the same number of vertices',
+ plot_surf, mesh,
+ surf_map=rng.randn(mesh[0].shape[0] + 1, ))
+
+ assert_raises_regex(ValueError,
+ 'surf_map can only have one dimension', plot_surf,
+ mesh, surf_map=rng.randn(mesh[0].shape[0], 2))
+
+
+def test_plot_surf_stat_map():
+ mesh = _generate_surf()
+ rng = np.random.RandomState(0)
+ bg = rng.randn(mesh[0].shape[0], )
+ data = 10 * rng.randn(mesh[0].shape[0], )
+
+ # Plot mesh with stat map
+ plot_surf_stat_map(mesh, stat_map=data)
+ plot_surf_stat_map(mesh, stat_map=data, colorbar=True)
+ plot_surf_stat_map(mesh, stat_map=data, alpha=1)
+
+ # Plot mesh with background and stat map
+ plot_surf_stat_map(mesh, stat_map=data, bg_map=bg)
+ plot_surf_stat_map(mesh, stat_map=data, bg_map=bg,
+ bg_on_data=True, darkness=0.5)
+ plot_surf_stat_map(mesh, stat_map=data, bg_map=bg, colorbar=True,
+ bg_on_data=True, darkness=0.5)
+
+ # Apply threshold
+ plot_surf_stat_map(mesh, stat_map=data, bg_map=bg,
+ bg_on_data=True, darkness=0.5,
+ threshold=0.3)
+ plot_surf_stat_map(mesh, stat_map=data, bg_map=bg, colorbar=True,
+ bg_on_data=True, darkness=0.5,
+ threshold=0.3)
+
+ # Change vmax
+ plot_surf_stat_map(mesh, stat_map=data, vmax=5)
+ plot_surf_stat_map(mesh, stat_map=data, vmax=5, colorbar=True)
+
+ # Change colormap
+ plot_surf_stat_map(mesh, stat_map=data, cmap='cubehelix')
+ plot_surf_stat_map(mesh, stat_map=data, cmap='cubehelix', colorbar=True)
+
+ # Plot to axes
+ axes = plt.subplots(ncols=2, subplot_kw={'projection': '3d'})[1]
+ for ax in axes.flatten():
+ plot_surf_stat_map(mesh, stat_map=data, ax=ax)
+ axes = plt.subplots(ncols=2, subplot_kw={'projection': '3d'})[1]
+ for ax in axes.flatten():
+ plot_surf_stat_map(mesh, stat_map=data, ax=ax, colorbar=True)
+
+ fig = plot_surf_stat_map(mesh, stat_map=data, colorbar=False)
+ assert len(fig.axes) == 1
+ # symmetric_cbar
+ fig = plot_surf_stat_map(
+ mesh, stat_map=data, colorbar=True, symmetric_cbar=True)
+ assert len(fig.axes) == 2
+ yticklabels = fig.axes[1].get_yticklabels()
+ first, last = yticklabels[0].get_text(), yticklabels[-1].get_text()
+ assert float(first) == - float(last)
+ # no symmetric_cbar
+ fig = plot_surf_stat_map(
+ mesh, stat_map=data, colorbar=True, symmetric_cbar=False)
+ assert len(fig.axes) == 2
+ yticklabels = fig.axes[1].get_yticklabels()
+ first, last = yticklabels[0].get_text(), yticklabels[-1].get_text()
+ assert float(first) != - float(last)
+ # Save execution time and memory
+ plt.close()
+
+
+def test_plot_surf_stat_map_error():
+ mesh = _generate_surf()
+ rng = np.random.RandomState(0)
+ data = 10 * rng.randn(mesh[0].shape[0], )
+
+ # Try to input vmin
+ assert_raises_regex(ValueError,
+ 'this function does not accept a "vmin" argument',
+ plot_surf_stat_map, mesh, stat_map=data, vmin=0)
+
+ # Wrong size of stat map data
+ assert_raises_regex(ValueError,
+ 'surf_map does not have the same number of vertices',
+ plot_surf_stat_map, mesh,
+ stat_map=np.hstack((data, data)))
+
+ assert_raises_regex(ValueError,
+ 'surf_map can only have one dimension',
+ plot_surf_stat_map, mesh,
+ stat_map=np.vstack((data, data)).T)
+
+
+def test_plot_surf_roi():
+ mesh = _generate_surf()
+ rng = np.random.RandomState(0)
+ roi1 = rng.randint(0, mesh[0].shape[0], size=5)
+ roi2 = rng.randint(0, mesh[0].shape[0], size=10)
+ parcellation = rng.rand(mesh[0].shape[0])
+
+ # plot roi
+ plot_surf_roi(mesh, roi_map=roi1)
+ plot_surf_roi(mesh, roi_map=roi1, colorbar=True)
+
+ # plot parcellation
+ plot_surf_roi(mesh, roi_map=parcellation)
+ plot_surf_roi(mesh, roi_map=parcellation, colorbar=True)
+
+ # plot roi list
+ plot_surf_roi(mesh, roi_map=[roi1, roi2])
+ plot_surf_roi(mesh, roi_map=[roi1, roi2], colorbar=True)
+
+ # plot to axes
+ plot_surf_roi(mesh, roi_map=roi1, ax=None, figure=plt.gcf())
+
+ # plot to axes with an output file
+ with tempfile.NamedTemporaryFile() as tmp_file:
+ plot_surf_roi(mesh, roi_map=roi1, ax=plt.gca(), figure=None,
+ output_file=tmp_file.name)
+ with tempfile.NamedTemporaryFile() as tmp_file:
+ plot_surf_roi(mesh, roi_map=roi1, ax=plt.gca(), figure=None,
+ output_file=tmp_file.name, colorbar=True)
+
+ # Save execution time and memory
+ plt.close()
+
+
+def test_plot_surf_roi_error():
+ mesh = _generate_surf()
+ rng = np.random.RandomState(0)
+ roi1 = rng.randint(0, mesh[0].shape[0], size=5)
+ roi2 = rng.randint(0, mesh[0].shape[0], size=10)
+
+ # Wrong input
+ assert_raises_regex(ValueError,
+ 'Invalid input for roi_map',
+ plot_surf_roi, mesh,
+ roi_map={'roi1': roi1, 'roi2': roi2})
diff --git a/nilearn/regions/__init__.py b/nilearn/regions/__init__.py
index dafd963de7..520d825dc1 100644
--- a/nilearn/regions/__init__.py
+++ b/nilearn/regions/__init__.py
@@ -2,14 +2,17 @@
The :mod:`nilearn.regions` class module includes region extraction
procedure on a 4D statistical/atlas maps and its function.
"""
-from .region_extractor import connected_regions, RegionExtractor
+from .region_extractor import (connected_regions, RegionExtractor,
+ connected_label_regions)
from .signal_extraction import (
img_to_signals_labels, signals_to_img_labels,
img_to_signals_maps, signals_to_img_maps,
)
+from .parcellations import Parcellations
__all__ = [
'connected_regions', 'RegionExtractor',
+ 'connected_label_regions',
'img_to_signals_labels', 'signals_to_img_labels',
'img_to_signals_maps', 'signals_to_img_maps',
-]
+ 'Parcellations']
diff --git a/nilearn/regions/parcellations.py b/nilearn/regions/parcellations.py
new file mode 100644
index 0000000000..5f208e8916
--- /dev/null
+++ b/nilearn/regions/parcellations.py
@@ -0,0 +1,430 @@
+"""Parcellation tools such as KMeans or Ward for fMRI images
+"""
+
+import numpy as np
+
+from sklearn.base import clone
+from sklearn.feature_extraction import image
+from sklearn.externals.joblib import Memory, delayed, Parallel
+
+from ..decomposition.multi_pca import MultiPCA
+from ..input_data import NiftiLabelsMasker
+from .._utils.compat import _basestring
+from .._utils.niimg import _safe_get_data
+from .._utils.niimg_conversions import _iter_check_niimg
+
+
+def _estimator_fit(data, estimator):
+ """ Estimator to fit on the data matrix
+
+ Parameters
+ ----------
+ data : numpy array
+ Data matrix
+
+ estimator : instance of estimator from sklearn
+ MiniBatchKMeans or AgglomerativeClustering
+
+ Returns
+ -------
+ labels_ : numpy.ndarray
+ Cluster labels estimated by the fitted estimator.
+ """
+ estimator = clone(estimator)
+ estimator.fit(data.T)
+
+ return estimator.labels_
+
+
+def _check_parameters_transform(imgs, confounds):
+ """A helper function to check the parameters and prepare for processing
+ as a list.
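+
+ For instance (illustrative): a single img is wrapped as [img] with
+ single_subject=True, and confounds default to [None] * len(imgs):
+
+ >>> imgs, confs, single = _check_parameters_transform(img, None) # doctest: +SKIP
+ >>> confs, single # doctest: +SKIP
+ ([None], True)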
+ """
+ if not isinstance(imgs, (list, tuple)) or \
+ isinstance(imgs, _basestring):
+ imgs = [imgs, ]
+ single_subject = True
+ elif isinstance(imgs, (list, tuple)) and len(imgs) == 1:
+ single_subject = True
+ else:
+ single_subject = False
+
+ if confounds is None and isinstance(imgs, (list, tuple)):
+ confounds = [None] * len(imgs)
+
+ if confounds is not None:
+ if not isinstance(confounds, (list, tuple)) or \
+ isinstance(confounds, _basestring):
+ confounds = [confounds, ]
+
+ if len(confounds) != len(imgs):
+ raise ValueError("Number of confounds given does not match with "
+ "the given number of images.")
+ return imgs, confounds, single_subject
+
+
+def _labels_masker_extraction(img, masker, confound):
+ """ Helper function for parallelizing NiftiLabelsMasker extractor
+ on list of Nifti images.
+
+ Parameters
+ ----------
+ img : 4D Nifti image like object
+ Image to process.
+
+ masker : instance of NiftiLabelsMasker
+ Used for extracting signals with fit_transform
+
+ confound : csv file or numpy array
+ Confound used for signal cleaning while extraction.
+ Passed to signal.clean
+
+ Returns
+ -------
+ signals : numpy array
+ Signals extracted on given img
+ """
+ masker = clone(masker)
+ signals = masker.fit_transform(img, confounds=confound)
+ return signals
+
+
+class Parcellations(MultiPCA):
+ """Learn parcellations on fMRI images.
+
+ Four types of clustering methods can be used: kmeans, ward, complete
+ and average. 'kmeans' calls MiniBatchKMeans, whereas 'ward', 'complete'
+ and 'average' are linkage options used within AgglomerativeClustering.
+ All methods are leveraged from scikit-learn.
+
+ .. versionadded:: 0.4.1
+
+ Parameters
+ ----------
+ method : str, {'kmeans', 'ward', 'complete', 'average'}
+ The clustering method to use for the brain parcellation.
+
+ n_parcels : int, default=50
+ Number of parcels to divide the brain data into.
+
+ random_state : int or RandomState
+ Pseudo number generator state used for random sampling.
+
+ mask : Niimg-like object or NiftiMasker, MultiNiftiMasker instance
+ Mask/Masker used for masking the data.
+ If a mask image is provided, it will be used in the MultiNiftiMasker.
+ If an instance of MultiNiftiMasker is provided, its parameters
+ will be used in masking the data, overriding the default
+ masker parameters.
+ If None, mask will be automatically computed by a MultiNiftiMasker
+ with default parameters.
+
+ smoothing_fwhm : float, optional default=4.
+ If smoothing_fwhm is not None, it gives the full-width half maximum in
+ millimeters of the spatial smoothing to apply to the signal.
+
+ standardize : boolean, optional
+ If standardize is True, the time-series are centered and normed:
+ their mean is put to 0 and their variance to 1 in the time dimension.
+
+ detrend : boolean, optional
+ Whether to detrend signals or not.
+ This parameter is passed to signal.clean. Please see the related
+ documentation for details
+
+ low_pass: None or float, optional
+ This parameter is passed to signal.clean. Please see the related
+ documentation for details
+
+ high_pass: None or float, optional
+ This parameter is passed to signal.clean. Please see the related
+ documentation for details
+
+ t_r : float, optional
+ This parameter is passed to signal.clean. Please see the related
+ documentation for details
+
+ target_affine : 3x3 or 4x4 matrix, optional
+ This parameter is passed to image.resample_img. Please see the
+ related documentation for details. The given affine is applied
+ to every image in the given list.
+
+ target_shape : 3-tuple of integers, optional
+ This parameter is passed to image.resample_img. Please see the
+ related documentation for details.
+
+ memory : instance of joblib.Memory or str
+ Used to cache the masking process.
+ By default, no caching is done. If a string is given, it is the
+ path to the caching directory.
+
+ memory_level : integer, optional
+ Rough estimator of the amount of memory used by caching. Higher value
+ means more memory for caching.
+
+ n_jobs : integer, optional
+ The number of CPUs to use to do the computation. -1 means
+ 'all CPUs', -2 'all CPUs but one', and so on.
+
+ verbose : integer, optional
+ Indicate the level of verbosity. By default, nothing is printed.
+
+ Attributes
+ ----------
+ labels_img_ : Nifti1Image
+ Labels image to each parcellation learned on fmri images.
+
+ masker_ : instance of NiftiMasker or MultiNiftiMasker
+ The masker used to mask the data
+
+ connectivity_ : numpy.ndarray
+ voxel-to-voxel connectivity matrix computed from a mask.
+ Note that this attribute is only present when the selected method
+ is of agglomerative type: 'ward', 'complete' or 'average'.
+
+ Notes
+ -----
+ * Transforming a list of Nifti images to a data matrix takes a few
+ steps: the data dimensionality is first reduced using randomized SVD,
+ then brain parcellations are built using KMeans or one of the
+ agglomerative methods.
+
+ * For method='ward', 'complete' or 'average', this object uses a
+ spatially-constrained AgglomerativeClustering: the (voxel-to-voxel)
+ spatial connectivity matrix is built internally, so there is no need
+ to pass it explicitly.
+
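+ Examples
+ --------
+ A minimal usage sketch; ``func_imgs`` stands for any list of 4D
+ functional images and is not part of this changeset::
+
+ >>> from nilearn.regions import Parcellations # doctest: +SKIP
+ >>> ward = Parcellations(method='ward', n_parcels=100) # doctest: +SKIP
+ >>> ward.fit(func_imgs) # doctest: +SKIP
+ >>> signals = ward.transform(func_imgs) # doctest: +SKIP
+ >>> ward.labels_img_.to_filename('ward_parcellation.nii.gz') # doctest: +SKIP
+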
+ """
+ VALID_METHODS = ['kmeans', 'ward', 'complete', 'average']
+
+ def __init__(self, method, n_parcels=50,
+ random_state=0, mask=None, smoothing_fwhm=4.,
+ standardize=False, detrend=False,
+ low_pass=None, high_pass=None, t_r=None,
+ target_affine=None, target_shape=None,
+ mask_strategy='epi', mask_args=None,
+ memory=Memory(cachedir=None),
+ memory_level=0, n_jobs=1, verbose=1):
+ self.method = method
+ self.n_parcels = n_parcels
+
+ MultiPCA.__init__(self, n_components=200,
+ random_state=random_state,
+ mask=mask, memory=memory,
+ smoothing_fwhm=smoothing_fwhm,
+ standardize=standardize, detrend=detrend,
+ low_pass=low_pass, high_pass=high_pass,
+ t_r=t_r, target_affine=target_affine,
+ target_shape=target_shape,
+ mask_strategy=mask_strategy,
+ mask_args=mask_args,
+ memory_level=memory_level,
+ n_jobs=n_jobs,
+ verbose=verbose)
+
+ def _raw_fit(self, data):
+ """ Fits the parcellation method on this reduced data.
+
+ The data come from a base decomposition estimator, which computes
+ the mask and reduces the dimensionality of images using
+ randomized_svd.
+
+ Parameters
+ ----------
+ data : ndarray
+ Shape (n_samples, n_features)
+
+ Returns
+ -------
+ self : Parcellations
+ The fitted object. The parcellation is stored in `labels_img_`;
+ for the agglomerative methods ('ward', 'complete', 'average'),
+ the voxel-to-voxel connectivity matrix computed from the mask is
+ also stored, in `connectivity_`.
+ """
+ valid_methods = self.VALID_METHODS
+ if self.method is None:
+ raise ValueError("Parcellation method is specified as None. "
+ "Please select one of the method in "
+ "{0}".format(valid_methods))
+ if self.method is not None and self.method not in valid_methods:
+ raise ValueError("The method you have selected is not implemented "
+ "'{0}'. Valid methods are in {1}"
+ .format(self.method, valid_methods))
+
+ # We delay importing the clustering estimators and import our plotting
+ # module before them: sklearn.cluster imports scipy's hierarchy module,
+ # which in turn imports matplotlib. Importing nilearn.plotting first
+ # forces the matplotlib backend to be set, avoiding backend display
+ # errors on Travis.
+ try:
+ from nilearn import plotting
+ except Exception:
+ pass
+
+ components = MultiPCA._raw_fit(self, data)
+
+ mask_img_ = self.masker_.mask_img_
+ if self.verbose:
+ print("[{0}] computing {1}".format(self.__class__.__name__,
+ self.method))
+
+ if self.method == 'kmeans':
+ from sklearn.cluster import MiniBatchKMeans
+ kmeans = MiniBatchKMeans(n_clusters=self.n_parcels,
+ init='k-means++',
+ random_state=self.random_state,
+ verbose=self.verbose)
+ labels = self._cache(_estimator_fit,
+ func_memory_level=1)(components.T, kmeans)
+ else:
+ mask_ = _safe_get_data(mask_img_).astype(np.bool)
+ shape = mask_.shape
+ connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],
+ n_z=shape[2], mask=mask_)
+
+ from sklearn.cluster import AgglomerativeClustering
+
+ agglomerative = AgglomerativeClustering(
+ n_clusters=self.n_parcels, connectivity=connectivity,
+ linkage=self.method, memory=self.memory)
+
+ labels = self._cache(_estimator_fit,
+ func_memory_level=1)(components.T,
+ agglomerative)
+
+ self.connectivity_ = connectivity
+ # Avoid 0 label
+ labels = labels + 1
+ self.labels_img_ = self.masker_.inverse_transform(labels)
+
+ return self
+
+ def _check_fitted(self):
+ """Helper function to check whether fit is called or not.
+ """
+ if not hasattr(self, 'labels_img_'):
+ raise ValueError("Object has no labels_img_ attribute. "
+ "Ensure that fit() is called before transform.")
+
+ def transform(self, imgs, confounds=None):
+ """Extract signals from parcellations learned on fmri images.
+
+ Parameters
+ ----------
+ imgs : List of Nifti-like images
+ See http://nilearn.github.io/manipulating_images/input_output.html.
+ Images to process.
+
+ confounds : list of CSV files or array-like, optional
+ Each file or numpy array in the list should have shape
+ (number of scans, number of confounds).
+ This parameter is passed to signal.clean. Please see the related
+ documentation for details. Must have the same length as imgs.
+
+ Returns
+ -------
+ region_signals : 2D numpy.ndarray or list of 2D numpy.ndarray
+ Signals extracted for each label, for each image. For a single
+ image, the shape is (number of scans, number of labels).
+ """
+ self._check_fitted()
+ imgs, confounds, single_subject = _check_parameters_transform(
+ imgs, confounds)
+ # Required for special cases, such as extracting signals from a
+ # list of 3D images
+ imgs_list = _iter_check_niimg(imgs, atleast_4d=True)
+
+ masker = NiftiLabelsMasker(self.labels_img_,
+ mask_img=self.masker_.mask_img_,
+ smoothing_fwhm=self.smoothing_fwhm,
+ standardize=self.standardize,
+ detrend=self.detrend,
+ low_pass=self.low_pass,
+ high_pass=self.high_pass, t_r=self.t_r,
+ resampling_target='data',
+ memory=self.memory,
+ memory_level=self.memory_level,
+ verbose=self.verbose)
+
+ region_signals = Parallel(n_jobs=self.n_jobs)(
+ delayed(self._cache(_labels_masker_extraction,
+ func_memory_level=2))
+ (img, masker, confound)
+ for img, confound in zip(imgs_list, confounds))
+
+ if single_subject:
+ return region_signals[0]
+ else:
+ return region_signals
+
+ def fit_transform(self, imgs, confounds=None):
+ """Fit the images to parcellations and then transform them.
+
+ Parameters
+ ----------
+ imgs : List of Nifti-like images
+ See http://nilearn.github.io/manipulating_images/input_output.html.
+ Images used both to fit the parcellation and to transform to signals.
+
+ confounds : List of CSV files or arrays-like, optional
+ Each file or numpy array in a list should have shape
+ (number of scans, number of confounds).
+ This parameter is passed to signal.clean. If given as a list, the
+ confounds should have the same length as the images.
+
+ Note: the same confounds are used for cleaning signals both before
+ learning the parcellation and when transforming them.
+
+ Returns
+ -------
+ region_signals : 2D numpy.ndarray or list of 2D numpy.ndarray
+ Signals extracted for each label, for each image. For a single
+ image, the shape is (number of scans, number of labels).
+ """
+ return self.fit(imgs, confounds=confounds).transform(imgs,
+ confounds=confounds)
+
+ def inverse_transform(self, signals):
+ """Transform signals extracted from parcellations back to brain
+ images.
+
+ Uses `labels_img_` (parcellations) built at fit() level.
+
+ Parameters
+ ----------
+ signals : List of 2D numpy.ndarray
+ Each 2D array with shape (number of scans, number of regions)
+
+ Returns
+ -------
+ imgs : Nifti-like image or list of Nifti-like images
+ Reconstructed brain image(s).
+ """
+ from .signal_extraction import signals_to_img_labels
+
+ self._check_fitted()
+
+ if not isinstance(signals, (list, tuple)) or\
+ isinstance(signals, np.ndarray):
+ signals = [signals, ]
+ single_subject = True
+ elif isinstance(signals, (list, tuple)) and len(signals) == 1:
+ single_subject = True
+ else:
+ single_subject = False
+
+ imgs = Parallel(n_jobs=self.n_jobs)(
+ delayed(self._cache(signals_to_img_labels, func_memory_level=2))
+ (each_signal, self.labels_img_, self.mask_img_)
+ for each_signal in signals)
+
+ if single_subject:
+ return imgs[0]
+ else:
+ return imgs
diff --git a/nilearn/regions/region_extractor.py b/nilearn/regions/region_extractor.py
index bc6ea73e09..c84bfc9c22 100644
--- a/nilearn/regions/region_extractor.py
+++ b/nilearn/regions/region_extractor.py
@@ -3,19 +3,21 @@
"""
import numbers
+import collections
import numpy as np
-from scipy.ndimage import label
+from scipy import ndimage
from scipy.stats import scoreatpercentile
from sklearn.externals.joblib import Memory
from .. import masking
from ..input_data import NiftiMapsMasker
-from .._utils import check_niimg, check_niimg_4d
+from .._utils import check_niimg, check_niimg_3d, check_niimg_4d
from ..image import new_img_like, resample_img
from ..image.image import _smooth_array, threshold_img
from .._utils.niimg_conversions import concat_niimgs, _check_same_fov
+from .._utils.niimg import _safe_get_data
from .._utils.compat import _basestring
from .._utils.ndimage import _peak_local_max
from .._utils.segmentation import _random_walker
@@ -53,7 +55,8 @@ def _threshold_maps_ratio(maps_img, threshold):
else:
ratio = threshold
- maps_data = maps.get_data()
+ maps_data = _safe_get_data(maps, ensure_finite=True)
+
abs_maps = np.abs(maps_data)
# thresholding
cutoff_threshold = scoreatpercentile(
@@ -65,6 +68,61 @@ def _threshold_maps_ratio(maps_img, threshold):
return threshold_maps_img
+def _remove_small_regions(input_data, index, affine, min_size):
+ """Remove small regions in volume from input_data of specified min_size.
+
+ min_size should be specified in mm^3 (region size in volume).
+
+ Parameters
+ ----------
+ input_data : numpy.ndarray
+ An array of labelled regions, as returned for example by
+ scipy.ndimage.label. The voxels within each labelled region are
+ counted to get the region size, which is compared with min_size.
+
+ index : numpy.ndarray
+ A sequence of label numbers of the regions to be measured corresponding
+ to input_data. For example, sequence can be generated using
+ np.arange(n_labels + 1)
+
+ affine : numpy.ndarray
+ Affine of input_data is used to convert size in voxels to size in
+ volume of region in mm^3.
+
+ min_size : float in mm^3
+ Regions in input_data whose volume falls below this specified
+ min_size will be discarded.
+
+ Returns
+ -------
+ out : numpy.ndarray
+ The input data with regions smaller than min_size removed and the
+ remaining labels renumbered without gaps. If no region falls below
+ min_size, the same input data is returned.
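+
+ For example (illustrative numbers): with a 2 mm isotropic affine,
+ abs(det(affine[:3, :3])) is 8 mm^3 per voxel, so min_size=100 mm^3
+ gives size_in_vox = 100 / 8 = 12.5, and only regions larger than
+ 12.5 voxels (i.e. 13 voxels or more) are kept.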
+ """
+ # The return_counts argument of np.unique was introduced in numpy 1.9.0:
+ # _, region_sizes = np.unique(input_data, return_counts=True)
+
+ # For now, to count the region sizes, we use return_inverse from
+ # np.unique and then use np.bincount to count the region sizes.
+
+ _, region_indices = np.unique(input_data, return_inverse=True)
+ region_sizes = np.bincount(region_indices)
+ size_in_vox = min_size / np.abs(np.linalg.det(affine[:3, :3]))
+ labels_kept = region_sizes > size_in_vox
+ if not np.all(labels_kept):
+ # Put to zero the indices not kept
+ rejected_labels_mask = np.in1d(input_data,
+ np.where(np.logical_not(labels_kept))[0]
+ ).reshape(input_data.shape)
+ # Avoid modifying the input:
+ input_data = input_data.copy()
+ input_data[rejected_labels_mask] = 0
+ # Reorder the indices to avoid gaps
+ input_data = np.searchsorted(np.unique(input_data), input_data)
+ return input_data
+
+
def connected_regions(maps_img, min_region_size=1350,
extract_type='local_regions', smoothing_fwhm=6,
mask_img=None):
@@ -112,13 +170,22 @@ def connected_regions(maps_img, min_region_size=1350,
index_of_each_map: numpy array
an array of list of indices where each index denotes the identity
of each extracted region to their family of brain maps.
+
+ See Also
+ --------
+ nilearn.regions.connected_label_regions : A function that can be used to
+ extract regions from label-based atlas images.
+
+ nilearn.regions.RegionExtractor : A class that can be used both for
+ region extraction from continuous-type atlas images and for
+ extracting time series signals from the extracted regions.
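+
+ Examples
+ --------
+ A minimal sketch; ``maps_img`` stands for any 4D image of brain maps
+ and is assumed here only for illustration::
+
+ >>> from nilearn.regions import connected_regions # doctest: +SKIP
+ >>> regions_img, index = connected_regions(maps_img,
+ ... min_region_size=1350) # doctest: +SKIP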
"""
all_regions_imgs = []
index_of_each_map = []
maps_img = check_niimg(maps_img, atleast_4d=True)
- maps = maps_img.get_data()
- affine = maps_img.get_affine()
- min_region_size = min_region_size / np.prod(np.diag(abs(affine[:3])))
+ maps = _safe_get_data(maps_img).copy()
+ affine = maps_img.affine
+ min_region_size = min_region_size / np.abs(np.linalg.det(affine[:3, :3]))
allowed_extract_types = ['connected_components', 'local_regions']
if extract_type not in allowed_extract_types:
@@ -129,7 +196,7 @@ def connected_regions(maps_img, min_region_size=1350,
if mask_img is not None:
if not _check_same_fov(maps_img, mask_img):
mask_img = resample_img(mask_img,
- target_affine=maps_img.get_affine(),
+ target_affine=maps_img.affine,
target_shape=maps_img.shape[:3],
interpolation="nearest")
mask_data, _ = masking._load_mask_img(mask_img)
@@ -143,7 +210,7 @@ def connected_regions(maps_img, min_region_size=1350,
if extract_type == 'local_regions':
smooth_map = _smooth_array(map_3d, affine=affine, fwhm=smoothing_fwhm)
seeds = _peak_local_max(smooth_map)
- seeds_label, seeds_id = label(seeds)
+ seeds_label, seeds_id = ndimage.label(seeds)
# Assign -1 to values which are 0. to indicate to ignore
seeds_label[map_3d == 0.] = -1
rw_maps = _random_walker(map_3d, seeds_label)
@@ -152,7 +219,7 @@ def connected_regions(maps_img, min_region_size=1350,
label_maps = rw_maps
else:
# Connected component extraction
- label_maps, n_labels = label(map_3d)
+ label_maps, n_labels = ndimage.label(map_3d)
# Takes the size of each labelized region data
labels_size = np.bincount(label_maps.ravel())
@@ -193,7 +260,7 @@ class RegionExtractor(NiftiMapsMasker):
Mask to be applied to input data, passed to NiftiMapsMasker.
If None, no masking is applied.
- min_region_size: int, default 1350 mm^3, optional
+ min_region_size: float, default 1350 mm^3, optional
Minimum volume in mm3 for a region to be kept. For example, if
the voxel size is 3x3x3 mm then the volume of the voxel is
27mm^3. By default, it is 1350mm^3 which means we take minimum
@@ -227,6 +294,12 @@ class RegionExtractor(NiftiMapsMasker):
random walker segementation algorithm on these markers for region
separation.
+ smoothing_fwhm: scalar, default 6mm, optional
+ Smoothing applied to the maps in order to extract sparser regions.
+ This parameter is passed to `connected_regions` and is only used
+ when extractor='local_regions'. Please set this parameter according
+ to the maps resolution, otherwise extraction may fail.
+
standardize: bool, True or False, default False, optional
If True, the time series signals are centered and normalized by
putting their mean to 0 and variance to 1. Recommended to
@@ -283,14 +356,21 @@ class RegionExtractor(NiftiMapsMasker):
better brain parcellations from rest fMRI", Sparsity Techniques in
Medical Imaging, Sep 2014, Boston, United States. pp.8
+ See Also
+ --------
+ nilearn.regions.connected_label_regions : A function that can be readily
+ used to extract regions from label-based atlas images.
+
"""
def __init__(self, maps_img, mask_img=None, min_region_size=1350,
threshold=1., thresholding_strategy='ratio_n_voxels',
- extractor='local_regions', standardize=False, detrend=False,
+ extractor='local_regions', smoothing_fwhm=6,
+ standardize=False, detrend=False,
low_pass=None, high_pass=None, t_r=None,
memory=Memory(cachedir=None), memory_level=0, verbose=0):
super(RegionExtractor, self).__init__(
maps_img=maps_img, mask_img=mask_img,
+ smoothing_fwhm=smoothing_fwhm,
standardize=standardize, detrend=detrend, low_pass=low_pass,
high_pass=high_pass, t_r=t_r, memory=memory,
memory_level=memory_level, verbose=verbose)
@@ -299,6 +379,7 @@ def __init__(self, maps_img, mask_img=None, min_region_size=1350,
self.thresholding_strategy = thresholding_strategy
self.threshold = threshold
self.extractor = extractor
+ self.smoothing_fwhm = smoothing_fwhm
def fit(self, X=None, y=None):
""" Prepare the data and setup for the region extraction
@@ -328,9 +409,141 @@ def fit(self, X=None, y=None):
# connected component extraction
self.regions_img_, self.index_ = connected_regions(threshold_maps,
self.min_region_size,
- self.extractor)
+ self.extractor,
+ self.smoothing_fwhm)
self.maps_img = self.regions_img_
super(RegionExtractor, self).fit()
return self
+
+
+def connected_label_regions(labels_img, min_size=None, connect_diag=True,
+ labels=None):
+ """ Extract connected regions from a brain atlas image defined by labels
+ (integers).
+
+ For each label in a parcellation, separates out the connected
+ components and assigns a unique label to each separated region.
+
+ Parameters
+ ----------
+
+ labels_img : Nifti-like image
+ A 3D image which contains regions denoted as labels. Each region
+ is assigned with integers.
+
+ min_size : float in mm^3, optional (default None)
+ Minimum volume a region must have in order to be kept after
+ extraction. Removes small or spurious regions.
+
+ connect_diag : bool (default True)
+ If 'connect_diag' is True, two voxels are considered in the same region
+ if they are connected along the diagonal (26-connectivity). If it is
+ False, two voxels are considered connected only if they are adjacent
+ along the x, y, or z axis (6-connectivity).
+
+ labels : 1D numpy array or list of str, (default None), optional
+ Each string in the list or array denotes the name of a brain atlas
+ region given in the labels_img input. If provided, the names are
+ re-assigned to the corresponding connected components produced by
+ the relabelling. The total number of names must match the number of
+ unique labels assigned in the image.
+
+ NOTE: The order of the names given in labels should be appropriately
+ matched with the unique labels (integers) assigned to each region
+ given in labels_img (also excluding 'Background' label).
+
+ Returns
+ -------
+ new_labels_img : Nifti-like image
+ A new image comprising the regions extracted from the input labels_img.
+
+ new_labels : list, optional
+ If labels are provided, the new labels assigned to the extracted
+ regions are returned. Otherwise, only the new labels image is returned.
+
+ See Also
+ --------
+ nilearn.datasets.fetch_atlas_harvard_oxford : For an example of atlas with
+ labels.
+
+ nilearn.regions.RegionExtractor : A class that can be used for region
+ extraction on continuous-type atlas images.
+
+ nilearn.regions.connected_regions : A function used for region extraction
+ on continuous-type atlas images.
+
+ """
+ labels_img = check_niimg_3d(labels_img)
+ labels_data = _safe_get_data(labels_img, ensure_finite=True)
+ affine = labels_img.affine
+
+ check_unique_labels = np.unique(labels_data)
+
+ if min_size is not None and not isinstance(min_size, numbers.Number):
+ raise ValueError("Expected 'min_size' to be specified as integer. "
+ "You provided {0}".format(min_size))
+ if not isinstance(connect_diag, bool):
+ raise ValueError("'connect_diag' must be specified as True or False. "
+ "You provided {0}".format(connect_diag))
+ if np.any(check_unique_labels < 0):
+ raise ValueError("The 'labels_img' you provided has unknown/negative "
+ "integers as labels {0} assigned to regions. "
+ "All regions in an image should have positive "
+ "integers assigned as labels."
+ .format(check_unique_labels))
+
+ unique_labels = set(check_unique_labels)
+ # check for background label indicated as 0
+ if np.any(check_unique_labels == 0):
+ unique_labels.remove(0)
+
+ if labels is not None:
+ if (not isinstance(labels, collections.Iterable) or
+ isinstance(labels, _basestring)):
+ labels = [labels, ]
+ if len(unique_labels) != len(labels):
+ raise ValueError("The number of labels: {0} provided as input "
+ "in labels={1} does not match with the number "
+ "of unique labels in labels_img: {2}. "
+ "Please provide appropriate match with unique "
+ "number of labels in labels_img."
+ .format(len(labels), labels, len(unique_labels)))
+ new_names = []
+
+ if labels is None:
+ this_labels = [None] * len(unique_labels)
+ else:
+ this_labels = labels
+
+ new_labels_data = np.zeros(labels_data.shape, dtype=np.int)
+ current_max_label = 0
+ for label_id, name in zip(unique_labels, this_labels):
+ this_label_mask = (labels_data == label_id)
+ # Extract regions assigned to each label id
+ if connect_diag:
+ structure = np.ones((3, 3, 3), dtype=np.int)
+ regions, this_n_labels = ndimage.label(
+ this_label_mask.astype(np.int), structure=structure)
+ else:
+ regions, this_n_labels = ndimage.label(this_label_mask.astype(np.int))
+
+ if min_size is not None:
+ index = np.arange(this_n_labels + 1)
+ regions = _remove_small_regions(regions, index, affine,
+ min_size=min_size)
+ this_n_labels = regions.max()
+
+ cur_regions = regions[regions != 0] + current_max_label
+ new_labels_data[regions != 0] = cur_regions
+ current_max_label += this_n_labels
+ if name is not None:
+ new_names.extend([name] * this_n_labels)
+
+ new_labels_img = new_img_like(labels_img, new_labels_data, affine=affine)
+ if labels is not None:
+ new_labels = new_names
+ return new_labels_img, new_labels
+
+ return new_labels_img
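A minimal sketch of the new function on a synthetic label image (the label layout and region names are illustrative only):

import numpy as np
import nibabel
from nilearn.regions import connected_label_regions

# Label 1 occupies two disconnected blocks; label 2 a single block
data = np.zeros((10, 10, 10), dtype=np.int)
data[:3, :3, :3] = 1
data[7:, 7:, 7:] = 1
data[4:6, 4:6, 4:6] = 2
labels_img = nibabel.Nifti1Image(data, affine=np.eye(4))

new_img, new_names = connected_label_regions(
    labels_img, min_size=5, connect_diag=False,
    labels=['region_a', 'region_b'])
# Label 1 splits into two regions, both keeping the name 'region_a'
print(np.unique(new_img.get_data()), new_names)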
diff --git a/nilearn/regions/signal_extraction.py b/nilearn/regions/signal_extraction.py
index 74422076bf..d17ff79729 100644
--- a/nilearn/regions/signal_extraction.py
+++ b/nilearn/regions/signal_extraction.py
@@ -11,6 +11,7 @@
from scipy import linalg, ndimage
from .. import _utils
+from .._utils.niimg import _safe_get_data
from .. import masking
from ..image import new_img_like
@@ -26,18 +27,18 @@ def img_to_signals_labels(imgs, labels_img, mask_img=None,
performs no resampling.
Parameters
- ==========
+ ----------
imgs: 4D Niimg-like object
- See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
+ See http://nilearn.github.io/manipulating_images/input_output.html
input images.
labels_img: Niimg-like object
- See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
+ See http://nilearn.github.io/manipulating_images/input_output.html
regions definition as labels. By default, the label zero is used to
denote an absence of region. Use background_label to change it.
mask_img: Niimg-like object
- See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
+ See http://nilearn.github.io/manipulating_images/input_output.html
Mask to apply to labels before extracting signals. Every point
outside the mask is considered as background (i.e. no region).
@@ -48,7 +49,7 @@ def img_to_signals_labels(imgs, labels_img, mask_img=None,
ordering of output array ("C" or "F"). Defaults to "F".
Returns
- =======
+ -------
signals: numpy.ndarray
Signals extracted from each region. One output signal is the mean
of all input signals in a given region. If some regions are entirely
@@ -60,7 +61,7 @@ def img_to_signals_labels(imgs, labels_img, mask_img=None,
the region with label labels[n].
See also
- ========
+ --------
nilearn.regions.signals_to_img_labels
nilearn.regions.img_to_signals_maps
"""
@@ -70,35 +71,36 @@ def img_to_signals_labels(imgs, labels_img, mask_img=None,
# TODO: Make a special case for list of strings (load one image at a
# time).
imgs = _utils.check_niimg_4d(imgs)
- target_affine = imgs.get_affine()
+ target_affine = imgs.affine
target_shape = imgs.shape[:3]
# Check shapes and affines.
if labels_img.shape != target_shape:
raise ValueError("labels_img and imgs shapes must be identical.")
- if abs(labels_img.get_affine() - target_affine).max() > 1e-9:
+ if abs(labels_img.affine - target_affine).max() > 1e-9:
raise ValueError("labels_img and imgs affines must be identical")
if mask_img is not None:
mask_img = _utils.check_niimg_3d(mask_img)
if mask_img.shape != target_shape:
raise ValueError("mask_img and imgs shapes must be identical.")
- if abs(mask_img.get_affine() - target_affine).max() > 1e-9:
+ if abs(mask_img.affine - target_affine).max() > 1e-9:
raise ValueError("mask_img and imgs affines must be identical")
# Perform computation
- labels_data = labels_img.get_data()
+ labels_data = _safe_get_data(labels_img, ensure_finite=True)
labels = list(np.unique(labels_data))
if background_label in labels:
labels.remove(background_label)
if mask_img is not None:
- mask_data = mask_img.get_data()
+ mask_data = _safe_get_data(mask_img, ensure_finite=True)
labels_data = labels_data.copy()
labels_data[np.logical_not(mask_data)] = background_label
- data = imgs.get_data()
- signals = np.ndarray((data.shape[-1], len(labels)), order=order)
+ data = _safe_get_data(imgs)
+ signals = np.ndarray((data.shape[-1], len(labels)), order=order,
+ dtype=data.dtype)
for n, img in enumerate(np.rollaxis(data, -1)):
signals[n] = np.asarray(ndimage.measurements.mean(img,
labels=labels_data,
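The per-label signal is simply the mean over the voxels carrying that label, computed one volume at a time with scipy, as in the loop above. A toy sketch of that single step (the array values are illustrative):

import numpy as np
from scipy import ndimage

volume = np.array([[1., 2.],
                   [3., 4.]])
labels_data = np.array([[1, 1],
                        [2, 0]])  # 0 denotes background
# Mean intensity over each requested label, in one vectorized call
print(ndimage.measurements.mean(volume, labels=labels_data, index=[1, 2]))
# -> [1.5, 3.0]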
@@ -112,7 +114,7 @@ def img_to_signals_labels(imgs, labels_img, mask_img=None,
def signals_to_img_labels(signals, labels_img, mask_img=None,
- background_label=0, order="F"):
+ background_label=0, order="F"):
"""Create image from region signals defined as labels.
The same region signal is used for each voxel of the corresponding 3D
@@ -121,12 +123,12 @@ def signals_to_img_labels(signals, labels_img, mask_img=None,
labels_img, mask_img must have the same shapes and affines.
Parameters
- ==========
+ ----------
signals: numpy.ndarray
2D array with shape: (scan number, number of regions in labels_img)
labels_img: Niimg-like object
- See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
+ See http://nilearn.github.io/manipulating_images/input_output.html
Region definitions using labels.
mask_img: Niimg-like object, optional
@@ -140,13 +142,13 @@ def signals_to_img_labels(signals, labels_img, mask_img=None,
ordering of output array ("C" or "F"). Defaults to "F".
Returns
- =======
+ -------
img: nibabel.Nifti1Image
Reconstructed image. dtype is that of "signals", affine and shape are
those of labels_img.
See also
- ========
+ --------
nilearn.regions.img_to_signals_labels
nilearn.regions.signals_to_img_maps
"""
@@ -154,7 +156,7 @@ def signals_to_img_labels(signals, labels_img, mask_img=None,
labels_img = _utils.check_niimg_3d(labels_img)
signals = np.asarray(signals)
- target_affine = labels_img.get_affine()
+ target_affine = labels_img.affine
target_shape = labels_img.shape[:3]
if mask_img is not None:
@@ -162,17 +164,17 @@ def signals_to_img_labels(signals, labels_img, mask_img=None,
if mask_img.shape != target_shape:
raise ValueError("mask_img and labels_img shapes "
"must be identical.")
- if abs(mask_img.get_affine() - target_affine).max() > 1e-9:
+ if abs(mask_img.affine - target_affine).max() > 1e-9:
raise ValueError("mask_img and labels_img affines "
"must be identical")
- labels_data = labels_img.get_data()
+ labels_data = _safe_get_data(labels_img, ensure_finite=True)
labels = list(np.unique(labels_data))
if background_label in labels:
labels.remove(background_label)
if mask_img is not None:
- mask_data = mask_img.get_data()
+ mask_data = _safe_get_data(mask_img, ensure_finite=True)
labels_data = labels_data.copy()
labels_data[np.logical_not(mask_data)] = background_label
@@ -201,18 +203,18 @@ def img_to_signals_maps(imgs, maps_img, mask_img=None):
This function is applicable to regions defined by maps.
Parameters
- ==========
+ ----------
imgs: Niimg-like object
- See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
+ See http://nilearn.github.io/manipulating_images/input_output.html
Input images.
maps_img: Niimg-like object
- See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
+ See http://nilearn.github.io/manipulating_images/input_output.html
regions definition as maps (array of weights).
shape: imgs.shape + (region number, )
mask_img: Niimg-like object
- See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
+ See http://nilearn.github.io/manipulating_images/input_output.html
mask to apply to regions before extracting signals. Every point
outside the mask is considered as background (i.e. outside of any
region).
@@ -221,7 +223,7 @@ def img_to_signals_maps(imgs, maps_img, mask_img=None):
ordering of output array ("C" or "F"). Defaults to "F".
Returns
- =======
+ -------
region_signals: numpy.ndarray
Signals extracted from each region.
Shape is: (scans number, number of regions intersecting mask)
@@ -231,38 +233,40 @@ def img_to_signals_maps(imgs, maps_img, mask_img=None):
signal region_signals[:, n].
See also
- ========
+ --------
nilearn.regions.img_to_signals_labels
nilearn.regions.signals_to_img_maps
"""
maps_img = _utils.check_niimg_4d(maps_img)
imgs = _utils.check_niimg_4d(imgs)
- affine = imgs.get_affine()
+ affine = imgs.affine
shape = imgs.shape[:3]
# Check shapes and affines.
if maps_img.shape[:3] != shape:
raise ValueError("maps_img and imgs shapes must be identical.")
- if abs(maps_img.get_affine() - affine).max() > 1e-9:
+ if abs(maps_img.affine - affine).max() > 1e-9:
raise ValueError("maps_img and imgs affines must be identical")
- maps_data = maps_img.get_data()
+ maps_data = _safe_get_data(maps_img, ensure_finite=True)
if mask_img is not None:
mask_img = _utils.check_niimg_3d(mask_img)
if mask_img.shape != shape:
raise ValueError("mask_img and imgs shapes must be identical.")
- if abs(mask_img.get_affine() - affine).max() > 1e-9:
+ if abs(mask_img.affine - affine).max() > 1e-9:
raise ValueError("mask_img and imgs affines must be identical")
maps_data, maps_mask, labels = \
- _trim_maps(maps_data, mask_img.get_data(), keep_empty=True)
+ _trim_maps(maps_data,
+ _safe_get_data(mask_img, ensure_finite=True),
+ keep_empty=True)
maps_mask = _utils.as_ndarray(maps_mask, dtype=np.bool)
else:
maps_mask = np.ones(maps_data.shape[:3], dtype=np.bool)
labels = np.arange(maps_data.shape[-1], dtype=np.int)
- data = imgs.get_data()
+ data = _safe_get_data(imgs, ensure_finite=True)
region_signals = linalg.lstsq(maps_data[maps_mask, :],
data[maps_mask, :])[0].T
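For maps-defined regions the signals are instead recovered by least squares: every scan is regressed onto the spatial maps, which is exactly the linalg.lstsq call above. A toy sketch with illustrative shapes:

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
n_voxels, n_maps, n_scans = 100, 3, 5
maps = rng.uniform(size=(n_voxels, n_maps))   # one spatial map per column
true_signals = rng.randn(n_maps, n_scans)     # per-region time courses
data = maps.dot(true_signals)                 # noiseless voxel data

# Same computation as in img_to_signals_maps
region_signals = linalg.lstsq(maps, data)[0].T
print(np.allclose(region_signals, true_signals.T))  # True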
@@ -275,46 +279,47 @@ def signals_to_img_maps(region_signals, maps_img, mask_img=None):
region_signals, mask_img must have the same shapes and affines.
Parameters
- ==========
+ ----------
region_signals: numpy.ndarray
signals to process, as a 2D array. A signal is a column. There must
be as many signals as maps.
In pseudo-code: region_signals.shape[1] == maps_img.shape[-1]
maps_img: Niimg-like object
- See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
+ See http://nilearn.github.io/manipulating_images/input_output.html
Region definitions using maps.
mask_img: Niimg-like object, optional
- See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
+ See http://nilearn.github.io/manipulating_images/input_output.html
Boolean array giving voxels to process. integer arrays also accepted,
zero meaning False.
Returns
- =======
+ -------
img: nibabel.Nifti1Image
Reconstructed image. affine and shape are those of maps_img.
See also
- ========
+ --------
nilearn.regions.signals_to_img_labels
nilearn.regions.img_to_signals_maps
"""
maps_img = _utils.check_niimg_4d(maps_img)
- maps_data = maps_img.get_data()
+ maps_data = _safe_get_data(maps_img, ensure_finite=True)
shape = maps_img.shape[:3]
- affine = maps_img.get_affine()
+ affine = maps_img.affine
if mask_img is not None:
mask_img = _utils.check_niimg_3d(mask_img)
if mask_img.shape != shape:
raise ValueError("mask_img and maps_img shapes must be identical.")
- if abs(mask_img.get_affine() - affine).max() > 1e-9:
+ if abs(mask_img.affine - affine).max() > 1e-9:
raise ValueError("mask_img and maps_img affines must be "
"identical.")
- maps_data, maps_mask, _ = _trim_maps(maps_data, mask_img.get_data(),
- keep_empty=True)
+ maps_data, maps_mask, _ = _trim_maps(
+ maps_data, _safe_get_data(mask_img, ensure_finite=True),
+ keep_empty=True)
maps_mask = _utils.as_ndarray(maps_mask, dtype=np.bool)
else:
maps_mask = np.ones(maps_data.shape[:3], dtype=np.bool)
@@ -332,7 +337,7 @@ def _trim_maps(maps, mask, keep_empty=False, order="F"):
must be performed before calling this function.
Parameters
- ==========
+ ----------
maps: numpy.ndarray
Set of maps, defining some regions.
@@ -348,7 +353,7 @@ def _trim_maps(maps, mask, keep_empty=False, order="F"):
Ordering of the output maps array (trimmed_maps).
Returns
- =======
+ -------
trimmed_maps: numpy.ndarray
New set of maps, computed as intersection of each input map and mask.
Empty maps are discarded if keep_empty is False, thus the number of
diff --git a/nilearn/regions/tests/__init__.py b/nilearn/regions/tests/__init__.py
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/nilearn/regions/tests/__init__.py
@@ -0,0 +1 @@
+
diff --git a/nilearn/regions/tests/test_parcellations.py b/nilearn/regions/tests/test_parcellations.py
new file mode 100644
index 0000000000..df77cd68eb
--- /dev/null
+++ b/nilearn/regions/tests/test_parcellations.py
@@ -0,0 +1,276 @@
+"""
+Test the parcellations tools module
+"""
+import numpy as np
+import nibabel
+
+from nose.tools import assert_true, assert_equal
+from nilearn.regions.parcellations import (Parcellations,
+ _check_parameters_transform)
+from nilearn._utils.testing import assert_raises_regex
+
+
+def test_errors_raised_in_check_parameters_fit():
+ # Test whether an error is raised or not given
+ # a false method type
+ # valid_methods = ['kmeans', 'ward', 'complete', 'average']
+ data = np.zeros((6, 7, 8, 5))
+
+ img = nibabel.Nifti1Image(data, affine=np.eye(4))
+
+ method_raise1 = Parcellations(method=None)
+ assert_raises_regex(ValueError,
+ "Parcellation method is specified as None. ",
+ method_raise1.fit, img)
+
+ for invalid_method in ['kmens', 'avg', 'complte']:
+ method_raise2 = Parcellations(method=invalid_method)
+ msg = ("The method you have selected is not implemented "
+ "'{0}'".format(invalid_method))
+ assert_raises_regex(ValueError, msg, method_raise2.fit, img)
+
+
+def test_parcellations_fit_on_single_nifti_image():
+ # Test return attributes for each method
+ data = np.zeros((10, 11, 12, 5))
+ data[9, 10, 2] = 1
+ data[4, 9, 3] = 2
+ fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4))
+
+ methods = ['kmeans', 'ward', 'complete', 'average']
+ n_parcels = [5, 10, 15]
+ for n_parcel, method in zip(n_parcels, methods):
+ parcellator = Parcellations(method=method, n_parcels=n_parcel)
+ parcellator.fit(fmri_img)
+ # Test that object returns attribute labels_img_
+ assert_true(parcellator.labels_img_ is not None)
+ # Test object returns attribute masker_
+ assert_true(parcellator.masker_ is not None)
+ assert_true(parcellator.mask_img_ is not None)
+ if method != 'kmeans':
+ # Test that object returns attribute connectivity_
+ # only for AgglomerativeClustering methods
+ assert_true(parcellator.connectivity_ is not None)
+ labels_img = parcellator.labels_img_
+ assert_true(parcellator.labels_img_ is not None)
+ # The spatial shape of the labels image must match that of the
+ # original input data
+ assert_equal(labels_img.shape, (data.shape[0],
+ data.shape[1],
+ data.shape[2]))
+
+
+def test_parcellations_fit_on_multi_nifti_images():
+ data = np.zeros((10, 11, 12, 5))
+ data[9, 10, 2] = 1
+ data[4, 9, 3] = 2
+ fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4))
+ # List of fmri images
+ fmri_imgs = [fmri_img, fmri_img, fmri_img]
+
+ parcellator = Parcellations(method='kmeans', n_parcels=5)
+ parcellator.fit(fmri_imgs)
+ assert_true(parcellator.labels_img_ is not None)
+
+ parcellator = Parcellations(method='ward', n_parcels=5)
+ parcellator.fit(fmri_imgs)
+ assert_true(parcellator.labels_img_ is not None)
+
+ # Smoke test with explicit mask image
+ mask_img = np.ones((10, 11, 12))
+ mask_img = nibabel.Nifti1Image(mask_img, np.eye(4))
+
+ parcellator = Parcellations(method='kmeans', n_parcels=5,
+ mask=mask_img)
+ parcellator.fit(fmri_imgs)
+
+ parcellator = Parcellations(method='ward', n_parcels=5,
+ mask=mask_img)
+ parcellator.fit(fmri_imgs)
+
+
+def test_parcellations_transform_single_nifti_image():
+ # Test with NiftiLabelsMasker extraction of timeseries data
+ # after building a parcellations image
+
+ # Here, data has ones. zeros will be considered as background labels
+ # not foreground labels
+ data = np.ones((10, 11, 12, 8))
+ data[6, 7, 8] = 2
+ data[9, 10, 11] = 3
+ parcels = 5
+
+ fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4))
+
+ for method in ['kmeans', 'ward', 'complete', 'average']:
+ parcellator = Parcellations(method=method, n_parcels=parcels)
+ parcellator.fit(fmri_img)
+ # transform to signals
+ signals = parcellator.transform(fmri_img)
+ # Test if the signals extracted are of same shape as inputs
+ # Here, we simply return numpy array for single subject input
+ assert_equal(signals.shape, (fmri_img.shape[3], parcels))
+
+ # Test for single subject but in a list.
+ signals = parcellator.transform([fmri_img])
+ assert_equal(signals.shape, (fmri_img.shape[3], parcels))
+
+
+def test_parcellations_transform_multi_nifti_images():
+ data = np.ones((10, 11, 12, 10))
+ data[6, 7, 8] = 2
+ data[9, 10, 11] = 3
+ parcels = 5
+
+ fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4))
+ fmri_imgs = [fmri_img, fmri_img, fmri_img]
+
+ for method in ['kmeans', 'ward', 'complete', 'average']:
+ parcellator = Parcellations(method=method, n_parcels=parcels)
+ parcellator.fit(fmri_imgs)
+ # transform multi images to signals. In return, we have length
+ # equal to the number of images
+ signals = parcellator.transform(fmri_imgs)
+ assert_equal(signals[0].shape, (fmri_img.shape[3], parcels))
+ assert_equal(signals[1].shape, (fmri_img.shape[3], parcels))
+ assert_equal(signals[2].shape, (fmri_img.shape[3], parcels))
+
+ assert_equal(len(signals), len(fmri_imgs))
+
+
+def test_check_parameters_transform():
+ rng = np.random.RandomState(0)
+ data = np.ones((10, 11, 12, 10))
+ data[6, 7, 8] = 2
+ data[9, 10, 11] = 3
+
+ # single image
+ fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4))
+ # single confound
+ confounds = rng.randn(*(10, 3))
+ # Tests to check whether imgs, confounds returned are
+ # list or not. Pre-check in parameters to work for list
+ # of multi images and multi confounds
+ imgs, confounds, single_subject = _check_parameters_transform(fmri_img,
+ confounds)
+ assert_true(isinstance(imgs, (list, tuple)))
+ assert_true(isinstance(confounds, (list, tuple)))
+ assert_equal(single_subject, True)
+
+ # multi images
+ fmri_imgs = [fmri_img, fmri_img, fmri_img]
+ confounds_list = [confounds, confounds, confounds]
+ imgs, confounds, _ = _check_parameters_transform(fmri_imgs, confounds_list)
+ assert_equal(imgs, fmri_imgs)
+ assert_equal(confounds_list, confounds)
+
+ # Test the error when length of images and confounds are not same
+ msg = ("Number of confounds given does not match with the "
+ "given number of images")
+ not_match_confounds_list = [confounds, confounds]
+ assert_raises_regex(ValueError, msg, _check_parameters_transform,
+ fmri_imgs, not_match_confounds_list)
+
+
+def test_parcellations_transform_with_multi_confounds_multi_images():
+ rng = np.random.RandomState(0)
+ data = np.ones((10, 11, 12, 10))
+ data[6, 7, 8] = 2
+ data[9, 10, 11] = 3
+
+ fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4))
+ fmri_imgs = [fmri_img, fmri_img, fmri_img]
+
+ confounds = rng.randn(*(10, 3))
+ confounds_list = (confounds, confounds, confounds)
+
+ for method in ['kmeans', 'ward', 'complete', 'average']:
+ parcellator = Parcellations(method=method, n_parcels=5)
+ parcellator.fit(fmri_imgs)
+
+ signals = parcellator.transform(fmri_imgs,
+ confounds=confounds_list)
+ assert_true(isinstance(signals, list))
+ # n_parcels=5, length of data=10
+ assert_equal(signals[0].shape, (10, 5))
+
+
+def test_fit_transform():
+ rng = np.random.RandomState(0)
+ data = np.ones((10, 11, 12, 10))
+ data[6, 7, 8] = 2
+ data[9, 10, 11] = 3
+
+ fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4))
+ fmri_imgs = [fmri_img, fmri_img, fmri_img]
+
+ confounds = rng.randn(*(10, 3))
+ confounds_list = [confounds, confounds, confounds]
+
+ for method in ['kmeans', 'ward', 'complete', 'average']:
+ parcellator = Parcellations(method=method, n_parcels=5)
+ signals = parcellator.fit_transform(fmri_imgs)
+ assert_true(parcellator.labels_img_ is not None)
+ if method != 'kmeans':
+ assert_true(parcellator.connectivity_ is not None)
+ assert_true(parcellator.masker_ is not None)
+ # fit_transform with confounds
+ signals = parcellator.fit_transform(fmri_imgs,
+ confounds=confounds_list)
+ assert_true(isinstance(signals, list))
+ assert_equal(signals[0].shape, (10, 5))
+
+
+def test_inverse_transform_single_nifti_image():
+ data = np.ones((10, 11, 12, 10))
+ data[6, 7, 8] = 2
+ data[9, 10, 11] = 3
+
+ fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4))
+ methods = ['kmeans', 'ward', 'complete', 'average']
+
+ for method in methods:
+ parcellate = Parcellations(method=method, n_parcels=5)
+ # Fit
+ parcellate.fit(fmri_img)
+ assert_true(parcellate.labels_img_ is not None)
+ # Transform
+ fmri_reduced = parcellate.transform(fmri_img)
+ assert_true(isinstance(fmri_reduced, np.ndarray))
+ # Shape matching with (scans, regions)
+ assert_equal(fmri_reduced.shape, (10, 5))
+ # Inverse transform
+ fmri_compressed = parcellate.inverse_transform(fmri_reduced)
+ # A single Nifti image for single subject input
+ assert_true(isinstance(fmri_compressed, nibabel.Nifti1Image))
+ # returns shape of fmri_img
+ assert_equal(fmri_compressed.shape, (10, 11, 12, 10))
+
+ # fmri_reduced in a list
+ fmri_compressed = parcellate.inverse_transform([fmri_reduced])
+ # A single Nifti image for single subject input
+ assert_true(isinstance(fmri_compressed, nibabel.Nifti1Image))
+ # returns shape of fmri_img
+ assert_equal(fmri_compressed.shape, (10, 11, 12, 10))
+
+
+def test_transform_3d_input_images():
+ # test list of 3D images
+ data = np.ones((10, 11, 12))
+ data[6, 7, 8] = 2
+ data[9, 10, 11] = 3
+ img = nibabel.Nifti1Image(data, affine=np.eye(4))
+ # list of 3
+ imgs = [img, img, img]
+ parcellate = Parcellations(method='ward', n_parcels=20)
+ X = parcellate.fit_transform(imgs)
+ assert_true(isinstance(X, list))
+ # (number of samples, number of features)
+ assert_equal(np.concatenate(X).shape, (3, 20))
+ # inverse transform
+ imgs_ = parcellate.inverse_transform(X)
+ assert_true(isinstance(imgs_, list))
+ # test single 3D image
+ X = parcellate.fit_transform(imgs[0])
+ assert_true(isinstance(X, np.ndarray))
+ assert_equal(X.shape, (1, 20))
diff --git a/nilearn/regions/tests/test_region_extractor.py b/nilearn/regions/tests/test_region_extractor.py
index 268c95f0e6..5d4de657d3 100644
--- a/nilearn/regions/tests/test_region_extractor.py
+++ b/nilearn/regions/tests/test_region_extractor.py
@@ -2,14 +2,18 @@
import numpy as np
import nibabel
+from scipy import ndimage
-from nose.tools import assert_raises, assert_equal, assert_true, assert_not_equal
+from nose.tools import assert_equal, assert_true, assert_not_equal
-from nilearn.regions import connected_regions, RegionExtractor
-from nilearn.regions.region_extractor import _threshold_maps_ratio
-from nilearn.image import iter_img
+from nilearn.regions import (connected_regions, RegionExtractor,
+ connected_label_regions)
+from nilearn.regions.region_extractor import (_threshold_maps_ratio,
+ _remove_small_regions)
+from nilearn._utils import testing
from nilearn._utils.testing import assert_raises_regex, generate_maps
+from nilearn._utils.exceptions import DimensionError
def _make_random_data(shape):
@@ -34,6 +38,15 @@ def test_invalid_thresholds_in_threshold_maps_ratio():
maps, threshold=invalid_threshold)
+def test_nans_threshold_maps_ratio():
+ maps, _ = generate_maps((10, 10, 10), n_regions=2)
+ data = maps.get_data()
+ data[:, :, 0] = np.nan
+
+ maps_img = nibabel.Nifti1Image(data, np.eye(4))
+ thr_maps = _threshold_maps_ratio(maps_img, threshold=0.8)
+
+
def test_threshold_maps_ratio():
# smoke test for function _threshold_maps_ratio with randomly
# generated maps
@@ -69,12 +82,11 @@ def test_invalids_extract_types_in_connected_regions():
def test_connected_regions():
# 4D maps
n_regions = 4
- maps, _ = generate_maps((30, 30, 30), n_regions=n_regions)
+ maps, mask_img = generate_maps((30, 30, 30), n_regions=n_regions)
# 3D maps
map_img = np.zeros((30, 30, 30)) + 0.1 * np.random.randn(30, 30, 30)
map_img = nibabel.Nifti1Image(map_img, affine=np.eye(4))
- valid_extract_types = ['connected_components', 'local_regions']
# smoke test for function connected_regions and also to check
# if the regions extracted should be equal or more than already present.
# 4D image case
@@ -88,6 +100,24 @@ def test_connected_regions():
extract_type=extract_type)
assert_true(connected_extraction_3d_img.shape[-1] >= 1)
+ # Test input mask_img
+ extraction_with_mask_img, index = connected_regions(maps,
+ mask_img=mask_img)
+ assert_true(extraction_with_mask_img.shape[-1] >= 1)
+
+ # mask_img with different shape
+ mask = np.zeros(shape=(10, 11, 12), dtype=np.int)
+ mask[1:-1, 1:-1, 1:-1] = 1
+ affine = np.array([[2., 0., 0., 0.],
+ [0., 2., 0., 0.],
+ [0., 0., 2., 0.],
+ [0., 0., 0., 2.]])
+ mask_img = nibabel.Nifti1Image(mask, affine=affine)
+ extraction_not_same_fov_mask, _ = connected_regions(maps,
+ mask_img=mask_img)
+ assert_equal(maps.shape[:3], extraction_not_same_fov_mask.shape[:3])
+ assert_not_equal(mask_img.shape, extraction_not_same_fov_mask.shape[:3])
+
def test_invalid_threshold_strategies():
maps, _ = generate_maps((6, 8, 10), n_regions=1)
@@ -135,8 +165,6 @@ def test_region_extractor_fit_and_transform():
assert_true(extractor.regions_img_.shape[-1] >= 9)
n_regions_extracted = extractor.regions_img_.shape[-1]
- imgs = []
- signals = []
shape = (91, 109, 91, 7)
expected_signal_shape = (7, n_regions_extracted)
for id_ in range(n_subjects):
@@ -144,3 +172,198 @@ def test_region_extractor_fit_and_transform():
# smoke test NiftiMapsMasker transform inherited in Region Extractor
signal = extractor.transform(img)
assert_equal(expected_signal_shape, signal.shape)
+
+ # smoke test with high resolution image
+ maps, mask_img = generate_maps((20, 20, 20), n_regions=n_regions,
+ affine=.2 * np.eye(4))
+
+ extract_ratio = RegionExtractor(maps,
+ thresholding_strategy='ratio_n_voxels',
+ smoothing_fwhm=.6,
+ min_region_size=.4)
+ extract_ratio.fit()
+ assert_not_equal(extract_ratio.regions_img_, '')
+ assert_true(extract_ratio.regions_img_.shape[-1] >= 9)
+
+ # smoke test with zeros on the diagonal of the affine
+ affine = np.eye(4)
+ affine[[0, 1]] = affine[[1, 0]] # permutes first and second lines
+ maps, mask_img = generate_maps((40, 40, 40), n_regions=n_regions,
+ affine=affine)
+
+ extract_ratio = RegionExtractor(maps, threshold=0.2,
+ thresholding_strategy='ratio_n_voxels')
+ extract_ratio.fit()
+ assert_not_equal(extract_ratio.regions_img_, '')
+ assert_true(extract_ratio.regions_img_.shape[-1] >= 9)
+
+
+def test_error_messages_connected_label_regions():
+ shape = (13, 11, 12)
+ affine = np.eye(4)
+ n_regions = 2
+ labels_img = testing.generate_labeled_regions(shape, affine=affine,
+ n_regions=n_regions)
+ assert_raises_regex(ValueError,
+ "Expected 'min_size' to be specified as integer.",
+ connected_label_regions,
+ labels_img=labels_img, min_size='a')
+ assert_raises_regex(ValueError,
+ "'connect_diag' must be specified as True or False.",
+ connected_label_regions,
+ labels_img=labels_img, connect_diag=None)
+
+
+def test_remove_small_regions():
+ data = np.array([[[0., 1., 0.],
+ [0., 1., 1.],
+ [0., 0., 0.]],
+ [[0., 0., 0.],
+ [1., 0., 0.],
+ [0., 1., 0.]],
+ [[0., 0., 1.],
+ [1., 0., 0.],
+ [0., 1., 1.]]])
+ # To remove small regions, data should be labelled
+ label_map, n_labels = ndimage.label(data)
+ sum_label_data = np.sum(label_map)
+
+ affine = np.eye(4)
+ min_size = 10
+ # data can act as mask_data to identify regions in label_map because
+ # features in label_map are built upon the non-zeros in data
+ index = np.arange(n_labels + 1)
+ removed_data = _remove_small_regions(label_map, index, affine, min_size)
+ sum_removed_data = np.sum(removed_data)
+
+ assert_true(sum_removed_data < sum_label_data)
+
+
+def test_connected_label_regions():
+ shape = (13, 11, 12)
+ affine = np.eye(4)
+ n_regions = 9
+ labels_img = testing.generate_labeled_regions(shape, affine=affine,
+ n_regions=n_regions)
+ labels_data = labels_img.get_data()
+ n_labels_wo_reg_ext = len(np.unique(labels_data))
+
+ # region extraction without specifying min_size
+ extracted_regions_on_labels_img = connected_label_regions(labels_img)
+ extracted_regions_labels_data = extracted_regions_on_labels_img.get_data()
+ n_labels_wo_min = len(np.unique(extracted_regions_labels_data))
+
+ assert_true(n_labels_wo_reg_ext < n_labels_wo_min)
+
+ # with specifying min_size
+ extracted_regions_with_min = connected_label_regions(labels_img,
+ min_size=100)
+ extracted_regions_with_min_data = extracted_regions_with_min.get_data()
+ n_labels_with_min = len(np.unique(extracted_regions_with_min_data))
+
+ assert_true(n_labels_wo_min > n_labels_with_min)
+
+ # Test connect_diag=False
+ ext_reg_without_connect_diag = connected_label_regions(labels_img,
+ connect_diag=False)
+ data_wo_connect_diag = ext_reg_without_connect_diag.get_data()
+ n_labels_wo_connect_diag = len(np.unique(data_wo_connect_diag))
+ assert_true(n_labels_wo_connect_diag > n_labels_wo_reg_ext)
+
+ # If min_size is large and if all the regions are removed then empty image
+ # will be returned
+ extract_reg_min_size_large = connected_label_regions(labels_img,
+ min_size=500)
+ assert_true(np.unique(extract_reg_min_size_large.get_data()) == 0)
+
+ # Test the names of the brain regions given in labels.
+ # Test labels for 9 regions in n_regions
+ labels = ['region_a', 'region_b', 'region_c', 'region_d', 'region_e',
+ 'region_f', 'region_g', 'region_h', 'region_i']
+
+ # If labels are provided, first return will contain extracted labels image
+ # and second return will contain list of new names generated based on same
+ # name with assigned on both hemispheres for example.
+ extracted_reg, new_labels = connected_label_regions(labels_img,
+ min_size=100,
+ labels=labels)
+ # The length of new_labels returned can differ depending upon min_size.
+ # If min_size is large, more small regions are removed, so the newly
+ # generated labels can be fewer than the original labels. If min_size is
+ # small, the newly generated labels can be more numerous.
+
+ # We test here whether labels returned are empty or not.
+ assert_not_equal(new_labels, '')
+ assert_true(len(new_labels) <= len(labels))
+
+ # labels given in numpy array
+ labels = np.asarray(labels)
+ extracted_reg2, new_labels2 = connected_label_regions(labels_img,
+ labels=labels)
+ assert_not_equal(new_labels, '')
+ # By default min_size is None, so no regions are removed and the
+ # newly generated labels can outnumber the originals.
+ assert_true(len(new_labels2) >= len(labels))
+
+ # If the number of labels provided is wrong (that is, fewer than the
+ # number of unique labels in labels_img), then an error is raised
+
+ # Test whether error raises
+ unique_labels = set(np.unique(np.asarray(labels_img.get_data())))
+ unique_labels.remove(0)
+
+ # labels given are less than n_regions=9
+ provided_labels = ['region_a', 'region_c', 'region_f',
+ 'region_g', 'region_h', 'region_i']
+
+ assert_true(len(provided_labels) < len(unique_labels))
+
+ np.testing.assert_raises(ValueError, connected_label_regions,
+ labels_img, labels=provided_labels)
+
+ # If unknown/negative integers are provided as labels in labels_img,
+ # an error is raised; test that this is the case.
+ labels_data = np.zeros(shape, dtype=np.int)
+ h0 = shape[0] // 2
+ h1 = shape[1] // 2
+ h2 = shape[2] // 2
+ labels_data[:h0, :h1, :h2] = 1
+ labels_data[:h0, :h1, h2:] = 2
+ labels_data[:h0, h1:, :h2] = 3
+ labels_data[:h0, h1:, h2:] = 4
+ labels_data[h0:, :h1, :h2] = 5
+ labels_data[h0:, :h1, h2:] = 6
+ labels_data[h0:, h1:, :h2] = -7 # negative labels are invalid
+ labels_data[h0:, h1:, h2:] = -8
+
+ neg_labels_img = nibabel.Nifti1Image(labels_data, affine)
+ np.testing.assert_raises(ValueError, connected_label_regions,
+ labels_img=neg_labels_img)
+
+ # If labels_img provided is 4D Nifti image, then test whether error is
+ # raised or not. Since this function accepts only 3D image.
+ labels_4d_data = np.zeros((shape) + (2, ))
+ labels_data[h0:, h1:, :h2] = 0
+ labels_data[h0:, h1:, h2:] = 0
+ labels_4d_data[..., 0] = labels_data
+ labels_4d_data[..., 1] = labels_data
+ labels_img_4d = nibabel.Nifti1Image(labels_4d_data, np.eye(4))
+ np.testing.assert_raises(DimensionError, connected_label_regions,
+ labels_img=labels_img_4d)
+
+ # Test when labels (or names of regions) are given as a plain string
+ # rather than a list. We expect the new names to be returned as a list.
+ labels_in_str = 'region_a'
+ labels_img_in_str = testing.generate_labeled_regions(shape, affine=affine,
+ n_regions=1)
+ extract_regions, new_labels = connected_label_regions(labels_img_in_str,
+ labels=labels_in_str)
+ assert_true(isinstance(new_labels, list))
+
+ # If the user provides a mixed combination of labels, the function passes
+ # without breaking and new labels are returned based upon the given ones;
+ # their number should be equal or greater, depending on regions extracted
+ combined_labels = ['region_a', '1', 'region_b', '2', 'region_c', '3',
+ 'region_d', '4', 'region_e']
+ ext_reg, new_labels = connected_label_regions(labels_img,
+ labels=combined_labels)
+ assert_true(len(new_labels) >= len(combined_labels))
diff --git a/nilearn/regions/tests/test_signal_extraction.py b/nilearn/regions/tests/test_signal_extraction.py
index 7d7e2d70e1..171ae624dc 100644
--- a/nilearn/regions/tests/test_signal_extraction.py
+++ b/nilearn/regions/tests/test_signal_extraction.py
@@ -16,6 +16,10 @@
from nilearn._utils.testing import write_tmp_imgs, assert_raises_regex
from nilearn._utils.exceptions import DimensionError
+_TEST_DIM_ERROR_MSG = ("Input data has incompatible dimensionality: "
+ "Expected dimension is 3D and you provided "
+ "a 4D image")
+
def test_generate_regions_ts():
"""Minimal testing of generate_regions_ts()"""
@@ -110,7 +114,7 @@ def test_signals_extraction_with_labels():
assert_true(np.all(data.std(axis=-1) > 0))
# verify that 4D label images are refused
- assert_raises_regex(DimensionError, "Data must be a 3D",
+ assert_raises_regex(DimensionError, _TEST_DIM_ERROR_MSG,
signal_extraction.img_to_signals_labels,
data_img, labels_4d_img)
@@ -136,10 +140,10 @@ def test_signals_extraction_with_labels():
assert_true(labels_r == list(range(1, 9)))
# Same thing, with mask.
- assert_raises_regex(DimensionError, "Data must be a 3D",
+ assert_raises_regex(DimensionError, _TEST_DIM_ERROR_MSG,
signal_extraction.img_to_signals_labels, data_img,
labels_img, mask_img=mask_4d_img)
- assert_raises_regex(DimensionError, "Data must be a 3D",
+ assert_raises_regex(DimensionError, _TEST_DIM_ERROR_MSG,
signal_extraction.signals_to_img_labels, data_img,
labels_img, mask_img=mask_4d_img)
@@ -227,7 +231,7 @@ def test_signal_extraction_with_maps():
img = nibabel.Nifti1Image(data, np.eye(4))
# verify that 4d masks are refused
- assert_raises_regex(TypeError, "Data must be a 3D",
+ assert_raises_regex(TypeError, _TEST_DIM_ERROR_MSG,
signal_extraction.img_to_signals_maps, img, maps_img,
mask_img=mask_4d_img)
@@ -292,11 +296,11 @@ def test_signal_extraction_with_maps_and_labels():
maps_data[labels_data == l, n - 1] = 1
- maps_img = nibabel.Nifti1Image(maps_data, labels_img.get_affine())
+ maps_img = nibabel.Nifti1Image(maps_data, labels_img.affine)
# Generate fake data
fmri_img, _ = generate_fake_fmri(shape=shape, length=length,
- affine=labels_img.get_affine())
+ affine=labels_img.affine)
# Extract signals from maps and labels: results must be identical.
maps_signals, maps_labels = signal_extraction.img_to_signals_maps(
@@ -309,7 +313,7 @@ def test_signal_extraction_with_maps_and_labels():
# Same thing with a mask, containing only 3 regions.
mask_data = (labels_data == 1) + (labels_data == 2) + (labels_data == 5)
mask_img = nibabel.Nifti1Image(mask_data.astype(np.int8),
- labels_img.get_affine())
+ labels_img.affine)
labels_signals, labels_labels = signal_extraction.img_to_signals_labels(
fmri_img, labels_img, mask_img=mask_img)
diff --git a/nilearn/signal.py b/nilearn/signal.py
index ca8428e730..dfeb8c5eb4 100644
--- a/nilearn/signal.py
+++ b/nilearn/signal.py
@@ -11,13 +11,11 @@
import warnings
import numpy as np
-import scipy
-from scipy import signal, stats, linalg
-from sklearn.utils import gen_even_slices
-from distutils.version import LooseVersion
+from scipy import stats, linalg, signal as sp_signal
+from sklearn.utils import gen_even_slices, as_float_array
from ._utils.compat import _basestring
-from ._utils.numpy_conversions import csv_to_array
+from ._utils.numpy_conversions import csv_to_array, as_ndarray
NP_VERSION = distutils.version.LooseVersion(np.version.short_version).version
@@ -26,7 +24,7 @@ def _standardize(signals, detrend=False, normalize=True):
""" Center and norm a given signal (time is along first axis)
Parameters
- ==========
+ ----------
signals: numpy.ndarray
Timeseries to standardize
@@ -38,7 +36,7 @@ def _standardize(signals, detrend=False, normalize=True):
to unit energy (sum of squares).
Returns
- =======
+ -------
std_signals: numpy.ndarray
copy of signals, normalized.
"""
@@ -47,12 +45,13 @@ def _standardize(signals, detrend=False, normalize=True):
signals = _detrend(signals, inplace=False)
else:
signals = signals.copy()
- if signals.shape[0] == 1:
- warnings.warn('Standardization of 3D signal has been requested but '
- 'would lead to zero values. Skipping.')
- return signals
if normalize:
+ if signals.shape[0] == 1:
+ warnings.warn('Standardization of 3D signal has been requested but '
+ 'would lead to zero values. Skipping.')
+ return signals
+
if not detrend:
# remove mean if not already detrended
signals = signals - signals.mean(axis=0)
@@ -74,7 +73,7 @@ def _mean_of_squares(signals, n_batches=20):
but uses a lot less memory.
Parameters
- ==========
+ ----------
signals : numpy.ndarray, shape (n_samples, n_features)
signal whose mean of squares must be computed.
@@ -106,7 +105,7 @@ def _detrend(signals, inplace=False, type="linear", n_batches=10):
case and uses a lot less memory.
Parameters
- ==========
+ ----------
signals : numpy.ndarray
This parameter must be two-dimensional.
Signals to detrend. A signal is a column.
@@ -125,21 +124,20 @@ def _detrend(signals, inplace=False, type="linear", n_batches=10):
the value, the lower the memory consumption.
Returns
- =======
+ -------
detrended_signals: numpy.ndarray
Detrended signals. The shape is that of 'signals'.
Notes
- =====
+ -----
If a signal of lenght 1 is given, it is returned unchanged.
"""
- if not inplace:
- signals = signals.copy()
+ signals = as_float_array(signals, copy=not inplace)
if signals.shape[0] == 1:
warnings.warn('Detrending of 3D signal has been requested but '
- 'would lead to zero values. Skipping.')
+ 'would lead to zero values. Skipping.')
return signals
signals -= np.mean(signals, axis=0)
@@ -167,17 +165,32 @@ def _detrend(signals, inplace=False, type="linear", n_batches=10):
def _check_wn(btype, freq, nyq):
wn = freq / float(nyq)
- if wn > 1.:
- warnings.warn('The frequency specified for the %s pass filter is '
- 'too high to be handled by a digital filter (superior to '
- 'nyquist frequency). It has been lowered to %.2f (nyquist '
- 'frequency).' % (btype, nyq))
- wn = 1.
+ if wn >= 1.:
+ # Results looked unstable when the critical frequencies were exactly
+ # at the Nyquist frequency; see the SciPy issue
+ # https://github.com/scipy/scipy/issues/6265. Before SciPy 1.0.0 the
+ # requirement was "wn should be between 0 and 1"; afterwards it became
+ # "0 < wn < 1". Because of the instability reported in that issue, we
+ # force the critical frequencies to be slightly less than 1, never 1.
+ wn = 1 - 10 * np.finfo(1.).eps
+ warnings.warn(
+ 'The frequency specified for the %s pass filter is '
+ 'too high to be handled by a digital filter (superior to '
+ 'nyquist frequency). It has been lowered to %.2f (nyquist '
+ 'frequency).' % (btype, wn))
+
+ if wn < 0.0: # equal to 0.0 is okay
+ wn = np.finfo(1.).eps
+ warnings.warn(
+ 'The frequency specified for the %s pass filter is '
+ 'too low to be handled by a digital filter (must be non-negative).'
+ ' It has been set to eps: %.5e' % (btype, wn))
+
return wn
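In concrete numbers: with t_r = 2.5 s the sampling rate is 0.4 Hz, so the Nyquist frequency is 0.2 Hz; a requested cutoff of 0.3 Hz gives wn = 0.3 / 0.2 = 1.5 and is clipped to just below 1. A sketch of the clipping arithmetic (the cutoff value is illustrative):

import numpy as np

t_r = 2.5                    # seconds between scans
nyq = (1. / t_r) / 2.        # Nyquist frequency: 0.2 Hz
wn = 0.3 / nyq               # 1.5, above Nyquist
if wn >= 1.:
    wn = 1 - 10 * np.finfo(1.).eps   # same clipping as _check_wn
print(nyq, wn)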
def butterworth(signals, sampling_rate, low_pass=None, high_pass=None,
- order=5, copy=False, save_memory=False):
+ order=5, copy=False):
""" Apply a low-pass, high-pass or band-pass Butterworth filter
Apply a filter to remove signal below the `low` frequency and above the
@@ -217,9 +230,9 @@ def butterworth(signals, sampling_rate, low_pass=None, high_pass=None,
"""
if low_pass is None and high_pass is None:
if copy:
- return signal.copy()
+ return signals.copy()
else:
- return signal
+ return signals
if low_pass is not None and high_pass is not None \
and high_pass >= low_pass:
@@ -245,75 +258,72 @@ def butterworth(signals, sampling_rate, low_pass=None, high_pass=None,
else:
critical_freq = critical_freq[0]
- b, a = signal.butter(order, critical_freq, btype=btype)
+ b, a = sp_signal.butter(order, critical_freq, btype=btype, output='ba')
if signals.ndim == 1:
# 1D case
- output = signal.filtfilt(b, a, signals)
+ output = sp_signal.filtfilt(b, a, signals)
if copy: # filtfilt does a copy in all cases.
signals = output
else:
signals[...] = output
else:
if copy:
- if (LooseVersion(scipy.__version__) < LooseVersion('0.10.0')):
- # filtfilt is 1D only in scipy 0.9.0
- signals = signals.copy()
- for timeseries in signals.T:
- timeseries[:] = signal.filtfilt(b, a, timeseries)
- else:
- # No way to save memory when a copy has been requested,
- # because filtfilt does out-of-place processing
- signals = signal.filtfilt(b, a, signals, axis=0)
+ # No way to save memory when a copy has been requested,
+ # because filtfilt does out-of-place processing
+ signals = sp_signal.filtfilt(b, a, signals, axis=0)
else:
# Lesser memory consumption, slower.
for timeseries in signals.T:
- timeseries[:] = signal.filtfilt(b, a, timeseries)
+ timeseries[:] = sp_signal.filtfilt(b, a, timeseries)
+
+ # results returned in-place
+
return signals
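With save_memory gone from the signature, filtering is now a plain call. A minimal sketch (random signals and illustrative cutoff frequencies):

import numpy as np
from nilearn.signal import butterworth

signals = np.random.RandomState(0).randn(100, 4)  # (n_scans, n_signals)
filtered = butterworth(signals, sampling_rate=1. / 2.5,
                       low_pass=0.1, high_pass=0.01, copy=True)
print(filtered.shape)  # (100, 4)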
def high_variance_confounds(series, n_confounds=5, percentile=2.,
detrend=True):
""" Return confounds time series extracted from series with highest
- variance.
-
- Parameters
- ==========
- series: numpy.ndarray
- Timeseries. A timeseries is a column in the "series" array.
- shape (sample number, feature number)
-
- n_confounds: int, optional
- Number of confounds to return
-
- percentile: float, optional
- Highest-variance series percentile to keep before computing the
- singular value decomposition, 0. <= `percentile` <= 100.
- series.shape[0] * percentile / 100 must be greater than n_confounds
-
- detrend: bool, optional
- If True, detrend timeseries before processing.
-
- Returns
- =======
- v: numpy.ndarray
- highest variance confounds. Shape: (samples, n_confounds)
-
- Notes
- ======
- This method is related to what has been published in the literature
- as 'CompCor' (Behzadi NeuroImage 2007).
-
- The implemented algorithm does the following:
-
- - compute sum of squares for each time series (no mean removal)
- - keep a given percentile of series with highest variances (percentile)
- - compute an svd of the extracted series
- - return a given number (n_confounds) of series from the svd with
- highest singular values.
-
- See also
- ========
- nilearn.image.high_variance_confounds
+ variance.
+
+ Parameters
+ ----------
+ series: numpy.ndarray
+ Timeseries. A timeseries is a column in the "series" array.
+ shape (sample number, feature number)
+
+ n_confounds: int, optional
+ Number of confounds to return
+
+ percentile: float, optional
+ Highest-variance series percentile to keep before computing the
+ singular value decomposition, 0. <= `percentile` <= 100.
+ series.shape[0] * percentile / 100 must be greater than n_confounds
+
+ detrend: bool, optional
+ If True, detrend timeseries before processing.
+
+ Returns
+ -------
+ v: numpy.ndarray
+ highest variance confounds. Shape: (samples, n_confounds)
+
+ Notes
+ -----
+ This method is related to what has been published in the literature
+ as 'CompCor' (Behzadi NeuroImage 2007).
+
+ The implemented algorithm does the following:
+
+ - compute sum of squares for each time series (no mean removal)
+ - keep a given percentile of series with highest variances (percentile)
+ - compute an svd of the extracted series
+ - return a given number (n_confounds) of series from the svd with
+ highest singular values.
+
+ See also
+ --------
+ nilearn.image.high_variance_confounds
"""
if detrend:
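The algorithm described in the Notes translates almost line for line into numpy. A toy sketch (the helper name is hypothetical; high_variance_confounds itself is the real implementation and additionally handles detrending):

import numpy as np
from scipy import linalg

def toy_high_variance_confounds(series, n_confounds=5, percentile=2.):
    var = np.mean(series ** 2, axis=0)        # sum of squares, no mean removal
    threshold = np.percentile(var, 100. - percentile)
    series = series[:, var > threshold]       # keep highest-variance series
    u, s, _ = linalg.svd(series, full_matrices=False)
    return u[:, :n_confounds]                 # series with top singular values

series = np.random.RandomState(0).randn(50, 300)
print(toy_high_variance_confounds(series).shape)  # (50, 5)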
@@ -345,76 +355,103 @@ def _ensure_float(data):
def clean(signals, sessions=None, detrend=True, standardize=True,
- confounds=None, low_pass=None, high_pass=None, t_r=2.5):
+ confounds=None, low_pass=None, high_pass=None, t_r=2.5,
+ ensure_finite=False):
"""Improve SNR on masked fMRI signals.
- This function can do several things on the input signals, in
- the following order:
- - detrend
- - standardize
- - remove confounds
- - low- and high-pass filter
+ This function can do several things on the input signals, in
+ the following order:
- Low-pass filtering improves specificity.
+ - detrend
+ - standardize
+ - remove confounds
+ - low- and high-pass filter
- High-pass filtering should be kept small, to keep some
- sensitivity.
+ Low-pass filtering improves specificity.
- Filtering is only meaningful on evenly-sampled signals.
+ High-pass filtering should be kept small, to keep some
+ sensitivity.
- Parameters
- ==========
- signals: numpy.ndarray
- Timeseries. Must have shape (instant number, features number).
- This array is not modified.
+ Filtering is only meaningful on evenly-sampled signals.
+
+ Parameters
+ ----------
+ signals: numpy.ndarray
+ Timeseries. Must have shape (instant number, features number).
+ This array is not modified.
sessions : numpy array, optional
Add a session level to the cleaning process. Each session will be
cleaned independently. Must be a 1D array of n_samples elements.
- confounds: numpy.ndarray, str or list of
- Confounds timeseries. Shape must be
- (instant number, confound number), or just (instant number,)
- The number of time instants in signals and confounds must be
- identical (i.e. signals.shape[0] == confounds.shape[0]).
- If a string is provided, it is assumed to be the name of a csv file
- containing signals as columns, with an optional one-line header.
- If a list is provided, all confounds are removed from the input
- signal, as if all were in the same array.
-
- t_r: float
- Repetition time, in second (sampling period).
-
- low_pass, high_pass: float
- Respectively low and high cutoff frequencies, in Hertz.
-
- detrend: bool
- If detrending should be applied on timeseries (before
- confound removal)
-
- standardize: bool
- If True, returned signals are set to unit variance.
-
- Returns
- =======
- cleaned_signals: numpy.ndarray
- Input signals, cleaned. Same shape as `signals`.
-
- Notes
- =====
- Confounds removal is based on a projection on the orthogonal
- of the signal space. See `Friston, K. J., A. P. Holmes,
- K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak.
- "Statistical Parametric Maps in Functional Imaging: A General
- Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210.
- `_
+ confounds: numpy.ndarray, str or list of
+ Confounds timeseries. Shape must be
+ (instant number, confound number), or just (instant number,)
+ The number of time instants in signals and confounds must be
+ identical (i.e. signals.shape[0] == confounds.shape[0]).
+ If a string is provided, it is assumed to be the name of a csv file
+ containing signals as columns, with an optional one-line header.
+ If a list is provided, all confounds are removed from the input
+ signal, as if all were in the same array.
+
+ t_r: float
+ Repetition time, in second (sampling period).
+
+ low_pass, high_pass: float
+ Respectively low and high cutoff frequencies, in Hertz.
+
+ detrend: bool
+ If detrending should be applied on timeseries (before
+ confound removal)
+
+ standardize: bool
+ If True, returned signals are set to unit variance.
+
+ ensure_finite: bool
+ If True, the non-finite values (NANs and infs) found in the data
+ will be replaced by zeros.
+
+ Returns
+ -------
+ cleaned_signals: numpy.ndarray
+ Input signals, cleaned. Same shape as `signals`.
+
+ Notes
+ -----
+ Confounds removal is based on a projection on the orthogonal
+ of the signal space. See `Friston, K. J., A. P. Holmes,
+ K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak.
+ "Statistical Parametric Maps in Functional Imaging: A General
+ Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210.
+ `_
+
+ See Also
+ --------
+ nilearn.image.clean_img
"""
+ if isinstance(low_pass, bool):
+ raise TypeError("low pass must be float or None but you provided "
+ "low_pass='{0}'".format(low_pass))
+ if isinstance(high_pass, bool):
+ raise TypeError("high pass must be float or None but you provided "
+ "high_pass='{0}'".format(high_pass))
+
if not isinstance(confounds,
(list, tuple, _basestring, np.ndarray, type(None))):
raise TypeError("confounds keyword has an unhandled type: %s"
% confounds.__class__)
-
+
+ if not isinstance(ensure_finite, bool):
+ raise ValueError("'ensure_finite' must be boolean type True or False "
+ "but you provided ensure_finite={0}".format(ensure_finite))
+
+ if not isinstance(signals, np.ndarray):
+ signals = as_ndarray(signals)
+
+ if ensure_finite:
+ signals[np.logical_not(np.isfinite(signals))] = 0
+
# Read confounds
if confounds is not None:
if not isinstance(confounds, (list, tuple)):
@@ -465,7 +502,7 @@ def clean(signals, sessions=None, detrend=True, standardize=True,
clean(signals[sessions == s],
detrend=detrend, standardize=standardize,
confounds=session_confounds, low_pass=low_pass,
- high_pass=high_pass, t_r=2.5)
+ high_pass=high_pass, t_r=t_r)
# detrend
signals = _ensure_float(signals)
@@ -474,25 +511,26 @@ def clean(signals, sessions=None, detrend=True, standardize=True,
# Remove confounds
if confounds is not None:
confounds = _ensure_float(confounds)
- confounds = _standardize(confounds, normalize=True, detrend=detrend)
-
- if (LooseVersion(scipy.__version__) > LooseVersion('0.9.0')):
- # Pivoting in qr decomposition was added in scipy 0.10
- Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True)
- Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float).eps * 100.]
- signals -= Q.dot(Q.T).dot(signals)
- else:
- Q, R = linalg.qr(confounds, mode='economic')
- non_null_diag = np.abs(np.diag(R)) > np.finfo(np.float).eps * 100.
- if np.all(non_null_diag):
- signals -= Q.dot(Q.T).dot(signals)
- elif np.any(non_null_diag):
- R = R[:, non_null_diag]
- confounds = confounds[:, non_null_diag]
- inv = scipy.linalg.inv(np.dot(R.T, R))
- signals -= confounds.dot(inv).dot(confounds.T).dot(signals)
+ confounds = _standardize(confounds, normalize=standardize,
+ detrend=detrend)
+ if not standardize:
+ # Improve numerical stability by controlling the range of
+ # confounds. We don't rely on _standardize as it removes any
+ # constant contribution to confounds.
+ confound_max = np.max(np.abs(confounds), axis=0)
+ confound_max[confound_max == 0] = 1
+ confounds /= confound_max
+
+ # Pivoting in qr decomposition was added in scipy 0.10
+ Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True)
+ Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float).eps * 100.]
+ signals -= Q.dot(Q.T).dot(signals)
if low_pass is not None or high_pass is not None:
+ if t_r is None:
+ raise ValueError("Repetition time (t_r) must be specified for "
+ "filtering")
+
signals = butterworth(signals, sampling_rate=1. / t_r,
low_pass=low_pass, high_pass=high_pass)
@@ -501,3 +539,5 @@ def clean(signals, sessions=None, detrend=True, standardize=True,
signals *= np.sqrt(signals.shape[0]) # for unit variance
return signals
+
+
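Putting the new pieces of clean() together: ensure_finite zeroes NaNs and infs up front, t_r now propagates into session-wise cleaning, and filtering without t_r raises. A minimal sketch (random data and illustrative cutoffs):

import numpy as np
from nilearn.signal import clean

rng = np.random.RandomState(0)
signals = rng.randn(100, 10)
signals[0, 0] = np.nan                   # zeroed by ensure_finite
confounds = rng.randn(100, 3)

cleaned = clean(signals, detrend=True, standardize=True,
                confounds=confounds, low_pass=0.1, high_pass=0.01,
                t_r=2.5, ensure_finite=True)
print(cleaned.shape)                     # (100, 10)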
diff --git a/nilearn/surface/__init__.py b/nilearn/surface/__init__.py
new file mode 100644
index 0000000000..5d0f188ddb
--- /dev/null
+++ b/nilearn/surface/__init__.py
@@ -0,0 +1,9 @@
+"""
+Functions for surface manipulation.
+"""
+
+from .surface import (vol_to_surf, load_surf_data,
+ load_surf_mesh, check_mesh_and_data)
+
+__all__ = ['vol_to_surf', 'load_surf_data', 'load_surf_mesh',
+ 'check_mesh_and_data']
diff --git a/nilearn/surface/data/README.txt b/nilearn/surface/data/README.txt
new file mode 100644
index 0000000000..28fc1ea0f2
--- /dev/null
+++ b/nilearn/surface/data/README.txt
@@ -0,0 +1,18 @@
+Each file named ball_cloud_n_samples.csv contains the 3D coordinates of n points
+evenly spaced in the unit ball. They have been precomputed and stored to save
+time when using the 'ball' sampling in nilearn.surface.vol_to_surf.
+They can be re-created like this:
+
+import numpy as np
+from nilearn import surface
+
+for n in [10, 20, 40, 80, 160]:
+ ball_cloud = surface._uniform_ball_cloud(n_points=n)
+ np.savetxt('./ball_cloud_{}_samples.csv'.format(n), ball_cloud)
+
+test_load_uniform_ball_cloud in nilearn/surface/tests/test_surface.py compares
+these loaded values and freshly computed ones.
+
+These values were computed with version 0.20 of scikit-learn, so positions
+computed with scikit-learn < 0.18 would be different (but just as good for our
+purposes), because the k-means implementation changed in 0.18.
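Reading a stored cloud back is the mirror of the snippet above, e.g.:

import numpy as np
points = np.loadtxt('ball_cloud_10_samples.csv')  # shape (10, 3)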
diff --git a/nilearn/surface/data/__init__.py b/nilearn/surface/data/__init__.py
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/nilearn/surface/data/__init__.py
@@ -0,0 +1 @@
+
diff --git a/nilearn/surface/data/ball_cloud_10_samples.csv b/nilearn/surface/data/ball_cloud_10_samples.csv
new file mode 100644
index 0000000000..6f6db4b565
--- /dev/null
+++ b/nilearn/surface/data/ball_cloud_10_samples.csv
@@ -0,0 +1,10 @@
+4.430692879471407331e-01 -6.727915282170289502e-02 5.217389116286745843e-01
+-5.317840981053942873e-01 -4.068448137668991826e-01 -8.569400476605214950e-03
+-4.922159923177735208e-01 4.405315020917301427e-01 1.164216441727377732e-01
+1.484607430814552376e-01 4.755451354444120149e-01 -4.550029691377610597e-01
+-2.694911402331963690e-01 -5.344932789330818917e-02 6.175229456732103106e-01
+6.490070953481569260e-01 -8.013888661041402434e-03 -1.770466541122445447e-01
+-4.396591656457559116e-01 8.092645769942548306e-02 -5.102020795701791567e-01
+1.544817889619344853e-01 -6.305953266357835485e-01 1.690989055075702852e-01
+2.195223141814307000e-01 5.678294290151263413e-01 2.729949839370077735e-01
+1.045641017925853095e-01 -3.893522723613875858e-01 -5.458543865437600617e-01
diff --git a/nilearn/surface/data/ball_cloud_160_samples.csv b/nilearn/surface/data/ball_cloud_160_samples.csv
new file mode 100644
index 0000000000..e387ac757a
--- /dev/null
+++ b/nilearn/surface/data/ball_cloud_160_samples.csv
@@ -0,0 +1,160 @@
+2.468214947373665313e-01 -2.792850579444242642e-01 7.644226031554264278e-01
+1.648811851375835691e-01 3.616226868928677507e-01 -5.157645439591338477e-01
+-6.421752485071410277e-01 -5.964630001035132745e-01 3.684182500200331584e-02
+3.098133088463730922e-01 -1.556702688221980602e-01 -1.868509332088056918e-01
+-6.694078139715229359e-01 2.877337087850644348e-01 5.112262704678833813e-01
+-2.982795964828102719e-01 6.995748968182190497e-01 1.679292639609455129e-01
+4.683911941561200720e-01 6.401872506633516924e-01 3.667912859733313269e-01
+-1.123444834661871672e-01 -1.625558343775632408e-01 -5.155715331026244908e-01
+4.351652218844926923e-01 -4.158680235419797144e-01 2.727055478028755919e-01
+-7.558753510293154676e-01 1.132205313861586876e-01 -4.222422687716799738e-01
+-1.870348481730470802e-01 -2.183136165729039069e-01 2.960101250829366010e-02
+8.464072766139388859e-01 2.637261705243060597e-02 -1.255928883824821152e-01
+8.094501200785853001e-02 -6.138955375785578594e-01 -1.384977215929950600e-01
+8.690841769531902627e-02 2.833922715363133560e-01 3.124778677202241228e-01
+1.500123143536800235e-02 -4.526430803330960817e-01 4.411353627408791667e-01
+-4.571504526262001877e-01 -3.649385158845419030e-01 6.145270027899357723e-01
+3.394919669151344466e-01 -5.474642580767447031e-01 -5.765913672740774487e-01
+9.140571202348847513e-02 6.262937330369138245e-01 -1.812513774153057255e-01
+3.470036166199403915e-01 4.257384055243893517e-01 -1.299675661180787756e-01
+-2.976965219792702122e-01 -7.071349012631434894e-01 -3.881158582899634224e-01
+3.390489656277657082e-01 -4.829266990248882313e-02 -7.761601132808267911e-01
+-2.309375199904392106e-01 1.449483491921083889e-01 -8.938795355593670555e-02
+-1.837116450402472567e-01 2.600023477836322927e-01 8.321252683298318020e-01
+8.782463555499016028e-01 -1.104622580592412584e-01 1.060906436299221822e-01
+-4.182090770253469869e-01 6.202743060412704290e-01 -4.515260298536341588e-01
+-8.282988768244941014e-01 -8.840818471237661680e-02 -1.887080043480907632e-01
+-3.923004183297359448e-01 -3.250216882983871652e-02 4.812198031278616117e-01
+-2.552972289204707632e-02 3.645535496631692740e-01 -7.923197490243029106e-01
+-3.385209106958740333e-01 2.771121326049834355e-03 -4.628226600308524841e-01
+-5.663559401608060373e-01 6.180701265410207812e-01 2.274603204069024498e-01
+1.320819709094266381e-01 3.475857019364561595e-01 7.473864284767448574e-01
+7.985108336016614183e-01 -2.806506730934799076e-01 -1.745671254230875269e-01
+-1.439795810027073297e-01 -8.507212452391853530e-01 1.222092760833896896e-01
+-5.754891553434456943e-03 5.000614029166573771e-02 5.265861894786806607e-02
+5.879752914091816995e-01 8.209667474469410642e-02 6.525024140559799735e-01
+-5.079588816660038919e-01 9.323514279738794042e-02 -1.878042190797122590e-01
+4.673456034191262320e-01 5.584000892320106768e-01 -4.139346881877839501e-01
+3.652636862051302780e-01 -7.334859868623129220e-01 -3.365223201393693353e-01
+-5.135220924039070045e-01 6.339468484335901433e-02 -7.129873108632988066e-01
+1.239169968349617679e-01 -2.665713263768796151e-02 3.741128835641054362e-01
+5.904857231460559630e-01 1.513857747295261624e-01 -1.604818143804731534e-01
+-1.453386733208530679e-01 5.469290016433041579e-01 6.823073676037709756e-01
+-7.864711593445462556e-01 7.435003039976385841e-02 3.240844473582713547e-01
+5.339281525574167409e-02 -5.998355599661194804e-01 -6.431867619248139434e-01
+-2.918349165125022071e-01 -7.130525109615926116e-01 3.765915017990039138e-01
+-2.365124063189308623e-01 -5.094205725279126185e-01 -1.047602328789979942e-01
+-8.065439155000714289e-02 -8.504013525754576808e-01 -1.347623312809653473e-01
+1.927454944422858363e-01 8.670324814366928701e-01 -2.679127063901753925e-02
+-5.302080641343123180e-01 -1.935259306395876178e-01 -2.544859069173710520e-01
+6.735400215247770994e-01 3.534035763667250363e-01 -4.466532921578159065e-01
+-4.172125099047981345e-01 1.798382134076172423e-01 7.135167829844780707e-01
+-1.821432898043334747e-01 3.686276732025608460e-01 4.842679521162811529e-01
+7.606009482740337668e-02 2.790486207747422745e-02 -2.676882966778288409e-01
+8.082843982156702189e-01 2.165481178233469584e-01 2.022511250407308669e-01
+-3.138271296202648775e-01 4.112349304752613288e-01 -6.769540992745032293e-01
+-4.150293320715858214e-01 3.816598222173525068e-01 -1.228577213586428026e-02
+4.999977575998048041e-01 2.238158781221078197e-01 1.443146361648925402e-01
+7.003313251553200391e-01 -4.569438280391905916e-01 4.773927573237642975e-03
+-5.458361115861972701e-01 -6.202114867903514295e-01 -2.261447670030410362e-01
+-6.366842590768960752e-01 5.688579850534767335e-01 -8.651551879613575413e-02
+4.711508966071004112e-02 6.296005486882787494e-01 1.823678017198123447e-01
+5.954192395922288661e-01 -5.707751775556215712e-01 3.282951895750269022e-01
+5.853351856229285222e-01 -1.335266614750007796e-01 5.079644102743441358e-02
+4.303038216863468435e-01 7.048545171086511019e-01 1.009260704304623668e-01
+5.920889602706936455e-02 -4.205022037679206903e-02 -8.631068965031709617e-01
+6.407380600152227101e-01 -2.109159490119343339e-01 4.763987887343109073e-01
+-7.572624200813509177e-01 3.719318448297973534e-01 1.751053559571232321e-01
+-7.018952599924107583e-01 -1.699557048188446995e-01 -4.799025683573010514e-01
+-5.734289712534911265e-01 -5.191406976304878684e-01 3.738262444740965695e-01
+2.196210522411545241e-01 2.331889682521952056e-01 -8.104709363207512895e-01
+2.959040228839492914e-01 -7.047044723642490460e-01 4.214810425205403610e-01
+5.309929197105632548e-02 3.283756162280757063e-01 -2.622555845213042081e-01
+-4.759436734683606396e-01 4.510983451346927131e-01 5.625293137069560245e-01
+-3.938106019927344881e-01 7.745299428052411228e-01 -1.028226922691525408e-01
+3.681347873318818742e-01 1.923060124837356055e-01 -3.553281716070366691e-01
+6.661319480525484948e-01 5.183720771078287326e-01 -1.428865712852173397e-01
+-5.213726141515270113e-02 -4.670760441316204048e-01 -4.402021306443001936e-01
+-8.200545534911118928e-02 8.328992336602568658e-01 -2.921045212511030886e-01
+-1.064890498681915626e-01 -4.223117349851726599e-01 7.642105013142310721e-01
+-6.302367537463960945e-01 -7.065814578095064302e-02 6.220258258883528679e-01
+6.100718842659123009e-01 3.396451626919621547e-01 4.721615099088823375e-01
+-8.612976828896171111e-02 6.290874956670777784e-01 -5.500073295049022759e-01
+-7.520388753130112514e-02 1.041413252002766354e-01 6.053251042949002736e-01
+2.369495338128007472e-02 -7.768555338421141432e-01 -4.002287767206850200e-01
+-6.807830147234464890e-01 4.051230109024499537e-01 -3.079060051500838724e-01
+8.581181584346736155e-03 -2.758755905162316724e-01 2.380411698997120795e-01
+1.066898388038960421e-01 -3.530861112851200745e-01 -7.728786344258047336e-01
+2.083728781094499327e-01 -8.406799658155466037e-01 -6.641630077411513067e-02
+-1.231967450261398922e-01 7.635102777258215845e-01 4.340332671140461618e-01
+-7.155301965960304544e-01 -2.065582778135153508e-01 3.549473996991046598e-01
+2.789336473770727409e-02 -2.980371267213346442e-01 -2.155511486601477955e-01
+-4.916444551490664616e-02 4.220976957082471226e-01 -9.814851625390088111e-03
+4.569326945368793469e-01 7.467668508769826907e-01 -1.510203195542751953e-01
+-1.373696474512782831e-02 7.693146030118874767e-02 8.824340335625189269e-01
+7.650763120111574445e-01 -3.234136723303453476e-01 2.669356473783646155e-01
+2.530924862189743996e-01 -1.749692729758766940e-01 1.062718806406038052e-01
+-5.035671050201951937e-01 -1.644007248577817118e-02 1.282983186083589411e-01
+7.211446017328163594e-01 -2.099486395562358554e-01 -4.565077390634975085e-01
+-7.983243128399025723e-01 2.208270092300380905e-01 -5.931041737608694353e-02
+4.593375041407231629e-01 -7.275080434543551577e-01 1.179482174670776345e-01
+-4.638289200512769050e-01 -2.384799710571752218e-01 -6.536041397015064369e-01
+3.669819618625125845e-01 4.969104497408465271e-01 5.359542848495167933e-01
+5.970786273129006227e-01 -4.680534710767403350e-01 -3.637091929637646937e-01
+4.067841164328352410e-01 2.782184703563588823e-01 7.546115963061841292e-01
+5.457758845684220761e-01 -6.647288636440732423e-01 -1.288193837764944061e-01
+5.849061595725044693e-01 7.800334371480646505e-03 -6.073912669715755275e-01
+2.881343154260685435e-01 4.478068849994625844e-01 1.740563450003982937e-01
+-5.066166631680895094e-01 -2.880226428457104970e-01 5.852096467941169528e-03
+-2.522682222299320043e-01 -4.985327174441900322e-01 -6.744249559398650673e-01
+-1.269400041067952900e-01 2.353700846748265352e-01 -4.740730498462337739e-01
+1.309874966533219609e-01 5.858628592998125217e-01 5.640061020832705108e-01
+4.568535145338776804e-01 2.737315484773085728e-01 -6.711626318151925341e-01
+-3.506755032090303481e-01 -3.298709593519617012e-01 2.811358057228456331e-01
+-2.219420818347545288e-01 5.638658666419928966e-01 -2.133276985894274336e-01
+4.530931617539931122e-01 2.845663153934350001e-02 3.248328399937018829e-01
+2.577969803053107856e-01 3.077749059797242268e-02 7.815861807769756142e-01
+2.346699780589680817e-01 -2.421109904959737880e-01 -5.081070469445037041e-01
+-7.843726188677656308e-02 -1.794222739461515392e-01 5.423577066293749915e-01
+3.122410559116524098e-01 -2.426760449035975375e-01 4.284037251295017557e-01
+-3.484635442783707293e-01 6.133596453606899868e-01 4.143046923453875263e-01
+-2.529399500694943792e-01 -5.638243059815465363e-01 5.735409548714441863e-01
+1.120829745752853907e-01 5.120340505748527393e-02 -5.809703689704703855e-01
+-3.622550481418110868e-01 -8.149773266064591626e-01 -1.080471581753022214e-01
+-1.853408062026692393e-01 3.708998219166448340e-01 2.182880768325157206e-01
+-2.257001725852141039e-01 -9.309706670487845337e-02 -1.996596342827560433e-01
+-2.008584060224647416e-01 -2.075040301992876479e-01 -8.134693585302662511e-01
+1.649298085988213924e-01 7.821552396427334797e-01 3.511606273600750838e-01
+-5.489709082748043256e-01 -4.823561396678392454e-01 -4.940722557558900330e-01
+2.030012727824936636e-01 7.176283421105453808e-01 -4.279890122616394899e-01
+-7.669562805377801995e-01 -3.983170381191364906e-01 -2.104138197570341873e-01
+8.061134875002256006e-01 3.099455397398410694e-01 -6.618655562457691210e-02
+2.265538342430305566e-04 -7.538362707287225017e-01 4.498426707603629393e-01
+8.931433401460766447e-03 -1.878959994130762756e-01 8.723841776857562680e-01
+-4.199158990083718956e-01 -6.770122868209182965e-01 1.233473639343237588e-01
+2.694418010592212198e-01 1.249046905753480041e-01 1.310531483759063559e-02
+7.798778964064685404e-01 4.207426885888206763e-02 3.850585469343850153e-01
+-3.763845387380292395e-01 3.422740604127513797e-01 -3.598968981176510762e-01
+-8.599518475701178888e-02 8.499259403005439140e-01 6.082481889346037091e-02
+-2.204791281044143347e-01 1.062849323126351109e-01 -7.915744581551200643e-01
+-5.742473846094791057e-01 2.905248214049359623e-01 -5.603509224412124290e-01
+4.603913045119732983e-01 -1.451431000335821619e-01 6.954632396115836279e-01
+2.745124083136074589e-01 -4.816456989393068100e-01 -3.197265423036383947e-01
+4.563465192521649616e-01 -4.524959147008916216e-01 6.069929997245064879e-01
+1.500693245523793962e-01 -8.308148608219538822e-01 2.128737479430521662e-01
+-5.012038604949585752e-01 2.544265105200320676e-01 2.646634101322346000e-01
+2.034365116603361334e-01 -4.877363248568518661e-01 1.363474764350168233e-01
+1.644494555455654206e-01 -5.392335034104438485e-01 6.575922634376994980e-01
+-1.965526298587466081e-01 4.541537984281525386e-02 3.063549809837030069e-01
+5.223936277221787883e-01 -1.085479334645074179e-01 -3.185624259239997480e-01
+-7.935792144860089881e-01 -3.450292305076666510e-01 1.269266633384188436e-01
+-3.294802560459185736e-01 -3.896032553367997919e-01 -3.415786905955628949e-01
+6.766295192641297307e-01 5.044547361238745919e-01 1.839948362535360205e-01
+2.835588170606819780e-01 1.844072938727307209e-01 5.034443100482813849e-01
+2.712698599400479860e-01 5.530819865924837542e-01 -6.701390881303301850e-01
+3.912762540896945196e-01 -4.347992197582773999e-01 -5.949908193635398967e-02
+7.855949526701798691e-01 1.308624669259461082e-01 -3.959208239067619761e-01
+-9.250385008389788721e-02 -5.494664516439904389e-01 1.264457922344831287e-01
+4.551698116500383362e-01 -3.086125409484862248e-01 -6.794461883909840294e-01
+-3.192949948158811946e-01 -1.058942542901264022e-01 7.903204360151183661e-01
+-8.612147447600198014e-01 -5.786066842228993617e-02 7.198467807550619491e-02
diff --git a/nilearn/surface/data/ball_cloud_20_samples.csv b/nilearn/surface/data/ball_cloud_20_samples.csv
new file mode 100644
index 0000000000..92bc638f9c
--- /dev/null
+++ b/nilearn/surface/data/ball_cloud_20_samples.csv
@@ -0,0 +1,20 @@
+5.842216548676750776e-01 4.376164784861447665e-01 1.663757942055911487e-02
+-2.194847686244247587e-01 -7.109372386034291669e-01 -6.537449345099249909e-02
+-4.471528844891878296e-02 7.064973136116384778e-01 -1.786583114313875786e-01
+-6.707160253685089391e-01 -2.367542857005384138e-01 -1.792212957579401300e-01
+2.178549901595938643e-01 -2.785285000717222514e-01 -6.560721021198115954e-01
+6.871573796912544552e-01 -2.228533963342110569e-01 1.723513240770535859e-01
+2.080943661175675696e-01 -5.279849681932176075e-01 4.646502878400943226e-01
+-3.486157656624189194e-01 5.515283507105400718e-01 3.393222567555757574e-01
+-1.483797620716929000e-01 2.554959811707789227e-02 7.213829985668764877e-01
+6.399999939748245437e-01 -4.124943553074976454e-02 -3.554664972590649286e-01
+3.473983519451797619e-01 -6.311387933802345973e-01 -1.511337279634965280e-01
+2.851339828515243591e-01 3.933725239953038133e-01 -5.586017146778773368e-01
+-2.767360871741600947e-01 1.978903233653403482e-01 -6.509497378458691808e-01
+1.926190211704723876e-01 5.646672295229083760e-01 4.209662240925434173e-01
+-2.788672784884560496e-01 -4.035829215738325204e-01 -5.581754700210062037e-01
+-3.954094300072152901e-01 -4.734944802989128343e-01 4.183409557265382328e-01
+4.370919278787561701e-01 6.281309763243778099e-02 6.011114535121920843e-01
+-5.973605410158238094e-01 3.709749576328147391e-01 -1.957838205252134534e-01
+-3.346005309885992721e-03 -2.204224341652283314e-02 1.993257851754927368e-02
+-6.589176106946886824e-01 5.817647547833440513e-02 3.533542909878894522e-01
diff --git a/nilearn/surface/data/ball_cloud_40_samples.csv b/nilearn/surface/data/ball_cloud_40_samples.csv
new file mode 100644
index 0000000000..a9c7d0fb95
--- /dev/null
+++ b/nilearn/surface/data/ball_cloud_40_samples.csv
@@ -0,0 +1,40 @@
+-4.355925735060561799e-01 4.744387185092658332e-01 4.865608526234390663e-01
+-3.652317993224113435e-02 -7.908381270745744596e-01 -1.251777185204012988e-01
+2.333397505291869289e-01 2.802253440898189174e-01 3.600474979927715397e-02
+2.111812351590668796e-02 2.961613030236745492e-01 -7.508650501371789687e-01
+-4.766488848393080069e-01 -8.479208598375591321e-02 5.993662032710967980e-01
+3.141316285062427660e-01 -1.678132075039663051e-01 -9.064069879073227698e-02
+7.298781240946433657e-01 -2.630212394709173784e-01 -2.286430079318703401e-01
+-4.656458831270897381e-02 6.886613737175500960e-01 -3.245004469456924112e-01
+-4.441568138983346126e-01 -5.876342599005159734e-02 -6.461578053536478627e-01
+-3.348592051245692369e-01 -5.572140611078181260e-01 4.536692150796629575e-01
+7.163492809829055119e-01 2.935295978963431973e-01 -1.738748070031403969e-01
+-1.138927122007272058e-01 1.460799819052024107e-01 7.919312916851095618e-01
+-7.492691258603926263e-01 1.611454568055841752e-01 -2.534102745009135238e-01
+5.290834163851196870e-01 5.068273815856546571e-01 3.226095807699721485e-01
+3.575754697470022170e-01 7.257307657568657921e-01 -3.857038869544984760e-02
+-3.104237107461114697e-01 1.222528056235336058e-01 -7.513375731056939899e-02
+6.137718777492528338e-02 -1.609172754731106880e-01 3.350016021106668185e-01
+3.843526282477446321e-01 1.844676363336974767e-01 6.505544125182312065e-01
+3.795275138004317594e-01 4.867028817977501198e-01 -4.815570704861783513e-01
+5.871613686594342463e-01 -4.527320145979236155e-01 2.273577737549672129e-01
+1.492828191477571786e-01 -7.033946805226763965e-01 3.498588808145969287e-01
+2.427625040176864882e-02 -1.840277657726235583e-01 -7.857292897854342151e-01
+-6.528696478949383986e-01 -3.161253853198419117e-01 -2.924521439930459765e-01
+7.392272378701251201e-01 4.257102296353597326e-02 2.476561477376221787e-01
+-2.381793617014762721e-01 -5.232800307892668812e-01 -5.280100914603548246e-01
+-4.024240850485799048e-01 4.152504770568481995e-01 -5.301851804296517923e-01
+4.631048310055247974e-01 -2.513511941777509251e-01 5.941151204256300833e-01
+2.830704807156398939e-01 -4.886571409792251353e-01 -5.411117627281345488e-01
+-7.190884146531447696e-01 2.031575934922867088e-01 2.468785592176648280e-01
+-1.520360951548836714e-01 -3.353976849406489769e-01 -2.215707978639088610e-02
+5.118089854881623380e-01 1.808855737854670256e-02 -5.920409525060285061e-01
+4.048638420577529984e-02 -3.278125647232136841e-01 7.507151173577555348e-01
+3.959345839124422950e-01 -6.653021545747872523e-01 -1.190012904514499564e-01
+-5.132697037326207079e-01 6.009270879951913447e-01 -4.781001945560951760e-02
+-1.117043936062098869e-02 3.505178394610215797e-02 -3.709295515413487121e-01
+-1.184643048675626253e-01 7.519029958046901108e-01 1.558972399018141719e-01
+-4.647612097508849738e-01 -6.727320587892747783e-01 -3.159514863799117029e-02
+-7.038654999077433860e-01 -2.507108609297006296e-01 1.780364813053967044e-01
+-1.159153922062834408e-01 2.400759284123138715e-01 3.067756561560779915e-01
+8.885437884847594003e-02 5.971260827465554311e-01 5.340625055704769641e-01
diff --git a/nilearn/surface/data/ball_cloud_80_samples.csv b/nilearn/surface/data/ball_cloud_80_samples.csv
new file mode 100644
index 0000000000..5d74eae9f2
--- /dev/null
+++ b/nilearn/surface/data/ball_cloud_80_samples.csv
@@ -0,0 +1,80 @@
+3.262555142760086846e-01 -3.854636161803240979e-01 -6.405913920868614442e-01
+-3.332508030308519165e-01 2.397322704687279982e-01 -1.759194149338177415e-01
+3.946205752335784078e-01 4.469596871557266621e-02 2.475783731288624945e-01
+-2.698262853500476011e-01 -6.702922019782792118e-01 3.917143756213053440e-01
+-3.772515586198929016e-01 -5.154965029400488952e-01 -4.739218571501211952e-01
+6.147098388511126332e-01 4.914510555614984288e-01 2.188045787455347724e-01
+-5.575197499412268970e-02 1.483376456697227397e-01 8.213707485711944090e-01
+1.972455582767498050e-01 7.321495652968792456e-01 -3.000696732828219138e-01
+-7.512186315618207821e-02 7.479681693352847605e-01 3.802239381509175398e-01
+-8.228920647326849469e-01 1.044704429997933975e-01 1.545615843076205698e-01
+-6.038276170274072818e-01 3.287790237275440286e-01 4.627635126178621938e-01
+4.922148499181822623e-01 -5.486158829283857008e-01 3.201847413101034934e-01
+-2.404020158017483866e-01 9.860112333542396523e-02 -7.698187867920961436e-01
+6.425883456152909137e-03 -4.747621878385193117e-01 1.126924368580077412e-01
+7.821090662179631670e-01 1.673272549263735975e-01 2.219534981876404023e-01
+5.231871762794176178e-02 8.163108991953083038e-01 1.172786463441832620e-02
+3.302703311502487304e-01 1.550291269531196559e-01 7.438113705479559856e-01
+3.812002873204423103e-01 2.934803589349491704e-01 -6.569345041348834124e-01
+-3.435171503525978332e-01 -8.279828523635693327e-02 -3.306119449185566106e-01
+-5.043181294925269675e-01 -4.306633896467955136e-01 4.476122856102467407e-01
+-5.183106244606362489e-01 5.841391886205067330e-01 -2.169003386990010218e-01
+6.575267066155660034e-01 -4.954813310478879407e-01 -4.223476793674293267e-02
+7.329406638685140862e-01 -2.515797467053866310e-01 2.906347910343493046e-01
+-1.744079311377981223e-01 3.663860101700270322e-01 1.894222054519310861e-01
+-6.805253528342450897e-01 -4.521471349707480436e-01 -1.952663038227593351e-01
+-5.773067250476234813e-01 1.590944012837150201e-01 -5.940514691077994902e-01
+-4.455140404174229990e-02 -3.324744962058523989e-01 -7.380776950989580421e-01
+2.592616770890403266e-02 8.841013134651969085e-02 -4.560627480591995520e-01
+3.213150846548978867e-01 -7.644932653869754846e-01 3.403045280897368935e-02
+2.291700544105458853e-03 -3.494251037871047849e-01 -2.843657153813490823e-01
+1.233803162885783178e-01 5.168491365870080401e-01 6.358608177699590636e-01
+2.517025147534251439e-01 -5.162970264425378319e-01 6.292509173747389273e-01
+-2.318209167164397566e-01 -3.026995005771295974e-02 4.078120467475971345e-01
+4.461373732622516686e-01 7.177293120218344313e-01 -4.556990930868482642e-02
+6.476714138224308748e-01 6.455329883597886453e-02 5.562676380144352395e-01
+-1.860326782314073585e-03 4.137848616614330788e-01 -2.304991323652210156e-01
+8.021401343655394367e-01 -7.084166892440788010e-02 -5.894136190358661986e-02
+7.349419441573475098e-02 -2.276451363781770987e-01 3.675758106983507689e-01
+-2.975633001797062249e-01 7.517144175645111837e-01 1.263782341998433492e-02
+-1.654806314595003991e-01 7.354338964201146966e-01 -3.396881656072840983e-01
+-1.120639096348921143e-01 -8.344382423010976435e-01 6.577606706062909259e-02
+-3.637489639080681014e-01 -1.767479509276985439e-01 7.311727593762991084e-01
+-4.021033756576388174e-01 1.529386000292534964e-01 7.130353977625731865e-01
+3.415615651866612534e-01 -3.007930900605256253e-01 4.073969792121461680e-02
+1.502188553525203585e-02 3.235836151696099838e-01 -7.877870216196302122e-01
+-3.345006802541954372e-01 4.558299016383890456e-01 -5.755683485402420807e-01
+1.203756082224029592e-02 2.627887156059839982e-02 -1.542444189930148972e-02
+-4.663240545186318475e-01 2.929327299897975870e-02 1.169613316765236999e-01
+4.473564721638071395e-01 -2.258065583148393396e-01 6.109621127614093261e-01
+-6.768460698420405430e-01 4.315266410020058840e-01 7.725832435508381280e-02
+-7.568922787390219842e-01 -3.119072325319900418e-01 1.778584871590067895e-01
+-4.461819946533983061e-01 6.252691311717589739e-01 2.881396758171432260e-01
+5.057441613625023313e-01 5.273956235309013030e-01 -3.929472433882583715e-01
+3.181508175870436661e-01 6.899555095762603951e-01 3.339656790967062139e-01
+4.788324441630524841e-01 4.041751499797106018e-01 5.249591625440308551e-01
+-7.880995627272365800e-01 -9.689671450119406726e-02 -1.727745844121635899e-01
+6.524771249266615725e-02 -7.667568209801455259e-01 -2.303001221313602798e-01
+6.863938253770456299e-01 -3.016940030855146393e-01 -3.609839592462449342e-01
+5.382811868381239817e-01 -4.656238114393324717e-02 -6.279592111782595909e-01
+-6.337394409017619301e-01 -2.042583410740495542e-01 -5.177854902903320511e-01
+1.423836081915543991e-02 -6.227240795530529782e-01 -5.727832240846502110e-01
+-4.920209959256136023e-01 -6.609401457507861899e-01 7.994743993583935948e-02
+-7.419867464825107284e-01 2.566204459980288011e-01 -2.680642576349015482e-01
+4.040763122972179655e-01 2.104976972928298828e-01 -1.649815270206576034e-01
+-3.202796101404180029e-01 -7.329760198396092497e-01 -2.272786033767917324e-01
+-2.600236201539403158e-01 4.611380693255858199e-01 5.907607132512578962e-01
+3.403009680656256530e-01 -1.308499373870331828e-01 -2.817626826700376785e-01
+9.911633677629802519e-02 -1.983687757144718955e-01 8.099661750503094337e-01
+9.931747735602791671e-02 1.874480194406295819e-01 4.172954251140651438e-01
+7.377533848158671637e-01 4.063241974571336068e-01 -1.021876308804053474e-01
+1.604103558010863684e-01 -4.396206357012594906e-02 -7.997339754708899795e-01
+2.174087251782576280e-01 4.273786499174761500e-01 1.036442893327847309e-01
+-3.419068379666388435e-01 -2.570568273197633147e-01 -7.386581700006532580e-01
+-6.716836479592275255e-01 -5.198136093900693894e-02 4.843655667794159458e-01
+-1.478851135845805076e-01 -4.783618056577351174e-01 6.528436707760401081e-01
+1.182268438677049893e-01 5.938126576606220786e-01 -5.849609632701525674e-01
+7.177074680573874987e-01 1.587754580273513239e-01 -3.933932296629837899e-01
+4.035343034611867941e-01 -6.122033662110912822e-01 -3.533445368316120438e-01
+-3.114489072184036811e-01 -2.986883311327169221e-01 1.577086348400994278e-02
+1.164072967095364369e-01 -7.538461188639004051e-01 3.909952695742038853e-01
diff --git a/nilearn/surface/surface.py b/nilearn/surface/surface.py
new file mode 100644
index 0000000000..00c1f795d2
--- /dev/null
+++ b/nilearn/surface/surface.py
@@ -0,0 +1,721 @@
+"""
+Functions for surface manipulation.
+"""
+import os
+import warnings
+import gzip
+from distutils.version import LooseVersion
+
+import numpy as np
+from scipy import sparse, interpolate
+import sklearn.preprocessing
+import sklearn.cluster
+try:
+ from sklearn.exceptions import EfficiencyWarning
+except ImportError:
+ class EfficiencyWarning(UserWarning):
+ """Warning used to notify the user of inefficient computation."""
+
+import nibabel
+from nibabel import gifti
+
+from ..image import load_img
+from ..image import resampling
+from .._utils.compat import _basestring
+from .. import _utils
+
+
+def _uniform_ball_cloud(n_points=20, dim=3, n_monte_carlo=50000):
+ """Get points uniformly spaced in the unit ball."""
+ rng = np.random.RandomState(0)
+ mc_cube = rng.uniform(-1, 1, size=(n_monte_carlo, dim))
+ mc_ball = mc_cube[(mc_cube**2).sum(axis=1) <= 1.]
+ centroids, assignments, _ = sklearn.cluster.k_means(
+ mc_ball, n_clusters=n_points, random_state=0)
+ return centroids
+
+
+def _load_uniform_ball_cloud(n_points=20):
+ stored_points = os.path.abspath(
+ os.path.join(__file__, '..', 'data',
+ 'ball_cloud_{}_samples.csv'.format(n_points)))
+ if os.path.isfile(stored_points):
+ points = np.loadtxt(stored_points)
+ return points
+ warnings.warn(
+ 'Cached sample positions are provided for '
+        'n_samples = 10, 20, 40, 80, 160. Since the number of samples does '
+        'not have a big impact on the result, we strongly recommend using one '
+ 'of these values when using kind="ball" for much better performance.',
+ EfficiencyWarning)
+ return _uniform_ball_cloud(n_points=n_points)
+
+
+def _face_outer_normals(mesh):
+ """Get the normal to each triangle in a mesh.
+
+ They are the outer normals if the mesh respects the convention that the
+ direction given by the direct order of a triangle's vertices (right-hand
+ rule) points outwards.
+ """
+ vertices, faces = load_surf_mesh(mesh)
+ face_vertices = vertices[faces]
+ # The right-hand rule gives the direction of the outer normal
+ normals = np.cross(face_vertices[:, 1, :] - face_vertices[:, 0, :],
+ face_vertices[:, 2, :] - face_vertices[:, 0, :])
+ normals = sklearn.preprocessing.normalize(normals)
+ return normals
+
+
+def _surrounding_faces(mesh):
+ """Get matrix indicating which faces the nodes belong to.
+
+ i, j is set if node i is a vertex of triangle j.
+ """
+ vertices, faces = load_surf_mesh(mesh)
+ n_faces = faces.shape[0]
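+    # Entry (i, j) is set when vertex i belongs to face j: each of the
+    # n_faces triangles contributes one entry per vertex.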
+ return sparse.csr_matrix((np.ones(3 * n_faces), (faces.ravel(), np.tile(
+ np.arange(n_faces), (3, 1)).T.ravel())), (vertices.shape[0], n_faces))
+
+
+def _vertex_outer_normals(mesh):
+ """Get the normal at each vertex in a triangular mesh.
+
+ They are the outer normals if the mesh respects the convention that the
+ direction given by the direct order of a triangle's vertices (right-hand
+ rule) points outwards.
+ """
+ vertices, faces = load_surf_mesh(mesh)
+ vertex_faces = _surrounding_faces(mesh)
+ face_normals = _face_outer_normals(mesh)
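+    # The incidence matrix sums, for each vertex, the normals of its
+    # adjacent faces; normalizing turns that sum into an average direction.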
+ normals = vertex_faces.dot(face_normals)
+ return sklearn.preprocessing.normalize(normals)
+
+
+def _ball_sample_locations(mesh, affine, ball_radius=3., n_points=20):
+ """Locations to draw samples from to project volume data onto a mesh.
+
+ For each mesh vertex, the locations of `n_points` points evenly spread in a
+ ball around the vertex are returned.
+
+ Parameters
+ ----------
+    mesh : pair of numpy.ndarray.
+ mesh[0] contains the 3d coordinates of the vertices
+ (shape n_vertices, 3)
+ mesh[1] contains, for each triangle, the indices into mesh[0] of its
+ vertices (shape n_triangles, 3)
+
+ affine : array of shape (4, 4)
+ affine transformation from image voxels to the vertices' coordinate
+ space.
+
+ ball_radius : float, optional (default=3.)
+ size in mm of the neighbourhood around each vertex in which to draw
+ samples
+
+ n_points : int, optional (default=20)
+ number of samples to draw for each vertex.
+
+ Returns
+ -------
+ numpy array, shape (n_vertices, n_points, 3)
+ The locations, in voxel space, from which to draw samples.
+ First dimension iterates over mesh vertices, second dimension iterates
+ over the sample points associated to a vertex, third dimension is x, y,
+ z in voxel space.
+
+ """
+ vertices, faces = mesh
+ offsets_world_space = _load_uniform_ball_cloud(
+ n_points=n_points) * ball_radius
+ mesh_voxel_space = np.asarray(
+ resampling.coord_transform(*vertices.T,
+ affine=np.linalg.inv(affine))).T
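+    # The offsets are displacements, not positions: map them to voxel
+    # space with the linear part of the affine only (no translation).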
+ linear_map = np.eye(affine.shape[0])
+ linear_map[:-1, :-1] = affine[:-1, :-1]
+ offsets_voxel_space = np.asarray(
+ resampling.coord_transform(*offsets_world_space.T,
+ affine=np.linalg.inv(linear_map))).T
+ sample_locations_voxel_space = (mesh_voxel_space[:, np.newaxis, :] +
+ offsets_voxel_space[np.newaxis, :])
+ return sample_locations_voxel_space
+
+
+def _line_sample_locations(
+ mesh, affine, segment_half_width=3., n_points=10):
+ """Locations to draw samples from to project volume data onto a mesh.
+
+ For each mesh vertex, the locations of `n_points` points evenly spread in a
+ segment of the normal to the vertex are returned. The line segment has
+ length 2 * `segment_half_width` and is centered at the vertex.
+
+ Parameters
+ ----------
+ mesh : pair of numpy.ndarray.
+ mesh[0] contains the 3d coordinates of the vertices
+ (shape n_vertices, 3)
+ mesh[1] contains, for each triangle, the indices into mesh[0] of its
+ vertices (shape n_triangles, 3)
+
+ affine : numpy.ndarray of shape (4, 4)
+ affine transformation from image voxels to the vertices' coordinate
+ space.
+
+ segment_half_width : float, optional (default=3.)
+ size in mm of the neighbourhood around each vertex in which to draw
+ samples
+
+ n_points : int, optional (default=10)
+ number of samples to draw for each vertex.
+
+ Returns
+ -------
+ numpy array, shape (n_vertices, n_points, 3)
+ The locations, in voxel space, from which to draw samples.
+ First dimension iterates over mesh vertices, second dimension iterates
+ over the sample points associated to a vertex, third dimension is x, y,
+ z in voxel space.
+
+ """
+ vertices, faces = mesh
+ normals = _vertex_outer_normals(mesh)
+ offsets = np.linspace(-segment_half_width, segment_half_width, n_points)
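+    # Broadcasting the (n_points, 1, 1) offsets against the (n_vertices, 3)
+    # normals yields one sample per (offset, vertex) pair; rollaxis then
+    # puts the vertex dimension first.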
+ sample_locations = vertices[
+ np.newaxis, :, :] + normals * offsets[:, np.newaxis, np.newaxis]
+ sample_locations = np.rollaxis(sample_locations, 1)
+ sample_locations_voxel_space = np.asarray(
+ resampling.coord_transform(
+ *np.vstack(sample_locations).T,
+ affine=np.linalg.inv(affine))).T.reshape(sample_locations.shape)
+ return sample_locations_voxel_space
+
+
+def _sample_locations(mesh, affine, radius, kind='line', n_points=None):
+ """Get either ball or line sample locations."""
+ projectors = {
+ 'line': _line_sample_locations,
+ 'ball': _ball_sample_locations
+ }
+ if kind not in projectors:
+ raise ValueError(
+ '"kind" must be one of {}'.format(tuple(projectors.keys())))
+ projector = projectors[kind]
+ # let the projector choose the default for n_points
+ # (for example a ball probably needs more than a line)
+ loc_kwargs = ({} if n_points is None else {'n_points': n_points})
+ sample_locations = projector(
+ mesh, affine, radius, **loc_kwargs)
+ return sample_locations
+
+
+def _masked_indices(sample_locations, img_shape, mask=None):
+ """Get the indices of sample points which should be ignored.
+
+    Parameters
+    ----------
+ sample_locations : array, shape(n_sample_locations, 3)
+ The coordinates of candidate interpolation points
+
+ img_shape : tuple
+ The dimensions of the image to be sampled
+
+ mask : array of shape img_shape or None
+ Part of the image to be masked. If None, don't apply any mask.
+
+ Returns
+ -------
+ array of shape (n_sample_locations,)
+ True if this particular location should be ignored (outside of image or
+ masked).
+
+ """
+ kept = (sample_locations >= 0).all(axis=1)
+ for dim, size in enumerate(img_shape):
+ kept = np.logical_and(kept, sample_locations[:, dim] < size)
+ if mask is not None:
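+        # Round the remaining locations to the nearest voxel and keep
+        # only those whose voxel lies inside the mask.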
+ indices = np.asarray(np.round(sample_locations[kept]), dtype=int)
+ kept[kept] = mask[
+ indices[:, 0], indices[:, 1], indices[:, 2]] != 0
+ return ~kept
+
+
+def _projection_matrix(mesh, affine, img_shape,
+ kind='line', radius=3., n_points=None, mask=None):
+ """Get a sparse matrix that projects volume data onto a mesh.
+
+ Parameters
+ ----------
+ mesh : str or numpy.ndarray
+ Either a file containing surface mesh geometry (valid formats
+ are .gii or Freesurfer specific files such as .orig, .pial,
+ .sphere, .white, .inflated) or a list of two Numpy arrays,
+ the first containing the x-y-z coordinates of the mesh
+ vertices, the second containing the indices (into coords)
+ of the mesh faces.
+
+ affine : array of shape (4, 4)
+ affine transformation from image voxels to the vertices' coordinate
+ space.
+
+ img_shape : 3-tuple of integers
+ The shape of the image to be projected.
+
+ kind : {'line', 'ball'}
+ The strategy used to sample image intensities around each vertex.
+
+ - 'line' (the default):
+ samples are regularly spaced along the normal to the mesh, over the
+ interval [-radius, +radius].
+ - 'ball':
+ samples are regularly spaced inside a ball centered at the mesh
+ vertex.
+
+ radius : float, optional (default=3.).
+ The size (in mm) of the neighbourhood from which samples are drawn
+ around each node.
+
+ n_points : int or None, optional (default=None)
+ How many samples are drawn around each vertex and averaged. If None,
+ use a reasonable default for the chosen sampling strategy (20 for
+ 'ball' or 10 for 'line').
+ For performance reasons, if using kind="ball", choose `n_points` in
+ [10, 20, 40, 80, 160] (default is 20), because cached positions are
+ available.
+
+ mask : array of shape img_shape or None
+ Part of the image to be masked. If None, don't apply any mask.
+
+ Returns
+ -------
+ scipy.sparse.csr_matrix
+        Shape (n_mesh_vertices, n_voxels). The dot product of this matrix with
+ an image (represented as a column vector) gives the projection onto mesh
+ vertices.
+
+ See Also
+ --------
+ nilearn.surface.vol_to_surf
+ Compute the projection for one or several images.
+
+ """
+ # A user might want to call this function directly so check mask size.
+ if mask is not None and tuple(mask.shape) != img_shape:
+ raise ValueError('mask should have shape img_shape')
+ mesh = load_surf_mesh(mesh)
+ sample_locations = _sample_locations(
+ mesh, affine, kind=kind, radius=radius, n_points=n_points)
+ sample_locations = np.asarray(np.round(sample_locations), dtype=int)
+ n_vertices, n_points, img_dim = sample_locations.shape
+ masked = _masked_indices(np.vstack(sample_locations), img_shape, mask=mask)
+ sample_locations = np.rollaxis(sample_locations, -1)
+ sample_indices = np.ravel_multi_index(
+ sample_locations, img_shape, mode='clip').ravel()
+ row_indices, _ = np.mgrid[:n_vertices, :n_points]
+ row_indices = row_indices.ravel()
+ row_indices = row_indices[~masked]
+ sample_indices = sample_indices[~masked]
+ weights = np.ones(len(row_indices))
+ proj = sparse.csr_matrix(
+ (weights, (row_indices, sample_indices.ravel())),
+ shape=(n_vertices, np.prod(img_shape)))
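+    # l1-normalizing each row makes the projection average, for each
+    # vertex, the values of its kept sample voxels.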
+ proj = sklearn.preprocessing.normalize(proj, axis=1, norm='l1')
+ return proj
+
+
+def _nearest_voxel_sampling(images, mesh, affine, kind='ball', radius=3.,
+ n_points=None, mask=None):
+ """In each image, measure the intensity at each node of the mesh.
+
+ Image intensity at each sample point is that of the nearest voxel.
+ A 2-d array is returned, where each row corresponds to an image and each
+ column to a mesh vertex.
+ See documentation of vol_to_surf for details.
+
+ """
+ proj = _projection_matrix(
+ mesh, affine, images[0].shape, kind=kind, radius=radius,
+ n_points=n_points, mask=mask)
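+    # Flatten each image into a column so that the projection matrix,
+    # of shape (n_vertices, n_voxels), can be applied to all images at once.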
+ data = np.asarray(images).reshape(len(images), -1).T
+ texture = proj.dot(data)
+ # if all samples around a mesh vertex are outside the image,
+ # there is no reasonable value to assign to this vertex.
+ # in this case we return NaN for this vertex.
+ texture[np.asarray(proj.sum(axis=1) == 0).ravel()] = np.nan
+ return texture.T
+
+
+def _interpolation_sampling(images, mesh, affine, kind='ball', radius=3,
+ n_points=None, mask=None):
+ """In each image, measure the intensity at each node of the mesh.
+
+ Image intensity at each sample point is computed with trilinear
+ interpolation.
+ A 2-d array is returned, where each row corresponds to an image and each
+ column to a mesh vertex.
+ See documentation of vol_to_surf for details.
+
+ """
+ sample_locations = _sample_locations(
+ mesh, affine, kind=kind, radius=radius, n_points=n_points)
+ n_vertices, n_points, img_dim = sample_locations.shape
+ grid = [np.arange(size) for size in images[0].shape]
+ interp_locations = np.vstack(sample_locations)
+ masked = _masked_indices(interp_locations, images[0].shape, mask=mask)
+ # loop over images rather than building a big array to use less memory
+ all_samples = []
+ for img in images:
+ interpolator = interpolate.RegularGridInterpolator(
+ grid, img,
+ bounds_error=False, method='linear', fill_value=None)
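+        # fill_value=None lets the interpolator extrapolate outside the
+        # grid; out-of-image samples are overwritten with NaN below.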
+ samples = interpolator(interp_locations)
+ # if all samples around a mesh vertex are outside the image,
+ # there is no reasonable value to assign to this vertex.
+ # in this case we return NaN for this vertex.
+ samples[masked] = np.nan
+ all_samples.append(samples)
+ all_samples = np.asarray(all_samples)
+ all_samples = all_samples.reshape((len(images), n_vertices, n_points))
+ texture = np.nanmean(all_samples, axis=2)
+ return texture
+
+
+def vol_to_surf(img, surf_mesh,
+ radius=3., interpolation='linear', kind='line',
+ n_samples=None, mask_img=None):
+ """Extract surface data from a Nifti image.
+
+ .. versionadded:: 0.4.0
+
+ Parameters
+ ----------
+
+ img : Niimg-like object, 3d or 4d.
+ See http://nilearn.github.io/manipulating_images/input_output.html
+
+ surf_mesh : str or numpy.ndarray
+ Either a file containing surface mesh geometry (valid formats
+ are .gii or Freesurfer specific files such as .orig, .pial,
+ .sphere, .white, .inflated) or a list of two Numpy arrays,
+ the first containing the x-y-z coordinates of the mesh
+ vertices, the second containing the indices (into coords)
+ of the mesh faces.
+
+ radius : float, optional (default=3.).
+ The size (in mm) of the neighbourhood from which samples are drawn
+ around each node.
+
+ interpolation : {'linear', 'nearest'}
+ How the image intensity is measured at a sample point.
+
+ - 'linear' (the default):
+ Use a trilinear interpolation of neighboring voxels.
+ - 'nearest':
+ Use the intensity of the nearest voxel.
+
+ For one image, the speed difference is small, 'linear' takes about x1.5
+ more time. For many images, 'nearest' scales much better, up to x20
+ faster.
+
+ kind : {'line', 'ball'}
+ The strategy used to sample image intensities around each vertex.
+
+ - 'line' (the default):
+ samples are regularly spaced along the normal to the mesh, over the
+ interval [- `radius`, + `radius`].
+ (sometimes called thickness sampling)
+ - 'ball':
+ samples are regularly spaced inside a ball centered at the mesh
+ vertex.
+
+ n_samples : int or None, optional (default=None)
+ How many samples are drawn around each vertex and averaged. If
+ ``None``, use a reasonable default for the chosen sampling strategy
+ (20 for 'ball' or 10 for 'line').
+ For performance reasons, if using `kind` ="ball", choose `n_samples` in
+ [10, 20, 40, 80, 160] (default is 20), because cached positions are
+ available.
+
+ mask_img : Niimg-like object or None, optional (default=None)
+ Samples falling out of this mask or out of the image are ignored.
+ If ``None``, don't apply any mask.
+
+ Returns
+ -------
+ texture : numpy.ndarray, 1d or 2d.
+        If a 3D image is provided, a 1d vector is returned, containing one
+        value for each mesh node.
+        If a 4D image is provided, a 2d array is returned, where each row
+ corresponds to a mesh node.
+
+ Notes
+ -----
+ This function computes a value for each vertex of the mesh. In order to do
+ so, it selects a few points in the volume surrounding that vertex,
+ interpolates the image intensities at these sampling positions, and
+ averages the results.
+
+    Two strategies are available to select these positions.
+
+ - 'ball' uses points regularly spaced in a ball centered at the mesh
+ vertex. The radius of the ball is controlled by the parameter
+ `radius`.
+ - 'line' starts by drawing the normal to the mesh passing through this
+ vertex. It then selects a segment of this normal, centered at the
+ vertex, of length 2 * `radius`. Image intensities are measured at
+ points regularly spaced on this normal segment.
+
+ You can control how many samples are drawn by setting `n_samples`.
+
+ Once the sampling positions are chosen, those that fall outside of the 3d
+    image (or outside of the mask if you provided one) are discarded. If all
+ sample positions are discarded (which can happen, for example, if the
+ vertex itself is outside of the support of the image), the projection at
+ this vertex will be ``numpy.nan``.
+
+ The 3d image then needs to be interpolated at each of the remaining points.
+ Two options are available: 'nearest' selects the value of the nearest
+ voxel, and 'linear' performs trilinear interpolation of neighbouring
+ voxels. 'linear' may give better results - for example, the projected
+ values are more stable when resampling the 3d image or applying affine
+ transformations to it. For one image, the speed difference is small,
+ 'linear' takes about x1.5 more time. For many images, 'nearest' scales much
+ better, up to x20 faster.
+
+ Once the 3d image has been interpolated at each sample point, the
+ interpolated values are averaged to produce the value associated to this
+ particular mesh vertex.
+
+ WARNING: This function is experimental and details such as the
+ interpolation method are subject to change.
+
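+    Examples
+    --------
+    A minimal sketch (``stat_map.nii.gz`` and ``lh.pial`` are illustrative
+    file names, not shipped with nilearn)::
+
+        from nilearn.surface import vol_to_surf
+        texture = vol_to_surf('stat_map.nii.gz', 'lh.pial')
+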
+ """
+ sampling_schemes = {'linear': _interpolation_sampling,
+ 'nearest': _nearest_voxel_sampling}
+ if interpolation not in sampling_schemes:
+ raise ValueError('"interpolation" should be one of {}'.format(
+ tuple(sampling_schemes.keys())))
+ img = load_img(img)
+ if mask_img is not None:
+ mask_img = _utils.check_niimg(mask_img)
+ mask = resampling.resample_to_img(
+ mask_img, img, interpolation='nearest', copy=False).get_data()
+ else:
+ mask = None
+ original_dimension = len(img.shape)
+ img = _utils.check_niimg(img, atleast_4d=True)
+ frames = np.rollaxis(img.get_data(), -1)
+ mesh = load_surf_mesh(surf_mesh)
+ sampling = sampling_schemes[interpolation]
+ texture = sampling(
+ frames, mesh, img.affine, radius=radius, kind=kind,
+ n_points=n_samples, mask=mask)
+ if original_dimension == 3:
+ texture = texture[0]
+ return texture.T
+
+
+def _load_surf_files_gifti_gzip(surf_file):
+    """Load gzipped Gifti surface files.
+
+    Used by load_surf_mesh and load_surf_data to extract gzipped files.
+
+    The fallback parsing branch can be removed once the minimum supported
+    nibabel version reaches 2.1.0.
+ """
+ with gzip.open(surf_file) as f:
+ as_bytes = f.read()
+ if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):
+ parser = gifti.GiftiImage.parser()
+ parser.parse(as_bytes)
+ gifti_img = parser.img
+ else:
+ from nibabel.gifti.parse_gifti_fast import ParserCreate, Outputter
+ parser = ParserCreate()
+ parser.buffer_text = True
+ out = Outputter()
+ parser.StartElementHandler = out.StartElementHandler
+ parser.EndElementHandler = out.EndElementHandler
+ parser.CharacterDataHandler = out.CharacterDataHandler
+ parser.Parse(as_bytes)
+ gifti_img = out.img
+ return gifti_img
+
+
+def _gifti_img_to_data(gifti_img):
+    """Extract the data from a nibabel.gifti.GiftiImage, e.g. sulcal
+    depth or a statistical map.
+
+    Used by load_surf_data to handle surface data stored in .gii or
+    .gii.gz files.
+ """
+ if not gifti_img.darrays:
+ raise ValueError('Gifti must contain at least one data array')
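+    # Stack one column per data array (e.g. one per time point), then
+    # squeeze so single-array files yield a 1d vector.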
+ return np.asarray([arr.data for arr in gifti_img.darrays]).T.squeeze()
+
+
+# function to figure out datatype and load data
+def load_surf_data(surf_data):
+    """Load data to be represented on a surface mesh.
+
+ Parameters
+ ----------
+ surf_data : str or numpy.ndarray
+        Either a file containing surface data (valid formats are .gii,
+ .gii.gz, .mgz, .nii, .nii.gz, or Freesurfer specific files such as
+ .thickness, .curv, .sulc, .annot, .label) or
+        a Numpy array containing surface data.
+
+    Returns
+ -------
+ data : numpy.ndarray
+ An array containing surface data
+ """
+ # if the input is a filename, load it
+ if isinstance(surf_data, _basestring):
+ if (surf_data.endswith('nii') or surf_data.endswith('nii.gz') or
+ surf_data.endswith('mgz')):
+ data = np.squeeze(nibabel.load(surf_data).get_data())
+ elif (surf_data.endswith('curv') or surf_data.endswith('sulc') or
+ surf_data.endswith('thickness')):
+ data = nibabel.freesurfer.io.read_morph_data(surf_data)
+ elif surf_data.endswith('annot'):
+ data = nibabel.freesurfer.io.read_annot(surf_data)[0]
+ elif surf_data.endswith('label'):
+ data = nibabel.freesurfer.io.read_label(surf_data)
+ elif surf_data.endswith('gii'):
+ if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):
+ gii = nibabel.load(surf_data)
+ else:
+ gii = gifti.read(surf_data)
+ data = _gifti_img_to_data(gii)
+ elif surf_data.endswith('gii.gz'):
+ gii = _load_surf_files_gifti_gzip(surf_data)
+ data = _gifti_img_to_data(gii)
+ else:
+ raise ValueError(('The input type is not recognized. %r was given '
+ 'while valid inputs are a Numpy array or one of '
+ 'the following file formats: .gii, .gii.gz, '
+ '.mgz, .nii, .nii.gz, Freesurfer specific files '
+ 'such as .curv, .sulc, .thickness, .annot, '
+ '.label') % surf_data)
+ # if the input is a numpy array
+ elif isinstance(surf_data, np.ndarray):
+ data = np.squeeze(surf_data)
+ else:
+ raise ValueError('The input type is not recognized. '
+ 'Valid inputs are a Numpy array or one of the '
+ 'following file formats: .gii, .gii.gz, .mgz, .nii, '
+ '.nii.gz, Freesurfer specific files such as .curv, '
+ '.sulc, .thickness, .annot, .label')
+ return data
+
+
+def _gifti_img_to_mesh(gifti_img):
+    """Extract mesh coordinates and faces from a nibabel.gifti.GiftiImage.
+
+    Used by load_surf_mesh to handle surface meshes stored in .gii or
+    .gii.gz files.
+ """
+ if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):
+ try:
+ coords = gifti_img.get_arrays_from_intent(
+ nibabel.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])[0].data
+ except IndexError:
+ raise ValueError('Gifti file needs to contain a data array '
+ 'with intent NIFTI_INTENT_POINTSET')
+ try:
+ faces = gifti_img.get_arrays_from_intent(
+ nibabel.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])[0].data
+ except IndexError:
+ raise ValueError('Gifti file needs to contain a data array '
+ 'with intent NIFTI_INTENT_TRIANGLE')
+ else:
+ try:
+ coords = gifti_img.getArraysFromIntent(
+ nibabel.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])[0].data
+ except IndexError:
+ raise ValueError('Gifti file needs to contain a data array '
+ 'with intent NIFTI_INTENT_POINTSET')
+ try:
+ faces = gifti_img.getArraysFromIntent(
+ nibabel.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])[0].data
+ except IndexError:
+ raise ValueError('Gifti file needs to contain a data array '
+ 'with intent NIFTI_INTENT_TRIANGLE')
+
+ return coords, faces
+
+
+# function to figure out datatype and load data
+def load_surf_mesh(surf_mesh):
+    """Load a surface mesh geometry.
+
+ Parameters
+ ----------
+ surf_mesh : str or numpy.ndarray
+ Either a file containing surface mesh geometry (valid formats
+ are .gii .gii.gz or Freesurfer specific files such as .orig, .pial,
+ .sphere, .white, .inflated) or a list or tuple of two Numpy arrays,
+ the first containing the x-y-z coordinates of the mesh
+ vertices, the second containing the indices (into coords)
+ of the mesh faces.
+
+ Returns
+    -------
+ [coords, faces] : List of two numpy.ndarray
+ The first containing the x-y-z coordinates of the mesh vertices,
+ the second containing the indices (into coords) of the mesh faces.
+ """
+ # if input is a filename, try to load it
+ if isinstance(surf_mesh, _basestring):
+ if (surf_mesh.endswith('orig') or surf_mesh.endswith('pial') or
+ surf_mesh.endswith('white') or surf_mesh.endswith('sphere') or
+ surf_mesh.endswith('inflated')):
+ coords, faces = nibabel.freesurfer.io.read_geometry(surf_mesh)
+ elif surf_mesh.endswith('gii'):
+ if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):
+ gifti_img = nibabel.load(surf_mesh)
+ else:
+ gifti_img = gifti.read(surf_mesh)
+ coords, faces = _gifti_img_to_mesh(gifti_img)
+ elif surf_mesh.endswith('.gii.gz'):
+ gifti_img = _load_surf_files_gifti_gzip(surf_mesh)
+ coords, faces = _gifti_img_to_mesh(gifti_img)
+ else:
+ raise ValueError(('The input type is not recognized. %r was given '
+ 'while valid inputs are one of the following '
+ 'file formats: .gii, .gii.gz, Freesurfer specific'
+ ' files such as .orig, .pial, .sphere, .white, '
+ '.inflated or a list containing two Numpy '
+ 'arrays [vertex coordinates, face indices]'
+ ) % surf_mesh)
+ elif isinstance(surf_mesh, (list, tuple)):
+ try:
+ coords, faces = surf_mesh
+ except Exception:
+ raise ValueError(('If a list or tuple is given as input, '
+ 'it must have two elements, the first is '
+ 'a Numpy array containing the x-y-z coordinates '
+ 'of the mesh vertices, the second is a Numpy '
+ 'array containing the indices (into coords) of '
+ 'the mesh faces. The input was a list with '
+ '%r elements.') % len(surf_mesh))
+ else:
+ raise ValueError('The input type is not recognized. '
+ 'Valid inputs are one of the following file '
+ 'formats: .gii, .gii.gz, Freesurfer specific files '
+ 'such as .orig, .pial, .sphere, .white, .inflated '
+ 'or a list containing two Numpy arrays '
+ '[vertex coordinates, face indices]')
+
+ return [coords, faces]
+
+
+def check_mesh_and_data(mesh, data):
+ """Load surface mesh and data, check that they have compatible shapes."""
+ mesh = load_surf_mesh(mesh)
+ nodes, faces = mesh
+ data = load_surf_data(data)
+ if len(data) != len(nodes):
+ raise ValueError(
+ 'Mismatch between number of nodes in mesh ({}) and '
+ 'size of surface data ({})'.format(len(nodes), len(data)))
+ return mesh, data
diff --git a/nilearn/surface/tests/__init__.py b/nilearn/surface/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nilearn/surface/tests/data/__init__.py b/nilearn/surface/tests/data/__init__.py
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/nilearn/surface/tests/data/__init__.py
@@ -0,0 +1 @@
+
diff --git a/nilearn/surface/tests/data/test.annot b/nilearn/surface/tests/data/test.annot
new file mode 100644
index 0000000000..016592fa19
Binary files /dev/null and b/nilearn/surface/tests/data/test.annot differ
diff --git a/nilearn/surface/tests/data/test.label b/nilearn/surface/tests/data/test.label
new file mode 100644
index 0000000000..4feb5ed57c
--- /dev/null
+++ b/nilearn/surface/tests/data/test.label
@@ -0,0 +1,12 @@
+#!ascii label , from subject fsaverage5 vox2ras=TkReg
+326
+5900 -15.869 -33.770 74.187 0.4444440007
+5899 -16.323 -32.170 73.531 0.5555559993
+5901 -15.718 -36.573 72.549 0.4444440007
+5902 -17.190 -39.268 70.851 0.3333329856
+2638 -18.197 -33.185 73.204 0.5555559993
+8756 -61.004 -17.019 24.824 0.1111110002
+6241 -60.198 -18.047 23.736 0.1111110002
+8757 -61.604 -15.225 23.800 0.1111110002
+1896 -58.260 -24.190 21.811 0.1111110002
+6243 -58.792 -18.143 22.817 0.1111110002
diff --git a/nilearn/surface/tests/test_surface.py b/nilearn/surface/tests/test_surface.py
new file mode 100644
index 0000000000..d8bf0ae57e
--- /dev/null
+++ b/nilearn/surface/tests/test_surface.py
@@ -0,0 +1,459 @@
+# Tests for functions in surface.py
+
+import os
+import tempfile
+import warnings
+import itertools
+
+from distutils.version import LooseVersion
+from nose import SkipTest
+from numpy.testing import (assert_array_equal, assert_array_almost_equal,
+ assert_equal)
+from nose.tools import assert_true, assert_raises
+from nilearn._utils.testing import assert_raises_regex, assert_warns
+
+import numpy as np
+from scipy.spatial import Delaunay
+import sklearn
+
+import nibabel as nb
+from nibabel import gifti
+
+from nilearn import datasets
+from nilearn import image
+from nilearn.image import resampling
+from nilearn.image.tests.test_resampling import rotation
+from nilearn.surface import surface
+from nilearn.surface import load_surf_data, load_surf_mesh, vol_to_surf
+from nilearn.surface.surface import (_gifti_img_to_mesh,
+ _load_surf_files_gifti_gzip)
+
+currdir = os.path.dirname(os.path.abspath(__file__))
+datadir = os.path.join(currdir, 'data')
+
+
+def _generate_surf():
+ rng = np.random.RandomState(42)
+ coords = rng.rand(20, 3)
+ faces = rng.randint(coords.shape[0], size=(30, 3))
+ return [coords, faces]
+
+
+def test_load_surf_data_array():
+ # test loading and squeezing data from numpy array
+ data_flat = np.zeros((20, ))
+ data_squeeze = np.zeros((20, 1, 3))
+ assert_array_equal(load_surf_data(data_flat), np.zeros((20, )))
+ assert_array_equal(load_surf_data(data_squeeze), np.zeros((20, 3)))
+
+
+def test_load_surf_data_file_nii_gii():
+ # test loading of fake data from gifti file
+ filename_gii = tempfile.mktemp(suffix='.gii')
+ if LooseVersion(nb.__version__) > LooseVersion('2.0.2'):
+ darray = gifti.GiftiDataArray(data=np.zeros((20, )))
+ else:
+        # Avoid a bug in nibabel 1.2.0 where GiftiDataArray was not
+        # initialized properly:
+ darray = gifti.GiftiDataArray.from_array(np.zeros((20, )),
+ intent='t test')
+ gii = gifti.GiftiImage(darrays=[darray])
+ gifti.write(gii, filename_gii)
+ assert_array_equal(load_surf_data(filename_gii), np.zeros((20, )))
+ os.remove(filename_gii)
+
+ # test loading of data from empty gifti file
+ filename_gii_empty = tempfile.mktemp(suffix='.gii')
+ gii_empty = gifti.GiftiImage()
+ gifti.write(gii_empty, filename_gii_empty)
+ assert_raises_regex(ValueError,
+ 'must contain at least one data array',
+ load_surf_data, filename_gii_empty)
+ os.remove(filename_gii_empty)
+
+ # test loading of fake data from nifti file
+ filename_nii = tempfile.mktemp(suffix='.nii')
+ filename_niigz = tempfile.mktemp(suffix='.nii.gz')
+ nii = nb.Nifti1Image(np.zeros((20, )), affine=None)
+ nb.save(nii, filename_nii)
+ nb.save(nii, filename_niigz)
+ assert_array_equal(load_surf_data(filename_nii), np.zeros((20, )))
+ assert_array_equal(load_surf_data(filename_niigz), np.zeros((20, )))
+ os.remove(filename_nii)
+ os.remove(filename_niigz)
+
+
+def test_load_surf_data_gii_gz():
+ # Test the loader `load_surf_data` with gzipped fsaverage5 files
+
+ # surface data
+ fsaverage = datasets.fetch_surf_fsaverage().sulc_left
+ gii = _load_surf_files_gifti_gzip(fsaverage)
+ assert_true(isinstance(gii, gifti.GiftiImage))
+
+ data = load_surf_data(fsaverage)
+ assert_true(isinstance(data, np.ndarray))
+
+ # surface mesh
+ fsaverage = datasets.fetch_surf_fsaverage().pial_left
+ gii = _load_surf_files_gifti_gzip(fsaverage)
+ assert_true(isinstance(gii, gifti.GiftiImage))
+
+
+def test_load_surf_data_file_freesurfer():
+ # test loading of fake data from sulc and thickness files
+ # using load_surf_data.
+    # We test load_surf_data by creating fake data with the function
+    # 'write_morph_data', which requires nibabel >= 2.1.0.
+ if LooseVersion(nb.__version__) >= LooseVersion('2.1.0'):
+ data = np.zeros((20, ))
+ filename_sulc = tempfile.mktemp(suffix='.sulc')
+ nb.freesurfer.io.write_morph_data(filename_sulc, data)
+ assert_array_equal(load_surf_data(filename_sulc), np.zeros((20, )))
+ os.remove(filename_sulc)
+
+ filename_thick = tempfile.mktemp(suffix='.thickness')
+ nb.freesurfer.io.write_morph_data(filename_thick, data)
+ assert_array_equal(load_surf_data(filename_thick), np.zeros((20, )))
+ os.remove(filename_thick)
+
+ # test loading of data from real label and annot files
+ label_start = np.array([5900, 5899, 5901, 5902, 2638])
+ label_end = np.array([8756, 6241, 8757, 1896, 6243])
+ label = load_surf_data(os.path.join(datadir, 'test.label'))
+ assert_array_equal(label[:5], label_start)
+ assert_array_equal(label[-5:], label_end)
+ assert_equal(label.shape, (10, ))
+ del label, label_start, label_end
+
+ annot_start = np.array([24, 29, 28, 27, 24, 31, 11, 25, 0, 12])
+ annot_end = np.array([16, 16, 16, 16, 16, 16, 16, 16, 16, 16])
+ annot = load_surf_data(os.path.join(datadir, 'test.annot'))
+ assert_array_equal(annot[:10], annot_start)
+ assert_array_equal(annot[-10:], annot_end)
+ assert_equal(annot.shape, (10242, ))
+ del annot, annot_start, annot_end
+
+
+def test_load_surf_data_file_error():
+ # test if files with unexpected suffixes raise errors
+ data = np.zeros((20, ))
+ wrong_suff = ['.vtk', '.obj', '.mnc', '.txt']
+ for suff in wrong_suff:
+ filename_wrong = tempfile.mktemp(suffix=suff)
+ np.savetxt(filename_wrong, data)
+ assert_raises_regex(ValueError,
+ 'input type is not recognized',
+ load_surf_data, filename_wrong)
+ os.remove(filename_wrong)
+
+
+def test_load_surf_mesh_list():
+ # test if correct list is returned
+ mesh = _generate_surf()
+ assert_equal(len(load_surf_mesh(mesh)), 2)
+ assert_array_equal(load_surf_mesh(mesh)[0], mesh[0])
+ assert_array_equal(load_surf_mesh(mesh)[1], mesh[1])
+ # test if incorrect list, array or dict raises error
+ assert_raises_regex(ValueError, 'it must have two elements',
+ load_surf_mesh, [])
+ assert_raises_regex(ValueError, 'it must have two elements',
+ load_surf_mesh, [mesh[0]])
+ assert_raises_regex(ValueError, 'it must have two elements',
+ load_surf_mesh, [mesh[0], mesh[1], mesh[1]])
+ assert_raises_regex(ValueError, 'input type is not recognized',
+ load_surf_mesh, mesh[0])
+ assert_raises_regex(ValueError, 'input type is not recognized',
+ load_surf_mesh, dict())
+ del mesh
+
+
+def test_gifti_img_to_mesh():
+ mesh = _generate_surf()
+
+ coord_array = gifti.GiftiDataArray(data=mesh[0])
+ coord_array.intent = nb.nifti1.intent_codes['NIFTI_INTENT_POINTSET']
+
+ face_array = gifti.GiftiDataArray(data=mesh[1])
+ face_array.intent = nb.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE']
+
+ gii = gifti.GiftiImage(darrays=[coord_array, face_array])
+ coords, faces = _gifti_img_to_mesh(gii)
+ assert_array_equal(coords, mesh[0])
+ assert_array_equal(faces, mesh[1])
+
+
+def test_load_surf_mesh_file_gii_gz():
+ # Test the loader `load_surf_mesh` with gzipped fsaverage5 files
+
+ fsaverage = datasets.fetch_surf_fsaverage().pial_left
+ coords, faces = load_surf_mesh(fsaverage)
+ assert_true(isinstance(coords, np.ndarray))
+ assert_true(isinstance(faces, np.ndarray))
+
+
+def test_load_surf_mesh_file_gii():
+ # Test the loader `load_surf_mesh`
+
+ # Skip on older nibabel versions: they do not support the intent
+ # argument and do not handle intent codes properly.
+
+ if not LooseVersion(nb.__version__) >= LooseVersion('2.1.0'):
+ raise SkipTest
+
+ mesh = _generate_surf()
+
+ # test if correct gii is loaded into correct list
+ filename_gii_mesh = tempfile.mktemp(suffix='.gii')
+
+ coord_array = gifti.GiftiDataArray(data=mesh[0],
+ intent=nb.nifti1.intent_codes[
+ 'NIFTI_INTENT_POINTSET'])
+ face_array = gifti.GiftiDataArray(data=mesh[1],
+ intent=nb.nifti1.intent_codes[
+ 'NIFTI_INTENT_TRIANGLE'])
+
+ gii = gifti.GiftiImage(darrays=[coord_array, face_array])
+ gifti.write(gii, filename_gii_mesh)
+ assert_array_equal(load_surf_mesh(filename_gii_mesh)[0], mesh[0])
+ assert_array_equal(load_surf_mesh(filename_gii_mesh)[1], mesh[1])
+ os.remove(filename_gii_mesh)
+
+ # test if incorrect gii raises error
+ filename_gii_mesh_no_point = tempfile.mktemp(suffix='.gii')
+ gifti.write(gifti.GiftiImage(darrays=[face_array, face_array]),
+ filename_gii_mesh_no_point)
+ assert_raises_regex(ValueError, 'NIFTI_INTENT_POINTSET',
+ load_surf_mesh, filename_gii_mesh_no_point)
+ os.remove(filename_gii_mesh_no_point)
+
+ filename_gii_mesh_no_face = tempfile.mktemp(suffix='.gii')
+ gifti.write(gifti.GiftiImage(darrays=[coord_array, coord_array]),
+ filename_gii_mesh_no_face)
+ assert_raises_regex(ValueError, 'NIFTI_INTENT_TRIANGLE',
+ load_surf_mesh, filename_gii_mesh_no_face)
+ os.remove(filename_gii_mesh_no_face)
+
+
+def test_load_surf_mesh_file_freesurfer():
+ # Older nibabel versions do not support 'write_geometry'
+ if LooseVersion(nb.__version__) <= LooseVersion('1.2.0'):
+ raise SkipTest
+
+ mesh = _generate_surf()
+ for suff in ['.pial', '.inflated', '.white', '.orig', 'sphere']:
+ filename_fs_mesh = tempfile.mktemp(suffix=suff)
+ nb.freesurfer.write_geometry(filename_fs_mesh, mesh[0], mesh[1])
+ assert_equal(len(load_surf_mesh(filename_fs_mesh)), 2)
+ assert_array_almost_equal(load_surf_mesh(filename_fs_mesh)[0],
+ mesh[0])
+ assert_array_almost_equal(load_surf_mesh(filename_fs_mesh)[1],
+ mesh[1])
+ os.remove(filename_fs_mesh)
+
+
+def test_load_surf_mesh_file_error():
+ if LooseVersion(nb.__version__) <= LooseVersion('1.2.0'):
+ raise SkipTest
+
+ # test if files with unexpected suffixes raise errors
+ mesh = _generate_surf()
+ wrong_suff = ['.vtk', '.obj', '.mnc', '.txt']
+ for suff in wrong_suff:
+ filename_wrong = tempfile.mktemp(suffix=suff)
+ nb.freesurfer.write_geometry(filename_wrong, mesh[0], mesh[1])
+ assert_raises_regex(ValueError,
+ 'input type is not recognized',
+ load_surf_mesh, filename_wrong)
+ os.remove(filename_wrong)
+
+
+def _flat_mesh(x_s, y_s, z=0):
+ x, y = np.mgrid[:x_s, :y_s]
+ x, y = x.ravel(), y.ravel()
+ z = np.ones(len(x)) * z
+ vertices = np.asarray([x, y, z]).T
+ triangulation = Delaunay(vertices[:, :2]).simplices
+ mesh = [vertices, triangulation]
+ return mesh
+
+
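+# (Illustrative note, assuming scipy's Delaunay behaves as usual on a
+# regular grid: _flat_mesh(2, 3) returns 6 vertices at integer (x, y, 0)
+# positions and 2 * (2 - 1) * (3 - 1) = 4 triangles, i.e.
+#     vertices.shape == (6, 3) and triangulation.shape == (4, 3).)
+
+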
+def _z_const_img(x_s, y_s, z_s):
+ hslice = np.arange(x_s * y_s).reshape((x_s, y_s))
+ return np.ones((x_s, y_s, z_s)) * hslice[:, :, np.newaxis]
+
+
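+# (Illustrative note: _z_const_img broadcasts one horizontal slice along z,
+# e.g. _z_const_img(2, 3, 2)[:, :, 0] == [[0, 1, 2], [3, 4, 5]] and every
+# z-slice is identical, which makes projected values easy to predict.)
+
+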
+def test_vertex_outer_normals():
+ # compute normals for a flat horizontal mesh; they should all be (0, 0, 1)
+ mesh = _flat_mesh(5, 7)
+ computed_normals = surface._vertex_outer_normals(mesh)
+ true_normals = np.zeros((len(mesh[0]), 3))
+ true_normals[:, 2] = 1
+ assert_array_almost_equal(computed_normals, true_normals)
+
+
+def test_load_uniform_ball_cloud():
+ for n_points in [10, 20, 40, 80, 160]:
+ with warnings.catch_warnings(record=True) as w:
+ points = surface._load_uniform_ball_cloud(n_points=n_points)
+ assert_array_equal(points.shape, (n_points, 3))
+ assert_equal(len(w), 0)
+ assert_warns(surface.EfficiencyWarning,
+ surface._load_uniform_ball_cloud, n_points=3)
+ for n_points in [3, 10, 20]:
+ computed = surface._uniform_ball_cloud(n_points)
+ loaded = surface._load_uniform_ball_cloud(n_points)
+ assert_array_almost_equal(computed, loaded)
+
+
+def test_sample_locations():
+ # check positions of samples on toy example, with an affine != identity
+ # flat horizontal mesh
+ mesh = _flat_mesh(5, 7)
+ affine = np.diagflat([10, 20, 30, 1])
+ inv_affine = np.linalg.inv(affine)
+ # transform vertices to world space
+ vertices = np.asarray(
+ resampling.coord_transform(*mesh[0].T, affine=affine)).T
+ # compute by hand the true offsets in voxel space
+ # (transformed by affine^-1)
+ ball_offsets = surface._load_uniform_ball_cloud(10)
+ ball_offsets = np.asarray(
+ resampling.coord_transform(*ball_offsets.T, affine=inv_affine)).T
+ line_offsets = np.zeros((10, 3))
+ line_offsets[:, 2] = np.linspace(-1, 1, 10)
+ line_offsets = np.asarray(
+ resampling.coord_transform(*line_offsets.T, affine=inv_affine)).T
+ # check we get the same locations
+ for kind, offsets in [('line', line_offsets), ('ball', ball_offsets)]:
+ locations = surface._sample_locations(
+ [vertices, mesh[1]], affine, 1., kind=kind, n_points=10)
+ true_locations = np.asarray([vertex + offsets for vertex in mesh[0]])
+ assert_array_equal(locations.shape, true_locations.shape)
+ assert_array_almost_equal(true_locations, locations)
+ assert_raises(ValueError, surface._sample_locations,
+ mesh, affine, 1., kind='bad_kind')
+
+
+def test_masked_indices():
+ mask = np.ones((4, 3, 8))
+ mask[:, :, ::2] = 0
+ locations = np.mgrid[:5, :3, :8].ravel().reshape((3, -1))
+ masked = surface._masked_indices(locations.T, mask.shape, mask)
+ # Locations with an even z coordinate fall outside the mask
+ # (mask[:, :, ::2] == 0) and are therefore flagged as masked
+ assert_true((masked[::2] == 1).all())
+ # The last 24 locations (first coordinate == 4) lie one row beyond the
+ # first image dimension and are masked as well
+ assert_true((masked[-24:] == 1).all())
+ # 4 * 3 * 8 / 2 elements should remain unmasked
+ assert_true((1 - masked).sum() == 48)
+
+
+def test_projection_matrix():
+ mesh = _flat_mesh(5, 7, 4)
+ img = _z_const_img(5, 7, 13)
+ proj = surface._projection_matrix(
+ mesh, np.eye(4), img.shape, radius=2., n_points=10)
+ # proj matrix has shape (n_vertices, img_size)
+ assert_equal(proj.shape, (5 * 7, 5 * 7 * 13))
+ # proj.dot(img) should give the values of img at the vertices' locations
+ values = proj.dot(img.ravel()).reshape((5, 7))
+ assert_array_almost_equal(values, img[:, :, 0])
+ mesh = _flat_mesh(5, 7)
+ proj = surface._projection_matrix(
+ mesh, np.eye(4), (5, 7, 1), radius=.1, n_points=10)
+ assert_array_almost_equal(proj.toarray(), np.eye(proj.shape[0]))
+ mask = np.ones(img.shape, dtype=int)
+ mask[0] = 0
+ proj = surface._projection_matrix(
+ mesh, np.eye(4), img.shape, radius=2., n_points=10, mask=mask)
+ proj = proj.toarray()
+ # first row of the mesh is masked
+ assert_array_almost_equal(proj.sum(axis=1)[:7], np.zeros(7))
+ assert_array_almost_equal(proj.sum(axis=1)[7:], np.ones(proj.shape[0] - 7))
+ # mask and img should have the same shape
+ assert_raises(ValueError, surface._projection_matrix,
+ mesh, np.eye(4), img.shape, mask=np.ones((3, 3, 2)))
+
+
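+# Sketch of the arithmetic checked below: with affine = 10 * eye (and 1 in
+# the corner), world coordinates are 10 x voxel indices, so node
+# [10, 10, 20] maps to voxel [1, 1, 2] (value 2) while the other nodes land
+# on voxels with value 1. With linear interpolation, node [1, 1, 2] sits at
+# voxel coords [.1, .1, .2] and reads 0.9 * 1 + 0.1 * 2 = 1.1.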
+def test_sampling_affine():
+ # check sampled (projected) values on a toy image
+ img = np.ones((4, 4, 4))
+ img[1, :, :] = 2
+ nodes = [[1, 1, 2], [10, 10, 20], [30, 30, 30]]
+ mesh = [np.asarray(nodes), None]
+ affine = 10 * np.eye(4)
+ affine[-1, -1] = 1
+ texture = surface._nearest_voxel_sampling(
+ [img], mesh, affine=affine, radius=1, kind='ball')
+ assert_array_equal(texture[0], [1., 2., 1.])
+ texture = surface._interpolation_sampling(
+ [img], mesh, affine=affine, radius=0, kind='ball')
+ assert_array_almost_equal(texture[0], [1.1, 2., 1.])
+
+
+def test_sampling():
+ mesh = _flat_mesh(5, 7, 4)
+ img = _z_const_img(5, 7, 13)
+ mask = np.ones(img.shape, dtype=int)
+ mask[0] = 0
+ projectors = [surface._nearest_voxel_sampling,
+ surface._interpolation_sampling]
+ for kind in ('line', 'ball'):
+ for projector in projectors:
+ projection = projector([img], mesh, np.eye(4),
+ kind=kind, radius=0.)
+ assert_array_almost_equal(projection.ravel(), img[:, :, 0].ravel())
+ projection = projector([img], mesh, np.eye(4),
+ kind=kind, radius=0., mask=mask)
+ assert_array_almost_equal(projection.ravel()[7:],
+ img[1:, :, 0].ravel())
+ assert_true(np.isnan(projection.ravel()[:7]).all())
+
+
+def test_vol_to_surf():
+ # test 3d niimg to cortical surface projection and invariance to a change
+ # of affine
+ mni = datasets.load_mni152_template()
+ mesh = _generate_surf()
+ _check_vol_to_surf_results(mni, mesh)
+ fsaverage = datasets.fetch_surf_fsaverage().pial_left
+ _check_vol_to_surf_results(mni, fsaverage)
+
+
+def _check_vol_to_surf_results(img, mesh):
+ mni_mask = datasets.load_mni152_brain_mask()
+ for kind, interpolation, mask_img in itertools.product(
+ ['ball', 'line'], ['linear', 'nearest'], [mni_mask, None]):
+ proj_1 = vol_to_surf(
+ img, mesh, kind=kind, interpolation=interpolation,
+ mask_img=mask_img)
+ assert_true(proj_1.ndim == 1)
+ img_rot = image.resample_img(
+ img, target_affine=rotation(np.pi / 3., np.pi / 4.))
+ proj_2 = vol_to_surf(
+ img_rot, mesh, kind=kind, interpolation=interpolation,
+ mask_img=mask_img)
+ # The projection values for the rotated image should be close
+ # to the projection for the original image
+ diff = np.abs(proj_1 - proj_2) / np.abs(proj_1)
+ assert_true(np.mean(diff[diff < np.inf]) < .03)
+ img_4d = image.concat_imgs([img, img])
+ proj_4d = vol_to_surf(
+ img_4d, mesh, kind=kind, interpolation=interpolation,
+ mask_img=mask_img)
+ nodes, _ = surface.load_surf_mesh(mesh)
+ assert_array_equal(proj_4d.shape, [nodes.shape[0], 2])
+ assert_array_almost_equal(proj_4d[:, 0], proj_1, 3)
+
+
+def test_check_mesh_and_data():
+ mesh = _generate_surf()
+ data = mesh[0][:, 0]
+ m, d = surface.check_mesh_and_data(mesh, data)
+ assert (m[0] == mesh[0]).all()
+ assert (m[1] == mesh[1]).all()
+ assert (d == data).all()
+ data = mesh[0][::2, 0]
+ assert_raises(ValueError, surface.check_mesh_and_data, mesh, data)
diff --git a/nilearn/tests/test_cache_mixin.py b/nilearn/tests/test_cache_mixin.py
index ec0e2c70a2..0578c60a68 100644
--- a/nilearn/tests/test_cache_mixin.py
+++ b/nilearn/tests/test_cache_mixin.py
@@ -1,18 +1,21 @@
"""
Test the _utils.cache_mixin module
"""
+import glob
+import json
import os
import shutil
import tempfile
-import json
-import glob
+from distutils.version import LooseVersion
+import sklearn
from nose.tools import assert_false, assert_true, assert_equal
-
from sklearn.externals.joblib import Memory
import nilearn
-from nilearn._utils import cache_mixin
+from nilearn._utils import cache_mixin, CacheMixin
+from nilearn._utils.testing import assert_raises_regex
+
def f(x):
@@ -20,6 +23,31 @@ def f(x):
return x
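+# (Editorial sketch, assuming the sklearn.externals.joblib Memory API
+# imported above: joblib stores its cache under a 'joblib' subdirectory of
+# the requested path, e.g. Memory(cachedir='/tmp/cache').cachedir ends with
+# 'joblib'. _check_memory normalizes None, str, or Memory inputs into a
+# Memory instance, which the test below verifies.)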
+def test_check_memory():
+ # Test that _check_memory returns a Memory object with the cachedir
+ # equal to the input path
+ try:
+ temp_dir = tempfile.mkdtemp()
+
+ mem_none = Memory(cachedir=None)
+ mem_temp = Memory(cachedir=temp_dir)
+
+ for mem in [None, mem_none]:
+ memory = cache_mixin._check_memory(mem, verbose=False)
+ assert_true(isinstance(memory, Memory))
+ assert_equal(memory.cachedir, mem_none.cachedir)
+
+ for mem in [temp_dir, mem_temp]:
+ memory = cache_mixin._check_memory(mem, verbose=False)
+ assert_equal(memory.cachedir, mem_temp.cachedir)
+ assert_true(isinstance(memory, Memory))
+
+ finally:
+ if os.path.exists(temp_dir):
+ shutil.rmtree(temp_dir)
+
+
def test__safe_cache_dir_creation():
# Test the _safe_cache function that is supposed to flush the
# cache if the nibabel version changes
@@ -87,3 +115,84 @@ def test_cache_memory_level():
assert_equal(len(glob.glob(job_glob)), 2)
cache_mixin.cache(f, mem)(3)
assert_equal(len(glob.glob(job_glob)), 3)
+
+
+class CacheMixinTest(CacheMixin):
+ """Dummy mock object that wraps a CacheMixin."""
+
+ def __init__(self, memory=None, memory_level=1):
+ self.memory = memory
+ self.memory_level = memory_level
+
+ def run(self):
+ self._cache(f)
+
+
+def test_cache_mixin_with_expand_user():
+ # Test the memory cache is correctly created when using ~.
+ cache_dir = "~/nilearn_data/test_cache"
+ expand_cache_dir = os.path.expanduser(cache_dir)
+ mixin_mock = CacheMixinTest(cache_dir)
+
+ try:
+ assert_false(os.path.exists(expand_cache_dir))
+ mixin_mock.run()
+ assert_true(os.path.exists(expand_cache_dir))
+ finally:
+ if os.path.exists(expand_cache_dir):
+ shutil.rmtree(expand_cache_dir)
+
+
+def test_cache_mixin_without_expand_user():
+ # Test that a cache path containing ~ is not expanded when
+ # nilearn.EXPAND_PATH_WILDCARDS is False.
+ cache_dir = "~/nilearn_data/test_cache"
+ expand_cache_dir = os.path.expanduser(cache_dir)
+ mixin_mock = CacheMixinTest(cache_dir)
+
+ try:
+ assert_false(os.path.exists(expand_cache_dir))
+ nilearn.EXPAND_PATH_WILDCARDS = False
+ assert_raises_regex(ValueError,
+ "Given cache path parent directory doesn't",
+ mixin_mock.run)
+ assert_false(os.path.exists(expand_cache_dir))
+ nilearn.EXPAND_PATH_WILDCARDS = True
+ finally:
+ if os.path.exists(expand_cache_dir):
+ shutil.rmtree(expand_cache_dir)
+
+
+def test_cache_mixin_wrong_dirs():
+ # Test the memory cache raises a ValueError when input base path doesn't
+ # exist.
+
+ for cache_dir in ("/bad_dir/cache",
+ "~/nilearn_data/tmp/test_cache"):
+ expand_cache_dir = os.path.expanduser(cache_dir)
+ mixin_mock = CacheMixinTest(cache_dir)
+
+ try:
+ assert_raises_regex(ValueError,
+ "Given cache path parent directory doesn't",
+ mixin_mock.run)
+ assert_false(os.path.exists(expand_cache_dir))
+ finally:
+ if os.path.exists(expand_cache_dir):
+ shutil.rmtree(expand_cache_dir)
+
+
+def test_cache_shelving():
+ try:
+ temp_dir = tempfile.mkdtemp()
+ job_glob = os.path.join(temp_dir, 'joblib', 'nilearn', 'tests',
+ 'test_cache_mixin', 'f', '*')
+ mem = Memory(cachedir=temp_dir, verbose=0)
+ res = cache_mixin.cache(f, mem, shelve=True)(2)
+ assert_equal(res.get(), 2)
+ assert_equal(len(glob.glob(job_glob)), 1)
+ res = cache_mixin.cache(f, mem, shelve=True)(2)
+ assert_equal(res.get(), 2)
+ assert_equal(len(glob.glob(job_glob)), 1)
+ finally:
+ del mem
+ shutil.rmtree(temp_dir, ignore_errors=True)
diff --git a/nilearn/tests/test_masking.py b/nilearn/tests/test_masking.py
index 5cd62095da..0ef8ba05d0 100644
--- a/nilearn/tests/test_masking.py
+++ b/nilearn/tests/test_masking.py
@@ -6,16 +6,23 @@
import numpy as np
from numpy.testing import assert_array_equal
-from nose.tools import assert_true, assert_false, assert_equal, \
- assert_raises
+from nose.tools import (
+ assert_true,
+ assert_false,
+ assert_equal,
+ assert_raises,
+ )
from nibabel import Nifti1Image
from nilearn import masking
from nilearn.masking import (compute_epi_mask, compute_multi_epi_mask,
- compute_background_mask, unmask, _unmask_3d,
- _unmask_4d, intersect_masks, MaskWarning)
+ compute_background_mask, compute_gray_matter_mask,
+ compute_multi_gray_matter_mask,
+ unmask, _unmask_3d, _unmask_4d, intersect_masks,
+ MaskWarning, _extrapolate_out_mask)
from nilearn._utils.testing import (write_tmp_imgs, assert_raises_regex)
+from nilearn._utils.testing import assert_warns
from nilearn._utils.exceptions import DimensionError
from nilearn.input_data import NiftiMasker
@@ -23,7 +30,11 @@
else np.version.short_version)
np_version = distutils.version.LooseVersion(np_version).version
+_TEST_DIM_ERROR_MSG = ("Input data has incompatible dimensionality: "
+ "Expected dimension is 3D and you provided "
+ "a %s image")
+
def test_compute_epi_mask():
mean_image = np.ones((9, 9, 3))
mean_image[3:-2, 3:-2, :] = 10
@@ -93,6 +104,28 @@ def test_compute_background_mask():
assert_true(isinstance(w[0].message, masking.MaskWarning))
+def test_compute_gray_matter_mask():
+ image = Nifti1Image(np.ones((9, 9, 9)), np.eye(4))
+
+ mask = compute_gray_matter_mask(image, threshold=-1)
+ mask1 = np.zeros((9, 9, 9))
+ mask1[2:-2, 2:-2, 2:-2] = 1
+
+ np.testing.assert_array_equal(mask1, mask.get_data())
+
+ # Check that we get a useful warning for empty masks
+ assert_warns(masking.MaskWarning, compute_gray_matter_mask, image,
+ threshold=1)
+
+ # Check that masks obtained from same FOV are the same
+ img1 = Nifti1Image(np.full((9, 9, 9), np.random.rand()), np.eye(4))
+ img2 = Nifti1Image(np.full((9, 9, 9), np.random.rand()), np.eye(4))
+
+ mask_img1 = compute_gray_matter_mask(img1)
+ mask_img2 = compute_gray_matter_mask(img2)
+ np.testing.assert_array_equal(mask_img1.get_data(),
+ mask_img2.get_data())
+
+
def test_apply_mask():
""" Test smoothing of timeseries extraction
"""
@@ -133,7 +166,7 @@ def test_apply_mask():
 # verify that 4D masks are rejected
mask_img_4d = Nifti1Image(np.ones((40, 40, 40, 2)), np.eye(4))
- assert_raises_regex(DimensionError, "Data must be a 3D",
+ assert_raises_regex(DimensionError, _TEST_DIM_ERROR_MSG % "4D",
masking.apply_mask, data_img, mask_img_4d)
# Check that 3D data is accepted
@@ -146,7 +179,7 @@ def test_apply_mask():
assert_equal(sorted(data_3d.tolist()), [3., 4., 12.])
# Check data shape and affine
- assert_raises_regex(DimensionError, "Data must be a 3D",
+ assert_raises_regex(DimensionError, _TEST_DIM_ERROR_MSG % "2D",
masking.apply_mask, data_img,
Nifti1Image(mask[20, ...], affine))
assert_raises(ValueError, masking.apply_mask,
@@ -175,10 +208,10 @@ def test_unmask():
masked4D = data4D[mask, :].T
unmasked4D = data4D.copy()
- unmasked4D[-mask, :] = 0
+ unmasked4D[np.logical_not(mask), :] = 0
masked3D = data3D[mask]
unmasked3D = data3D.copy()
- unmasked3D[-mask] = 0
+ unmasked3D[np.logical_not(mask)] = 0
# 4D Test, test value ordering at the same time.
t = unmask(masked4D, mask_img, order="C").get_data()
@@ -321,6 +354,17 @@ def test_intersect_masks():
mask_ab[2, 2] = 1
mask_ab_ = intersect_masks([mask_a_img, mask_b_img], threshold=1.)
assert_array_equal(mask_ab, mask_ab_.get_data())
+ # Test intersect mask images with '>f8'. This function uses
+ # largest_connected_component to check if intersect_masks passes with
+ # connected=True (which is by default)
+ mask_a_img_change_dtype = Nifti1Image(mask_a_img.get_data().astype('>f8'),
+ affine=mask_a_img.affine)
+ mask_b_img_change_dtype = Nifti1Image(mask_b_img.get_data().astype('>f8'),
+ affine=mask_b_img.affine)
+ mask_ab_change_type = intersect_masks([mask_a_img_change_dtype,
+ mask_b_img_change_dtype],
+ threshold=1.)
+ assert_array_equal(mask_ab, mask_ab_change_type.get_data())
mask_abc = mask_a + mask_b + mask_c
mask_abc_ = intersect_masks([mask_a_img, mask_b_img, mask_c_img],
@@ -369,6 +413,26 @@ def test_compute_multi_epi_mask():
assert_array_equal(mask_ab, mask_ab_.get_data())
+def test_compute_multi_gray_matter_mask():
+ assert_raises(TypeError, compute_multi_gray_matter_mask, [])
+
+ # Check error raised if images with different shapes are given as input
+ imgs = [Nifti1Image(np.ones((9, 9, 9)), np.eye(4)),
+ Nifti1Image(np.ones((9, 9, 8)), np.eye(4))]
+ assert_raises(ValueError, compute_multi_gray_matter_mask, imgs)
+
+ # Check results are the same if affine is the same
+ imgs1 = [Nifti1Image(np.random.randn(9, 9, 9), np.eye(4)),
+ Nifti1Image(np.random.randn(9, 9, 9), np.eye(4))]
+ mask1 = compute_multi_gray_matter_mask(imgs1)
+
+ imgs2 = [Nifti1Image(np.random.randn(9, 9, 9), np.eye(4)),
+ Nifti1Image(np.random.randn(9, 9, 9), np.eye(4))]
+ mask2 = compute_multi_gray_matter_mask(imgs2)
+
+ assert_array_equal(mask1.get_data(), mask2.get_data())
+
+
def test_error_shape(random_state=42, shape=(3, 5, 7, 11)):
# open-ended `if .. elif` in masking.unmask
@@ -397,3 +461,94 @@ def test_nifti_masker_empty_mask_warning():
ValueError,
"The mask is invalid as it is empty: it masks all data",
NiftiMasker(mask_strategy="epi").fit_transform, X)
+
+
+def test_unmask_list(random_state=42):
+ rng = np.random.RandomState(random_state)
+ shape = (3, 4, 5)
+ affine = np.eye(4)
+ mask_data = (rng.rand(*shape) < .5)
+ mask_img = Nifti1Image(mask_data.astype(np.uint8), affine)
+ a = unmask(mask_data[mask_data], mask_img)
+ b = unmask(mask_data[mask_data].tolist(), mask_img) # shouldn't crash
+ assert_array_equal(a.get_data(), b.get_data())
+
+
+def test__extrapolate_out_mask():
+ # Input data:
+ initial_data = np.zeros((5, 5, 5))
+ initial_data[1, 2, 2] = 1
+ initial_data[2, 1, 2] = 2
+ initial_data[2, 2, 1] = 3
+ initial_data[3, 2, 2] = 4
+ initial_data[2, 3, 2] = 5
+ initial_data[2, 2, 3] = 6
+ initial_mask = initial_data.copy() != 0
+
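+ # (Editorial note: one iteration of _extrapolate_out_mask assigns to
+ # each voxel bordering the mask the mean of its masked 6-neighbors,
+ # e.g. voxel (1, 1, 2) gets mean(1, 2) = 1.5 and the center voxel
+ # (2, 2, 2) gets mean(1, ..., 6) = 3.5 in the expected arrays below.)
+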
+ # Expected result
+ target_data = np.array([[[0. , 0. , 0. , 0. , 0. ],
+ [0. , 0. , 0. , 0. , 0. ],
+ [0. , 0. , 1. , 0. , 0. ],
+ [0. , 0. , 0. , 0. , 0. ],
+ [0. , 0. , 0. , 0. , 0. ]],
+
+ [[0. , 0. , 0. , 0. , 0. ],
+ [0. , 0. , 1.5, 0. , 0. ],
+ [0. , 2. , 1. , 3.5, 0. ],
+ [0. , 0. , 3. , 0. , 0. ],
+ [0. , 0. , 0. , 0. , 0. ]],
+
+ [[0. , 0. , 2. , 0. , 0. ],
+ [0. , 2.5, 2. , 4. , 0. ],
+ [3. , 3. , 3.5, 6. , 6. ],
+ [0. , 4. , 5. , 5.5, 0. ],
+ [0. , 0. , 5. , 0. , 0. ]],
+
+ [[0. , 0. , 0. , 0. , 0. ],
+ [0. , 0. , 3. , 0. , 0. ],
+ [0. , 3.5, 4. , 5. , 0. ],
+ [0. , 0. , 4.5, 0. , 0. ],
+ [0. , 0. , 0. , 0. , 0. ]],
+
+ [[0. , 0. , 0. , 0. , 0. ],
+ [0. , 0. , 0. , 0. , 0. ],
+ [0. , 0. , 4. , 0. , 0. ],
+ [0. , 0. , 0. , 0. , 0. ],
+ [0. , 0. , 0. , 0. , 0. ]]])
+ target_mask = np.array([[[False, False, False, False, False],
+ [False, False, False, False, False],
+ [False, False, True, False, False],
+ [False, False, False, False, False],
+ [False, False, False, False, False]],
+
+ [[False, False, False, False, False],
+ [False, False, True, False, False],
+ [False, True, True, True, False],
+ [False, False, True, False, False],
+ [False, False, False, False, False]],
+
+ [[False, False, True, False, False],
+ [False, True, True, True, False],
+ [ True, True, True, True, True],
+ [False, True, True, True, False],
+ [False, False, True, False, False]],
+
+ [[False, False, False, False, False],
+ [False, False, True, False, False],
+ [False, True, True, True, False],
+ [False, False, True, False, False],
+ [False, False, False, False, False]],
+
+ [[False, False, False, False, False],
+ [False, False, False, False, False],
+ [False, False, True, False, False],
+ [False, False, False, False, False],
+ [False, False, False, False, False]]])
+
+ # Test:
+ extrapolated_data, extrapolated_mask = _extrapolate_out_mask(initial_data,
+ initial_mask,
+ iterations=1)
+ assert_array_equal(extrapolated_data, target_data)
+ assert_array_equal(extrapolated_mask, target_mask)
diff --git a/nilearn/tests/test_ndimage.py b/nilearn/tests/test_ndimage.py
index 6ad18d8c49..6f35b2f41c 100644
--- a/nilearn/tests/test_ndimage.py
+++ b/nilearn/tests/test_ndimage.py
@@ -3,12 +3,12 @@
This test file is in nilearn/tests because nosetests ignores modules whose
name starts with an underscore
"""
-from scipy import ndimage
from nose.tools import assert_raises
-
import numpy as np
-from nilearn._utils.ndimage import largest_connected_component, _peak_local_max
+from nilearn._utils.ndimage import (largest_connected_component,
+ _peak_local_max)
+from nilearn._utils import testing
def test_largest_cc():
@@ -18,9 +18,23 @@ def test_largest_cc():
assert_raises(ValueError, largest_connected_component, a)
a[1:3, 1:3, 1:3] = 1
np.testing.assert_equal(a, largest_connected_component(a))
+ # A simple test with non-native dtype
+ a_change_type = a.astype('>f8')
+ np.testing.assert_equal(a, largest_connected_component(a_change_type))
+
b = a.copy()
b[5, 5, 5] = 1
np.testing.assert_equal(a, largest_connected_component(b))
+ # A simple test with non-native dtype
+ b_change_type = b.astype('>f8')
+ np.testing.assert_equal(a, largest_connected_component(b_change_type))
+
+ # Test that the correct errors are raised when an image or a string
+ # is passed.
+ img = testing.generate_labeled_regions(shape=(10, 11, 12),
+ n_regions=2)
+
+ assert_raises(ValueError, largest_connected_component, img)
+ assert_raises(ValueError, largest_connected_component, "Test String")
def test_empty_peak_local_max():
diff --git a/nilearn/tests/test_niimg.py b/nilearn/tests/test_niimg.py
index ea081b29db..4c63c3f50e 100644
--- a/nilearn/tests/test_niimg.py
+++ b/nilearn/tests/test_niimg.py
@@ -11,7 +11,6 @@
from nilearn._utils.testing import assert_raises_regex
-
currdir = os.path.dirname(os.path.abspath(__file__))
@@ -23,7 +22,7 @@ def test_copy_img():
def test_copy_img_side_effect():
img1 = Nifti1Image(np.ones((2, 2, 2, 2)), affine=np.eye(4))
hash1 = joblib.hash(img1)
- img2 = niimg.copy_img(img1)
+ niimg.copy_img(img1)
hash2 = joblib.hash(img1)
assert_equal(hash1, hash2)
@@ -31,6 +30,21 @@ def test_copy_img_side_effect():
def test_new_img_like_side_effect():
img1 = Nifti1Image(np.ones((2, 2, 2, 2)), affine=np.eye(4))
hash1 = joblib.hash(img1)
- img2 = new_img_like(img1, np.ones((2, 2, 2, 2)), img1.get_affine().copy(), copy_header=True)
+ new_img_like(img1, np.ones((2, 2, 2, 2)), img1.affine.copy(),
+ copy_header=True)
hash2 = joblib.hash(img1)
assert_equal(hash1, hash2)
+
+
+def test_get_target_dtype():
+ img = Nifti1Image(np.ones((2, 2, 2), dtype=np.float64), affine=np.eye(4))
+ assert_equal(img.get_data().dtype.kind, 'f')
+ dtype_kind_float = niimg._get_target_dtype(img.get_data().dtype,
+ target_dtype='auto')
+ assert_equal(dtype_kind_float, np.float32)
+
+ img2 = Nifti1Image(np.ones((2, 2, 2), dtype=np.int64), affine=np.eye(4))
+ assert_equal(img2.get_data().dtype.kind, 'i')
+ dtype_kind_int = niimg._get_target_dtype(img2.get_data().dtype,
+ target_dtype='auto')
+ assert_equal(dtype_kind_int, np.int32)
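+
+
+# (Editorial note: with target_dtype='auto', 64-bit data is downcast to the
+# matching 32-bit type, float64 to float32 and int64 to int32, which halves
+# memory use while preserving the dtype kind, as asserted above.)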
diff --git a/nilearn/tests/test_niimg_conversions.py b/nilearn/tests/test_niimg_conversions.py
index 4fe56b67d9..e359915622 100644
--- a/nilearn/tests/test_niimg_conversions.py
+++ b/nilearn/tests/test_niimg_conversions.py
@@ -24,6 +24,8 @@
from nilearn._utils.exceptions import DimensionError
from nilearn._utils import testing, niimg_conversions
from nilearn._utils.testing import assert_raises_regex
+from nilearn._utils.testing import with_memory_profiler
+from nilearn._utils.testing import assert_memory_less_than
from nilearn._utils.niimg_conversions import _iter_check_niimg
@@ -98,7 +100,10 @@ def test_check_niimg_3d():
# Test dimensionality error
img = Nifti1Image(np.zeros((10, 10, 10)), np.eye(4))
- assert_raises_regex(TypeError, 'Data must be a 3D',
+ assert_raises_regex(TypeError,
+ "Input data has incompatible dimensionality: "
+ "Expected dimension is 3D and you provided a list "
+ "of 3D images \(4D\).",
_utils.check_niimg_3d, [img, img])
# Check that a filename does not raise an error
@@ -109,6 +114,10 @@ def test_check_niimg_3d():
with testing.write_tmp_imgs(data_img, create_files=True) as filename:
_utils.check_niimg_3d(filename)
+ # check data dtype equal with dtype='auto'
+ img_check = _utils.check_niimg_3d(img, dtype='auto')
+ assert_equal(img.get_data().dtype.kind, img_check.get_data().dtype.kind)
+
def test_check_niimg_4d():
assert_raises_regex(TypeError, 'nibabel format',
@@ -123,11 +132,11 @@ def test_check_niimg_4d():
# Tests with return_iterator=False
img_4d_1 = _utils.check_niimg_4d([img_3d, img_3d])
assert_true(img_4d_1.get_data().shape == (10, 10, 10, 2))
- assert_array_equal(img_4d_1.get_affine(), affine)
+ assert_array_equal(img_4d_1.affine, affine)
img_4d_2 = _utils.check_niimg_4d(img_4d_1)
assert_array_equal(img_4d_2.get_data(), img_4d_2.get_data())
- assert_array_equal(img_4d_2.get_affine(), img_4d_2.get_affine())
+ assert_array_equal(img_4d_2.affine, img_4d_2.affine)
# Tests with return_iterator=True
img_3d_iterator = _utils.check_niimg_4d([img_3d, img_3d],
@@ -142,7 +151,7 @@ def test_check_niimg_4d():
for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2):
assert_true(img_1.get_data().shape == (10, 10, 10))
assert_array_equal(img_1.get_data(), img_2.get_data())
- assert_array_equal(img_1.get_affine(), img_2.get_affine())
+ assert_array_equal(img_1.affine, img_2.affine)
img_3d_iterator_1 = _utils.check_niimg_4d([img_3d, img_3d],
return_iterator=True)
@@ -151,12 +160,14 @@ def test_check_niimg_4d():
for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2):
assert_true(img_1.get_data().shape == (10, 10, 10))
assert_array_equal(img_1.get_data(), img_2.get_data())
- assert_array_equal(img_1.get_affine(), img_2.get_affine())
+ assert_array_equal(img_1.affine, img_2.affine)
# This should raise an error: a 3D img is given and we want a 4D
- assert_raises_regex(DimensionError, 'Data must be a 4D Niimg-like object '
- 'but you provided a 3D',
- _utils.check_niimg_4d, img_3d)
+ assert_raises_regex(DimensionError,
+ "Input data has incompatible dimensionality: "
+ "Expected dimension is 4D and you provided a "
+ "3D image.",
+ _utils.check_niimg_4d, img_3d)
# Test a Niimg-like object that does not hold a shape attribute
phony_img = PhonyNiimage()
@@ -185,13 +196,24 @@ def test_check_niimg():
assert_raises_regex(
DimensionError,
- 'Data must be a 2D Niimg-like object but you provided a list of list '
- 'of list of 3D images.', _utils.check_niimg, img_3_3d, ensure_ndim=2)
+ "Input data has incompatible dimensionality: "
+ "Expected dimension is 2D and you provided "
+ "a list of list of list of 3D images \(6D\)",
+ _utils.check_niimg, img_3_3d, ensure_ndim=2)
assert_raises_regex(
DimensionError,
- 'Data must be a 4D Niimg-like object but you provided a list of list '
- 'of 4D images.', _utils.check_niimg, img_2_4d, ensure_ndim=4)
+ "Input data has incompatible dimensionality: "
+ "Expected dimension is 4D and you provided "
+ "a list of list of 4D images \(6D\)",
+ _utils.check_niimg, img_2_4d, ensure_ndim=4)
+
+ # check data dtype equal with dtype='auto'
+ img_3d_check = _utils.check_niimg(img_3d, dtype='auto')
+ assert_equal(img_3d.get_data().dtype.kind, img_3d_check.get_data().dtype.kind)
+
+ img_4d_check = _utils.check_niimg(img_4d, dtype='auto')
+ assert_equal(img_4d.get_data().dtype.kind, img_4d_check.get_data().dtype.kind)
def test_check_niimg_wildcards():
@@ -341,6 +363,23 @@ def test_iter_check_niimgs():
_utils.check_niimg(img_2_4d).get_data())
+def _check_memory(list_img_3d):
+ # We intentionally add a memory usage offset to avoid untrustworthy
+ # measures with memory_profiler.
+ mem_offset = b'a' * 100 * 1024 ** 2
+ list(_iter_check_niimg(list_img_3d))
+ return mem_offset
+
+
+@with_memory_profiler
+def test_iter_check_niimgs_memory():
+ # Verify that iterating over a list of images doesn't consume extra
+ # memory.
+ assert_memory_less_than(100, 0.1, _check_memory,
+ [Nifti1Image(np.ones((100, 100, 200)), np.eye(4))
+ for i in range(10)])
+
+
def test_repr_niimgs():
# Test with file path
assert_equal(_utils._repr_niimgs("test"), "test")
@@ -383,15 +422,16 @@ def test_concat_niimgs():
# Regression test for #601. Dimensionality of first image was not checked
# properly
- assert_raises_regex(DimensionError, 'Data must be a 4D Niimg-like object but '
- 'you provided',
+ _dimension_error_msg = ("Input data has incompatible dimensionality: "
+ "Expected dimension is 4D and you provided "
+ "a list of 4D images \(5D\)")
+ assert_raises_regex(DimensionError, _dimension_error_msg,
_utils.concat_niimgs, [img4d], ensure_ndim=4)
# check basic concatenation with equal shape/affine
concatenated = _utils.concat_niimgs((img1, img3, img1))
- assert_raises_regex(DimensionError, 'Data must be a 4D Niimg-like object but '
- 'you provided',
+ assert_raises_regex(DimensionError, _dimension_error_msg,
_utils.concat_niimgs, [img1, img4d])
# smoke-test auto_resample
@@ -405,12 +445,13 @@ def test_concat_niimgs():
auto_resample=False)
# test list of 4D niimgs as input
- tmpimg1 = tempfile.mktemp(suffix='.nii')
- tmpimg2 = tempfile.mktemp(suffix='.nii')
+ tempdir = tempfile.mkdtemp()
+ tmpimg1 = os.path.join(tempdir, '1.nii')
+ tmpimg2 = os.path.join(tempdir, '2.nii')
try:
nibabel.save(img1, tmpimg1)
nibabel.save(img3, tmpimg2)
- concatenated = _utils.concat_niimgs([tmpimg1, tmpimg2])
+ concatenated = _utils.concat_niimgs(os.path.join(tempdir, '*'))
assert_array_equal(
concatenated.get_data()[..., 0], img1.get_data())
assert_array_equal(
@@ -418,6 +459,8 @@ def test_concat_niimgs():
finally:
_remove_if_exists(tmpimg1)
_remove_if_exists(tmpimg2)
+ if os.path.exists(tempdir):
+ os.removedirs(tempdir)
img5d = Nifti1Image(np.ones((2, 2, 2, 2, 2)), affine)
assert_raises_regex(TypeError, 'Concatenated images must be 3D or 4D. '
@@ -425,6 +468,17 @@ def test_concat_niimgs():
[img5d, img5d])
+def test_concat_niimg_dtype():
+ shape = [2, 3, 4]
+ vols = [nibabel.Nifti1Image(
+ np.zeros(shape + [n_scans]).astype(np.int16), np.eye(4))
+ for n_scans in [1, 5]]
+ nimg = _utils.concat_niimgs(vols)
+ assert_equal(nimg.get_data().dtype, np.float32)
+ nimg = _utils.concat_niimgs(vols, dtype=None)
+ assert_equal(nimg.get_data().dtype, np.int16)
+
+
def nifti_generator(buffer):
for i in range(10):
buffer.append(Nifti1Image(np.random.random((10, 10, 10)), np.eye(4)))
diff --git a/nilearn/tests/test_param_validation.py b/nilearn/tests/test_param_validation.py
index f44d6a8b07..05efd3ee01 100644
--- a/nilearn/tests/test_param_validation.py
+++ b/nilearn/tests/test_param_validation.py
@@ -2,15 +2,24 @@
Test the _utils.param_validation module
"""
-import warnings
import numpy as np
+import warnings
+import os
+import nibabel
-from nose.tools import assert_true, assert_equal
+from nose.tools import assert_equal, assert_true, assert_raises
+from sklearn.base import BaseEstimator
from nilearn._utils.testing import assert_raises_regex, assert_warns
from nilearn._utils.extmath import fast_abs_percentile
-from nilearn._utils.param_validation import check_threshold
+from nilearn._utils.param_validation import (MNI152_BRAIN_VOLUME,
+ _get_mask_volume,
+ check_feature_screening,
+ check_threshold)
+
+mni152_brain_mask = (
+ "/usr/share/fsl/data/standard/MNI152_T1_1mm_brain_mask.nii.gz")
def test_check_threshold():
@@ -48,10 +57,42 @@ def test_check_threshold():
threshold_numpy_scalar = np.float64(threshold)
assert_equal(
check_threshold(threshold, matrix, percentile_func=fast_abs_percentile),
- check_threshold(threshold_numpy_scalar, matrix, percentile_func=fast_abs_percentile))
+ check_threshold(threshold_numpy_scalar, matrix,
+ percentile_func=fast_abs_percentile))
# Test for threshold provided as a percentile of the data (str ending with a
# %)
assert_true(1. < check_threshold("50%", matrix,
percentile_func=fast_abs_percentile,
name=name) <= 2.)
+
+
+def test_get_mask_volume():
+ # Test that the hard-coded standard mask volume is correctly computed
+ if os.path.isfile(mni152_brain_mask):
+ assert_equal(MNI152_BRAIN_VOLUME, _get_mask_volume(nibabel.load(
+ mni152_brain_mask)))
+ else:
+ warnings.warn("Couldn't find %s (for testing)" % (mni152_brain_mask))
+
+
+def test_feature_screening():
+ # dummy
+ mask_img_data = np.zeros((182, 218, 182))
+ mask_img_data[30:-30, 30:-30, 30:-30] = 1
+ affine = np.eye(4)
+ mask_img = nibabel.Nifti1Image(mask_img_data, affine=affine)
+
+ for is_classif in [True, False]:
+ for screening_percentile in [100, None, 20, 101, -1, 10]:
+
+ if screening_percentile == 100 or screening_percentile is None:
+ assert_equal(check_feature_screening(
+ screening_percentile, mask_img, is_classif), None)
+ elif screening_percentile == 101 or screening_percentile == -1:
+ assert_raises(ValueError, check_feature_screening,
+ screening_percentile, mask_img, is_classif)
+ elif screening_percentile == 20:
+ assert_true(isinstance(check_feature_screening(
+ screening_percentile, mask_img, is_classif),
+ BaseEstimator))
diff --git a/nilearn/tests/test_segmentation.py b/nilearn/tests/test_segmentation.py
index 80d227847c..40a7e4988a 100644
--- a/nilearn/tests/test_segmentation.py
+++ b/nilearn/tests/test_segmentation.py
@@ -54,3 +54,20 @@ def test_bad_inputs():
labels[6, 8] = 5
np.testing.assert_raises(ValueError,
_random_walker, img, labels, spacing=(1,))
+
+
+def test_reorder_labels():
+ # When labels have non-consecutive integers, we reorder them to be
+ # consecutive, leaving no gaps between integers. The reordered labels
+ # are expected to keep the same shape as the input.
+ # Issue #938, comment #14.
+ data = np.zeros((5, 5)) + 0.1 * np.random.randn(5, 5)
+ data[1:5, 1:5] = 1
+
+ labels = np.zeros_like(data)
+ labels[3, 3] = 1
+ labels[1, 4] = 4  # a non-consecutive integer label
+
+ labels = _random_walker(data, labels)
+ assert data.shape == labels.shape
diff --git a/nilearn/tests/test_signal.py b/nilearn/tests/test_signal.py
index 572d51b5b9..ea915b1cb1 100644
--- a/nilearn/tests/test_signal.py
+++ b/nilearn/tests/test_signal.py
@@ -8,6 +8,8 @@
import numpy as np
from nose.tools import assert_true, assert_false, assert_raises
+from sklearn.utils.testing import assert_less
+import nibabel
# Use nisignal here to avoid name collisions (using nilearn.signal is
# not possible)
@@ -23,7 +25,7 @@ def generate_signals(n_features=17, n_confounds=5, length=41,
All returned signals have no trends at all (to machine precision).
Parameters
- ==========
+ ----------
n_features, n_confounds : int, optional
respectively number of features to generate, and number of confounds
to use for generating noise signals.
@@ -39,7 +41,7 @@ def generate_signals(n_features=17, n_confounds=5, length=41,
gives the contiguousness of the output arrays.
Returns
- =======
+ -------
signals : numpy.ndarray, shape (length, n_features)
unperturbed signals.
@@ -83,12 +85,12 @@ def generate_trends(n_features=17, length=41):
"""Generate linearly-varying signals, with zero mean.
Parameters
- ==========
+ ----------
n_features, length : int
respectively number of signals and number of samples to generate.
Returns
- =======
+ -------
trends : numpy.ndarray, shape (length, n_features)
output signals, one per column.
"""
@@ -99,6 +101,15 @@ def generate_trends(n_features=17, length=41):
return trends * factors
+def generate_signals_plus_trends(n_features=17, n_samples=41):
+
+ signals, _, _ = generate_signals(n_features=n_features,
+ length=n_samples)
+ trends = generate_trends(n_features=n_features,
+ length=n_samples)
+ return signals + trends
+
+
def test_butterworth():
rand_gen = np.random.RandomState(0)
n_features = 20000
@@ -119,8 +130,9 @@ def test_butterworth():
np.testing.assert_almost_equal(data, data_original)
nisignal.butterworth(data, sampling,
low_pass=low_pass, high_pass=high_pass,
- copy=False, save_memory=True)
+ copy=False)
np.testing.assert_almost_equal(out_single, data)
+ np.testing.assert_(id(out_single) != id(data))
# multiple timeseries
data = rand_gen.randn(n_samples, n_features)
@@ -131,6 +143,8 @@ def test_butterworth():
low_pass=low_pass, high_pass=high_pass,
copy=True)
np.testing.assert_almost_equal(data, data_original)
+ np.testing.assert_(id(out1) != id(data_original))
+
# check that multiple- and single-timeseries filtering do the same thing.
np.testing.assert_almost_equal(out1[:, 0], out_single)
nisignal.butterworth(data, sampling,
@@ -146,6 +160,7 @@ def test_butterworth():
low_pass=80., # Greater than nyq frequency
copy=True)
np.testing.assert_almost_equal(out1, out2)
+ np.testing.assert_(id(out1) != id(out2))
def test_standardize():
@@ -215,6 +230,12 @@ def test_detrend():
np.testing.assert_array_equal(length_1_signal,
nisignal._detrend(length_1_signal))
+ # Mean removal on integers
+ detrended = nisignal._detrend(x.astype(np.int64), inplace=True,
+ type="constant")
+ assert_less(abs(detrended.mean(axis=0)).max(),
+ 20. * np.finfo(np.float).eps)
+
def test_mean_of_squares():
"""Test _mean_of_squares."""
@@ -243,6 +264,18 @@ def test_clean_detrending():
length=n_samples)
x = signals + trends
+ # With ensure_finite=True, NaNs and infs in the input should be
+ # removed from the output
+ y = signals + trends
+ y[20, 150] = np.nan
+ y[5, 500] = np.nan
+ y[15, 14] = np.inf
+ y = nisignal.clean(y, ensure_finite=True)
+ assert_true(np.all(np.isfinite(y)))
+
+ # test boolean is not given to signal.clean
+ assert_raises(TypeError, nisignal.clean, x, low_pass=False)
+ assert_raises(TypeError, nisignal.clean, x, high_pass=False)
+
# This should remove trends
x_detrended = nisignal.clean(x, standardize=False, detrend=True,
low_pass=None, high_pass=None)
@@ -254,6 +287,37 @@ def test_clean_detrending():
assert_false(abs(x_undetrended - signals).max() < 0.06)
+def test_clean_t_r():
+ """Different TRs must produce different results after filtering"""
+ rng = np.random.RandomState(0)
+ n_samples = 34
+ # n_features Must be higher than 500
+ n_features = 501
+ x_orig = generate_signals_plus_trends(n_features=n_features,
+ n_samples=n_samples)
+ random_tr_list1 = np.round(rng.rand(3) * 10, decimals=2)
+ random_tr_list2 = np.round(rng.rand(3) * 10, decimals=2)
+ for tr1, tr2 in zip(random_tr_list1, random_tr_list2):
+ low_pass_freq_list = tr1 * np.array([1.0 / 100, 1.0 / 110])
+ high_pass_freq_list = tr1 * np.array([1.0 / 210, 1.0 / 190])
+ for low_cutoff, high_cutoff in zip(low_pass_freq_list,
+ high_pass_freq_list):
+ det_one_tr = nisignal.clean(x_orig, t_r=tr1, low_pass=low_cutoff,
+ high_pass=high_cutoff)
+ det_diff_tr = nisignal.clean(x_orig, t_r=tr2, low_pass=low_cutoff,
+ high_pass=high_cutoff)
+
+ if not np.isclose(tr1, tr2, atol=0.3):
+ msg = ('results do not differ for different TRs: {} and {} '
+ 'at cutoffs: low_pass={}, high_pass={} '
+ 'n_samples={}, n_features={}'.format(
+ tr1, tr2, low_cutoff, high_cutoff,
+ n_samples, n_features))
+ np.testing.assert_(np.any(np.not_equal(det_one_tr, det_diff_tr)),
+ msg)
+ del det_one_tr, det_diff_tr
+
+
def test_clean_frequencies():
sx1 = np.sin(np.linspace(0, 100, 2000))
sx2 = np.sin(np.linspace(0, 100, 2000))
@@ -349,9 +413,21 @@ def test_clean_confounds():
confounds=filename1)
assert_raises(TypeError, nisignal.clean, signals,
confounds=[None])
+ assert_raises(ValueError, nisignal.clean, signals, t_r=None,
+ low_pass=.01)
+
+ # Test without standardizing that constant parts of confounds are
+ # accounted for
+ np.testing.assert_almost_equal(nisignal.clean(np.ones((20, 2)),
+ standardize=False,
+ confounds=np.ones(20),
+ detrend=False,
+ ).mean(),
+ np.zeros((20, 2)))
def test_high_variance_confounds():
+
# C and F order might take different paths in the function. Check that the
# result is identical.
n_features = 1001
diff --git a/nilearn/tests/test_testing.py b/nilearn/tests/test_testing.py
index f7fb842514..446c8c62d3 100644
--- a/nilearn/tests/test_testing.py
+++ b/nilearn/tests/test_testing.py
@@ -1,10 +1,37 @@
import itertools
-
import numpy as np
from nose.tools import assert_equal, assert_raises
-from nilearn._utils.testing import generate_fake_fmri
+from nilearn._utils.testing import generate_fake_fmri, with_memory_profiler
+from nilearn._utils.testing import assert_memory_less_than, assert_raises_regex
+
+
+def create_object(size):
+ """Just create and return an object containing `size` bytes."""
+ mem_use = b'a' * size
+ return mem_use
+
+
+@with_memory_profiler
+def test_memory_usage():
+ # Valid measures (larger objects)
+ for mem in (500, 200):
+ assert_memory_less_than(mem, 0.1, create_object, mem * 1024 ** 2)
+
+ # Ensure an exception is raised for objects that are too small, as
+ # memory_profiler may return an untrustworthy memory measure in
+ # that case.
+ assert_raises_regex(ValueError,
+ "Memory profiler measured an untrustable memory",
+ assert_memory_less_than, 50, 0.1,
+ create_object, 25 * 1024 ** 2)
+
+ # Ensure ValueError is raised if memory used is above expected memory
+ # limit.
+ assert_raises_regex(ValueError,
+ "Memory consumption measured",
+ assert_memory_less_than, 100, 0.1,
+ create_object, 200 * 1024 ** 2)
def test_generate_fake_fmri():
diff --git a/nilearn/version.py b/nilearn/version.py
index 3a1475ed1b..52f21101dd 100644
--- a/nilearn/version.py
+++ b/nilearn/version.py
@@ -2,7 +2,7 @@
"""
nilearn version, required package versions, and utilities for checking
"""
-# Author: Loïc Estève, Ben Cipollini
+# Author: Loic Esteve, Ben Cipollini
# License: simplified BSD
# PEP0440 compatible formatted version, see:
@@ -21,32 +21,31 @@
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
-__version__ = '0.2.1'
+__version__ = '0.5.0a'
_NILEARN_INSTALL_MSG = 'See %s for installation information.' % (
'http://nilearn.github.io/introduction.html#installation')
# This is a tuple to preserve order, so that dependencies are checked
-# in some meaningful order (more => less 'core'). We avoid using
-# collections.OrderedDict to preserve Python 2.6 compatibility.
+# in some meaningful order (more => less 'core').
REQUIRED_MODULE_METADATA = (
('numpy', {
- 'min_version': '1.6.1',
+ 'min_version': '1.11',
'required_at_installation': True,
'install_info': _NILEARN_INSTALL_MSG}),
('scipy', {
- 'min_version': '0.9.0',
+ 'min_version': '0.17',
'required_at_installation': True,
'install_info': _NILEARN_INSTALL_MSG}),
('sklearn', {
- 'min_version': '0.13',
+ 'min_version': '0.18',
'required_at_installation': True,
'install_info': _NILEARN_INSTALL_MSG}),
('nibabel', {
- 'min_version': '1.1.0',
+ 'min_version': '2.0.2',
'required_at_installation': False}))
-OPTIONAL_MATPLOTLIB_MIN_VERSION = '1.1.1'
+OPTIONAL_MATPLOTLIB_MIN_VERSION = '1.5.1'
def _import_module_with_version_check(
@@ -64,6 +63,10 @@ def _import_module_with_version_check(
module_name,
install_info or 'Please install it properly to use nilearn.')
exc.args += (user_friendly_info,)
+ # Necessary for Python 3 because the repr/str of ImportError
+ # objects was changed in Python 3
+ if hasattr(exc, 'msg'):
+ exc.msg += '. ' + user_friendly_info
raise
# Avoid choking on modules with no __version__ attribute
diff --git a/setup.cfg b/setup.cfg
index 245652c083..48ddd82a97 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -16,3 +16,9 @@ ignore-files=(plot_.*.py|conf\.py)
[wheel]
universal=1
+
+[flake8]
+# For PEP8 error codes see
+# http://pep8.readthedocs.org/en/latest/intro.html#error-codes
+# E402: module level import not at top of file
+ignore=E402
diff --git a/setup.py b/setup.py
index 1a13dd27fa..e2c106980d 100755
--- a/setup.py
+++ b/setup.py
@@ -79,16 +79,21 @@ def is_installing():
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
],
packages=find_packages(),
package_data={'nilearn.datasets.data': ['*.nii.gz', '*.csv'],
+ 'nilearn.datasets.data.fsaverage5': ['*.gz'],
+ 'nilearn.surface.data': ['*.csv'],
+ 'nilearn.plotting.data.js': ['*.js'],
+ 'nilearn.plotting.data.html': ['*.html'],
'nilearn.plotting.glass_brain_files': ['*.json'],
'nilearn.tests.data': ['*'],
'nilearn.image.tests.data': ['*.mgz'],
+ 'nilearn.surface.tests.data': ['*.annot', '*.label'],
'nilearn.datasets.tests.data': ['*.*'],
'nilearn.datasets.description': ['*.rst']},
install_requires=install_requires,)