From aae508cda795d6e2587b6ffc525af9e6c0447152 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= Date: Tue, 1 Dec 2015 11:21:01 +0100 Subject: [PATCH 0001/1925] TRAVIS: flake8 diff in pull requests Add this as a separate entry in the Travis build matrix. For now this entry is marked as "allowed_failures". * add flake8_diff.sh that finds the common ancestor between the branch and upstream/master (or origin/master) and flake8 the diff with respect to this common ancestor * general .travis.yml refactor to follow more closely the Travis-CI docs. * move each build step into its own script * additional SKIP_TESTS environment variable to be able to skip the tests. This is useful when you only want to do the flake8 --diff part * Ignore 'module level import not at top of file' in setup.cfg, because we have plenty of those in our examples. --- .travis.yml | 80 ++++++++++++------------- continuous_integration/after_success.sh | 13 ++++ continuous_integration/flake8_diff.sh | 35 +++++++++++ continuous_integration/install.sh | 8 ++- continuous_integration/test_script.sh | 21 +++++++ setup.cfg | 3 + 6 files changed, 115 insertions(+), 45 deletions(-) create mode 100755 continuous_integration/after_success.sh create mode 100755 continuous_integration/flake8_diff.sh create mode 100755 continuous_integration/test_script.sh diff --git a/.travis.yml b/.travis.yml index 2057fe42d2..f966d3fbba 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,54 +6,48 @@ virtualenv: env: global: - TEST_RUN_FOLDER="/tmp" # folder where the tests are run from - matrix: + +matrix: + # Do not wait for the allowed_failures entry to finish before + # setting the status + fast_finish: true + allow_failures: + # allow_failures seems to be keyed on the python version + - python: 2.7 + include: # Ubuntu 14.04 versions - - DISTRIB="conda" PYTHON_VERSION="2.7" - NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.13.3" - SCIKIT_LEARN_VERSION="0.14.1" MATPLOTLIB_VERSION="1.3.1" + - env: DISTRIB="conda" PYTHON_VERSION="2.7" + NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.13.3" + SCIKIT_LEARN_VERSION="0.14.1" MATPLOTLIB_VERSION="1.3.1" # Ubuntu 14.04 versions without matplotlib - - DISTRIB="conda" PYTHON_VERSION="2.7" - NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.13.3" - SCIKIT_LEARN_VERSION="0.14.1" - - DISTRIB="neurodebian" PYTHON_VERSION="2.7" - # Trying to get as close to the minimum required versions while - # still having the package version available through conda - - DISTRIB="conda" PYTHON_VERSION="2.6" - NUMPY_VERSION="1.6.2" SCIPY_VERSION="0.11.0" - SCIKIT_LEARN_VERSION="0.13" MATPLOTLIB_VERSION="1.1.1" - NIBABEL_VERSION="1.1.0" + - env: DISTRIB="conda" PYTHON_VERSION="2.7" + NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.13.3" + SCIKIT_LEARN_VERSION="0.14.1" + # Neurodebian + - env: DISTRIB="neurodebian" PYTHON_VERSION="2.7" + # Trying to get as close to the minimum required versions while + # still having the package version available through conda + - env: DISTRIB="conda" PYTHON_VERSION="2.6" + NUMPY_VERSION="1.6.2" SCIPY_VERSION="0.11.0" + SCIKIT_LEARN_VERSION="0.13" MATPLOTLIB_VERSION="1.1.1" + NIBABEL_VERSION="1.1.0" # Python 3.4 with intermediary versions - - DISTRIB="conda" PYTHON_VERSION="3.4" - NUMPY_VERSION="1.8" SCIPY_VERSION="0.14" - SCIKIT_LEARN_VERSION="0.15" MATPLOTLIB_VERSION="1.4" + - env: DISTRIB="conda" PYTHON_VERSION="3.4" + NUMPY_VERSION="1.8" SCIPY_VERSION="0.14" + SCIKIT_LEARN_VERSION="0.15" MATPLOTLIB_VERSION="1.4" # Most recent versions - - DISTRIB="conda" PYTHON_VERSION="3.5" - NUMPY_VERSION="*" SCIPY_VERSION="*" - 
SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true" + - env: DISTRIB="conda" PYTHON_VERSION="3.5" + NUMPY_VERSION="*" SCIPY_VERSION="*" + SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true" + # FLAKE8 linting on diff wrt common ancestor with upstream/master + # Note: the python value is only there to trigger allow_failures + - python: 2.7 + env: DISTRIB="conda" PYTHON_VERSION="2.7" FLAKE8_VERSION="*" SKIP_TESTS="true" -install: - - source continuous_integration/install.sh +install: source continuous_integration/install.sh -before_script: - - make clean +before_script: make clean -script: - - python continuous_integration/show-python-packages-versions.py - # Copy setup.cfg to TEST_RUN_FOLDER where we are going to run the tests from - # Mainly for nose config settings - - cp setup.cfg "$TEST_RUN_FOLDER" - # We want to back out of the current working directory to make - # sure we are using nilearn installed in site-packages rather - # than the one from the current working directory - # Parentheses (run in a subshell) are used to leave - # the current directory unchanged - - (cd "$TEST_RUN_FOLDER" && make -f $OLDPWD/Makefile test-code) - - test "$MATPLOTLIB_VERSION" == "" || make test-doc +script: source continuous_integration/test_script.sh -after_success: - # Ignore coveralls failures as the coveralls server is not very reliable - # but we don't want travis to report a failure in the github UI just - # because the coverage report failed to be published. - # coveralls need to be run from the git checkout - # so we need to copy the coverage results from TEST_RUN_FOLDER - - if [[ "$COVERAGE" == "true" ]]; then cp "$TEST_RUN_FOLDER/.coverage" .; coveralls || echo "failed"; fi +after_success: source continuous_integration/after_success.sh diff --git a/continuous_integration/after_success.sh b/continuous_integration/after_success.sh new file mode 100755 index 0000000000..9eef6cde93 --- /dev/null +++ b/continuous_integration/after_success.sh @@ -0,0 +1,13 @@ +#!/bin/sh + +set -e + +# Ignore coveralls failures as the coveralls server is not very reliable +# but we don't want travis to report a failure in the github UI just +# because the coverage report failed to be published. +# coveralls need to be run from the git checkout +# so we need to copy the coverage results from TEST_RUN_FOLDER +if [[ "$SKIP_TESTS" != "true" && "$COVERAGE" == "true" ]]; then + cp "$TEST_RUN_FOLDER/.coverage" . + coveralls || echo "Coveralls upload failed" +fi diff --git a/continuous_integration/flake8_diff.sh b/continuous_integration/flake8_diff.sh new file mode 100755 index 0000000000..732319afdc --- /dev/null +++ b/continuous_integration/flake8_diff.sh @@ -0,0 +1,35 @@ +#!/bin/sh + +set -e + +# Travis does the git clone with a limited depth (50 at the time of +# writing). 
This may not be enough to find the common ancestor with +# $REMOTE/master so we unshallow the git checkout +git fetch --unshallow || echo "Unshallowing the git checkout failed" + +# Tackle both common cases of origin and upstream as remote +# Note: upstream has priority if it exists +git remote -v +git remote | grep upstream && REMOTE=upstream || REMOTE=origin +git fetch -v $REMOTE master:remote_master + +# Find common ancestor between HEAD and remote_master +COMMIT=$(git merge-base @ remote_master) || \ + echo "No common ancestor found for $(git show @ -q) and $(git show remote_master -q)" + +if [ -z "$COMMIT" ]; then + # clean-up created branch + git branch -D remote_master + exit 1 +fi + +echo Common ancestor is: +git show $COMMIT --stat + +echo Running flake8 on the diff in the range\ + "$(git rev-parse --short $COMMIT)..$(git rev-parse --short @)" \ + "($(git rev-list $COMMIT.. | wc -l) commit(s)):" +git diff $COMMIT | flake8 --diff && echo -e "No problem detected by flake8\n" + +# clean-up created branch +git branch -D remote_master diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh index 153cab922b..3798f079ba 100755 --- a/continuous_integration/install.sh +++ b/continuous_integration/install.sh @@ -35,7 +35,7 @@ print_conda_requirements() { # - for scikit-learn, SCIKIT_LEARN_VERSION is used TO_INSTALL_ALWAYS="pip nose" REQUIREMENTS="$TO_INSTALL_ALWAYS" - TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn" + TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn flake8" for PACKAGE in $TO_INSTALL_MAYBE; do # Capitalize package name and add _VERSION PACKAGE_VERSION_VARNAME="${PACKAGE^^}_VERSION" @@ -104,4 +104,8 @@ if [[ "$COVERAGE" == "true" ]]; then pip install coverage coveralls fi -python setup.py install +# numpy not installed when skipping the tests so we do not want to run +# setup.py install +if [[ "$SKIP_TESTS" != "true" ]]; then + python setup.py install +fi diff --git a/continuous_integration/test_script.sh b/continuous_integration/test_script.sh new file mode 100755 index 0000000000..1dfa2578d1 --- /dev/null +++ b/continuous_integration/test_script.sh @@ -0,0 +1,21 @@ +#!/bin/sh + +set -e + +if [[ -n "$FLAKE8_VERSION" ]]; then + source continuous_integration/flake8_diff.sh +fi + +if [[ "$SKIP_TESTS" != "true" ]]; then + python continuous_integration/show-python-packages-versions.py + # Copy setup.cfg to TEST_RUN_FOLDER where we are going to run the tests from + # Mainly for nose config settings + cp setup.cfg "$TEST_RUN_FOLDER" + # We want to back out of the current working directory to make + # sure we are using nilearn installed in site-packages rather + # than the one from the current working directory + # Parentheses (run in a subshell) are used to leave + # the current directory unchanged + (cd "$TEST_RUN_FOLDER" && make -f $OLDPWD/Makefile test-code) + test "$MATPLOTLIB_VERSION" == "" || make test-doc +fi diff --git a/setup.cfg b/setup.cfg index 245652c083..b25c893f07 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,3 +16,6 @@ ignore-files=(plot_.*.py|conf\.py) [wheel] universal=1 + +[flake8] +ignore=E402 From d74c269b9618f12234ece97ff60d8b233d9cb7db Mon Sep 17 00:00:00 2001 From: Arthur Mensch Date: Sun, 13 Dec 2015 21:36:52 +0100 Subject: [PATCH 0002/1925] FIX: doc link to examples --- doc/connectivity/resting_state_networks.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/connectivity/resting_state_networks.rst b/doc/connectivity/resting_state_networks.rst index ab40765733..b3ce2208d9 
100644 --- a/doc/connectivity/resting_state_networks.rst +++ b/doc/connectivity/resting_state_networks.rst @@ -136,17 +136,17 @@ Visualizing the results :start-after: # Visualize the results .. |left_img_decomp| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_001.png - :target: ../auto_examples/plot_compare_resting_state_decomposition.html + :target: ../auto_examples/connectivity/plot_compare_resting_state_decomposition.html :width: 50% .. |right_img_decomp| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_003.png - :target: ../auto_examples/plot_compare_resting_state_decomposition.html + :target: ../auto_examples/connectivity/plot_compare_resting_state_decomposition.html :width: 50% .. |left_img_decomp_single| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_002.png - :target: ../auto_examples/plot_compare_resting_state_decomposition.html + :target: ../auto_examples/connectivity/plot_compare_resting_state_decomposition.html :width: 50% .. |right_img_decomp_single| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_004.png - :target: ../auto_examples/plot_compare_resting_state_decomposition.html + :target: ../auto_examples/connectivity/plot_compare_resting_state_decomposition.html :width: 50% From 22cfc9e5c485c749a4e3eafd2380a65a683434aa Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Wed, 28 Oct 2015 09:38:38 +0100 Subject: [PATCH 0003/1925] New fetcher for Megatrawls Netmats, Tests, Example, Doc --- doc/modules/reference.rst | 1 + .../plot_visualize_megatrawls_netmats.py | 47 +++++ nilearn/datasets/__init__.py | 6 +- nilearn/datasets/func.py | 171 ++++++++++++++++++ nilearn/datasets/tests/test_func.py | 42 +++++ 5 files changed, 265 insertions(+), 2 deletions(-) create mode 100644 examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index 336d21e00c..1ab02544c4 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -80,6 +80,7 @@ uses. fetch_miyawaki2008 fetch_nyu_rest fetch_oasis_vbm + fetch_megatrawls_netmats .. _decoding_ref: diff --git a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py new file mode 100644 index 0000000000..10de864492 --- /dev/null +++ b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py @@ -0,0 +1,47 @@ +""" +Visualizing Megatrawls Network Matrices from Human Connectome Project +===================================================================== + +This example shows how to visualize network matrices fetched from +HCP beta-release of the Functional Connectivity Megatrawl + +For this, we need a fetcher named as `fetch_megatrawls_netmats` in +nilearn.datasets + +Please see related documentation for more details. 
+""" + + +def plot_mats(netmats, title): + plt.figure() + plt.title(title) + plt.imshow(netmats, interpolation="nearest") + plt.colorbar() + +import numpy as np +from nilearn import datasets +# Fetches the network matrices dimensionalities d=100 and d=300 for +# timeseries method ts3 +print(" -- Fetching Network Matrices -- ") +Znet1, Znet2 = datasets.fetch_megatrawls_netmats( + choice_dimensionality=['d100', 'd300'], choice_timeseries='ts3') + +# Converting netmats text files to numpy arrays +netmats1_d100 = np.genfromtxt(Znet1[0]) +netmats1_d300 = np.genfromtxt(Znet1[1]) + +netmats2_d100 = np.genfromtxt(Znet2[0]) +netmats2_d300 = np.genfromtxt(Znet2[1]) + +# Visualization +import matplotlib.pyplot as plt +print(" -- Showing the matrices -- ") +list_ = [netmats1_d100, netmats1_d300, netmats2_d100, netmats2_d300] +titles = ["Full Correlation matrices of dimensionality d=100", + "Full Correlation matrices of dimensionality d=300", + "Partial Correlation matrices of dimensionality d=100", + "Partial Correlation matrices of dimensionality d=300"] +for matrices, title_ in zip(list_, titles): + plot_mats(matrices, title_) + +plt.show() diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py index fca6a88e7f..1c4fdc2b01 100644 --- a/nilearn/datasets/__init__.py +++ b/nilearn/datasets/__init__.py @@ -6,7 +6,8 @@ from .func import (fetch_haxby_simple, fetch_haxby, fetch_nyu_rest, fetch_adhd, fetch_miyawaki2008, fetch_localizer_contrasts, fetch_abide_pcp, - fetch_localizer_calculation_task, fetch_mixed_gambles) + fetch_localizer_calculation_task, fetch_mixed_gambles, + fetch_megatrawls_netmats) from .atlas import (fetch_atlas_craddock_2012, fetch_atlas_destrieux_2009, fetch_atlas_harvard_oxford, fetch_atlas_msdl, fetch_atlas_power_2011, fetch_atlas_smith_2009, @@ -19,4 +20,5 @@ 'fetch_atlas_craddock_2012', 'fetch_atlas_destrieux_2009', 'fetch_atlas_harvard_oxford', 'fetch_atlas_msdl', 'fetch_atlas_power_2011', 'fetch_atlas_smith_2009', - 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal'] + 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal', + 'fetch_megatrawls_netmats'] diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 2ede2484b4..6b831c2167 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1279,3 +1279,174 @@ def fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True, X, y, mask_img = _load_mixed_gambles(map(nibabel.load, data.zmaps)) data.zmaps, data.gain, data.mask_img = X, y, mask_img return data + + +def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, + choice_timeseries=None, + resume=True, verbose=1): + """Downloads and fetches Network Matrices data from MegaTrawls release in HCP. + + This data can be used to predict relationships between imaging data (functional + connectivity) and non-imaging behavioural measures such as age, sex, education, etc. + The network matrices are estimated between 461 functional connectivity subjects. + + The network matrices denoted as 'netmats' are estimated using full correlation + denoted as 'Znet1' or partial correlation with limited L2 regularisation + denoted as 'Znet2'. + + .. versionadded:: 0.1.5 + + Parameters + ---------- + data_dir: string, default is None, optional + Path of the data directory. Used to force data storage in a specified + location. 
+ + choice_dimensionality: a string or list of strings or None, Default is None + Possibile options are {'d25', 'd50', 'd100', 'd200', 'd300'} + 'd25' - Group ICA brain parcellations with dimensionality = 25 + 'd50' - Group ICA brain parcellations with dimensionality = 50 + 'd100' - Group ICA brain parcellations with dimensionality = 100 + 'd200' - Group ICA brain parcellations with dimensionality = 200 + 'd300' - Group ICA brain parcellations with dimensionality = 300 + + If chosen to be default, network matrices data estimated from all + dimensionalities ['d25', 'd50', 'd100', 'd200', 'd300'] of brian + parcellations are fetched. + + If chosen a string or list of strings as choices, network matrices + of particular choice of dimensionality of brain parcellations + are fetched. For example, if chosen as a string 'd25' only data + corresponding to d=25 is fetched. + + choice_timeseries: a string or list of strings or None, Default is None + Possible options are {'ts2', 'ts3'} + 'ts2' - choice denotes the timeseries signals which were extracted using + mutiple spatial regression. + 'ts3' - choice denoted the timeseries signals which were extracted using + eigen regression. + + If chosen to be default, network matrices data estimated using all + timeseries extraction method ['ts2', 'ts3'] are fetched. + + If chosen a string, network matrices estimated using particular type + of timeseries extraction method is fetched. + + resume: boolean, Default is True + This parameter is required if a partly downloaded file is needed to be + resumed to download again. + + verbose: int, Default is 1 + This parameter is used to set the verbosity level to print the message + to give information about the processing. + 0 indicates no information will be given. + + Returns + ------- + data: sklearn.datasets.base.Bunch + Dictionary-like object, attributes are: + + 'Znet1': Full correlation matrices + + 'Znet2': Partial correlation matrices + + References + ---------- + For more details: + Stephen Smith et al, HCP beta-release of the Functional Connectivity MegaTrawl. + April 2015 "HCP500-MegaTrawl" release. + + https://db.humanconnectome.org/megatrawl/ + + Disclaimer + ---------- + IMPORTANT: This is open access data. You must agree to Terms and conditions + of using this data before using it, + available at: http://humanconnectome.org/data/data-use-terms/open-access.html + Open Access Data (all imaging data and most of the behavioral data) + is available to those who register an account at ConnectomeDB and agree to + the Open Access Data Use Terms. This includes agreement to comply with + institutional rules and regulations. This means you may need the approval + of your IRB or Ethics Committee to use the data. The released HCP data are + not considered de-identified, since certain combinations of HCP Restricted + Data (available through a separate process) might allow identification of + individuals. Different national, state and local laws may apply and be + interpreted differently, so it is important that you consult with your IRB + or Ethics Committee before beginning your research. If needed and upon request, + the HCP will provide a certificate stating that you have accepted the + HCP Open Access Data Use Terms. Please note that everyone who works with + HCP open access data must review and agree to these terms, including those + who are accessing shared copies of this data. 
If you are sharing + HCP Open Access data, please advise your co-researchers that they must + register with ConnectomeDB and agree to these terms. + Register and sign the Open Access Data Use Terms at + ConnectomeDB: https://db.humanconnectome.org/ + """ + # Data is manually uploaded in NITRC + url = "https://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz" + opts = {'uncompress': True} + + # dataset terms + dimensionalities = ['d25', 'd50', 'd100', 'd200', 'd300'] + timeseries_methods = ['ts2', 'ts3'] + + message = ("The %s you have given '%s' is invalid. Please choose either " + "of them %s or list of specific choices.") + + names = ['choice_dimensionality', 'choice_timeseries'] + inputs = [choice_dimensionality, choice_timeseries] + assign_names = ['dimensionalities', 'timeseries_methods'] + standard_inputs = [dimensionalities, timeseries_methods] + + for name_, input_, assign_, standard_ in zip(names, inputs, assign_names, + standard_inputs): + if input_ is not None: + if isinstance(input_, list): + for n_input_ in input_: + if n_input_ not in standard_: + raise ValueError(message % ( + name_, input_, str(standard_))) + if assign_ == 'dimensionalities': + dimensionalities = input_ + else: + timeseries_methods = input_ + elif not isinstance(input_, list): + if input_ not in standard_: + raise ValueError(message % ( + name_, input_, str(standard_))) + + if assign_ == 'dimensionalities': + dimensionalities = [input_] + else: + timeseries_methods = [input_] + + n_combinations = len(dimensionalities) * len(timeseries_methods) + dataset_name = 'Megatrawls' + + files_netmats1 = [] + files_netmats2 = [] + for dim_ in dimensionalities: + filename_ = os.path.join('3T_Q1-Q6related468_MSMsulc_' + str(dim_)) + for timeserie_ in timeseries_methods: + each_files_netmats1 = [(os.path.join( + filename_ + '_' + str(timeserie_), 'Znet1.txt'), url, opts)] + each_files_netmats2 = [(os.path.join( + filename_ + '_' + str(timeserie_), 'Znet2.txt'), url, opts)] + files_netmats1.append(each_files_netmats1) + files_netmats2.append(each_files_netmats2) + + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) + network_matrices1 = [] + network_matrices2 = [] + for n_ in range(n_combinations): + netmats1 = _fetch_files( + data_dir, files_netmats1[n_], resume=resume, verbose=verbose) + network_matrices1.extend(netmats1) + netmats2 = _fetch_files( + data_dir, files_netmats2[n_], resume=resume, verbose=verbose) + network_matrices2.extend(netmats2) + + Znet1 = network_matrices1 + Znet2 = network_matrices2 + + return Znet1, Znet2 diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index a866616040..b961cd0194 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -16,6 +16,7 @@ from . import test_utils as tst from nilearn.datasets import utils, func +from nilearn._utils.testing import assert_raises_regex from nilearn._utils.compat import _basestring, _urllib @@ -362,3 +363,44 @@ def test_fetch_mixed_gambles(): assert_equal(mgambles["zmaps"][0], os.path.join(datasetdir, "zmaps", "sub001_zmaps.nii.gz")) assert_equal(len(mgambles["zmaps"]), n_subjects) + + +def test_right_choices_dimensionality_timeseriesmethods(): + message = ("The %s you have given '%s' is invalid. 
") + for dim_ in ['a10', 'd15', 'd30']: + assert_raises_regex(ValueError, + (message % ('choice_dimensionality', dim_)), + func.fetch_megatrawls_netmats, + choice_dimensionality=dim_) + + for timeserie_ in ['tt1', 'ts4', 'st2']: + assert_raises_regex(ValueError, + (message % ('choice_timeseries', timeserie_)), + func.fetch_megatrawls_netmats, + choice_timeseries=timeserie_) + + +@with_setup(setup_tmpdata, teardown_tmpdata) +def test_fetch_megatrawls_netmats(): + net1, net2 = func.fetch_megatrawls_netmats(data_dir=tmpdir) + dataset_name_path = os.path.join(tmpdir, 'Megatrawls') + + assert_equal(net1[0], os.path.join( + dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d25_ts2', 'Znet1.txt')) + + assert_equal(net1[9], os.path.join( + dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d300_ts3', 'Znet1.txt')) + + assert_equal(net2[5], os.path.join( + dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d100_ts3', 'Znet2.txt')) + + # test if number of possible combinations of output are correct + net_1, net_2 = func.fetch_megatrawls_netmats( + data_dir=tmpdir, choice_dimensionality=['d25', 'd200']) + + expected_n_combinations = len(['d25', 'd200']) * len(['ts2', 'ts3']) + n_output_combinations_net1 = len(net_1) + n_output_combinations_net2 = len(net_2) + + assert_equal(expected_n_combinations, n_output_combinations_net1) + assert_equal(expected_n_combinations, n_output_combinations_net2) From 9e4206f0800017959ae3acf8351e46c48536e264 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Wed, 28 Oct 2015 15:20:59 +0100 Subject: [PATCH 0004/1925] Addressed comments as description, test for description, Bunch return --- .../plot_visualize_megatrawls_netmats.py | 26 +++++---- nilearn/datasets/description/Megatrawls.rst | 55 +++++++++++++++++++ nilearn/datasets/func.py | 14 ++--- nilearn/datasets/tests/test_func.py | 16 +++--- 4 files changed, 85 insertions(+), 26 deletions(-) create mode 100644 nilearn/datasets/description/Megatrawls.rst diff --git a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py index 10de864492..8da66141ba 100644 --- a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py +++ b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py @@ -5,7 +5,7 @@ This example shows how to visualize network matrices fetched from HCP beta-release of the Functional Connectivity Megatrawl -For this, we need a fetcher named as `fetch_megatrawls_netmats` in +For this, we need a fetcher named as :func:`nilearn.datasets.fetch_megatrawls_netmats` in nilearn.datasets Please see related documentation for more details. 
@@ -23,25 +23,27 @@ def plot_mats(netmats, title): # Fetches the network matrices dimensionalities d=100 and d=300 for # timeseries method ts3 print(" -- Fetching Network Matrices -- ") -Znet1, Znet2 = datasets.fetch_megatrawls_netmats( +netmats = datasets.fetch_megatrawls_netmats( choice_dimensionality=['d100', 'd300'], choice_timeseries='ts3') # Converting netmats text files to numpy arrays -netmats1_d100 = np.genfromtxt(Znet1[0]) -netmats1_d300 = np.genfromtxt(Znet1[1]) +full_correlation_matrices_d100 = np.genfromtxt(netmats.FullCorrelation[0]) +full_correlation_matrices_d300 = np.genfromtxt(netmats.FullCorrelation[1]) -netmats2_d100 = np.genfromtxt(Znet2[0]) -netmats2_d300 = np.genfromtxt(Znet2[1]) +partial_correlation_matrices_d100 = np.genfromtxt(netmats.PartialCorrelation[0]) +partial_correlation_matrices_d300 = np.genfromtxt(netmats.PartialCorrelation[1]) # Visualization import matplotlib.pyplot as plt print(" -- Showing the matrices -- ") -list_ = [netmats1_d100, netmats1_d300, netmats2_d100, netmats2_d300] -titles = ["Full Correlation matrices of dimensionality d=100", - "Full Correlation matrices of dimensionality d=300", - "Partial Correlation matrices of dimensionality d=100", - "Partial Correlation matrices of dimensionality d=300"] -for matrices, title_ in zip(list_, titles): +list_ = { + 'Full Correlation matrices of dimensionality d=100': full_correlation_matrices_d100, + 'Full Correlation matrices of dimensionality d=300': full_correlation_matrices_d300, + 'Partial Correlation matrices of dimensionality d=100': partial_correlation_matrices_d100, + 'Partial Correlation matrices of dimensionality d=300': partial_correlation_matrices_d300 + } + +for title_, matrices in sorted(list_.items()): plot_mats(matrices, title_) plt.show() diff --git a/nilearn/datasets/description/Megatrawls.rst b/nilearn/datasets/description/Megatrawls.rst new file mode 100644 index 0000000000..f32be44dd4 --- /dev/null +++ b/nilearn/datasets/description/Megatrawls.rst @@ -0,0 +1,55 @@ +MegaTrawls Network Matrices HCP + + +Notes +----- +Network Matrices data of two sets, Full Correlation and Partial Correlation +matrices estimated by a timeseries signals extracted from a nodes of Group ICA +parcellations. In total, 461 functional connectivity datasets were used. + +Data is available for all dimensionalities (d=25, d=50, d=100, d=200, d=300) +of Group ICA and for all timeseries methods (ts2 and ts3) + +These matrices can be used to predict the relationships between subjects +functional connectivity and subjects behavioural measures. + + +Content +------- + :'Full Correlation': Text files containing full correlation values + :'Partial Correlation': Text files containing partial correlation values + + +References +---------- +For more information about technical details about predicting the measures: +Stephen Smith et al, HCP beta-release of the Functional Connectivity MegaTrawl. +April 2015 "HCP500-MegaTrawl" release. +https://db.humanconnectome.org/megatrawl/ + + +Disclaimer +---------- +IMPORTANT: This is open access data. You must agree to Terms and conditions +of using this data before using it, available at: +http://humanconnectome.org/data/data-use-terms/open-access.html + +Open Access Data (all imaging data and most of the behavioral data) +is available to those who register an account at ConnectomeDB and agree to +the Open Access Data Use Terms. This includes agreement to comply with +institutional rules and regulations. 
This means you may need the approval +of your IRB or Ethics Committee to use the data. The released HCP data are +not considered de-identified, since certain combinations of HCP Restricted +Data (available through a separate process) might allow identification of +individuals. Different national, state and local laws may apply and be +interpreted differently, so it is important that you consult with your IRB +or Ethics Committee before beginning your research. If needed and upon +request, the HCP will provide a certificate stating that you have accepted the +HCP Open Access Data Use Terms. Please note that everyone who works with HCP +open access data must review and agree to these terms, including those who are +accessing shared copies of this data. If you are sharing HCP Open Access data, +please advise your co-researchers that they must register with ConnectomeDB +and agree to these terms. + +Register and sign the Open Access Data Use Terms at +ConnectomeDB: https://db.humanconnectome.org/ diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 6b831c2167..aec23037c5 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1346,9 +1346,9 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, data: sklearn.datasets.base.Bunch Dictionary-like object, attributes are: - 'Znet1': Full correlation matrices + 'FullCorrelation': Full correlation matrices - 'Znet2': Partial correlation matrices + 'PartialCorrelation': Partial correlation matrices References ---------- @@ -1382,7 +1382,6 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, Register and sign the Open Access Data Use Terms at ConnectomeDB: https://db.humanconnectome.org/ """ - # Data is manually uploaded in NITRC url = "https://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz" opts = {'uncompress': True} @@ -1436,6 +1435,8 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, files_netmats2.append(each_files_netmats2) data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) + description = _get_dataset_descr(dataset_name) + network_matrices1 = [] network_matrices2 = [] @@ -1446,7 +1447,6 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, data_dir, files_netmats2[n_], resume=resume, verbose=verbose) network_matrices2.extend(netmats2) - Znet1 = network_matrices1 - Znet2 = network_matrices2 - - return Znet1, Znet2 + return Bunch(FullCorrelation=network_matrices1, + PartialCorrelation=network_matrices2, + description=description) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index b961cd0194..60f9d8934c 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -382,25 +382,27 @@ def test_right_choices_dimensionality_timeseriesmethods(): @with_setup(setup_tmpdata, teardown_tmpdata) def test_fetch_megatrawls_netmats(): - net1, net2 = func.fetch_megatrawls_netmats(data_dir=tmpdir) + correlations = func.fetch_megatrawls_netmats(data_dir=tmpdir) dataset_name_path = os.path.join(tmpdir, 'Megatrawls') - assert_equal(net1[0], os.path.join( + assert_equal(correlations.FullCorrelation[0], os.path.join( dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d25_ts2', 'Znet1.txt')) - assert_equal(net1[9], os.path.join( + assert_equal(correlations.FullCorrelation[9], os.path.join( dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d300_ts3', 'Znet1.txt')) - assert_equal(net2[5], os.path.join( +
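# fetched files follow the nested (dimensionality, timeseries) loop order:
+    # d25_ts2, d25_ts3, d50_ts2, ..., d300_ts3, so index 5 is the d100_ts3 entry
+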
assert_equal(correlations.PartialCorrelation[5], os.path.join( dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d100_ts3', 'Znet2.txt')) # test if number of possible combinations of output are correct - net_1, net_2 = func.fetch_megatrawls_netmats( + correlation_ = func.fetch_megatrawls_netmats( data_dir=tmpdir, choice_dimensionality=['d25', 'd200']) expected_n_combinations = len(['d25', 'd200']) * len(['ts2', 'ts3']) - n_output_combinations_net1 = len(net_1) - n_output_combinations_net2 = len(net_2) + n_output_combinations_net1 = len(correlation_.FullCorrelation) + n_output_combinations_net2 = len(correlation_.PartialCorrelation) assert_equal(expected_n_combinations, n_output_combinations_net1) assert_equal(expected_n_combinations, n_output_combinations_net2) + + assert_not_equal(correlation_.description, '') From cdc48dd0fa3bfc90fbf5d88b13d76ecc5a72565d Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Tue, 3 Nov 2015 17:13:07 +0100 Subject: [PATCH 0005/1925] Addressed comments: references, _ removed, better names, etc --- .../plot_visualize_megatrawls_netmats.py | 42 +++--- nilearn/datasets/func.py | 124 ++++++++++-------- nilearn/datasets/tests/test_func.py | 22 ++-- 3 files changed, 103 insertions(+), 85 deletions(-) diff --git a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py index 8da66141ba..0fd918e508 100644 --- a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py +++ b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py @@ -5,21 +5,22 @@ This example shows how to visualize network matrices fetched from HCP beta-release of the Functional Connectivity Megatrawl -For this, we need a fetcher named as :func:`nilearn.datasets.fetch_megatrawls_netmats` in -nilearn.datasets - -Please see related documentation for more details. +See :func:`nilearn.datasets.fetch_megatrawls_netmats` documentation for more details. 
""" +import matplotlib.pyplot as plt +import numpy as np + +from nilearn import datasets +from nilearn import plotting def plot_mats(netmats, title): plt.figure() - plt.title(title) - plt.imshow(netmats, interpolation="nearest") + plt.imshow(netmats, interpolation="nearest", + cmap=plotting.cm.bwr) plt.colorbar() + plt.title(title) -import numpy as np -from nilearn import datasets # Fetches the network matrices dimensionalities d=100 and d=300 for # timeseries method ts3 print(" -- Fetching Network Matrices -- ") @@ -27,23 +28,22 @@ def plot_mats(netmats, title): choice_dimensionality=['d100', 'd300'], choice_timeseries='ts3') # Converting netmats text files to numpy arrays -full_correlation_matrices_d100 = np.genfromtxt(netmats.FullCorrelation[0]) -full_correlation_matrices_d300 = np.genfromtxt(netmats.FullCorrelation[1]) +full_correlation_matrices_d100 = np.genfromtxt(netmats.Fullcorrelation[0]) +full_correlation_matrices_d300 = np.genfromtxt(netmats.Fullcorrelation[1]) -partial_correlation_matrices_d100 = np.genfromtxt(netmats.PartialCorrelation[0]) -partial_correlation_matrices_d300 = np.genfromtxt(netmats.PartialCorrelation[1]) +partial_correlation_matrices_d100 = np.genfromtxt(netmats.Partialcorrelation[0]) +partial_correlation_matrices_d300 = np.genfromtxt(netmats.Partialcorrelation[1]) # Visualization -import matplotlib.pyplot as plt -print(" -- Showing the matrices -- ") -list_ = { - 'Full Correlation matrices of dimensionality d=100': full_correlation_matrices_d100, - 'Full Correlation matrices of dimensionality d=300': full_correlation_matrices_d300, - 'Partial Correlation matrices of dimensionality d=100': partial_correlation_matrices_d100, - 'Partial Correlation matrices of dimensionality d=300': partial_correlation_matrices_d300 +print(" -- Plotting correlation matrices -- ") +correlation_matrices = { + 'Full correlation matrices of dimensionality d=100': full_correlation_matrices_d100, + 'Full correlation matrices of dimensionality d=300': full_correlation_matrices_d300, + 'Partial correlation matrices of dimensionality d=100': partial_correlation_matrices_d100, + 'Partial correlation matrices of dimensionality d=300': partial_correlation_matrices_d300 } -for title_, matrices in sorted(list_.items()): - plot_mats(matrices, title_) +for title, matrices in sorted(correlation_matrices.items()): + plot_mats(matrices, title) plt.show() diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index aec23037c5..8188b86582 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -459,7 +459,7 @@ def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True, phenotypic = _fetch_files(data_dir, [phenotypic], resume=resume, verbose=verbose)[0] - ## Load the csv file + # Load the csv file phenotypic = np.genfromtxt(phenotypic, names=True, delimiter=',', dtype=None) @@ -1288,11 +1288,12 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, This data can be used to predict relationships between imaging data (functional connectivity) and non-imaging behavioural measures such as age, sex, education, etc. - The network matrices are estimated between 461 functional connectivity subjects. + The network matrices are estimated from functional connectivity datasets of 461 + subjects [1]. Full technical details in [2]. The network matrices denoted as 'netmats' are estimated using full correlation denoted as 'Znet1' or partial correlation with limited L2 regularisation - denoted as 'Znet2'. + denoted as 'Znet2'. [1] .. 
versionadded:: 0.1.5 @@ -1302,41 +1303,45 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, Path of the data directory. Used to force data storage in a specified location. - choice_dimensionality: a string or list of strings or None, Default is None - Possibile options are {'d25', 'd50', 'd100', 'd200', 'd300'} + choice_dimensionality: a string or list of strings or None, default is None + Possible options are {'d25', 'd50', 'd100', 'd200', 'd300'} 'd25' - Group ICA brain parcellations with dimensionality = 25 'd50' - Group ICA brain parcellations with dimensionality = 50 'd100' - Group ICA brain parcellations with dimensionality = 100 'd200' - Group ICA brain parcellations with dimensionality = 200 'd300' - Group ICA brain parcellations with dimensionality = 300 - If chosen to be default, network matrices data estimated from all + By default, network matrices data estimated from all dimensionalities ['d25', 'd50', 'd100', 'd200', 'd300'] of brian parcellations are fetched. - If chosen a string or list of strings as choices, network matrices - of particular choice of dimensionality of brain parcellations + If given as string or list of specific strings, network matrices + related to that particular dimensionality of brain parcellations are fetched. For example, if chosen as a string 'd25' only data corresponding to d=25 is fetched. - choice_timeseries: a string or list of strings or None, Default is None + choice_timeseries: a string or list of strings or None, default is None Possible options are {'ts2', 'ts3'} - 'ts2' - choice denotes the timeseries signals which were extracted using - mutiple spatial regression. - 'ts3' - choice denoted the timeseries signals which were extracted using - eigen regression. - - If chosen to be default, network matrices data estimated using all - timeseries extraction method ['ts2', 'ts3'] are fetched. - - If chosen a string, network matrices estimated using particular type + 'ts2' - choice denotes the averaged timeseries signals which were extracted + using mutiple spatial regression, in which full set of ICA maps was + used as spatial regressors against the subjects datasets resulting + a subject specific timeseries signals. [3] + 'ts3' - choice denoted the principal eigen timeseries signals which were + extracted using multiple spatial regression, but the subject + specific timeseries signals are extracted using SVD. The first + eigen timeseries of each subject rather than simple averaging [4] [5]. + + By default, network matrices data estimated using both timeseries extraction + method ['ts2', 'ts3'] are fetched. + + If given a string, network matrices estimated using particular type of timeseries extraction method is fetched. - resume: boolean, Default is True + resume: boolean, default is True This parameter is required if a partly downloaded file is needed to be resumed to download again. - verbose: int, Default is 1 + verbose: int, default is 1 This parameter is used to set the verbosity level to print the message to give information about the processing. 0 indicates no information will be given. 
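As a minimal usage sketch consistent with the docstring at this point in the series (the 'choice_*' parameter names and the 'Fullcorrelation'/'Partialcorrelation' attributes are the ones documented here; later commits below rename them):

    import numpy as np
    from nilearn import datasets

    # fetch only the d=100 matrices estimated from 'ts3' (eigen regression) timeseries
    netmats = datasets.fetch_megatrawls_netmats(
        choice_dimensionality='d100', choice_timeseries='ts3')

    # each attribute holds paths to Znet*.txt files; load one as a nodes x nodes array
    full_corr = np.genfromtxt(netmats.Fullcorrelation[0])
    partial_corr = np.genfromtxt(netmats.Partialcorrelation[0])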
@@ -1346,18 +1351,31 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, data: sklearn.datasets.base.Bunch Dictionary-like object, attributes are: - 'FullCorrelation': Full correlation matrices + 'Fullcorrelation': Full correlation matrices - 'PartialCorrelation': Partial correlation matrices + 'Partialcorrelation': Partial correlation matrices References ---------- For more details: - Stephen Smith et al, HCP beta-release of the Functional Connectivity MegaTrawl. + [1] Stephen Smith et al, HCP beta-release of the Functional Connectivity MegaTrawl. April 2015 "HCP500-MegaTrawl" release. - https://db.humanconnectome.org/megatrawl/ + Technical details: + [2] Smith, S.M. et al. Nat. Neurosci. 18, 1565-1567 (2015). + + [3] N.Filippini, et al. Distinct patterns of brain activity in young carriers + of the APOE-e4 allele. + Proc Natl Acad Sci USA (PNAS), 106:7209-7214, 2009. + + [4] S.Smith, et al. Methods for network modelling from high quality rfMRI data. + Meeting of the Organization for Human Brain Mapping. 2014 + + [5] J.X. O'Reilly et al. Distinct and overlapping functional zones in the cerebellum + defined by resting state functional connectivity. + Cerebral Cortex, 2009. + Disclaimer ---------- IMPORTANT: This is open access data. You must agree to Terms and conditions @@ -1393,44 +1411,44 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, "of them %s or list of specific choices.") names = ['choice_dimensionality', 'choice_timeseries'] - inputs = [choice_dimensionality, choice_timeseries] + user_inputs = [choice_dimensionality, choice_timeseries] assign_names = ['dimensionalities', 'timeseries_methods'] - standard_inputs = [dimensionalities, timeseries_methods] - - for name_, input_, assign_, standard_ in zip(names, inputs, assign_names, - standard_inputs): - if input_ is not None: - if isinstance(input_, list): - for n_input_ in input_: - if n_input_ not in standard_: + standard_variables = [dimensionalities, timeseries_methods] + + for name, check_in, assign, standard in zip(names, user_inputs, + assign_names, standard_variables): + if check_in is not None: + if isinstance(check_in, list): + for each_str in check_in: + if each_str not in standard: raise ValueError(message % ( - name_, input_, str(standard_))) - if assign_ == 'dimensionalities': - dimensionalities = input_ + name, check_in, str(standard))) + if assign == 'dimensionalities': + dimensionalities = check_in else: - timeseries_methods = input_ - elif not isinstance(input_, list): - if input_ not in standard_: + timeseries_methods = check_in + elif not isinstance(check_in, list): + if check_in not in standard: raise ValueError(message % ( - name_, input_, str(standard_))) + name, check_in, str(standard))) - if assign_ == 'dimensionalities': - dimensionalities = [input_] + if assign == 'dimensionalities': + dimensionalities = [check_in] else: - timeseries_methods = [input_] + timeseries_methods = [check_in] n_combinations = len(dimensionalities) * len(timeseries_methods) dataset_name = 'Megatrawls' files_netmats1 = [] files_netmats2 = [] - for dim_ in dimensionalities: - filename_ = os.path.join('3T_Q1-Q6related468_MSMsulc_' + str(dim_)) - for timeserie_ in timeseries_methods: + for dim in dimensionalities: + filename = os.path.join('3T_Q1-Q6related468_MSMsulc_' + str(dim)) + for timeseries in timeseries_methods: each_files_netmats1 = [(os.path.join( - filename_ + '_' + str(timeserie_), 'Znet1.txt'), url, opts)] + filename + '_' + str(timeseries), 'Znet1.txt'), url, opts)] 
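+                # each tuple is (file path inside the uncompressed archive, url, opts),
+                # the input layout _fetch_files expects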
each_files_netmats2 = [(os.path.join( - filename_ + '_' + str(timeserie_), 'Znet2.txt'), url, opts)] + filename + '_' + str(timeseries), 'Znet2.txt'), url, opts)] files_netmats1.append(each_files_netmats1) files_netmats2.append(each_files_netmats2) @@ -1439,14 +1457,14 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, network_matrices1 = [] network_matrices2 = [] - for n_ in range(n_combinations): + for n in range(n_combinations): netmats1 = _fetch_files( - data_dir, files_netmats1[n_], resume=resume, verbose=verbose) + data_dir, files_netmats1[n], resume=resume, verbose=verbose) network_matrices1.extend(netmats1) netmats2 = _fetch_files( - data_dir, files_netmats2[n_], resume=resume, verbose=verbose) + data_dir, files_netmats2[n], resume=resume, verbose=verbose) network_matrices2.extend(netmats2) - return Bunch(FullCorrelation=network_matrices1, - PartialCorrelation=network_matrices2, + return Bunch(Fullcorrelation=network_matrices1, + Partialcorrelation=network_matrices2, description=description) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index 60f9d8934c..a829dc272e 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -367,17 +367,17 @@ def test_fetch_mixed_gambles(): def test_right_choices_dimensionality_timeseriesmethods(): message = ("The %s you have given '%s' is invalid. ") - for dim_ in ['a10', 'd15', 'd30']: + for dim in ['a10', 'd15', 'd30']: assert_raises_regex(ValueError, - (message % ('choice_dimensionality', dim_)), + message % ('choice_dimensionality', dim), func.fetch_megatrawls_netmats, - choice_dimensionality=dim_) + choice_dimensionality=dim) - for timeserie_ in ['tt1', 'ts4', 'st2']: + for timeseries in ['tt1', 'ts4', 'st2']: assert_raises_regex(ValueError, - (message % ('choice_timeseries', timeserie_)), + message % ('choice_timeseries', timeseries), func.fetch_megatrawls_netmats, - choice_timeseries=timeserie_) + choice_timeseries=timeseries) @with_setup(setup_tmpdata, teardown_tmpdata) @@ -385,13 +385,13 @@ def test_fetch_megatrawls_netmats(): correlations = func.fetch_megatrawls_netmats(data_dir=tmpdir) dataset_name_path = os.path.join(tmpdir, 'Megatrawls') - assert_equal(correlations.FullCorrelation[0], os.path.join( + assert_equal(correlations.Fullcorrelation[0], os.path.join( dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d25_ts2', 'Znet1.txt')) - assert_equal(correlations.FullCorrelation[9], os.path.join( + assert_equal(correlations.Fullcorrelation[9], os.path.join( dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d300_ts3', 'Znet1.txt')) - assert_equal(correlations.PartialCorrelation[5], os.path.join( + assert_equal(correlations.Partialcorrelation[5], os.path.join( dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d100_ts3', 'Znet2.txt')) # test if number of possible combinations of output are correct @@ -399,8 +399,8 @@ def test_fetch_megatrawls_netmats(): data_dir=tmpdir, choice_dimensionality=['d25', 'd200']) expected_n_combinations = len(['d25', 'd200']) * len(['ts2', 'ts3']) - n_output_combinations_net1 = len(correlation_.FullCorrelation) - n_output_combinations_net2 = len(correlation_.PartialCorrelation) + n_output_combinations_net1 = len(correlation_.Fullcorrelation) + n_output_combinations_net2 = len(correlation_.Partialcorrelation) assert_equal(expected_n_combinations, n_output_combinations_net1) assert_equal(expected_n_combinations, n_output_combinations_net2) From d0c52ec02064363ca71a67883d77e07552c382cc Mon Sep 17 00:00:00 2001 From: 
KamalakerDadi Date: Tue, 10 Nov 2015 13:01:01 +0100 Subject: [PATCH 0006/1925] Addressed comments related to namings of parameters --- .../plot_visualize_megatrawls_netmats.py | 20 ++--- nilearn/datasets/func.py | 83 +++++++++---------- nilearn/datasets/tests/test_func.py | 23 ++--- 3 files changed, 62 insertions(+), 64 deletions(-) diff --git a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py index 0fd918e508..643d306fd5 100644 --- a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py +++ b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py @@ -24,23 +24,23 @@ def plot_mats(netmats, title): # Fetches the network matrices dimensionalities d=100 and d=300 for # timeseries method ts3 print(" -- Fetching Network Matrices -- ") -netmats = datasets.fetch_megatrawls_netmats( - choice_dimensionality=['d100', 'd300'], choice_timeseries='ts3') +netmats = datasets.fetch_megatrawls_netmats(dimensionality=[100, 300], + timeseries='eigen_regression') # Converting netmats text files to numpy arrays -full_correlation_matrices_d100 = np.genfromtxt(netmats.Fullcorrelation[0]) -full_correlation_matrices_d300 = np.genfromtxt(netmats.Fullcorrelation[1]) +full_correlation_matrices_100 = np.genfromtxt(netmats.Fullcorrelation[0]) +full_correlation_matrices_300 = np.genfromtxt(netmats.Fullcorrelation[1]) -partial_correlation_matrices_d100 = np.genfromtxt(netmats.Partialcorrelation[0]) -partial_correlation_matrices_d300 = np.genfromtxt(netmats.Partialcorrelation[1]) +partial_correlation_matrices_100 = np.genfromtxt(netmats.Partialcorrelation[0]) +partial_correlation_matrices_300 = np.genfromtxt(netmats.Partialcorrelation[1]) # Visualization print(" -- Plotting correlation matrices -- ") correlation_matrices = { - 'Full correlation matrices of dimensionality d=100': full_correlation_matrices_d100, - 'Full correlation matrices of dimensionality d=300': full_correlation_matrices_d300, - 'Partial correlation matrices of dimensionality d=100': partial_correlation_matrices_d100, - 'Partial correlation matrices of dimensionality d=300': partial_correlation_matrices_d300 + 'Full correlation matrices of dimensionality d=100': full_correlation_matrices_100, + 'Full correlation matrices of dimensionality d=300': full_correlation_matrices_300, + 'Partial correlation matrices of dimensionality d=100': partial_correlation_matrices_100, + 'Partial correlation matrices of dimensionality d=300': partial_correlation_matrices_300 } for title, matrices in sorted(correlation_matrices.items()): diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 8188b86582..e2fef27491 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1281,9 +1281,8 @@ def fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True, return data -def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, - choice_timeseries=None, - resume=True, verbose=1): +def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 300], + timeseries='eigen_regression', resume=True, verbose=1): """Downloads and fetches Network Matrices data from MegaTrawls release in HCP. This data can be used to predict relationships between imaging data (functional @@ -1303,39 +1302,32 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, Path of the data directory. Used to force data storage in a specified location. 
- choice_dimensionality: a string or list of strings or None, default is None - Possible options are {'d25', 'd50', 'd100', 'd200', 'd300'} - 'd25' - Group ICA brain parcellations with dimensionality = 25 - 'd50' - Group ICA brain parcellations with dimensionality = 50 - 'd100' - Group ICA brain parcellations with dimensionality = 100 - 'd200' - Group ICA brain parcellations with dimensionality = 200 - 'd300' - Group ICA brain parcellations with dimensionality = 300 + dimensionality: integer or list of integers, optional + Possible options are [25, 50, 100, 200, 300] + 25 - Group ICA brain parcellations with dimensionality = 25 + 50 - Group ICA brain parcellations with dimensionality = 50 + 100 - Group ICA brain parcellations with dimensionality = 100 + 200 - Group ICA brain parcellations with dimensionality = 200 + 300 - Group ICA brain parcellations with dimensionality = 300 - By default, network matrices data estimated from all - dimensionalities ['d25', 'd50', 'd100', 'd200', 'd300'] of brian - parcellations are fetched. + By default, network matrices data estimated from brain parcellations + of all dimensionalities are fetched as a separate list. - If given as string or list of specific strings, network matrices + If given an integer or list of specific integers, network matrices related to that particular dimensionality of brain parcellations - are fetched. For example, if chosen as a string 'd25' only data - corresponding to d=25 is fetched. - - choice_timeseries: a string or list of strings or None, default is None - Possible options are {'ts2', 'ts3'} - 'ts2' - choice denotes the averaged timeseries signals which were extracted - using mutiple spatial regression, in which full set - of ICA maps was used as spatial regressors against the subjects datasets - resulting a subject specific timeseries signals. [3] - 'ts3' - choice denoted the principal eigen timeseries signals which were - extracted using multiple spatial regression, but the subject - specific timeseries signals are extracted using SVD. The first - eigen timeseries of each subject rather than simple averaging [4] [5]. - - By default, network matrices data estimated using both timeseries extraction - method ['ts2', 'ts3'] are fetched. - - If given a string, network matrices estimated using particular type - of timeseries extraction method is fetched. + timeseries: a string or list ['multiple_spatial_regression', 'eigen_regression'] \ default is 'eigen_regression', optional + 'multiple_spatial_regression' - denotes the averaged timeseries signals + which were extracted using multiple spatial regression, in which the full set + of ICA maps was used as spatial regressors against the subjects' datasets + resulting in subject specific timeseries signals. [3] + 'eigen_regression' - denotes the principal eigen timeseries signals which were + extracted using multiple spatial regression, but the subject + specific timeseries signals are extracted using SVD. The first + eigen timeseries of each subject rather than simple averaging.
[4] [5] resume: boolean, default is True This parameter is required if a partly downloaded file is needed to be @@ -1350,10 +1342,12 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, ------- data: sklearn.datasets.base.Bunch Dictionary-like object, attributes are: + 'Fullcorrelation': Full correlation matrices (Znet1) + 'Partialcorrelation': Partial correlation matrices (Znet2) - 'Fullcorrelation': Full correlation matrices - - 'Partialcorrelation': Partial correlation matrices + Note: In output namings, 'eigen_regression' can be seen fetched as 'ts3' and + 'multiple_spatial_regression' fetched as 'ts2'. To keep them with standard + Megatrawls notations. References ---------- @@ -1404,18 +1398,18 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, opts = {'uncompress': True} # dataset terms - dimensionalities = ['d25', 'd50', 'd100', 'd200', 'd300'] - timeseries_methods = ['ts2', 'ts3'] + dimensionalities = [25, 50, 100, 200, 300] + timeseries_methods = ['multiple_spatial_regression', 'eigen_regression'] message = ("The %s you have given '%s' is invalid. Please choose either " "of them %s or list of specific choices.") - names = ['choice_dimensionality', 'choice_timeseries'] - user_inputs = [choice_dimensionality, choice_timeseries] + error_correcting_names = ['dimensionality', 'timeseries'] + user_inputs = [dimensionality, timeseries] assign_names = ['dimensionalities', 'timeseries_methods'] standard_variables = [dimensionalities, timeseries_methods] - for name, check_in, assign, standard in zip(names, user_inputs, + for name, check_in, assign, standard in zip(error_correcting_names, user_inputs, assign_names, standard_variables): if check_in is not None: if isinstance(check_in, list): @@ -1439,12 +1433,15 @@ def fetch_megatrawls_netmats(data_dir=None, choice_dimensionality=None, n_combinations = len(dimensionalities) * len(timeseries_methods) dataset_name = 'Megatrawls' - files_netmats1 = [] files_netmats2 = [] for dim in dimensionalities: - filename = os.path.join('3T_Q1-Q6related468_MSMsulc_' + str(dim)) + filename = os.path.join('3T_Q1-Q6related468_MSMsulc_d' + str(dim)) for timeseries in timeseries_methods: + if timeseries == 'multiple_spatial_regression': + timeseries = 'ts2' + elif timeseries == 'eigen_regression': + timeseries = 'ts3' each_files_netmats1 = [(os.path.join( filename + '_' + str(timeseries), 'Znet1.txt'), url, opts)] each_files_netmats2 = [(os.path.join( diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index a829dc272e..3a5d82c088 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -369,15 +369,15 @@ def test_right_choices_dimensionality_timeseriesmethods(): message = ("The %s you have given '%s' is invalid. 
") for dim in ['a10', 'd15', 'd30']: assert_raises_regex(ValueError, - message % ('choice_dimensionality', dim), + message % ('dimensionality', dim), func.fetch_megatrawls_netmats, - choice_dimensionality=dim) + dimensionality=dim) - for timeseries in ['tt1', 'ts4', 'st2']: + for ts in ['ts4', 'st2', 'eigen_regresion', 'mutiple_sptial_regression']: assert_raises_regex(ValueError, - message % ('choice_timeseries', timeseries), + message % ('timeseries', ts), func.fetch_megatrawls_netmats, - choice_timeseries=timeseries) + timeseries=ts) @with_setup(setup_tmpdata, teardown_tmpdata) @@ -386,19 +386,20 @@ def test_fetch_megatrawls_netmats(): dataset_name_path = os.path.join(tmpdir, 'Megatrawls') assert_equal(correlations.Fullcorrelation[0], os.path.join( - dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d25_ts2', 'Znet1.txt')) + dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d25_ts3', 'Znet1.txt')) - assert_equal(correlations.Fullcorrelation[9], os.path.join( + assert_equal(correlations.Fullcorrelation[4], os.path.join( dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d300_ts3', 'Znet1.txt')) - assert_equal(correlations.Partialcorrelation[5], os.path.join( + assert_equal(correlations.Partialcorrelation[2], os.path.join( dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d100_ts3', 'Znet2.txt')) # test if number of possible combinations of output are correct - correlation_ = func.fetch_megatrawls_netmats( - data_dir=tmpdir, choice_dimensionality=['d25', 'd200']) + timeseries = ['multiple_spatial_regression', 'eigen_regression'] + correlation_ = func.fetch_megatrawls_netmats(data_dir=tmpdir, dimensionality=[25, 200], + timeseries=timeseries) - expected_n_combinations = len(['d25', 'd200']) * len(['ts2', 'ts3']) + expected_n_combinations = len([25, 200]) * len(timeseries) n_output_combinations_net1 = len(correlation_.Fullcorrelation) n_output_combinations_net2 = len(correlation_.Partialcorrelation) From 67a8198bb0432d6146116066b33fdab6df276977 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Fri, 13 Nov 2015 14:57:25 +0100 Subject: [PATCH 0007/1925] Addressed comments and rebase --- .../plot_visualize_megatrawls_netmats.py | 17 ++- nilearn/datasets/description/Megatrawls.rst | 9 +- nilearn/datasets/func.py | 121 +++++++++++------- nilearn/datasets/tests/test_func.py | 53 ++++---- 4 files changed, 115 insertions(+), 85 deletions(-) diff --git a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py index 643d306fd5..2bcdd72a88 100644 --- a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py +++ b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py @@ -25,20 +25,19 @@ def plot_mats(netmats, title): # timeseries method ts3 print(" -- Fetching Network Matrices -- ") netmats = datasets.fetch_megatrawls_netmats(dimensionality=[100, 300], - timeseries='eigen_regression') + timeseries='eigen_regression', + matrices=['correlation', 'partial_correlation']) +correlation_matrices_100 = netmats.d100_eigen_regression_correlation +correlation_matrices_300 = netmats.d300_eigen_regression_correlation -# Converting netmats text files to numpy arrays -full_correlation_matrices_100 = np.genfromtxt(netmats.Fullcorrelation[0]) -full_correlation_matrices_300 = np.genfromtxt(netmats.Fullcorrelation[1]) - -partial_correlation_matrices_100 = np.genfromtxt(netmats.Partialcorrelation[0]) -partial_correlation_matrices_300 = np.genfromtxt(netmats.Partialcorrelation[1]) +partial_correlation_matrices_100 = 
netmats.d100_eigen_regression_partial_correlation +partial_correlation_matrices_300 = netmats.d300_eigen_regression_partial_correlation # Visualization print(" -- Plotting correlation matrices -- ") correlation_matrices = { - 'Full correlation matrices of dimensionality d=100': full_correlation_matrices_100, - 'Full correlation matrices of dimensionality d=300': full_correlation_matrices_300, + 'Correlation matrices of dimensionality d=100': correlation_matrices_100, + 'Correlation matrices of dimensionality d=300': correlation_matrices_300, 'Partial correlation matrices of dimensionality d=100': partial_correlation_matrices_100, 'Partial correlation matrices of dimensionality d=300': partial_correlation_matrices_300 } diff --git a/nilearn/datasets/description/Megatrawls.rst b/nilearn/datasets/description/Megatrawls.rst index f32be44dd4..88718012d2 100644 --- a/nilearn/datasets/description/Megatrawls.rst +++ b/nilearn/datasets/description/Megatrawls.rst @@ -3,12 +3,13 @@ MegaTrawls Network Matrices HCP Notes ----- -Network Matrices data of two sets, Full Correlation and Partial Correlation +Network Matrices data of two types, Full correlation and Partial correlation matrices estimated by a timeseries signals extracted from a nodes of Group ICA parcellations. In total, 461 functional connectivity datasets were used. Data is available for all dimensionalities (d=25, d=50, d=100, d=200, d=300) -of Group ICA and for all timeseries methods (ts2 and ts3) +of Group ICA and for all timeseries methods multiple spatial regression and +eigen regression (ts2 and ts3). These matrices can be used to predict the relationships between subjects functional connectivity and subjects behavioural measures. @@ -16,8 +17,8 @@ functional connectivity and subjects behavioural measures. Content ------- - :'Full Correlation': Text files containing full correlation values - :'Partial Correlation': Text files containing partial correlation values + :'Correlation': an array of matrices containing full correlation values + :'Partialcorrelation': an array of matrices containing partial correlation values References diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index e2fef27491..402f899594 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -11,7 +11,8 @@ from .utils import (_get_dataset_dir, _fetch_files, _get_dataset_descr, _read_md5_sum_file, _tree, _filter_columns) -from .._utils.compat import BytesIO, _basestring, _urllib +from .._utils.compat import BytesIO, _basestring, _urllib, izip +from .._utils.numpy_conversions import csv_to_array def fetch_haxby_simple(data_dir=None, url=None, resume=True, verbose=1): @@ -1282,7 +1283,8 @@ def fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True, def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 300], - timeseries='eigen_regression', resume=True, verbose=1): + timeseries='eigen_regression', matrices='partial_correlation', + resume=True, verbose=1): """Downloads and fetches Network Matrices data from MegaTrawls release in HCP. This data can be used to predict relationships between imaging data (functional @@ -1329,6 +1331,12 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30 specific timeseries signals are extracted using SVD. The first eigen timeseries of each subject rather than simple averaging. [4] [5] + matrices: a string ['correlation', 'partial_correlation'], optional + By default, only 'partial_correlation' data matrices will be fetched. 
+ If given as only 'correlation', data matrices of its type will be
+ fetched. If given as both ['correlation', 'partial_correlation'], then
+ data matrices of both types will be fetched.
+
resume: boolean, default is True
This parameter is required if a partly downloaded file is needed to be
resumed to download again.
@@ -1341,13 +1349,15 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30
Returns
-------
data: sklearn.datasets.base.Bunch
- Dictionary-like object, attributes are:
- 'Fullcorrelation': Full correlation matrices (Znet1)
- 'Partialcorrelation': Partial correlation matrices (Znet2)
+ Dictionary-like object, contains:
+ - an array of full correlation matrices
+ - an array of partial correlation matrices
+ - Data description
- Note: In the output names, 'eigen_regression' is fetched as 'ts3' and
- 'multiple_spatial_regression' as 'ts2', keeping the standard
- Megatrawls notations.
+ Note: output can be seen according to the user given inputs.
+ For example: If dimensionality=25 and timeseries='eigen_regression' and
+ matrices='partial_correlation', then output can be seen fetched as an array
+ matrix of size(25,25) assigned to name as "d25_eigen_regression_partial_correlation".

References
----------
@@ -1394,7 +1404,7 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30
Register and sign the Open Access Data Use Terms at ConnectomeDB:
https://db.humanconnectome.org/
"""
- url = "https://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz"
+ url = "http://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz"
opts = {'uncompress': True}

# dataset terms
@@ -1409,59 +1419,74 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30
assign_names = ['dimensionalities', 'timeseries_methods']
standard_variables = [dimensionalities, timeseries_methods]

- for name, check_in, assign, standard in zip(error_correcting_names, user_inputs,
- assign_names, standard_variables):
- if check_in is not None:
- if isinstance(check_in, list):
- for each_str in check_in:
- if each_str not in standard:
- raise ValueError(message % (
- name, check_in, str(standard)))
- if assign == 'dimensionalities':
- dimensionalities = check_in
- else:
- timeseries_methods = check_in
- elif not isinstance(check_in, list):
- if check_in not in standard:
+ for name, check_in, assign, standard in izip(error_correcting_names,
+ user_inputs, assign_names,
+ standard_variables
+ ):
+ if isinstance(check_in, list):
+ for each_str in check_in:
+ if each_str not in standard:
raise ValueError(message % (
name, check_in, str(standard)))
- if assign == 'dimensionalities':
- dimensionalities = [check_in]
+ dimensionalities = check_in
else:
- timeseries_methods = [check_in]
-
- n_combinations = len(dimensionalities) * len(timeseries_methods)
+ timeseries_methods = check_in
+ elif not isinstance(check_in, list):
+ if check_in not in standard:
+ raise ValueError(message % (
+ name, check_in, str(standard)))
+
+ if assign == 'dimensionalities':
+ dimensionalities = [check_in]
+ else:
+ timeseries_methods = [check_in]
+
+ output_matrices = ['correlation', 'partial_correlation']
+ if isinstance(matrices, list):
+ for each_type in matrices:
+ if each_type not in output_matrices:
+ raise ValueError(message % ('matrices', matrices,
+ output_matrices))
+ output_matrices = matrices
+ elif not isinstance(matrices, list):
+ if matrices not in output_matrices:
+ raise ValueError(message % ('matrices', matrices, output_matrices))
+ 
output_matrices = [matrices] + + n_combinations = len(dimensionalities) * len(timeseries_methods) * len(output_matrices) dataset_name = 'Megatrawls' - files_netmats1 = [] - files_netmats2 = [] + files_netmats = [] + names = [] for dim in dimensionalities: filename = os.path.join('3T_Q1-Q6related468_MSMsulc_d' + str(dim)) for timeseries in timeseries_methods: + dim_timeseries = os.path.join('d' + str(dim) + '_' + str(timeseries)) if timeseries == 'multiple_spatial_regression': timeseries = 'ts2' elif timeseries == 'eigen_regression': timeseries = 'ts3' - each_files_netmats1 = [(os.path.join( - filename + '_' + str(timeseries), 'Znet1.txt'), url, opts)] - each_files_netmats2 = [(os.path.join( - filename + '_' + str(timeseries), 'Znet2.txt'), url, opts)] - files_netmats1.append(each_files_netmats1) - files_netmats2.append(each_files_netmats2) + for matrices in output_matrices: + dim_timeseries_matrices = os.path.join(dim_timeseries + '_' + matrices) + if matrices == 'correlation': + matrices = 'Znet1.txt' + elif matrices == 'partial_correlation': + matrices = 'Znet2.txt' + each_files_netmats = [(os.path.join( + filename + '_' + str(timeseries), matrices), url, opts)] + files_netmats.append(each_files_netmats) + names.append(dim_timeseries_matrices) data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) description = _get_dataset_descr(dataset_name) - network_matrices1 = [] - network_matrices2 = [] + network_matrices = [] for n in range(n_combinations): - netmats1 = _fetch_files( - data_dir, files_netmats1[n], resume=resume, verbose=verbose) - network_matrices1.extend(netmats1) - netmats2 = _fetch_files( - data_dir, files_netmats2[n], resume=resume, verbose=verbose) - network_matrices2.extend(netmats2) - - return Bunch(Fullcorrelation=network_matrices1, - Partialcorrelation=network_matrices2, - description=description) + netmats = _fetch_files( + data_dir, files_netmats[n], resume=resume, verbose=verbose) + arr = csv_to_array(netmats[0]) + network_matrices.append(arr) + + params = dict([('description', description)] + list(zip(names, network_matrices))) + + return Bunch(**params) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index 3a5d82c088..835c86d50c 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -365,7 +365,7 @@ def test_fetch_mixed_gambles(): assert_equal(len(mgambles["zmaps"]), n_subjects) -def test_right_choices_dimensionality_timeseriesmethods(): +def test_wrong_inputs_of_dimensionality_timeseriesmethods_matrices(): message = ("The %s you have given '%s' is invalid. 
") for dim in ['a10', 'd15', 'd30']: assert_raises_regex(ValueError, @@ -378,32 +378,37 @@ def test_right_choices_dimensionality_timeseriesmethods(): message % ('timeseries', ts), func.fetch_megatrawls_netmats, timeseries=ts) + for matrices in ['partal_correlation', 'corelation']: + assert_raises_regex(ValueError, + message % ('matrices', matrices), + func.fetch_megatrawls_netmats, + matrices=matrices) @with_setup(setup_tmpdata, teardown_tmpdata) def test_fetch_megatrawls_netmats(): - correlations = func.fetch_megatrawls_netmats(data_dir=tmpdir) + correlations = func.fetch_megatrawls_netmats(data_dir=tmpdir, + dimensionality=[25, 100, 200, 300], + timeseries='eigen_regression', + matrices='partial_correlation') dataset_name_path = os.path.join(tmpdir, 'Megatrawls') - assert_equal(correlations.Fullcorrelation[0], os.path.join( - dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d25_ts3', 'Znet1.txt')) - - assert_equal(correlations.Fullcorrelation[4], os.path.join( - dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d300_ts3', 'Znet1.txt')) - - assert_equal(correlations.Partialcorrelation[2], os.path.join( - dataset_name_path, '3T_Q1-Q6related468_MSMsulc_d100_ts3', 'Znet2.txt')) - - # test if number of possible combinations of output are correct - timeseries = ['multiple_spatial_regression', 'eigen_regression'] - correlation_ = func.fetch_megatrawls_netmats(data_dir=tmpdir, dimensionality=[25, 200], - timeseries=timeseries) - - expected_n_combinations = len([25, 200]) * len(timeseries) - n_output_combinations_net1 = len(correlation_.Fullcorrelation) - n_output_combinations_net2 = len(correlation_.Partialcorrelation) - - assert_equal(expected_n_combinations, n_output_combinations_net1) - assert_equal(expected_n_combinations, n_output_combinations_net2) - - assert_not_equal(correlation_.description, '') + # test whether the shapes of each dimensionality are equal + # expected shapes + d25_expected_shape = (25, 25) + d100_expected_shape = (100, 100) + d200_expected_shape = (200, 200) + d300_expected_shape = (300, 300) + + # output shapes + d25_output_shape = correlations.d25_eigen_regression_partial_correlation.shape + d100_output_shape = correlations.d100_eigen_regression_partial_correlation.shape + d200_output_shape = correlations.d200_eigen_regression_partial_correlation.shape + d300_output_shape = correlations.d300_eigen_regression_partial_correlation.shape + + assert_equal(d25_expected_shape, d25_output_shape) + assert_equal(d100_expected_shape, d100_output_shape) + assert_equal(d200_expected_shape, d200_output_shape) + assert_equal(d300_expected_shape, d300_output_shape) + + assert_not_equal(correlations.description, '') From c123caefdb1f4f9dbdd82ebe3d4abb3e097a953f Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Fri, 13 Nov 2015 16:02:11 +0100 Subject: [PATCH 0008/1925] Minor changes --- nilearn/datasets/tests/test_func.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index 835c86d50c..bc116b136d 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -385,13 +385,12 @@ def test_wrong_inputs_of_dimensionality_timeseriesmethods_matrices(): matrices=matrices) -@with_setup(setup_tmpdata, teardown_tmpdata) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_megatrawls_netmats(): - correlations = func.fetch_megatrawls_netmats(data_dir=tmpdir, + correlations = func.fetch_megatrawls_netmats(data_dir=tst.tmpdir, dimensionality=[25, 100, 200, 300], 
timeseries='eigen_regression', matrices='partial_correlation') - dataset_name_path = os.path.join(tmpdir, 'Megatrawls') # test whether the shapes of each dimensionality are equal # expected shapes From 9b8e57e26e0255f255d290ca948a9fa3f6e0319c Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Sun, 22 Nov 2015 21:44:09 +0100 Subject: [PATCH 0009/1925] Addressed comments made by Alex --- .../plot_visualize_megatrawls_netmats.py | 28 ++- nilearn/datasets/func.py | 165 ++++++++---------- nilearn/datasets/tests/test_func.py | 71 ++++---- 3 files changed, 122 insertions(+), 142 deletions(-) diff --git a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py index 2bcdd72a88..9ba3fbde53 100644 --- a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py +++ b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py @@ -22,27 +22,23 @@ def plot_mats(netmats, title): plt.title(title) # Fetches the network matrices dimensionalities d=100 and d=300 for -# timeseries method ts3 +# timeseries method multiple regression and eigen regression print(" -- Fetching Network Matrices -- ") -netmats = datasets.fetch_megatrawls_netmats(dimensionality=[100, 300], - timeseries='eigen_regression', +netmats = datasets.fetch_megatrawls_netmats(dimensionality=[300, 100], + timeseries=['multiple_spatial_regression', 'eigen_regression'], matrices=['correlation', 'partial_correlation']) -correlation_matrices_100 = netmats.d100_eigen_regression_correlation -correlation_matrices_300 = netmats.d300_eigen_regression_correlation - -partial_correlation_matrices_100 = netmats.d100_eigen_regression_partial_correlation -partial_correlation_matrices_300 = netmats.d300_eigen_regression_partial_correlation # Visualization print(" -- Plotting correlation matrices -- ") -correlation_matrices = { - 'Correlation matrices of dimensionality d=100': correlation_matrices_100, - 'Correlation matrices of dimensionality d=300': correlation_matrices_300, - 'Partial correlation matrices of dimensionality d=100': partial_correlation_matrices_100, - 'Partial correlation matrices of dimensionality d=300': partial_correlation_matrices_300 - } - -for title, matrices in sorted(correlation_matrices.items()): +for matrices, dim, tseries in zip( + netmats.correlation, netmats.dimensions_correlation, netmats.timeseries_correlation): + title = ('Correlation matrices of d=%d & timeseries=%s' % (dim, tseries)) + plot_mats(matrices, title) + +print(" -- Plotting partial correlation matrices -- ") +for matrices, dim, tseries in zip( + netmats.partial_correlation, netmats.dimensions_partial, netmats.timeseries_partial): + title = ('Partial correlation matrices of d=%d & timeseries=%s' % (dim, tseries)) plot_mats(matrices, title) plt.show() diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 402f899594..591934e8f3 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -4,6 +4,7 @@ import warnings import os import re +import itertools import numpy as np import nibabel from sklearn.datasets.base import Bunch @@ -1283,7 +1284,8 @@ def fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True, def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 300], - timeseries='eigen_regression', matrices='partial_correlation', + timeseries=['multiple_spatial_regression', 'eigen_regression'], + matrices=['correlation', 'partial_correlation'], resume=True, verbose=1): """Downloads and 
fetches Network Matrices data from MegaTrawls release in HCP.

@@ -1304,7 +1306,7 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30
Path of the data directory. Used to force data storage in a specified
location.

- dimensionality: integer or list of integers, optional
+ dimensionality: list of integers, optional
Possible options are [25, 50, 100, 200, 300]
25 - Group ICA brain parcellations with dimensionality = 25
50 - Group ICA brain parcellations with dimensionality = 50
@@ -1313,15 +1315,15 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30
300 - Group ICA brain parcellations with dimensionality = 300

By default, network matrices data estimated from brain parcellations
- of all dimensionalities are fetched as a separate list.
+ of all dimensionalities are fetched as separate arrays.

- If given an integer or list of specific integers, network matrices
+ If given as list of specific integers, network matrices
related to that particular dimensionality of brain parcellations
- are fetched. For example, if given as an integer 25 only data
- corresponding to dimensionality=25 will be fetched.
+ are fetched. For example, if given as [25, 50] only data
+ corresponding to dimensionality 25 and 50 will be fetched.

- timeseries: a string or list ['multiple_spatial_regression', 'eigen_regression'] \
- default is 'eigen_regression', optional
+ timeseries: string list ['multiple_spatial_regression', 'eigen_regression'] \
+ default is both, optional
'multiple_spatial_regression' - denotes the averaged timeseries signals
which were extracted using multiple spatial regression, in which full set
of ICA maps were used as spatial regressors against the subjects datasets
@@ -1331,11 +1333,10 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30
specific timeseries signals are extracted using SVD. The first
eigen timeseries of each subject is used rather than simple averaging. [4] [5]

- matrices: a string ['correlation', 'partial_correlation'], optional
- By default, only 'partial_correlation' data matrices will be fetched.
- If given as only 'correlation', data matrices of its type will be
- fetched. If given as both ['correlation', 'partial_correlation'], then
- data matrices of both types will be fetched.
+ matrices: string list ['correlation', 'partial_correlation'], optional
+ By default, both data matrices will be fetched.
+ If given as only ['correlation'], data matrices of its type will be
+ fetched.

resume: boolean, default is True
This parameter is required if a partly downloaded file is needed to be
@@ -1350,15 +1351,18 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30
-------
data: sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- - an array of full correlation matrices
- - an array of partial correlation matrices
+ - correlation: arrays of correlation matrices (Znet1).
+ - partial_correlation: arrays of partial correlation matrices (Znet2).
+ - dimensions_correlation: array of dimensionalities in numbers which
+ were given to fetch the correlation matrices.
+ - dimensions_partial: array of dimensionalities in numbers which were
+ given to fetch the partial correlation matrices.
+ - timeseries_correlation: array of timeseries methods given to fetch its
+ corresponding matrices.
+ - timeseries_partial: array of timeseries methods given to fetch its
+ corresponding matrices.
- Data description

- Note: output can be seen according to the user given inputs. 
- For example: If dimensionality=25 and timeseries='eigen_regression' and - matrices='partial_correlation', then output can be seen fetched as an array - matrix of size(25,25) assigned to name as "d25_eigen_regression_partial_correlation". - References ---------- For more details: @@ -1410,83 +1414,66 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30 # dataset terms dimensionalities = [25, 50, 100, 200, 300] timeseries_methods = ['multiple_spatial_regression', 'eigen_regression'] + output_matrices = ['correlation', 'partial_correlation'] - message = ("The %s you have given '%s' is invalid. Please choose either " - "of them %s or list of specific choices.") + message = ("Invalid {0} name: {1}. " + "Please choose either of them:{2}") - error_correcting_names = ['dimensionality', 'timeseries'] - user_inputs = [dimensionality, timeseries] - assign_names = ['dimensionalities', 'timeseries_methods'] - standard_variables = [dimensionalities, timeseries_methods] + inputs = [dimensionality, timeseries, matrices] + standards = [dimensionalities, timeseries_methods, output_matrices] + error_names = ['dimensionality', 'timeseries', 'matrices'] - for name, check_in, assign, standard in izip(error_correcting_names, - user_inputs, assign_names, - standard_variables - ): - if isinstance(check_in, list): - for each_str in check_in: + for each_input, standard, name in izip(inputs, standards, error_names): + if isinstance(each_input, list): + for each_str in each_input: if each_str not in standard: - raise ValueError(message % ( - name, check_in, str(standard))) - if assign == 'dimensionalities': - dimensionalities = check_in - else: - timeseries_methods = check_in - elif not isinstance(check_in, list): - if check_in not in standard: - raise ValueError(message % ( - name, check_in, str(standard))) - - if assign == 'dimensionalities': - dimensionalities = [check_in] - else: - timeseries_methods = [check_in] + raise ValueError(message.format(name, each_str, str(standard))) + elif not isinstance(each_input, list): + raise TypeError("If %s is given as single element, it should be " + "like a list as ['%s']" % (name, each_input)) - output_matrices = ['correlation', 'partial_correlation'] - if isinstance(matrices, list): - for each_type in matrices: - if each_type not in output_matrices: - raise ValueError(message % ('matrices', matrices, - output_matrices)) - output_matrices = matrices - elif not isinstance(matrices, list): - if matrices not in output_matrices: - raise ValueError(message % ('matrices', matrices, output_matrices)) - output_matrices = [matrices] - - n_combinations = len(dimensionalities) * len(timeseries_methods) * len(output_matrices) - dataset_name = 'Megatrawls' - files_netmats = [] - names = [] - for dim in dimensionalities: - filename = os.path.join('3T_Q1-Q6related468_MSMsulc_d' + str(dim)) - for timeseries in timeseries_methods: - dim_timeseries = os.path.join('d' + str(dim) + '_' + str(timeseries)) - if timeseries == 'multiple_spatial_regression': - timeseries = 'ts2' - elif timeseries == 'eigen_regression': - timeseries = 'ts3' - for matrices in output_matrices: - dim_timeseries_matrices = os.path.join(dim_timeseries + '_' + matrices) - if matrices == 'correlation': - matrices = 'Znet1.txt' - elif matrices == 'partial_correlation': - matrices = 'Znet2.txt' - each_files_netmats = [(os.path.join( - filename + '_' + str(timeseries), matrices), url, opts)] - files_netmats.append(each_files_netmats) - names.append(dim_timeseries_matrices) + url = 
"http://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz" + opts = {'uncompress': True} + dataset_name = 'Megatrawls' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) description = _get_dataset_descr(dataset_name) - network_matrices = [] - for n in range(n_combinations): - netmats = _fetch_files( - data_dir, files_netmats[n], resume=resume, verbose=verbose) - arr = csv_to_array(netmats[0]) - network_matrices.append(arr) + # Generate all combinations + dims, tseries, mats = list(zip(*list(itertools.product(dimensionality, timeseries, matrices)))) + files = [] + ids_correlation = [] + ids_partial = [] + timeseries_map = dict(multiple_spatial_regression='ts2', eigen_regression='ts3') + matrices_map = dict(correlation='Znet1.txt', partial_correlation='Znet2.txt') + for index, (dim, tserie, mat) in enumerate(zip(dims, tseries, mats)): + if mat == 'correlation': + ids_correlation.append(index) + elif mat == 'partial_correlation': + ids_partial.append(index) + filepath = os.path.join( + '3T_Q1-Q6related468_MSMsulc_d%d_%s' % (dim, timeseries_map[tserie]), matrices_map[mat]) + files.append((filepath, url, opts)) + + # Fetch all the files + files = _fetch_files(data_dir, files, resume=resume, verbose=verbose) - params = dict([('description', description)] + list(zip(names, network_matrices))) + # Load the files into arrays + correlation = [csv_to_array(files[id_c]) for id_c in ids_correlation] + partial = [csv_to_array(files[id_p]) for id_p in ids_partial] + # Taking the account of all the given dimensions & timeseries + # methods to give the end users to identify themselves about the + # matrices which are fetched. + dimensions_correlation = [dims[id_c] for id_c in ids_correlation] + dimensions_partial = [dims[id_p] for id_p in ids_partial] + timeseries_correlation = [tseries[id_c] for id_c in ids_correlation] + timeseries_partial = [tseries[id_p] for id_p in ids_partial] - return Bunch(**params) + return Bunch( + correlation=correlation, + partial_correlation=partial, + dimensions_correlation=dimensions_correlation, + dimensions_partial=dimensions_partial, + timeseries_correlation=timeseries_correlation, + timeseries_partial=timeseries_partial, + description=description) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index bc116b136d..498db6061c 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -366,48 +366,45 @@ def test_fetch_mixed_gambles(): def test_wrong_inputs_of_dimensionality_timeseriesmethods_matrices(): - message = ("The %s you have given '%s' is invalid. 
") - for dim in ['a10', 'd15', 'd30']: - assert_raises_regex(ValueError, - message % ('dimensionality', dim), - func.fetch_megatrawls_netmats, - dimensionality=dim) - - for ts in ['ts4', 'st2', 'eigen_regresion', 'mutiple_sptial_regression']: - assert_raises_regex(ValueError, - message % ('timeseries', ts), - func.fetch_megatrawls_netmats, - timeseries=ts) - for matrices in ['partal_correlation', 'corelation']: - assert_raises_regex(ValueError, - message % ('matrices', matrices), - func.fetch_megatrawls_netmats, - matrices=matrices) + + message = "Invalid %s name" + + dimensionality = ['a10', 'd15', 'd30'] + assert_raises_regex(ValueError, + message % 'dimensionality', + func.fetch_megatrawls_netmats, + dimensionality=dimensionality) + + timeseries = ['ts4', 'st2', 'eigen_regresion', 'multiple_sptial_regresion'] + assert_raises_regex(ValueError, + message % 'timeseries', + func.fetch_megatrawls_netmats, + timeseries=timeseries) + + matrices = ['partal_corelation', 'sorelation'] + assert_raises_regex(ValueError, + message % 'matrices', + func.fetch_megatrawls_netmats, + matrices=matrices) + + assert_raises(TypeError, func.fetch_megatrawls_netmats, + matrices='partial_correlation') @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_megatrawls_netmats(): + for dim in [25, 100, 200, 300]: + files_dir = os.path.join(tst.tmpdir, 'Megatrawls', '3T_Q1-Q6related468_MSMsulc_d%d_ts3' % dim) + os.makedirs(files_dir) + dummy = open(os.path.join(files_dir, 'Znet2.txt'), 'w') + dummy.write("1") + dummy.close() + correlations = func.fetch_megatrawls_netmats(data_dir=tst.tmpdir, dimensionality=[25, 100, 200, 300], - timeseries='eigen_regression', - matrices='partial_correlation') - - # test whether the shapes of each dimensionality are equal - # expected shapes - d25_expected_shape = (25, 25) - d100_expected_shape = (100, 100) - d200_expected_shape = (200, 200) - d300_expected_shape = (300, 300) - - # output shapes - d25_output_shape = correlations.d25_eigen_regression_partial_correlation.shape - d100_output_shape = correlations.d100_eigen_regression_partial_correlation.shape - d200_output_shape = correlations.d200_eigen_regression_partial_correlation.shape - d300_output_shape = correlations.d300_eigen_regression_partial_correlation.shape - - assert_equal(d25_expected_shape, d25_output_shape) - assert_equal(d100_expected_shape, d100_output_shape) - assert_equal(d200_expected_shape, d200_output_shape) - assert_equal(d300_expected_shape, d300_output_shape) + timeseries=['eigen_regression'], + matrices=['partial_correlation']) + + assert_true(correlations.partial_correlation[0], np.ndarray) assert_not_equal(correlations.description, '') From f0ffd60e5624805a2b55549bb8d2906f746255a3 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Tue, 24 Nov 2015 11:31:30 +0100 Subject: [PATCH 0010/1925] corrected doubling the url --- nilearn/datasets/func.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 591934e8f3..832fb34291 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1432,9 +1432,6 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30 raise TypeError("If %s is given as single element, it should be " "like a list as ['%s']" % (name, each_input)) - url = "http://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz" - opts = {'uncompress': True} - dataset_name = 'Megatrawls' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) description = 
_get_dataset_descr(dataset_name)

From e5a9a96d652091252ac17ecd15688a1dd22768c7 Mon Sep 17 00:00:00 2001
From: KamalakerDadi
Date: Thu, 26 Nov 2015 22:39:45 +0100
Subject: [PATCH 0011/1925] Addressed Loic comments, nitpicks in documentation,
 tests

---
 nilearn/datasets/func.py | 70 ++++++++++++++---------------
 nilearn/datasets/tests/test_func.py | 71 ++++++----
 2 files changed, 59 insertions(+), 52 deletions(-)

diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py
index 832fb34291..6520ac3834 100644
--- a/nilearn/datasets/func.py
+++ b/nilearn/datasets/func.py
@@ -1306,37 +1306,35 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30
Path of the data directory. Used to force data storage in a specified
location.

- dimensionality: list of integers, optional
- Possible options are [25, 50, 100, 200, 300]
- 25 - Group ICA brain parcellations with dimensionality = 25
- 50 - Group ICA brain parcellations with dimensionality = 50
- 100 - Group ICA brain parcellations with dimensionality = 100
- 200 - Group ICA brain parcellations with dimensionality = 200
- 300 - Group ICA brain parcellations with dimensionality = 300
-
+ dimensionality: integer or integers as list, optional
+ Valid dimensions in integers are [25, 50, 100, 200, 300].
By default, network matrices data estimated from brain parcellations
- of all dimensionalities are fetched as separate arrays.
-
- If given as list of specific integers, network matrices
- related to that particular dimensionality of brain parcellations
- are fetched. For example, if given as [25, 50] only data
- corresponding to dimensionality 25 and 50 will be fetched.
-
- timeseries: string list ['multiple_spatial_regression', 'eigen_regression'] \
- default is both, optional
- 'multiple_spatial_regression' - denotes the averaged timeseries signals
- which were extracted using multiple spatial regression, in which full set
- of ICA maps were used as spatial regressors against the subjects datasets
- resulting in subject specific timeseries signals. [3]
- 'eigen_regression' - denotes the principal eigen timeseries signals which were
- extracted using multiple spatial regression, but the subject
- specific timeseries signals are extracted using SVD. The first
- eigen timeseries of each subject is used rather than simple averaging. [4] [5]
+ of all dimensionalities are returned each in a separate dimensional
+ array (n, n).
+ If set to specific dimension, then network matrices related to
+ particular dimension of brain parcellations will be returned. For example,
+ if set as [25, 50] only data corresponding to dimensionality 25 and 50
+ of array (25, 25) and (50, 50) will be fetched.
+
+ timeseries: string or strings as list, optional
+ Valid methods in strings are ['multiple_spatial_regression', 'eigen_regression']
+ By default, network matrices of both types of timeseries signal extraction
+ methods will be returned. Each method has its own matrix array.
+ If set to ['multiple_spatial_regression'], then correlation matrices
+ estimated using spatial regressor based extraction of subject specific
+ timeseries signals will be returned.
+ If set to ['eigen_regression'], then correlation matrices estimated using
+ first principal eigen component based extraction of subject specific
+ timeseries signals will be returned. 
+ For full technical details about each method. Refer to [3] [4] [5] + + matrices: string or strings as list, optional + Valid output matrices in strings are ['correlation', 'partial_correlation'] + By default, matrices of both types will be returned. + If set as only ['correlation'], matrices of only full correlation + will be returned. + If set as ['partial_correlation'], only partial correlation matrices + will be fetched. resume: boolean, default is True This parameter is required if a partly downloaded file is needed to be @@ -1416,21 +1414,21 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30 timeseries_methods = ['multiple_spatial_regression', 'eigen_regression'] output_matrices = ['correlation', 'partial_correlation'] - message = ("Invalid {0} name: {1}. " - "Please choose either of them:{2}") + message = ("Invalid {0} name is given: {1}. " + "Please choose either of them {2}") inputs = [dimensionality, timeseries, matrices] standards = [dimensionalities, timeseries_methods, output_matrices] error_names = ['dimensionality', 'timeseries', 'matrices'] for each_input, standard, name in izip(inputs, standards, error_names): - if isinstance(each_input, list): + if not isinstance(each_input, list): + raise TypeError("Input given for {0} should be in list. " + "You have given as single variable: {1}".format(name, each_input)) + elif isinstance(each_input, list): for each_str in each_input: if each_str not in standard: raise ValueError(message.format(name, each_str, str(standard))) - elif not isinstance(each_input, list): - raise TypeError("If %s is given as single element, it should be " - "like a list as ['%s']" % (name, each_input)) dataset_name = 'Megatrawls' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index 498db6061c..447d1f5ccd 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -367,44 +367,53 @@ def test_fetch_mixed_gambles(): def test_wrong_inputs_of_dimensionality_timeseriesmethods_matrices(): - message = "Invalid %s name" + message = "Invalid {0} name is given: {1}" - dimensionality = ['a10', 'd15', 'd30'] + invalid_inputs_dimensionality = [1, 5, 30] assert_raises_regex(ValueError, - message % 'dimensionality', + message.format('dimensionality', invalid_inputs_dimensionality), func.fetch_megatrawls_netmats, - dimensionality=dimensionality) + dimensionality=invalid_inputs_dimensionality) - timeseries = ['ts4', 'st2', 'eigen_regresion', 'multiple_sptial_regresion'] + invalid_inputs_timeseries = ['asdf', 'time', 'st2'] assert_raises_regex(ValueError, - message % 'timeseries', + message.format('timeseries', invalid_inputs_timeseries), func.fetch_megatrawls_netmats, - timeseries=timeseries) + timeseries=invalid_inputs_timeseries) - matrices = ['partal_corelation', 'sorelation'] + invalid_outputs = ['net1', 'net2'] assert_raises_regex(ValueError, - message % 'matrices', + message.format('matrices', invalid_outputs), + func.fetch_megatrawls_netmats, + matrices=invalid_outputs) + + # giving a valid input as a single element but not as a list to test + # if it raises same error message + matrices = 'correlation' + assert_raises_regex(TypeError, + "Input given for {0} should be in list. 
" + "You have given as single variable: {1}".format('matrices', matrices), func.fetch_megatrawls_netmats, matrices=matrices) - assert_raises(TypeError, func.fetch_megatrawls_netmats, - matrices='partial_correlation') - @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_megatrawls_netmats(): + # smoke test to see that files are fetched and read properly + # since we are loading information present in it and returning + # the same for dim in [25, 100, 200, 300]: files_dir = os.path.join(tst.tmpdir, 'Megatrawls', '3T_Q1-Q6related468_MSMsulc_d%d_ts3' % dim) os.makedirs(files_dir) - dummy = open(os.path.join(files_dir, 'Znet2.txt'), 'w') - dummy.write("1") - dummy.close() + with open(os.path.join(files_dir, 'Znet2.txt'), 'w') as net_file: + net_file.write("1") correlations = func.fetch_megatrawls_netmats(data_dir=tst.tmpdir, dimensionality=[25, 100, 200, 300], timeseries=['eigen_regression'], matrices=['partial_correlation']) - assert_true(correlations.partial_correlation[0], np.ndarray) + # expected number of returns sitting in ouput name correlations should be equal + assert_equal(len(correlations), 7) assert_not_equal(correlations.description, '') From 00a5016613fc762d7201785b52abc3876834e9fb Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Sun, 29 Nov 2015 11:10:18 +0100 Subject: [PATCH 0012/1925] Fixed description --- nilearn/datasets/func.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 6520ac3834..2827938348 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1352,14 +1352,12 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30 - correlation: arrays of correlation matrices (Znet1). - partial_correlation: arrays of partial correlation matrices (Znet2). - dimensions_correlation: array of dimensionalities in numbers which - were given to fetch the correlation matrices. + were given as inputs. - dimensions_partial: array of dimensionalities in numbers which were - given to fetch the partial correlation matrices. - - timeseries_correlation: array of timeseries methods given to fetch its - corresponding matrices. - - timeseries_partial: array of timeseries methods given to fetch its - corresponding matrices. - - Data description + given as inputs. + - timeseries_correlation: array of timeseries method given as inputs. + - timeseries_partial: array of timeseries method given as inputs. 
+ - description: Data description References ---------- From bff1ad7a70a1115ac360c294f00762cff3b7ecb0 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Mon, 14 Dec 2015 20:57:28 +0100 Subject: [PATCH 0013/1925] Changed example layout, Loic's comments, changed function layout to param_validation --- .../plot_visualize_megatrawls_netmats.py | 59 +++++++++------- nilearn/_utils/param_validation.py | 30 +++++++- nilearn/datasets/func.py | 68 +++++++++---------- nilearn/datasets/tests/test_func.py | 60 ++++++---------- nilearn/tests/test_param_validation.py | 43 +++++++++++- 5 files changed, 157 insertions(+), 103 deletions(-) diff --git a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py index 9ba3fbde53..fdff8d31d1 100644 --- a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py +++ b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py @@ -2,43 +2,50 @@ Visualizing Megatrawls Network Matrices from Human Connectome Project ===================================================================== -This example shows how to visualize network matrices fetched from -HCP beta-release of the Functional Connectivity Megatrawl +This example shows how to fetch network matrices data from HCP beta-release +of the Functional Connectivity Megatrawl project. See :func:`nilearn.datasets.fetch_megatrawls_netmats` documentation for more details. """ -import matplotlib.pyplot as plt -import numpy as np - -from nilearn import datasets -from nilearn import plotting -def plot_mats(netmats, title): +def plot_matrix(matrix, title): plt.figure() - plt.imshow(netmats, interpolation="nearest", - cmap=plotting.cm.bwr) + plt.imshow(matrix, interpolation="nearest", cmap=plotting.cm.bwr) plt.colorbar() plt.title(title) -# Fetches the network matrices dimensionalities d=100 and d=300 for -# timeseries method multiple regression and eigen regression -print(" -- Fetching Network Matrices -- ") +################################################################################ +# Fetch the network matrices data of dimensionalities d=100 and d=300 for +# timeseries method 'eigen regression' by importing datasets module +from nilearn import datasets + netmats = datasets.fetch_megatrawls_netmats(dimensionality=[300, 100], - timeseries=['multiple_spatial_regression', 'eigen_regression'], - matrices=['correlation', 'partial_correlation']) + timeseries=['eigen_regression'], + matrices=['partial_correlation']) + +# Output matrices are returned according to the sequence of the given inputs. 
+# Partial correlation matrix arrays: array 1 has matrix with dimensionality=300
+# and array 2 has matrix with dimensionality=100
correlation_matrices = netmats.partial_correlation
+
+# Array of given dimensions
+dimensions_partial = netmats.dimensions_partial
+
+# Array of timeseries method repeated for total number of given dimensions
+timeseries_partial = netmats.timeseries_partial
+
+
+################################################################################
# Visualization
-print(" -- Plotting correlation matrices -- ")
-for matrices, dim, tseries in zip(
- netmats.correlation, netmats.dimensions_correlation, netmats.timeseries_correlation):
- title = ('Correlation matrices of d=%d & timeseries=%s' % (dim, tseries))
- plot_mats(matrices, title)
-
-print(" -- Plotting partial correlation matrices -- ")
-for matrices, dim, tseries in zip(
- netmats.partial_correlation, netmats.dimensions_partial, netmats.timeseries_partial):
- title = ('Partial correlation matrices of d=%d & timeseries=%s' % (dim, tseries))
+# Importing matplotlib and nilearn plotting modules to use their utilities for
+# plotting correlation matrices
import matplotlib.pyplot as plt
from nilearn import plotting
+
+for matrix, dim, tserie in zip(correlation_matrices, dimensions_partial,
 timeseries_partial):
+ title = 'Partial correlation matrices of d=%d & timeseries=%s' % (dim, tserie)
+ plot_matrix(matrix, title)
plt.show()
diff --git a/nilearn/_utils/param_validation.py b/nilearn/_utils/param_validation.py
index 48709567ce..dade1da5c7 100644
--- a/nilearn/_utils/param_validation.py
+++ b/nilearn/_utils/param_validation.py
@@ -5,7 +5,7 @@
import numbers
import warnings

-from .compat import _basestring
+from .compat import _basestring, izip


def check_threshold(threshold, data, percentile_func, name='threshold'):
@@ -63,3 +63,31 @@ def check_threshold(threshold, data, percentile_func, name='threshold'):
raise TypeError('%s should be either a number '
'or a string finishing with a percent sign' % (name, ))
return threshold
+
+
+def check_parameters_megatrawls_datasets(inputs, standards, name):
+ """ Checks given inputs against standards for megatrawls datasets parameters.
+
+ If the parameters are valid, they are returned as given; otherwise
+ an error is raised.
+
+ Parameters
+ ----------
+ inputs: list of str
+ List to be checked.
+ standards: list of str
+ The given inputs will be checked against these given standards.
+ name: str
+ Used for precise naming in the error message.
+ """
+ message = ("Invalid {0} name is given: {1}. "
+ "Please choose either of them {2}")
+
+ if isinstance(inputs, _basestring):
+ raise TypeError("Input given for {0} should be in list. 
" + "You have given as single variable: {1}".format(name, inputs)) + else: + for each_input in inputs: + if each_input not in standards: + raise ValueError(message.format(name, each_input, str(standards))) + return inputs diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 2827938348..96621d547c 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -12,8 +12,9 @@ from .utils import (_get_dataset_dir, _fetch_files, _get_dataset_descr, _read_md5_sum_file, _tree, _filter_columns) -from .._utils.compat import BytesIO, _basestring, _urllib, izip +from .._utils.compat import BytesIO, _basestring, _urllib from .._utils.numpy_conversions import csv_to_array +from .._utils.param_validation import check_parameters_megatrawls_datasets def fetch_haxby_simple(data_dir=None, url=None, resume=True, verbose=1): @@ -1283,10 +1284,8 @@ def fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True, return data -def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 300], - timeseries=['multiple_spatial_regression', 'eigen_regression'], - matrices=['correlation', 'partial_correlation'], - resume=True, verbose=1): +def fetch_megatrawls_netmats(data_dir=None, dimensionality=None, timeseries=None, + matrices=None, resume=True, verbose=1): """Downloads and fetches Network Matrices data from MegaTrawls release in HCP. This data can be used to predict relationships between imaging data (functional @@ -1298,26 +1297,24 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30 denoted as 'Znet1' or partial correlation with limited L2 regularisation denoted as 'Znet2'. [1] - .. versionadded:: 0.1.5 + .. versionadded:: 0.2.2 Parameters ---------- - data_dir: string, default is None, optional + data_dir: str, default is None, optional Path of the data directory. Used to force data storage in a specified location. - dimensionality: integer or integers as list, optional - Valid dimensions in integers are [25, 50, 100, 200, 300]. + dimensionality: list of int in [25, 50, 100, 200, 300], optional By default, network matrices data estimated from brain parcellations of all dimensionalities are returned each in a separate dimensional array (n, n). If set to specific dimension, then network matrices related to particular dimension of brain parcellations will be returned. For example, if set as [25, 50] only data corresponding to dimensionality 25 and 50 - of array (25, 25) and (50, 50) will be fetched. + of array (25, 25) and (50, 50) in size will be returned. - timeseries: string or strings as list, optional - Valid methods in strings are ['multiple_spatial_regression', 'eigen_regression'] + timeseries: list of str in ['multiple_spatial_regression', 'eigen_regression'], optional By default, network matrices of both types of timeseries signal extraction methods will be returned. Each method has its own matrix array. If set to ['multiple_spatial_regression'], then correlation matrices @@ -1328,16 +1325,15 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30 timeseries signals will be returned. For full technical details about each method. Refer to [3] [4] [5] - matrices: string or strings as list, optional - Valid output matrices in strings are ['correlation', 'partial_correlation'] + matrices: list of str in ['correlation', 'partial_correlation'], optional By default, matrices of both types will be returned. If set as only ['correlation'], matrices of only full correlation will be returned. 
If set as ['partial_correlation'], only partial correlation matrices will be fetched. - resume: boolean, default is True - This parameter is required if a partly downloaded file is needed to be + resume: bool, default is True + This parameter is required if a partially downloaded file is needed to be resumed to download again. verbose: int, default is 1 @@ -1357,7 +1353,7 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30 given as inputs. - timeseries_correlation: array of timeseries method given as inputs. - timeseries_partial: array of timeseries method given as inputs. - - description: Data description + - description: data description References ---------- @@ -1407,26 +1403,28 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=[25, 50, 100, 200, 30 url = "http://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz" opts = {'uncompress': True} - # dataset terms + # standard dataset terms dimensionalities = [25, 50, 100, 200, 300] timeseries_methods = ['multiple_spatial_regression', 'eigen_regression'] - output_matrices = ['correlation', 'partial_correlation'] - - message = ("Invalid {0} name is given: {1}. " - "Please choose either of them {2}") - - inputs = [dimensionality, timeseries, matrices] - standards = [dimensionalities, timeseries_methods, output_matrices] - error_names = ['dimensionality', 'timeseries', 'matrices'] - - for each_input, standard, name in izip(inputs, standards, error_names): - if not isinstance(each_input, list): - raise TypeError("Input given for {0} should be in list. " - "You have given as single variable: {1}".format(name, each_input)) - elif isinstance(each_input, list): - for each_str in each_input: - if each_str not in standard: - raise ValueError(message.format(name, each_str, str(standard))) + output_matrices_names = ['correlation', 'partial_correlation'] + + if dimensionality is not None: + dimensionality = check_parameters_megatrawls_datasets( + dimensionality, dimensionalities, 'dimensionality') + else: + dimensionality = dimensionalities + + if timeseries is not None: + timeseries = check_parameters_megatrawls_datasets( + timeseries, timeseries_methods, 'timeseries') + else: + timeseries = timeseries_methods + + if matrices is not None: + matrices = check_parameters_megatrawls_datasets( + matrices, output_matrices_names, 'matrices') + else: + matrices = output_matrices_names dataset_name = 'Megatrawls' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index 447d1f5ccd..41bab06fa4 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -365,55 +365,35 @@ def test_fetch_mixed_gambles(): assert_equal(len(mgambles["zmaps"]), n_subjects) -def test_wrong_inputs_of_dimensionality_timeseriesmethods_matrices(): - - message = "Invalid {0} name is given: {1}" - - invalid_inputs_dimensionality = [1, 5, 30] - assert_raises_regex(ValueError, - message.format('dimensionality', invalid_inputs_dimensionality), - func.fetch_megatrawls_netmats, - dimensionality=invalid_inputs_dimensionality) - - invalid_inputs_timeseries = ['asdf', 'time', 'st2'] - assert_raises_regex(ValueError, - message.format('timeseries', invalid_inputs_timeseries), - func.fetch_megatrawls_netmats, - timeseries=invalid_inputs_timeseries) - - invalid_outputs = ['net1', 'net2'] - assert_raises_regex(ValueError, - message.format('matrices', invalid_outputs), - func.fetch_megatrawls_netmats, - 
matrices=invalid_outputs) - - # giving a valid input as a single element but not as a list to test - # if it raises same error message - matrices = 'correlation' - assert_raises_regex(TypeError, - "Input given for {0} should be in list. " - "You have given as single variable: {1}".format('matrices', matrices), - func.fetch_megatrawls_netmats, - matrices=matrices) - - @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_megatrawls_netmats(): # smoke test to see that files are fetched and read properly # since we are loading information present in it and returning # the same - for dim in [25, 100, 200, 300]: + dimensionality = [25, 100, 200, 300] + for dim in dimensionality: files_dir = os.path.join(tst.tmpdir, 'Megatrawls', '3T_Q1-Q6related468_MSMsulc_d%d_ts3' % dim) os.makedirs(files_dir) with open(os.path.join(files_dir, 'Znet2.txt'), 'w') as net_file: net_file.write("1") - correlations = func.fetch_megatrawls_netmats(data_dir=tst.tmpdir, - dimensionality=[25, 100, 200, 300], - timeseries=['eigen_regression'], - matrices=['partial_correlation']) + timeseries = ['eigen_regression'] + megatrawl_netmats_data = func.fetch_megatrawls_netmats( + data_dir=tst.tmpdir, dimensionality=dimensionality, + timeseries=timeseries, matrices=['partial_correlation']) + + # expected number of returns sitting in output name correlations should be equal + assert_equal(len(megatrawl_netmats_data), 7) + + # dimensions given to fetch partial correlation should be same meaning we + # check if same array is returned as given + assert_equal(megatrawl_netmats_data.dimensions_partial, dimensionality) - # expected number of returns sitting in ouput name correlations should be equal - assert_equal(len(correlations), 7) + # same timeseries method is repeated for each dimension meaning we check + # multiplying by 4 since given dimensionalities are of 4 types + assert_equal(megatrawl_netmats_data.timeseries_partial, timeseries * 4) - assert_not_equal(correlations.description, '') + # check length of output matrices should be equal + assert_equal(len(megatrawl_netmats_data.partial_correlation), 4) + # check if description is not empty + assert_not_equal(megatrawl_netmats_data.description, '') diff --git a/nilearn/tests/test_param_validation.py b/nilearn/tests/test_param_validation.py index f44d6a8b07..24554b15db 100644 --- a/nilearn/tests/test_param_validation.py +++ b/nilearn/tests/test_param_validation.py @@ -10,7 +10,8 @@ from nilearn._utils.testing import assert_raises_regex, assert_warns from nilearn._utils.extmath import fast_abs_percentile -from nilearn._utils.param_validation import check_threshold +from nilearn._utils.param_validation import (check_threshold, + check_parameters_megatrawls_datasets) def test_check_threshold(): @@ -55,3 +56,43 @@ def test_check_threshold(): assert_true(1. < check_threshold("50%", matrix, percentile_func=fast_abs_percentile, name=name) <= 2.) 
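(Illustrative aside, not part of the patch: a sketch of the contract the new test below exercises, assuming the check_parameters_megatrawls_datasets import added in this diff.)

from nilearn._utils.param_validation import check_parameters_megatrawls_datasets

# Valid inputs pass through unchanged...
dims = check_parameters_megatrawls_datasets(
    [25, 50], [25, 50, 100, 200, 300], 'dimensionality')

# ...while a bare string (not wrapped in a list) raises TypeError, and an
# element outside the standards raises ValueError.
try:
    check_parameters_megatrawls_datasets(
        'correlation', ['correlation', 'partial_correlation'], 'matrices')
except TypeError:
    pass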
+
+
+def test_check_parameters_megatrawls_datasets():
+    # check that the function raises the same error messages as the main
+    # fetcher when wrong input parameters are given
+    # parameters are dimensionality, timeseries, matrices
+    message = "Invalid {0} name is given: {1}"
+
+    invalid_inputs_dimensionality = [1, 5, 30]
+    valid_inputs_dimensionality = [25, 50, 100, 200, 300]
+    assert_raises_regex(ValueError,
+                        message.format('dimensionality', invalid_inputs_dimensionality),
+                        check_parameters_megatrawls_datasets,
+                        invalid_inputs_dimensionality, valid_inputs_dimensionality,
+                        'dimensionality')
+
+    invalid_inputs_timeseries = ['asdf', 'time', 'st2']
+    valid_inputs_timeseries = ['multiple_spatial_regression', 'eigen_regression']
+    assert_raises_regex(ValueError,
+                        message.format('timeseries', invalid_inputs_timeseries),
+                        check_parameters_megatrawls_datasets,
+                        invalid_inputs_timeseries, valid_inputs_timeseries,
+                        'timeseries')
+
+    invalid_output_names = ['net1', 'net2']
+    valid_output_names = ['correlation', 'partial_correlation']
+    assert_raises_regex(ValueError,
+                        message.format('matrices', invalid_output_names),
+                        check_parameters_megatrawls_datasets,
+                        invalid_output_names, valid_output_names, 'matrices')
+
+    # a valid input given as a single element, not as a list, should
+    # raise a TypeError with the same message
+    message = ("Input given for {0} should be in list. "
+               "You have given as single variable: {1}")
+    valid_matrix_name = 'correlation'
+    assert_raises_regex(TypeError,
+                        message.format('matrices', valid_matrix_name),
+                        check_parameters_megatrawls_datasets,
+                        valid_matrix_name, valid_output_names, 'matrices')

From 085ad3cccac29d303c1de6cd004d3963cd2e9e31 Mon Sep 17 00:00:00 2001
From: KamalakerDadi
Date: Tue, 15 Dec 2015 16:18:39 +0100
Subject: [PATCH 0014/1925] Rebase

---
 nilearn/datasets/func.py | 122 +++++++++++++++++++--------------------
 1 file changed, 58 insertions(+), 64 deletions(-)

diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py
index 96621d547c..f063146ba4 100644
--- a/nilearn/datasets/func.py
+++ b/nilearn/datasets/func.py
@@ -1286,16 +1286,12 @@ def fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True,
 def fetch_megatrawls_netmats(data_dir=None, dimensionality=None, timeseries=None,
                              matrices=None, resume=True, verbose=1):
-    """Downloads and fetches Network Matrices data from MegaTrawls release in HCP.
+    """Downloads and returns Network Matrices data from MegaTrawls release in HCP.
 
-    This data can be used to predict relationships between imaging data (functional
-    connectivity) and non-imaging behavioural measures such as age, sex, education, etc.
-    The network matrices are estimated from functional connectivity datasets of 461
-    subjects [1]. Full technical details in [2].
-
-    The network matrices denoted as 'netmats' are estimated using full correlation
-    denoted as 'Znet1' or partial correlation with limited L2 regularisation
-    denoted as 'Znet2'. [1]
+    This data can be used to predict relationships between imaging data and
+    non-imaging behavioural measures such as age, sex, education, etc.
+    The network matrices are estimated from functional connectivity
+    datasets of 461 subjects. Full technical details in [1] [2].
 
     .. versionadded:: 0.2.2
 
@@ -1308,33 +1304,30 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=None, timeseries=None
     dimensionality: list of int in [25, 50, 100, 200, 300], optional
         By default, network matrices data estimated from brain parcellations
         of all dimensionalities are returned each in a separate dimensional
-        array (n, n).
-        If set to specific dimension, then network matrices related to
-        particular dimension of brain parcellations will be returned. For example,
-        if set as [25, 50] only data corresponding to dimensionality 25 and 50
-        of array (25, 25) and (50, 50) in size will be returned.
-
-    timeseries: list of str in ['multiple_spatial_regression', 'eigen_regression'], optional
-        By default, network matrices of both types of timeseries signal extraction
-        methods will be returned. Each method has its own matrix array.
-        If set to ['multiple_spatial_regression'], then correlation matrices
-        estimated using spatial regressor based extraction of subject specific
-        timeseries signals will be returned.
-        If set to ['eigen_regression'], then correlation matrices estimated using
+        array (n, n). If set to a specific dimension, then only data of
+        that particular dimension will be returned. For example, if set as
+        [25], only data corresponding to dimension 25, of array (25, 25)
+        in size, will be returned.
+
+    timeseries: list of str in ['multiple_spatial_regression', 'eigen_regression'], optional
+        By default, network matrices data estimated using both methods will be
+        returned each in a separate array. If ['multiple_spatial_regression'],
+        then correlation matrices estimated using spatial regressor based
+        extraction of subject specific timeseries signals will be returned.
+        If ['eigen_regression'], then correlation matrices estimated using
         first principal eigen component based extraction of subject specific
-        timeseries signals will be returned.
-        For full technical details about each method. Refer to [3] [4] [5]
+        timeseries signals will be returned. For full technical details
+        about each method, refer to [3] [4] [5].
 
     matrices: list of str in ['correlation', 'partial_correlation'], optional
-        By default, matrices of both types will be returned.
-        If set as only ['correlation'], matrices of only full correlation
+        By default, matrices of both types will be returned. If ['correlation'],
+        then only full correlation matrices will be returned; otherwise, if
+        set as ['partial_correlation'], only partial correlation
         matrices will be returned.
-        If set as ['partial_correlation'], only partial correlation matrices
-        will be fetched.
 
     resume: bool, default is True
-        This parameter is required if a partially downloaded file is needed to be
-        resumed to download again.
+        If True, the download of a partially downloaded file is resumed
+        when fetching again.
 
     verbose: int, default is 1
         This parameter is used to set the verbosity level to print the message
@@ -1343,60 +1336,61 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=None, timeseries=None
 
     Returns
     -------
-    data: sklearn.datasets.base.Bunch
-        Dictionary-like object, contains:
-        - correlation: arrays of correlation matrices (Znet1).
-        - partial_correlation: arrays of partial correlation matrices (Znet2).
-        - dimensions_correlation: array of dimensionalities in numbers which
-          were given as inputs.
-        - dimensions_partial: array of dimensionalities in numbers which were
-          given as inputs.
-        - timeseries_correlation: array of timeseries method given as inputs.
-        - timeseries_partial: array of timeseries method given as inputs.
-        - description: data description
+    data: Bunch
+        Dictionary-like object, the attributes are:
+        'correlation': list of arrays, contains full correlation matrices.
+        'partial_correlation': list of arrays, contains partial correlation
+        matrices.
+        'dimensions_correlation': array consists of given input in dimensions
+        used in fetching its full correlation matrices.
+        'dimensions_partial': array consists of given input in dimensions
+        used in fetching its partial correlation matrices.
+        'timeseries_correlation': array consists of given input in timeseries
+        methods used in fetching its full correlation matrices.
+        'timeseries_partial': array consists of given input in timeseries
+        methods used in fetching its partial correlation matrices.
 
     References
     ----------
-    For more details:
-    [1] Stephen Smith et al, HCP beta-release of the Functional Connectivity MegaTrawl.
-        April 2015 "HCP500-MegaTrawl" release.
-        https://db.humanconnectome.org/megatrawl/
+    [1] Stephen Smith et al, HCP beta-release of the Functional Connectivity
+        MegaTrawl.
+        April 2015 "HCP500-MegaTrawl" release.
+        https://db.humanconnectome.org/megatrawl/
 
-    Technical details:
     [2] Smith, S.M. et al. Nat. Neurosci. 18, 1565-1567 (2015).
 
-    [3] N.Filippini, et al. Distinct patterns of brain activity in young carriers
-        of the APOE-e4 allele.
-        Proc Natl Acad Sci USA (PNAS), 106:7209-7214, 2009.
+    [3] N.Filippini, et al. Distinct patterns of brain activity in young
+        carriers of the APOE-e4 allele.
+        Proc Natl Acad Sci USA (PNAS), 106:7209-7214, 2009.
 
     [4] S.Smith, et al. Methods for network modelling from high quality rfMRI data.
-        Meeting of the Organization for Human Brain Mapping. 2014
+        Meeting of the Organization for Human Brain Mapping. 2014
 
-    [5] J.X. O'Reilly et al. Distinct and overlapping functional zones in the cerebellum
-        defined by resting state functional connectivity.
-        Cerebral Cortex, 2009.
+    [5] J.X. O'Reilly et al. Distinct and overlapping functional zones in the
+        cerebellum defined by resting state functional connectivity.
+        Cerebral Cortex, 2009.
 
     Disclaimer
     ----------
     IMPORTANT: This is open access data. You must agree to Terms and conditions
-    of using this data before using it,
-    available at: http://humanconnectome.org/data/data-use-terms/open-access.html
-    Open Access Data (all imaging data and most of the behavioral data)
-    is available to those who register an account at ConnectomeDB and agree to
-    the Open Access Data Use Terms. This includes agreement to comply with
+    of using this data before using it, available at
+    http://humanconnectome.org/data/data-use-terms/open-access.html.
+    Open Access Data (all imaging data and most of the behavioral data) is
+    available to those who register an account at ConnectomeDB and agree to the
+    Open Access Data Use Terms. This includes agreement to comply with
     institutional rules and regulations. This means you may need the approval of
     your IRB or Ethics Committee to use the data. The released HCP data are not
    considered de-identified, since certain combinations of HCP Restricted Data
     (available through a separate process) might allow identification of
     individuals. Different national, state and local laws may apply and be
     interpreted differently, so it is important that you consult with your IRB
-    or Ethics Committee before beginning your research. If needed and upon request,
-    the HCP will provide a certificate stating that you have accepted the
-    HCP Open Access Data Use Terms. Please note that everyone who works with
+    or Ethics Committee before beginning your research. If needed and upon
+    request, the HCP will provide a certificate stating that you have accepted
+    the HCP Open Access Data Use Terms. Please note that everyone who works with
     HCP open access data must review and agree to these terms, including those
-    who are accessing shared copies of this data. If you are sharing
-    HCP Open Access data, please advise your co-researchers that they must
-    register with ConnectomeDB and agree to these terms.
+    who are accessing shared copies of this data. If you are sharing HCP Open
+    Access data, please advise your co-researchers that they must register with
+    ConnectomeDB and agree to these terms.
 
     Register and sign the Open Access Data Use Terms at
     ConnectomeDB: https://db.humanconnectome.org/
     """

From b63b9577ca1333269e2950ef6b4ef148314977ee Mon Sep 17 00:00:00 2001
From: KamalakerDadi
Date: Wed, 16 Dec 2015 13:04:59 +0100
Subject: [PATCH 0015/1925] For good rendering of attributes

---
 nilearn/datasets/func.py | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py
index f063146ba4..18ae31451f 100644
--- a/nilearn/datasets/func.py
+++ b/nilearn/datasets/func.py
@@ -1338,17 +1338,23 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=None, timeseries=None
     -------
     data: Bunch
         Dictionary-like object, the attributes are:
-        'correlation': list of arrays, contains full correlation matrices.
-        'partial_correlation': list of arrays, contains partial correlation
-        matrices.
-        'dimensions_correlation': array consists of given input in dimensions
-        used in fetching its full correlation matrices.
-        'dimensions_partial': array consists of given input in dimensions
-        used in fetching its partial correlation matrices.
-        'timeseries_correlation': array consists of given input in timeseries
-        methods used in fetching its full correlation matrices.
-        'timeseries_partial': array consists of given input in timeseries
-        methods used in fetching its partial correlation matrices.
+
+        - 'correlation': list of arrays
+            contains the full correlation matrices.
+        - 'partial_correlation': list of arrays
+            contains the partial correlation matrices.
+        - 'dimensions_correlation': array
+            the dimensionalities given as input, as used in fetching the
+            full correlation matrices.
+        - 'dimensions_partial': array
+            the dimensionalities given as input, as used in fetching the
+            partial correlation matrices.
+        - 'timeseries_correlation': array
+            the timeseries methods given as input, as used in fetching the
+            full correlation matrices.
+        - 'timeseries_partial': array
+            the timeseries methods given as input, as used in fetching the
+            partial correlation matrices.
 
     References
     ----------

From 64719c661c30673e8cda16cab15d24082b75f48b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Wed, 16 Dec 2015 13:38:12 +0100
Subject: [PATCH 0016/1925] Make sure that $REMOTE/master exists rather than
 fetching $REMOTE/master into local branch.
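
The resulting flow is roughly the following (a sketch only; command and
variable names as in the script, not a drop-in replacement):

    # pick the remote to compare against, preferring upstream
    git remote | grep upstream && REMOTE=upstream || REMOTE=origin
    # track and fetch master on that remote instead of creating a local
    # remote_master branch that has to be cleaned up afterwards
    git remote set-branches --add $REMOTE master
    git fetch $REMOTE master
    # lint only the changes since the common ancestor with master
    COMMIT=$(git merge-base @ "$REMOTE/master")
    git diff $COMMIT | flake8 --diff
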
Also improve the script output to separate the flake8 output from the
rest of the log.
---
 continuous_integration/flake8_diff.sh | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/continuous_integration/flake8_diff.sh b/continuous_integration/flake8_diff.sh
index 732319afdc..985355498e 100755
--- a/continuous_integration/flake8_diff.sh
+++ b/continuous_integration/flake8_diff.sh
@@ -11,25 +11,26 @@ git fetch --unshallow || echo "Unshallowing the git checkout failed"
 # Note: upstream has priority if it exists
 git remote -v
 git remote | grep upstream && REMOTE=upstream || REMOTE=origin
-git fetch -v $REMOTE master:remote_master
+# Make sure that $REMOTE/master is set
+git remote set-branches --add $REMOTE master
+git fetch $REMOTE master
+REMOTE_MASTER_REF="$REMOTE/master"
 
-# Find common ancestor between HEAD and remote_master
-COMMIT=$(git merge-base @ remote_master) || \
-    echo "No common ancestor found for $(git show @ -q) and $(git show remote_master -q)"
+# Find common ancestor between HEAD and remotes/$REMOTE/master
+COMMIT=$(git merge-base @ $REMOTE_MASTER_REF) || \
+    echo "No common ancestor found for $(git show @ -q) and $(git show $REMOTE_MASTER_REF -q)"
 
 if [ -z "$COMMIT" ]; then
-    # clean-up created branch
-    git branch -D remote_master
     exit 1
 fi
 
 echo Common ancestor is:
 git show $COMMIT --stat
-echo Running flake8 on the diff in the range\
+
+echo '\nRunning flake8 on the diff in the range'\
     "$(git rev-parse --short $COMMIT)..$(git rev-parse --short @)" \
     "($(git rev-list $COMMIT.. | wc -l) commit(s)):"
-git diff $COMMIT | flake8 --diff && echo -e "No problem detected by flake8\n"
+echo '--------------------------------------------------------------------------------'
 
-# clean-up created branch
-git branch -D remote_master
+git diff $COMMIT | flake8 --diff && echo -e "No problem detected by flake8\n"

From 0c2c84aabb69ca73f2d78c3488b7afc2bc130ab3 Mon Sep 17 00:00:00 2001
From: Alexandre Abraham
Date: Wed, 16 Dec 2015 18:18:44 +0100
Subject: [PATCH 0017/1925] Return a list of labels in all cases.
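
Previously the labels came back as a numpy array when symmetric_split
was False but as a plain list when it was True. A minimal sketch of the
behaviour being unified (toy label dict, not the real atlas XML):

    import numpy as np

    names = {0: 'Background', 1: 'Frontal Pole'}      # hypothetical labels
    labels_before = np.asarray(list(names.values()))  # ndarray on this path
    labels_after = list(names.values())               # a list in all cases
    assert isinstance(labels_after, list)
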
--- nilearn/datasets/atlas.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index c2dd3ef558..d34a2c030d 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -231,7 +231,7 @@ def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, names[0] = 'Background' for label in ElementTree.parse(label_file).findall('.//label'): names[int(label.get('index')) + 1] = label.text - names = np.asarray(list(names.values())) + names = list(names.values()) if not symmetric_split: return Bunch(maps=atlas_img, labels=names) From e246357591b7e1edefc05eb95cc614656c5ef36f Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Thu, 17 Dec 2015 09:51:52 +0100 Subject: [PATCH 0018/1925] Fix left/right split --- nilearn/datasets/atlas.py | 45 +++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index d34a2c030d..0f8c80ac88 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -245,35 +245,34 @@ def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, atlas = atlas_img.get_data() labels = np.unique(atlas) - # ndimage.find_objects output contains None elements for labels - # that do not exist - found_slices = (s for s in ndimage.find_objects(atlas) - if s is not None) + # Build a mask of both halves of the brain middle_ind = (atlas.shape[0] - 1) // 2 - crosses_middle = [s.start < middle_ind and s.stop > middle_ind - for s, _, _ in found_slices] - - # Split every zone crossing the median plane into two parts. - # Assumes that the background label is zero. - half = np.zeros(atlas.shape, dtype=np.bool) - half[:middle_ind, ...] = True - new_label = max(labels) + 1 # Put zeros on the median plane atlas[middle_ind, ...] = 0 - for label, crosses in zip(labels[1:], crosses_middle): - if not crosses: - continue - atlas[np.logical_and(atlas == label, half)] = new_label - new_label += 1 + # Split every zone crossing the median plane into two parts. + left_atlas = atlas.copy() + left_atlas[middle_ind:, ...] = 0 + right_atlas = atlas.copy() + right_atlas[:middle_ind, ...] = 0 - # Duplicate labels for right and left + new_label = 0 + new_atlas = atlas.copy() + # Assumes that the background label is zero. 
new_names = [names[0]] - for n in names[1:]: - new_names.append(n + ', right part') - for n in names[1:]: - new_names.append(n + ', left part') + for label, name in zip(labels[1:], names[1:]): + new_label += 1 + if ((left_atlas == label).sum() == 0 or + (right_atlas == label).sum() == 0): + new_atlas[atlas == label] = new_label + new_names.append(name) + continue + new_atlas[right_atlas == label] = new_label + new_names.append(name + ', left part') + new_label += 1 + new_atlas[left_atlas == label] = new_label + new_names.append(name + ', right part') - atlas_img = new_img_like(atlas_img, atlas, atlas_img.get_affine()) + atlas_img = new_img_like(atlas_img, new_atlas, atlas_img.get_affine()) return Bunch(maps=atlas_img, labels=new_names) From b27db85e33fd51aee95795c22c716429598fc2ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= Date: Thu, 17 Dec 2015 11:13:16 +0100 Subject: [PATCH 0019/1925] Update numpydoc to 0.5 --- doc/conf.py | 2 +- doc/sphinxext/numpy_ext/__init__.py | 0 doc/sphinxext/numpydoc/__init__.py | 3 + doc/sphinxext/numpydoc/comment_eater.py | 169 ++++ doc/sphinxext/numpydoc/compiler_unparse.py | 865 ++++++++++++++++++ .../{numpy_ext => numpydoc}/docscrape.py | 174 ++-- .../docscrape_sphinx.py | 120 ++- doc/sphinxext/numpydoc/linkcode.py | 83 ++ .../{numpy_ext => numpydoc}/numpydoc.py | 96 +- doc/sphinxext/numpydoc/phantom_import.py | 167 ++++ doc/sphinxext/numpydoc/plot_directive.py | 642 +++++++++++++ doc/sphinxext/numpydoc/traitsdoc.py | 142 +++ 12 files changed, 2301 insertions(+), 162 deletions(-) delete mode 100644 doc/sphinxext/numpy_ext/__init__.py create mode 100644 doc/sphinxext/numpydoc/__init__.py create mode 100644 doc/sphinxext/numpydoc/comment_eater.py create mode 100644 doc/sphinxext/numpydoc/compiler_unparse.py rename doc/sphinxext/{numpy_ext => numpydoc}/docscrape.py (73%) rename doc/sphinxext/{numpy_ext => numpydoc}/docscrape_sphinx.py (65%) create mode 100644 doc/sphinxext/numpydoc/linkcode.py rename doc/sphinxext/{numpy_ext => numpydoc}/numpydoc.py (63%) create mode 100644 doc/sphinxext/numpydoc/phantom_import.py create mode 100644 doc/sphinxext/numpydoc/plot_directive.py create mode 100644 doc/sphinxext/numpydoc/traitsdoc.py diff --git a/doc/conf.py b/doc/conf.py index 87278cc30b..0838e6dc97 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -39,7 +39,7 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.pngmath', 'sphinx.ext.intersphinx', - 'numpy_ext.numpydoc', + 'numpydoc.numpydoc', 'sphinx_gallery.gen_gallery', ] diff --git a/doc/sphinxext/numpy_ext/__init__.py b/doc/sphinxext/numpy_ext/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/doc/sphinxext/numpydoc/__init__.py b/doc/sphinxext/numpydoc/__init__.py new file mode 100644 index 0000000000..0fce2cf747 --- /dev/null +++ b/doc/sphinxext/numpydoc/__init__.py @@ -0,0 +1,3 @@ +from __future__ import division, absolute_import, print_function + +from .numpydoc import setup diff --git a/doc/sphinxext/numpydoc/comment_eater.py b/doc/sphinxext/numpydoc/comment_eater.py new file mode 100644 index 0000000000..8cddd3305f --- /dev/null +++ b/doc/sphinxext/numpydoc/comment_eater.py @@ -0,0 +1,169 @@ +from __future__ import division, absolute_import, print_function + +import sys +if sys.version_info[0] >= 3: + from io import StringIO +else: + from io import StringIO + +import compiler +import inspect +import textwrap +import tokenize + +from .compiler_unparse import unparse + + +class Comment(object): + """ A comment block. + """ + is_comment = True + def __init__(self, start_lineno, end_lineno, text): + # int : The first line number in the block. 1-indexed. + self.start_lineno = start_lineno + # int : The last line number. Inclusive! + self.end_lineno = end_lineno + # str : The text block including '#' character but not any leading spaces. + self.text = text + + def add(self, string, start, end, line): + """ Add a new comment line. + """ + self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + self.text += string + + def __repr__(self): + return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno, self.text) + + +class NonComment(object): + """ A non-comment block of code. + """ + is_comment = False + def __init__(self, start_lineno, end_lineno): + self.start_lineno = start_lineno + self.end_lineno = end_lineno + + def add(self, string, start, end, line): + """ Add lines to the block. + """ + if string.strip(): + # Only add if not entirely whitespace. + self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + + def __repr__(self): + return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno) + + +class CommentBlocker(object): + """ Pull out contiguous comment blocks. + """ + def __init__(self): + # Start with a dummy. + self.current_block = NonComment(0, 0) + + # All of the blocks seen so far. + self.blocks = [] + + # The index mapping lines of code to their associated comment blocks. + self.index = {} + + def process_file(self, file): + """ Process a file object. + """ + if sys.version_info[0] >= 3: + nxt = file.__next__ + else: + nxt = file.next + for token in tokenize.generate_tokens(nxt): + self.process_token(*token) + self.make_index() + + def process_token(self, kind, string, start, end, line): + """ Process a single token. + """ + if self.current_block.is_comment: + if kind == tokenize.COMMENT: + self.current_block.add(string, start, end, line) + else: + self.new_noncomment(start[0], end[0]) + else: + if kind == tokenize.COMMENT: + self.new_comment(string, start, end, line) + else: + self.current_block.add(string, start, end, line) + + def new_noncomment(self, start_lineno, end_lineno): + """ We are transitioning from a noncomment to a comment. 
+ """ + block = NonComment(start_lineno, end_lineno) + self.blocks.append(block) + self.current_block = block + + def new_comment(self, string, start, end, line): + """ Possibly add a new comment. + + Only adds a new comment if this comment is the only thing on the line. + Otherwise, it extends the noncomment block. + """ + prefix = line[:start[1]] + if prefix.strip(): + # Oops! Trailing comment, not a comment block. + self.current_block.add(string, start, end, line) + else: + # A comment block. + block = Comment(start[0], end[0], string) + self.blocks.append(block) + self.current_block = block + + def make_index(self): + """ Make the index mapping lines of actual code to their associated + prefix comments. + """ + for prev, block in zip(self.blocks[:-1], self.blocks[1:]): + if not block.is_comment: + self.index[block.start_lineno] = prev + + def search_for_comment(self, lineno, default=None): + """ Find the comment block just before the given line number. + + Returns None (or the specified default) if there is no such block. + """ + if not self.index: + self.make_index() + block = self.index.get(lineno, None) + text = getattr(block, 'text', default) + return text + + +def strip_comment_marker(text): + """ Strip # markers at the front of a block of comment text. + """ + lines = [] + for line in text.splitlines(): + lines.append(line.lstrip('#')) + text = textwrap.dedent('\n'.join(lines)) + return text + + +def get_class_traits(klass): + """ Yield all of the documentation for trait definitions on a class object. + """ + # FIXME: gracefully handle errors here or in the caller? + source = inspect.getsource(klass) + cb = CommentBlocker() + cb.process_file(StringIO(source)) + mod_ast = compiler.parse(source) + class_ast = mod_ast.node.nodes[0] + for node in class_ast.code.nodes: + # FIXME: handle other kinds of assignments? + if isinstance(node, compiler.ast.Assign): + name = node.nodes[0].name + rhs = unparse(node.expr).strip() + doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) + yield name, rhs, doc + diff --git a/doc/sphinxext/numpydoc/compiler_unparse.py b/doc/sphinxext/numpydoc/compiler_unparse.py new file mode 100644 index 0000000000..8933a83db3 --- /dev/null +++ b/doc/sphinxext/numpydoc/compiler_unparse.py @@ -0,0 +1,865 @@ +""" Turn compiler.ast structures back into executable python code. + + The unparse method takes a compiler.ast tree and transforms it back into + valid python code. It is incomplete and currently only works for + import statements, function calls, function definitions, assignments, and + basic expressions. + + Inspired by python-2.5-svn/Demo/parser/unparse.py + + fixme: We may want to move to using _ast trees because the compiler for + them is about 6 times faster than compiler.compile. +""" +from __future__ import division, absolute_import, print_function + +import sys +from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +def unparse(ast, single_line_functions=False): + s = StringIO() + UnparseCompilerAst(ast, s, single_line_functions) + return s.getvalue().lstrip() + +op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, + 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } + +class UnparseCompilerAst: + """ Methods in this class recursively traverse an AST and + output source code for the abstract syntax; original formatting + is disregarged. 
+ """ + + ######################################################################### + # object interface. + ######################################################################### + + def __init__(self, tree, file = sys.stdout, single_line_functions=False): + """ Unparser(tree, file=sys.stdout) -> None. + + Print the source for tree to file. + """ + self.f = file + self._single_func = single_line_functions + self._do_indent = True + self._indent = 0 + self._dispatch(tree) + self._write("\n") + self.f.flush() + + ######################################################################### + # Unparser private interface. + ######################################################################### + + ### format, output, and dispatch methods ################################ + + def _fill(self, text = ""): + "Indent a piece of text, according to the current indentation level" + if self._do_indent: + self._write("\n"+" "*self._indent + text) + else: + self._write(text) + + def _write(self, text): + "Append a piece of text to the current line." + self.f.write(text) + + def _enter(self): + "Print ':', and increase the indentation." + self._write(": ") + self._indent += 1 + + def _leave(self): + "Decrease the indentation level." + self._indent -= 1 + + def _dispatch(self, tree): + "_dispatcher function, _dispatching tree type T to method _T." + if isinstance(tree, list): + for t in tree: + self._dispatch(t) + return + meth = getattr(self, "_"+tree.__class__.__name__) + if tree.__class__.__name__ == 'NoneType' and not self._do_indent: + return + meth(tree) + + + ######################################################################### + # compiler.ast unparsing methods. + # + # There should be one method per concrete grammar type. They are + # organized in alphabetical order. + ######################################################################### + + def _Add(self, t): + self.__binary_op(t, '+') + + def _And(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") and (") + self._write(")") + + def _AssAttr(self, t): + """ Handle assigning an attribute of an object + """ + self._dispatch(t.expr) + self._write('.'+t.attrname) + + def _Assign(self, t): + """ Expression Assignment such as "a = 1". + + This only handles assignment in expressions. Keyword assignment + is handled separately. + """ + self._fill() + for target in t.nodes: + self._dispatch(target) + self._write(" = ") + self._dispatch(t.expr) + if not self._do_indent: + self._write('; ') + + def _AssName(self, t): + """ Name on left hand side of expression. + + Treat just like a name on the right side of an expression. + """ + self._Name(t) + + def _AssTuple(self, t): + """ Tuple on left hand side of an expression. + """ + + # _write each elements, separated by a comma. + for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + def _AugAssign(self, t): + """ +=,-=,*=,/=,**=, etc. operations + """ + + self._fill() + self._dispatch(t.node) + self._write(' '+t.op+' ') + self._dispatch(t.expr) + if not self._do_indent: + self._write(';') + + def _Bitand(self, t): + """ Bit and operation. 
+ """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" & ") + + def _Bitor(self, t): + """ Bit or operation + """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" | ") + + def _CallFunc(self, t): + """ Function call. + """ + self._dispatch(t.node) + self._write("(") + comma = False + for e in t.args: + if comma: self._write(", ") + else: comma = True + self._dispatch(e) + if t.star_args: + if comma: self._write(", ") + else: comma = True + self._write("*") + self._dispatch(t.star_args) + if t.dstar_args: + if comma: self._write(", ") + else: comma = True + self._write("**") + self._dispatch(t.dstar_args) + self._write(")") + + def _Compare(self, t): + self._dispatch(t.expr) + for op, expr in t.ops: + self._write(" " + op + " ") + self._dispatch(expr) + + def _Const(self, t): + """ A constant value such as an integer value, 3, or a string, "hello". + """ + self._dispatch(t.value) + + def _Decorators(self, t): + """ Handle function decorators (eg. @has_units) + """ + for node in t.nodes: + self._dispatch(node) + + def _Dict(self, t): + self._write("{") + for i, (k, v) in enumerate(t.items): + self._dispatch(k) + self._write(": ") + self._dispatch(v) + if i < len(t.items)-1: + self._write(", ") + self._write("}") + + def _Discard(self, t): + """ Node for when return value is ignored such as in "foo(a)". + """ + self._fill() + self._dispatch(t.expr) + + def _Div(self, t): + self.__binary_op(t, '/') + + def _Ellipsis(self, t): + self._write("...") + + def _From(self, t): + """ Handle "from xyz import foo, bar as baz". + """ + # fixme: Are From and ImportFrom handled differently? + self._fill("from ") + self._write(t.modname) + self._write(" import ") + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Function(self, t): + """ Handle function definitions + """ + if t.decorators is not None: + self._fill("@") + self._dispatch(t.decorators) + self._fill("def "+t.name + "(") + defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) + for i, arg in enumerate(zip(t.argnames, defaults)): + self._write(arg[0]) + if arg[1] is not None: + self._write('=') + self._dispatch(arg[1]) + if i < len(t.argnames)-1: + self._write(', ') + self._write(")") + if self._single_func: + self._do_indent = False + self._enter() + self._dispatch(t.code) + self._leave() + self._do_indent = True + + def _Getattr(self, t): + """ Handle getting an attribute of an object + """ + if isinstance(t.expr, (Div, Mul, Sub, Add)): + self._write('(') + self._dispatch(t.expr) + self._write(')') + else: + self._dispatch(t.expr) + + self._write('.'+t.attrname) + + def _If(self, t): + self._fill() + + for i, (compare,code) in enumerate(t.tests): + if i == 0: + self._write("if ") + else: + self._write("elif ") + self._dispatch(compare) + self._enter() + self._fill() + self._dispatch(code) + self._leave() + self._write("\n") + + if t.else_ is not None: + self._write("else") + self._enter() + self._fill() + self._dispatch(t.else_) + self._leave() + self._write("\n") + + def _IfExp(self, t): + self._dispatch(t.then) + self._write(" if ") + self._dispatch(t.test) + + if t.else_ is not None: + self._write(" else (") + self._dispatch(t.else_) + self._write(")") + + def _Import(self, t): + """ Handle "import xyz.foo". 
+ """ + self._fill("import ") + + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Keyword(self, t): + """ Keyword value assignment within function calls and definitions. + """ + self._write(t.name) + self._write("=") + self._dispatch(t.expr) + + def _List(self, t): + self._write("[") + for i,node in enumerate(t.nodes): + self._dispatch(node) + if i < len(t.nodes)-1: + self._write(", ") + self._write("]") + + def _Module(self, t): + if t.doc is not None: + self._dispatch(t.doc) + self._dispatch(t.node) + + def _Mul(self, t): + self.__binary_op(t, '*') + + def _Name(self, t): + self._write(t.name) + + def _NoneType(self, t): + self._write("None") + + def _Not(self, t): + self._write('not (') + self._dispatch(t.expr) + self._write(')') + + def _Or(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") or (") + self._write(")") + + def _Pass(self, t): + self._write("pass\n") + + def _Printnl(self, t): + self._fill("print ") + if t.dest: + self._write(">> ") + self._dispatch(t.dest) + self._write(", ") + comma = False + for node in t.nodes: + if comma: self._write(', ') + else: comma = True + self._dispatch(node) + + def _Power(self, t): + self.__binary_op(t, '**') + + def _Return(self, t): + self._fill("return ") + if t.value: + if isinstance(t.value, Tuple): + text = ', '.join([ name.name for name in t.value.asList() ]) + self._write(text) + else: + self._dispatch(t.value) + if not self._do_indent: + self._write('; ') + + def _Slice(self, t): + self._dispatch(t.expr) + self._write("[") + if t.lower: + self._dispatch(t.lower) + self._write(":") + if t.upper: + self._dispatch(t.upper) + #if t.step: + # self._write(":") + # self._dispatch(t.step) + self._write("]") + + def _Sliceobj(self, t): + for i, node in enumerate(t.nodes): + if i != 0: + self._write(":") + if not (isinstance(node, Const) and node.value is None): + self._dispatch(node) + + def _Stmt(self, tree): + for node in tree.nodes: + self._dispatch(node) + + def _Sub(self, t): + self.__binary_op(t, '-') + + def _Subscript(self, t): + self._dispatch(t.expr) + self._write("[") + for i, value in enumerate(t.subs): + if i != 0: + self._write(",") + self._dispatch(value) + self._write("]") + + def _TryExcept(self, t): + self._fill("try") + self._enter() + self._dispatch(t.body) + self._leave() + + for handler in t.handlers: + self._fill('except ') + self._dispatch(handler[0]) + if handler[1] is not None: + self._write(', ') + self._dispatch(handler[1]) + self._enter() + self._dispatch(handler[2]) + self._leave() + + if t.else_: + self._fill("else") + self._enter() + self._dispatch(t.else_) + self._leave() + + def _Tuple(self, t): + + if not t.nodes: + # Empty tuple. + self._write("()") + else: + self._write("(") + + # _write each elements, separated by a comma. 
+ for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + self._write(")") + + def _UnaryAdd(self, t): + self._write("+") + self._dispatch(t.expr) + + def _UnarySub(self, t): + self._write("-") + self._dispatch(t.expr) + + def _With(self, t): + self._fill('with ') + self._dispatch(t.expr) + if t.vars: + self._write(' as ') + self._dispatch(t.vars.name) + self._enter() + self._dispatch(t.body) + self._leave() + self._write('\n') + + def _int(self, t): + self._write(repr(t)) + + def __binary_op(self, t, symbol): + # Check if parenthesis are needed on left side and then dispatch + has_paren = False + left_class = str(t.left.__class__) + if (left_class in op_precedence.keys() and + op_precedence[left_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.left) + if has_paren: + self._write(')') + # Write the appropriate symbol for operator + self._write(symbol) + # Check if parenthesis are needed on the right side and then dispatch + has_paren = False + right_class = str(t.right.__class__) + if (right_class in op_precedence.keys() and + op_precedence[right_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.right) + if has_paren: + self._write(')') + + def _float(self, t): + # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' + # We prefer str here. + self._write(str(t)) + + def _str(self, t): + self._write(repr(t)) + + def _tuple(self, t): + self._write(str(t)) + + ######################################################################### + # These are the methods from the _ast modules unparse. + # + # As our needs to handle more advanced code increase, we may want to + # modify some of the methods below so that they work for compiler.ast. + ######################################################################### + +# # stmt +# def _Expr(self, tree): +# self._fill() +# self._dispatch(tree.value) +# +# def _Import(self, t): +# self._fill("import ") +# first = True +# for a in t.names: +# if first: +# first = False +# else: +# self._write(", ") +# self._write(a.name) +# if a.asname: +# self._write(" as "+a.asname) +# +## def _ImportFrom(self, t): +## self._fill("from ") +## self._write(t.module) +## self._write(" import ") +## for i, a in enumerate(t.names): +## if i == 0: +## self._write(", ") +## self._write(a.name) +## if a.asname: +## self._write(" as "+a.asname) +## # XXX(jpe) what is level for? 
+## +# +# def _Break(self, t): +# self._fill("break") +# +# def _Continue(self, t): +# self._fill("continue") +# +# def _Delete(self, t): +# self._fill("del ") +# self._dispatch(t.targets) +# +# def _Assert(self, t): +# self._fill("assert ") +# self._dispatch(t.test) +# if t.msg: +# self._write(", ") +# self._dispatch(t.msg) +# +# def _Exec(self, t): +# self._fill("exec ") +# self._dispatch(t.body) +# if t.globals: +# self._write(" in ") +# self._dispatch(t.globals) +# if t.locals: +# self._write(", ") +# self._dispatch(t.locals) +# +# def _Print(self, t): +# self._fill("print ") +# do_comma = False +# if t.dest: +# self._write(">>") +# self._dispatch(t.dest) +# do_comma = True +# for e in t.values: +# if do_comma:self._write(", ") +# else:do_comma=True +# self._dispatch(e) +# if not t.nl: +# self._write(",") +# +# def _Global(self, t): +# self._fill("global") +# for i, n in enumerate(t.names): +# if i != 0: +# self._write(",") +# self._write(" " + n) +# +# def _Yield(self, t): +# self._fill("yield") +# if t.value: +# self._write(" (") +# self._dispatch(t.value) +# self._write(")") +# +# def _Raise(self, t): +# self._fill('raise ') +# if t.type: +# self._dispatch(t.type) +# if t.inst: +# self._write(", ") +# self._dispatch(t.inst) +# if t.tback: +# self._write(", ") +# self._dispatch(t.tback) +# +# +# def _TryFinally(self, t): +# self._fill("try") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# self._fill("finally") +# self._enter() +# self._dispatch(t.finalbody) +# self._leave() +# +# def _excepthandler(self, t): +# self._fill("except ") +# if t.type: +# self._dispatch(t.type) +# if t.name: +# self._write(", ") +# self._dispatch(t.name) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _ClassDef(self, t): +# self._write("\n") +# self._fill("class "+t.name) +# if t.bases: +# self._write("(") +# for a in t.bases: +# self._dispatch(a) +# self._write(", ") +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _FunctionDef(self, t): +# self._write("\n") +# for deco in t.decorators: +# self._fill("@") +# self._dispatch(deco) +# self._fill("def "+t.name + "(") +# self._dispatch(t.args) +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _For(self, t): +# self._fill("for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# def _While(self, t): +# self._fill("while ") +# self._dispatch(t.test) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# # expr +# def _Str(self, tree): +# self._write(repr(tree.s)) +## +# def _Repr(self, t): +# self._write("`") +# self._dispatch(t.value) +# self._write("`") +# +# def _Num(self, t): +# self._write(repr(t.n)) +# +# def _ListComp(self, t): +# self._write("[") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write("]") +# +# def _GeneratorExp(self, t): +# self._write("(") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write(")") +# +# def _comprehension(self, t): +# self._write(" for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# for if_clause in t.ifs: +# self._write(" if ") +# self._dispatch(if_clause) +# +# def _IfExp(self, t): +# 
self._dispatch(t.body) +# self._write(" if ") +# self._dispatch(t.test) +# if t.orelse: +# self._write(" else ") +# self._dispatch(t.orelse) +# +# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} +# def _UnaryOp(self, t): +# self._write(self.unop[t.op.__class__.__name__]) +# self._write("(") +# self._dispatch(t.operand) +# self._write(")") +# +# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", +# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", +# "FloorDiv":"//", "Pow": "**"} +# def _BinOp(self, t): +# self._write("(") +# self._dispatch(t.left) +# self._write(")" + self.binop[t.op.__class__.__name__] + "(") +# self._dispatch(t.right) +# self._write(")") +# +# boolops = {_ast.And: 'and', _ast.Or: 'or'} +# def _BoolOp(self, t): +# self._write("(") +# self._dispatch(t.values[0]) +# for v in t.values[1:]: +# self._write(" %s " % self.boolops[t.op.__class__]) +# self._dispatch(v) +# self._write(")") +# +# def _Attribute(self,t): +# self._dispatch(t.value) +# self._write(".") +# self._write(t.attr) +# +## def _Call(self, t): +## self._dispatch(t.func) +## self._write("(") +## comma = False +## for e in t.args: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## for e in t.keywords: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## if t.starargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("*") +## self._dispatch(t.starargs) +## if t.kwargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("**") +## self._dispatch(t.kwargs) +## self._write(")") +# +# # slice +# def _Index(self, t): +# self._dispatch(t.value) +# +# def _ExtSlice(self, t): +# for i, d in enumerate(t.dims): +# if i != 0: +# self._write(': ') +# self._dispatch(d) +# +# # others +# def _arguments(self, t): +# first = True +# nonDef = len(t.args)-len(t.defaults) +# for a in t.args[0:nonDef]: +# if first:first = False +# else: self._write(", ") +# self._dispatch(a) +# for a,d in zip(t.args[nonDef:], t.defaults): +# if first:first = False +# else: self._write(", ") +# self._dispatch(a), +# self._write("=") +# self._dispatch(d) +# if t.vararg: +# if first:first = False +# else: self._write(", ") +# self._write("*"+t.vararg) +# if t.kwarg: +# if first:first = False +# else: self._write(", ") +# self._write("**"+t.kwarg) +# +## def _keyword(self, t): +## self._write(t.arg) +## self._write("=") +## self._dispatch(t.value) +# +# def _Lambda(self, t): +# self._write("lambda ") +# self._dispatch(t.args) +# self._write(": ") +# self._dispatch(t.body) + + + diff --git a/doc/sphinxext/numpy_ext/docscrape.py b/doc/sphinxext/numpydoc/docscrape.py similarity index 73% rename from doc/sphinxext/numpy_ext/docscrape.py rename to doc/sphinxext/numpydoc/docscrape.py index e9670c05f5..2b1719db5c 100644 --- a/doc/sphinxext/numpy_ext/docscrape.py +++ b/doc/sphinxext/numpydoc/docscrape.py @@ -1,13 +1,15 @@ """Extract reference documentation from the NumPy source tree. """ +from __future__ import division, absolute_import, print_function import inspect import textwrap import re import pydoc -from StringIO import StringIO from warnings import warn +import collections +import sys class Reader(object): @@ -22,10 +24,10 @@ def __init__(self, data): String with lines separated by '\n'. 
""" - if isinstance(data, list): + if isinstance(data,list): self._str = data else: - self._str = data.split('\n') # store string as list of lines + self._str = data.split('\n') # store string as list of lines self.reset() @@ -33,7 +35,7 @@ def __getitem__(self, n): return self._str[n] def reset(self): - self._l = 0 # current line nr + self._l = 0 # current line nr def read(self): if not self.eof(): @@ -60,12 +62,11 @@ def read_to_condition(self, condition_func): return self[start:self._l] self._l += 1 if self.eof(): - return self[start:self._l + 1] + return self[start:self._l+1] return [] def read_to_next_empty_line(self): self.seek_next_non_empty_line() - def is_empty(line): return not line.strip() return self.read_to_condition(is_empty) @@ -75,7 +76,7 @@ def is_unindented(line): return (line.strip() and (len(line.lstrip()) == len(line))) return self.read_to_condition(is_unindented) - def peek(self, n=0): + def peek(self,n=0): if self._l + n < len(self._str): return self[self._l + n] else: @@ -111,11 +112,11 @@ def __init__(self, docstring, config={}): self._parse() - def __getitem__(self, key): + def __getitem__(self,key): return self._parsed_data[key] - def __setitem__(self, key, val): - if not self._parsed_data.has_key(key): + def __setitem__(self,key,val): + if key not in self._parsed_data: warn("Unknown section %s" % key) else: self._parsed_data[key] = val @@ -131,27 +132,25 @@ def _is_at_section(self): if l1.startswith('.. index::'): return True - l2 = self._doc.peek(1).strip() # ---------- or ========== - return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1)) + l2 = self._doc.peek(1).strip() # ---------- or ========== + return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) - def _strip(self, doc): + def _strip(self,doc): i = 0 j = 0 - for i, line in enumerate(doc): - if line.strip(): - break + for i,line in enumerate(doc): + if line.strip(): break - for j, line in enumerate(doc[::-1]): - if line.strip(): - break + for j,line in enumerate(doc[::-1]): + if line.strip(): break - return doc[i:len(doc) - j] + return doc[i:len(doc)-j] def _read_to_next_section(self): section = self._doc.read_to_next_empty_line() while not self._is_at_section() and not self._doc.eof(): - if not self._doc.peek(-1).strip(): # previous line was empty + if not self._doc.peek(-1).strip(): # previous line was empty section += [''] section += self._doc.read_to_next_empty_line() @@ -163,14 +162,14 @@ def _read_sections(self): data = self._read_to_next_section() name = data[0].strip() - if name.startswith('..'): # index section + if name.startswith('..'): # index section yield name, data[1:] elif len(data) < 2: yield StopIteration else: yield name, self._strip(data[2:]) - def _parse_param_list(self, content): + def _parse_param_list(self,content): r = Reader(content) params = [] while not r.eof(): @@ -183,13 +182,13 @@ def _parse_param_list(self, content): desc = r.read_to_next_unindented_line() desc = dedent_lines(desc) - params.append((arg_name, arg_type, desc)) + params.append((arg_name,arg_type,desc)) return params + _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) - def _parse_see_also(self, content): """ func_name : Descriptive text @@ -222,8 +221,7 @@ def push_item(name, rest): rest = [] for line in content: - if not line.strip(): - continue + if not line.strip(): continue m = self._name_rgx.match(line) if m and line[m.end():].strip().startswith(':'): @@ -237,7 +235,8 @@ def push_item(name, rest): current_func = None if ',' in line: 
for func in line.split(','): - push_item(func, []) + if func.strip(): + push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: @@ -269,13 +268,17 @@ def _parse_summary(self): if self._is_at_section(): return - summary = self._doc.read_to_next_empty_line() - summary_str = " ".join([s.strip() for s in summary]).strip() - if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): - self['Signature'] = summary_str - if not self._is_at_section(): - self['Summary'] = self._doc.read_to_next_empty_line() - else: + # If several signatures present, take the last one + while True: + summary = self._doc.read_to_next_empty_line() + summary_str = " ".join([s.strip() for s in summary]).strip() + if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): + self['Signature'] = summary_str + if not self._is_at_section(): + continue + break + + if summary is not None: self['Summary'] = summary if not self._is_at_section(): @@ -285,12 +288,11 @@ def _parse(self): self._doc.reset() self._parse_summary() - for (section, content) in self._read_sections(): + for (section,content) in self._read_sections(): if not section.startswith('..'): - section = ' '.join([s.capitalize() - for s in section.split(' ')]) - if section in ('Parameters', 'Attributes', 'Methods', - 'Returns', 'Raises', 'Warns'): + section = ' '.join([s.capitalize() for s in section.split(' ')]) + if section in ('Parameters', 'Returns', 'Raises', 'Warns', + 'Other Parameters', 'Attributes', 'Methods'): self[section] = self._parse_param_list(content) elif section.startswith('.. index::'): self['index'] = self._parse_index(section, content) @@ -302,17 +304,17 @@ def _parse(self): # string conversion routines def _str_header(self, name, symbol='-'): - return [name, len(name) * symbol] + return [name, len(name)*symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: - out += [' ' * indent + line] + out += [' '*indent + line] return out def _str_signature(self): if self['Signature']: - return [self['Signature'].replace('*', '\*')] + [''] + return [self['Signature'].replace('*','\*')] + [''] else: return [''] @@ -332,8 +334,11 @@ def _str_param_list(self, name): out = [] if self[name]: out += self._str_header(name) - for param, param_type, desc in self[name]: - out += ['%s : %s' % (param, param_type)] + for param,param_type,desc in self[name]: + if param_type: + out += ['%s : %s' % (param, param_type)] + else: + out += [param] out += self._str_indent(desc) out += [''] return out @@ -347,8 +352,7 @@ def _str_section(self, name): return out def _str_see_also(self, func_role): - if not self['See Also']: - return [] + if not self['See Also']: return [] out = [] out += self._str_header("See Also") last_had_desc = True @@ -375,8 +379,8 @@ def _str_see_also(self, func_role): def _str_index(self): idx = self['index'] out = [] - out += ['.. index:: %s' % idx.get('default', '')] - for section, references in idx.iteritems(): + out += ['.. 
index:: %s' % idx.get('default','')] + for section, references in idx.items(): if section == 'default': continue out += [' :%s: %s' % (section, ', '.join(references))] @@ -387,11 +391,12 @@ def __str__(self, func_role=''): out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Raises'): + for param_list in ('Parameters', 'Returns', 'Other Parameters', + 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_section('Warnings') out += self._str_see_also(func_role) - for s in ('Notes', 'References', 'Examples'): + for s in ('Notes','References','Examples'): out += self._str_section(s) for param_list in ('Attributes', 'Methods'): out += self._str_param_list(param_list) @@ -399,27 +404,25 @@ def __str__(self, func_role=''): return '\n'.join(out) -def indent(str, indent=4): - indent_str = ' ' * indent +def indent(str,indent=4): + indent_str = ' '*indent if str is None: return indent_str lines = str.split('\n') return '\n'.join(indent_str + l for l in lines) - def dedent_lines(lines): """Deindent a list of lines maximally""" return textwrap.dedent("\n".join(lines)).split("\n") - def header(text, style='-'): - return text + '\n' + style * len(text) + '\n' + return text + '\n' + style*len(text) + '\n' class FunctionDoc(NumpyDocString): def __init__(self, func, role='func', doc=None, config={}): self._f = func - self._role = role # e.g. "func" or "meth" + self._role = role # e.g. "func" or "meth" if doc is None: if func is None: @@ -431,11 +434,14 @@ def __init__(self, func, role='func', doc=None, config={}): func, func_name = self.get_func() try: # try to read signature - argspec = inspect.getargspec(func) + if sys.version_info[0] >= 3: + argspec = inspect.getfullargspec(func) + else: + argspec = inspect.getargspec(func) argspec = inspect.formatargspec(*argspec) - argspec = argspec.replace('*', '\*') + argspec = argspec.replace('*','\*') signature = '%s%s' % (func_name, argspec) - except TypeError, e: + except TypeError as e: signature = '%s()' % func_name self['Signature'] = signature @@ -457,9 +463,9 @@ def __str__(self): 'meth': 'method'} if self._role: - if not roles.has_key(self._role): - print "Warning: invalid role %s" % self._role - out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''), + if self._role not in roles: + print("Warning: invalid role %s" % self._role) + out += '.. 
%s:: %s\n \n\n' % (roles.get(self._role,''), func_name) out += super(FunctionDoc, self).__str__(func_role=self._role) @@ -467,8 +473,11 @@ def __str__(self): class ClassDoc(NumpyDocString): + + extra_public_methods = ['__call__'] + def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, - config=None): + config={}): if not inspect.isclass(cls) and cls is not None: raise ValueError("Expected a class or None, but got %r" % cls) self._cls = cls @@ -484,24 +493,39 @@ def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, NumpyDocString.__init__(self, doc) - if config is not None and config.get('show_class_members', True): - if not self['Methods']: - self['Methods'] = [(name, '', '') - for name in sorted(self.methods)] - if not self['Attributes']: - self['Attributes'] = [(name, '', '') - for name in sorted(self.properties)] + if config.get('show_class_members', True): + def splitlines_x(s): + if not s: + return [] + else: + return s.splitlines() + + for field, items in [('Methods', self.methods), + ('Attributes', self.properties)]: + if not self[field]: + doc_list = [] + for name in sorted(items): + try: + doc_item = pydoc.getdoc(getattr(self._cls, name)) + doc_list.append((name, '', splitlines_x(doc_item))) + except AttributeError: + pass # method doesn't exist + self[field] = doc_list @property def methods(self): if self._cls is None: return [] - return [name for name, func in inspect.getmembers(self._cls) - if not name.startswith('_') and callable(func)] + return [name for name,func in inspect.getmembers(self._cls) + if ((not name.startswith('_') + or name in self.extra_public_methods) + and isinstance(func, collections.Callable))] @property def properties(self): if self._cls is None: return [] - return [name for name, func in inspect.getmembers(self._cls) - if not name.startswith('_') and func is None] + return [name for name,func in inspect.getmembers(self._cls) + if not name.startswith('_') and + (func is None or isinstance(func, property) or + inspect.isgetsetdescriptor(func))] diff --git a/doc/sphinxext/numpy_ext/docscrape_sphinx.py b/doc/sphinxext/numpydoc/docscrape_sphinx.py similarity index 65% rename from doc/sphinxext/numpy_ext/docscrape_sphinx.py rename to doc/sphinxext/numpydoc/docscrape_sphinx.py index bcf7e70731..cdc2a37d17 100644 --- a/doc/sphinxext/numpy_ext/docscrape_sphinx.py +++ b/doc/sphinxext/numpydoc/docscrape_sphinx.py @@ -1,18 +1,24 @@ -import re -import inspect -import textwrap -import pydoc +from __future__ import division, absolute_import, print_function + +import sys, re, inspect, textwrap, pydoc import sphinx -from docscrape import NumpyDocString -from docscrape import FunctionDoc -from docscrape import ClassDoc +import collections +from .docscrape import NumpyDocString, FunctionDoc, ClassDoc + +if sys.version_info[0] >= 3: + sixu = lambda s: s +else: + sixu = lambda s: unicode(s, 'unicode_escape') class SphinxDocString(NumpyDocString): - def __init__(self, docstring, config=None): - config = {} if config is None else config - self.use_plots = config.get('use_plots', False) + def __init__(self, docstring, config={}): NumpyDocString.__init__(self, docstring, config=config) + self.load_config(config) + + def load_config(self, config): + self.use_plots = config.get('use_plots', False) + self.class_members_toctree = config.get('class_members_toctree', True) # string conversion routines def _str_header(self, name, symbol='`'): @@ -24,7 +30,7 @@ def _str_field_list(self, name): def _str_indent(self, doc, indent=4): out = [] for line in 
doc: - out += [' ' * indent + line] + out += [' '*indent + line] return out def _str_signature(self): @@ -40,16 +46,37 @@ def _str_summary(self): def _str_extended_summary(self): return self['Extended Summary'] + [''] + def _str_returns(self): + out = [] + if self['Returns']: + out += self._str_field_list('Returns') + out += [''] + for param, param_type, desc in self['Returns']: + if param_type: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + else: + out += self._str_indent([param.strip()]) + if desc: + out += [''] + out += self._str_indent(desc, 8) + out += [''] + return out + def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: - out += self._str_indent(['**%s** : %s' % (param.strip(), - param_type)]) - out += [''] - out += self._str_indent(desc, 8) + if param_type: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + else: + out += self._str_indent(['**%s**' % param.strip()]) + if desc: + out += [''] + out += self._str_indent(desc, 8) out += [''] return out @@ -79,28 +106,36 @@ def _str_member_list(self, name): others = [] for param, param_type, desc in self[name]: param = param.strip() - if not self._obj or hasattr(self._obj, param): + + # Check if the referenced member can have a docstring or not + param_obj = getattr(self._obj, param, None) + if not (callable(param_obj) + or isinstance(param_obj, property) + or inspect.isgetsetdescriptor(param_obj)): + param_obj = None + + if param_obj and (pydoc.getdoc(param_obj) or not desc): + # Referenced object has a docstring autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: - # GAEL: Toctree commented out below because it creates - # hundreds of sphinx warnings - # out += ['.. autosummary::', ' :toctree:', ''] - out += ['.. autosummary::', ''] - out += autosum + out += ['.. autosummary::'] + if self.class_members_toctree: + out += [' :toctree:'] + out += [''] + autosum if others: - maxlen_0 = max([len(x[0]) for x in others]) - maxlen_1 = max([len(x[1]) for x in others]) - hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10 - fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) - n_indent = maxlen_0 + maxlen_1 + 4 - out += [hdr] + maxlen_0 = max(3, max([len(x[0]) for x in others])) + hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10 + fmt = sixu('%%%ds %%s ') % (maxlen_0,) + out += ['', hdr] for param, param_type, desc in others: - out += [fmt % (param.strip(), param_type)] - out += self._str_indent(desc, n_indent) + desc = sixu(" ").join(x.strip() for x in desc).strip() + if param_type: + desc = "(%s) %s" % (param_type, desc) + out += [fmt % (param.strip(), desc)] out += [hdr] out += [''] return out @@ -136,8 +171,8 @@ def _str_index(self): if len(idx) == 0: return out - out += ['.. index:: %s' % idx.get('default', '')] - for section, references in idx.iteritems(): + out += ['.. index:: %s' % idx.get('default','')] + for section, references in idx.items(): if section == 'default': continue elif section == 'refguide': @@ -157,9 +192,9 @@ def _str_references(self): # Latex collects all references to a separate bibliography, # so we need to insert links to it if sphinx.__version__ >= "0.6": - out += ['.. only:: latex', ''] + out += ['.. only:: latex',''] else: - out += ['.. latexonly::', ''] + out += ['.. latexonly::',''] items = [] for line in self['References']: m = re.match(r'.. 
\[([a-z0-9._-]+)\]', line, re.I) @@ -188,7 +223,9 @@ def __str__(self, indent=0, func_role="obj"): out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Raises'): + out += self._str_param_list('Parameters') + out += self._str_returns() + for param_list in ('Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) @@ -197,35 +234,32 @@ def __str__(self, indent=0, func_role="obj"): out += self._str_examples() for param_list in ('Attributes', 'Methods'): out += self._str_member_list(param_list) - out = self._str_indent(out, indent) + out = self._str_indent(out,indent) return '\n'.join(out) - class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): - self.use_plots = config.get('use_plots', False) + self.load_config(config) FunctionDoc.__init__(self, obj, doc=doc, config=config) - class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): - self.use_plots = config.get('use_plots', False) + self.load_config(config) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) - class SphinxObjDoc(SphinxDocString): - def __init__(self, obj, doc=None, config=None): + def __init__(self, obj, doc=None, config={}): self._f = obj + self.load_config(config) SphinxDocString.__init__(self, doc, config=config) - def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' - elif callable(obj): + elif isinstance(obj, collections.Callable): what = 'function' else: what = 'object' diff --git a/doc/sphinxext/numpydoc/linkcode.py b/doc/sphinxext/numpydoc/linkcode.py new file mode 100644 index 0000000000..1ad3ab82cb --- /dev/null +++ b/doc/sphinxext/numpydoc/linkcode.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +""" + linkcode + ~~~~~~~~ + + Add external links to module code in Python object descriptions. + + :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import collections + +warnings.warn("This extension has been accepted to Sphinx upstream. 
" + "Use the version from there (Sphinx >= 1.2) " + "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode", + FutureWarning, stacklevel=1) + + +from docutils import nodes + +from sphinx import addnodes +from sphinx.locale import _ +from sphinx.errors import SphinxError + +class LinkcodeError(SphinxError): + category = "linkcode error" + +def doctree_read(app, doctree): + env = app.builder.env + + resolve_target = getattr(env.config, 'linkcode_resolve', None) + if not isinstance(env.config.linkcode_resolve, collections.Callable): + raise LinkcodeError( + "Function `linkcode_resolve` is not given in conf.py") + + domain_keys = dict( + py=['module', 'fullname'], + c=['names'], + cpp=['names'], + js=['object', 'fullname'], + ) + + for objnode in doctree.traverse(addnodes.desc): + domain = objnode.get('domain') + uris = set() + for signode in objnode: + if not isinstance(signode, addnodes.desc_signature): + continue + + # Convert signode to a specified format + info = {} + for key in domain_keys.get(domain, []): + value = signode.get(key) + if not value: + value = '' + info[key] = value + if not info: + continue + + # Call user code to resolve the link + uri = resolve_target(domain, info) + if not uri: + # no source + continue + + if uri in uris or not uri: + # only one link per name, please + continue + uris.add(uri) + + onlynode = addnodes.only(expr='html') + onlynode += nodes.reference('', '', internal=False, refuri=uri) + onlynode[0] += nodes.inline('', _('[source]'), + classes=['viewcode-link']) + signode += onlynode + +def setup(app): + app.connect('doctree-read', doctree_read) + app.add_config_value('linkcode_resolve', None, '') diff --git a/doc/sphinxext/numpy_ext/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc.py similarity index 63% rename from doc/sphinxext/numpy_ext/numpydoc.py rename to doc/sphinxext/numpydoc/numpydoc.py index 62adb56ae7..2bc2d1e91e 100644 --- a/doc/sphinxext/numpy_ext/numpydoc.py +++ b/doc/sphinxext/numpydoc/numpydoc.py @@ -10,52 +10,66 @@ - Convert Parameters etc. sections to field lists. - Convert See Also section to a See also entry. - Renumber references. -- Extract the signature from the docstring, if it can't be determined - otherwise. +- Extract the signature from the docstring, if it can't be determined otherwise. -.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard +.. 
[1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ +from __future__ import division, absolute_import, print_function -import os -import re -import pydoc -from docscrape_sphinx import get_doc_object -from docscrape_sphinx import SphinxDocString -from sphinx.util.compat import Directive +import os, sys, re, pydoc +import sphinx import inspect +import collections + +if sphinx.__version__ < '1.0.1': + raise RuntimeError("Sphinx 1.0.1 or newer is required") + +from .docscrape_sphinx import get_doc_object, SphinxDocString +from sphinx.util.compat import Directive + +if sys.version_info[0] >= 3: + sixu = lambda s: s +else: + sixu = lambda s: unicode(s, 'unicode_escape') def mangle_docstrings(app, what, name, obj, options, lines, reference_offset=[0]): cfg = dict(use_plots=app.config.numpydoc_use_plots, - show_class_members=app.config.numpydoc_show_class_members) + show_class_members=app.config.numpydoc_show_class_members, + class_members_toctree=app.config.numpydoc_class_members_toctree, + ) if what == 'module': # Strip top title - title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', - re.I | re.S) - lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") + title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'), + re.I|re.S) + lines[:] = title_re.sub(sixu(''), sixu("\n").join(lines)).split(sixu("\n")) else: - doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg) - lines[:] = unicode(doc).split(u"\n") + doc = get_doc_object(obj, what, sixu("\n").join(lines), config=cfg) + if sys.version_info[0] >= 3: + doc = str(doc) + else: + doc = unicode(doc) + lines[:] = doc.split(sixu("\n")) if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ obj.__name__: if hasattr(obj, '__module__'): - v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) + v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) - lines += [u'', u'.. htmlonly::', ''] - lines += [u' %s' % x for x in + lines += [sixu(''), sixu('.. htmlonly::'), sixu('')] + lines += [sixu(' %s') % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() - m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I) + m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I) if m: references.append(m.group(1)) @@ -64,38 +78,36 @@ def mangle_docstrings(app, what, name, obj, options, lines, if references: for i, line in enumerate(lines): for r in references: - if re.match(ur'^\d+$', r): - new_r = u"R%d" % (reference_offset[0] + int(r)) + if re.match(sixu('^\\d+$'), r): + new_r = sixu("R%d") % (reference_offset[0] + int(r)) else: - new_r = u"%s%d" % (r, reference_offset[0]) - lines[i] = lines[i].replace(u'[%s]_' % r, - u'[%s]_' % new_r) - lines[i] = lines[i].replace(u'.. [%s]' % r, - u'.. [%s]' % new_r) + new_r = sixu("%s%d") % (r, reference_offset[0]) + lines[i] = lines[i].replace(sixu('[%s]_') % r, + sixu('[%s]_') % new_r) + lines[i] = lines[i].replace(sixu('.. [%s]') % r, + sixu('.. 
[%s]') % new_r) reference_offset[0] += len(references) - -def mangle_signature(app, what, name, obj, - options, sig, retann): +def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` if (inspect.isclass(obj) and (not hasattr(obj, '__init__') or 'initializes x; see ' in pydoc.getdoc(obj.__init__))): return '', '' - if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): - return - if not hasattr(obj, '__doc__'): - return + if not (isinstance(obj, collections.Callable) or hasattr(obj, '__argspec_is_invalid_')): return + if not hasattr(obj, '__doc__'): return doc = SphinxDocString(pydoc.getdoc(obj)) if doc['Signature']: - sig = re.sub(u"^[^(]*", u"", doc['Signature']) - return sig, u'' - + sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature']) + return sig, sixu('') def setup(app, get_doc_object_=get_doc_object): + if not hasattr(app, 'add_config_value'): + return # probably called by nose, better bail out + global get_doc_object get_doc_object = get_doc_object_ @@ -104,20 +116,20 @@ def setup(app, get_doc_object_=get_doc_object): app.add_config_value('numpydoc_edit_link', None, False) app.add_config_value('numpydoc_use_plots', None, False) app.add_config_value('numpydoc_show_class_members', True, True) + app.add_config_value('numpydoc_class_members_toctree', True, True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) -#----------------------------------------------------------------------------- +#------------------------------------------------------------------------------ # Docstring-mangling domains -#----------------------------------------------------------------------------- +#------------------------------------------------------------------------------ from docutils.statemachine import ViewList from sphinx.domains.c import CDomain from sphinx.domains.python import PythonDomain - class ManglingDomainBase(object): directive_mangling_map = {} @@ -126,11 +138,10 @@ def __init__(self, *a, **kw): self.wrap_mangling_directives() def wrap_mangling_directives(self): - for name, objtype in self.directive_mangling_map.items(): + for name, objtype in list(self.directive_mangling_map.items()): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype) - class NumpyPythonDomain(ManglingDomainBase, PythonDomain): name = 'np' directive_mangling_map = { @@ -142,7 +153,7 @@ class NumpyPythonDomain(ManglingDomainBase, PythonDomain): 'staticmethod': 'function', 'attribute': 'attribute', } - + indices = [] class NumpyCDomain(ManglingDomainBase, CDomain): name = 'np-c' @@ -154,7 +165,6 @@ class NumpyCDomain(ManglingDomainBase, CDomain): 'var': 'object', } - def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): diff --git a/doc/sphinxext/numpydoc/phantom_import.py b/doc/sphinxext/numpydoc/phantom_import.py new file mode 100644 index 0000000000..9a60b4a35b --- /dev/null +++ b/doc/sphinxext/numpydoc/phantom_import.py @@ -0,0 +1,167 @@ +""" +============== +phantom_import +============== + +Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar +extensions to use docstrings loaded from an XML file. + +This extension loads an XML file in the Pydocweb format [1] and +creates a dummy module that contains the specified docstrings. This +can be used to get the current docstrings from a Pydocweb instance +without needing to rebuild the documented module. + +.. 
[1] http://code.google.com/p/pydocweb + +""" +from __future__ import division, absolute_import, print_function + +import imp, sys, compiler, types, os, inspect, re + +def setup(app): + app.connect('builder-inited', initialize) + app.add_config_value('phantom_import_file', None, True) + +def initialize(app): + fn = app.config.phantom_import_file + if (fn and os.path.isfile(fn)): + print("[numpydoc] Phantom importing modules from", fn, "...") + import_phantom_module(fn) + +#------------------------------------------------------------------------------ +# Creating 'phantom' modules from an XML description +#------------------------------------------------------------------------------ +def import_phantom_module(xml_file): + """ + Insert a fake Python module to sys.modules, based on a XML file. + + The XML file is expected to conform to Pydocweb DTD. The fake + module will contain dummy objects, which guarantee the following: + + - Docstrings are correct. + - Class inheritance relationships are correct (if present in XML). + - Function argspec is *NOT* correct (even if present in XML). + Instead, the function signature is prepended to the function docstring. + - Class attributes are *NOT* correct; instead, they are dummy objects. + + Parameters + ---------- + xml_file : str + Name of an XML file to read + + """ + import lxml.etree as etree + + object_cache = {} + + tree = etree.parse(xml_file) + root = tree.getroot() + + # Sort items so that + # - Base classes come before classes inherited from them + # - Modules come before their contents + all_nodes = dict([(n.attrib['id'], n) for n in root]) + + def _get_bases(node, recurse=False): + bases = [x.attrib['ref'] for x in node.findall('base')] + if recurse: + j = 0 + while True: + try: + b = bases[j] + except IndexError: break + if b in all_nodes: + bases.extend(_get_bases(all_nodes[b])) + j += 1 + return bases + + type_index = ['module', 'class', 'callable', 'object'] + + def base_cmp(a, b): + x = cmp(type_index.index(a.tag), type_index.index(b.tag)) + if x != 0: return x + + if a.tag == 'class' and b.tag == 'class': + a_bases = _get_bases(a, recurse=True) + b_bases = _get_bases(b, recurse=True) + x = cmp(len(a_bases), len(b_bases)) + if x != 0: return x + if a.attrib['id'] in b_bases: return -1 + if b.attrib['id'] in a_bases: return 1 + + return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) + + nodes = root.getchildren() + nodes.sort(base_cmp) + + # Create phantom items + for node in nodes: + name = node.attrib['id'] + doc = (node.text or '').decode('string-escape') + "\n" + if doc == "\n": doc = "" + + # create parent, if missing + parent = name + while True: + parent = '.'.join(parent.split('.')[:-1]) + if not parent: break + if parent in object_cache: break + obj = imp.new_module(parent) + object_cache[parent] = obj + sys.modules[parent] = obj + + # create object + if node.tag == 'module': + obj = imp.new_module(name) + obj.__doc__ = doc + sys.modules[name] = obj + elif node.tag == 'class': + bases = [object_cache[b] for b in _get_bases(node) + if b in object_cache] + bases.append(object) + init = lambda self: None + init.__doc__ = doc + obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) + obj.__name__ = name.split('.')[-1] + elif node.tag == 'callable': + funcname = node.attrib['id'].split('.')[-1] + argspec = node.attrib.get('argspec') + if argspec: + argspec = re.sub('^[^(]*', '', argspec) + doc = "%s%s\n\n%s" % (funcname, argspec, doc) + obj = lambda: 0 + obj.__argspec_is_invalid_ = True + if 
sys.version_info[0] >= 3: + obj.__name__ = funcname + else: + obj.func_name = funcname + obj.__name__ = name + obj.__doc__ = doc + if inspect.isclass(object_cache[parent]): + obj.__objclass__ = object_cache[parent] + else: + class Dummy(object): pass + obj = Dummy() + obj.__name__ = name + obj.__doc__ = doc + if inspect.isclass(object_cache[parent]): + obj.__get__ = lambda: None + object_cache[name] = obj + + if parent: + if inspect.ismodule(object_cache[parent]): + obj.__module__ = parent + setattr(object_cache[parent], name.split('.')[-1], obj) + + # Populate items + for node in root: + obj = object_cache.get(node.attrib['id']) + if obj is None: continue + for ref in node.findall('ref'): + if node.tag == 'class': + if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) + else: + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) diff --git a/doc/sphinxext/numpydoc/plot_directive.py b/doc/sphinxext/numpydoc/plot_directive.py new file mode 100644 index 0000000000..2014f85707 --- /dev/null +++ b/doc/sphinxext/numpydoc/plot_directive.py @@ -0,0 +1,642 @@ +""" +A special directive for generating a matplotlib plot. + +.. warning:: + + This is a hacked version of plot_directive.py from Matplotlib. + It's very much subject to change! + + +Usage +----- + +Can be used like this:: + + .. plot:: examples/example.py + + .. plot:: + + import matplotlib.pyplot as plt + plt.plot([1,2,3], [4,5,6]) + + .. plot:: + + A plotting example: + + >>> import matplotlib.pyplot as plt + >>> plt.plot([1,2,3], [4,5,6]) + +The content is interpreted as doctest formatted if it has a line starting +with ``>>>``. + +The ``plot`` directive supports the options + + format : {'python', 'doctest'} + Specify the format of the input + + include-source : bool + Whether to display the source code. Default can be changed in conf.py + +and the ``image`` directive options ``alt``, ``height``, ``width``, +``scale``, ``align``, ``class``. + +Configuration options +--------------------- + +The plot directive has the following configuration options: + + plot_include_source + Default value for the include-source option + + plot_pre_code + Code that should be executed before each plot. + + plot_basedir + Base directory, to which plot:: file names are relative to. + (If None or empty, file names are relative to the directory where + the file containing the directive is.) + + plot_formats + File formats to generate. List of tuples or strings:: + + [(suffix, dpi), suffix, ...] + + that determine the file format and the DPI. For entries whose + DPI was omitted, sensible defaults are chosen. + + plot_html_show_formats + Whether to show links to the files in HTML. + +TODO +---- + +* Refactor Latex output; now it's plain images, but it would be nice + to make them appear side-by-side, or in floats.
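A sketch of how the ``plot_formats`` entries above get normalized (the
``normalize_formats`` name is illustrative and not part of this module;
the defaults mirror the ``default_dpi`` table used by ``makefig()``
further down)::

    def normalize_formats(plot_formats):
        # Expand every entry to a (suffix, dpi) pair, falling back to
        # per-format default DPI values when only a suffix is given.
        default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
        formats = []
        for fmt in plot_formats:
            if isinstance(fmt, str):
                formats.append((fmt, default_dpi.get(fmt, 80)))
            elif isinstance(fmt, (tuple, list)) and len(fmt) == 2:
                formats.append((str(fmt[0]), int(fmt[1])))
            else:
                raise ValueError('invalid entry %r in plot_formats' % (fmt,))
        return formats

    # normalize_formats(['png', ('pdf', 120)]) -> [('png', 80), ('pdf', 120)]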
+ +""" +from __future__ import division, absolute_import, print_function + +import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback +import sphinx + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from io import StringIO + +import warnings +warnings.warn("A plot_directive module is also available under " + "matplotlib.sphinxext; expect this numpydoc.plot_directive " + "module to be deprecated after relevant features have been " + "integrated there.", + FutureWarning, stacklevel=2) + + +#------------------------------------------------------------------------------ +# Registration hook +#------------------------------------------------------------------------------ + +def setup(app): + setup.app = app + setup.config = app.config + setup.confdir = app.confdir + + app.add_config_value('plot_pre_code', '', True) + app.add_config_value('plot_include_source', False, True) + app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) + app.add_config_value('plot_basedir', None, True) + app.add_config_value('plot_html_show_formats', True, True) + + app.add_directive('plot', plot_directive, True, (0, 1, False), + **plot_directive_options) + +#------------------------------------------------------------------------------ +# plot:: directive +#------------------------------------------------------------------------------ +from docutils.parsers.rst import directives +from docutils import nodes + +def plot_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + return run(arguments, content, options, state_machine, state, lineno) +plot_directive.__doc__ = __doc__ + +def _option_boolean(arg): + if not arg or not arg.strip(): + # no argument given, assume used as a flag + return True + elif arg.strip().lower() in ('no', '0', 'false'): + return False + elif arg.strip().lower() in ('yes', '1', 'true'): + return True + else: + raise ValueError('"%s" unknown boolean' % arg) + +def _option_format(arg): + return directives.choice(arg, ('python', 'lisp')) + +def _option_align(arg): + return directives.choice(arg, ("top", "middle", "bottom", "left", "center", + "right")) + +plot_directive_options = {'alt': directives.unchanged, + 'height': directives.length_or_unitless, + 'width': directives.length_or_percentage_or_unitless, + 'scale': directives.nonnegative_int, + 'align': _option_align, + 'class': directives.class_option, + 'include-source': _option_boolean, + 'format': _option_format, + } + +#------------------------------------------------------------------------------ +# Generating output +#------------------------------------------------------------------------------ + +from docutils import nodes, utils + +try: + # Sphinx depends on either Jinja or Jinja2 + import jinja2 + def format_template(template, **kw): + return jinja2.Template(template).render(**kw) +except ImportError: + import jinja + def format_template(template, **kw): + return jinja.from_string(template, **kw) + +TEMPLATE = """ +{{ source_code }} + +{{ only_html }} + + {% if source_link or (html_show_formats and not multi_image) %} + ( + {%- if source_link -%} + `Source code <{{ source_link }}>`__ + {%- endif -%} + {%- if html_show_formats and not multi_image -%} + {%- for img in images -%} + {%- for fmt in img.formats -%} + {%- if source_link or not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + {%- endfor -%} + {%- endif -%} + ) + {% endif %} + + {% for img in images %} + .. 
figure:: {{ build_dir }}/{{ img.basename }}.png + {%- for option in options %} + {{ option }} + {% endfor %} + + {% if html_show_formats and multi_image -%} + ( + {%- for fmt in img.formats -%} + {%- if not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + ) + {%- endif -%} + {% endfor %} + +{{ only_latex }} + + {% for img in images %} + .. image:: {{ build_dir }}/{{ img.basename }}.pdf + {% endfor %} + +""" + +class ImageFile(object): + def __init__(self, basename, dirname): + self.basename = basename + self.dirname = dirname + self.formats = [] + + def filename(self, format): + return os.path.join(self.dirname, "%s.%s" % (self.basename, format)) + + def filenames(self): + return [self.filename(fmt) for fmt in self.formats] + +def run(arguments, content, options, state_machine, state, lineno): + if arguments and content: + raise RuntimeError("plot:: directive can't have both args and content") + + document = state_machine.document + config = document.settings.env.config + + options.setdefault('include-source', config.plot_include_source) + + # determine input + rst_file = document.attributes['source'] + rst_dir = os.path.dirname(rst_file) + + if arguments: + if not config.plot_basedir: + source_file_name = os.path.join(rst_dir, + directives.uri(arguments[0])) + else: + source_file_name = os.path.join(setup.confdir, config.plot_basedir, + directives.uri(arguments[0])) + code = open(source_file_name, 'r').read() + output_base = os.path.basename(source_file_name) + else: + source_file_name = rst_file + code = textwrap.dedent("\n".join(map(str, content))) + counter = document.attributes.get('_plot_counter', 0) + 1 + document.attributes['_plot_counter'] = counter + base, ext = os.path.splitext(os.path.basename(source_file_name)) + output_base = '%s-%d.py' % (base, counter) + + base, source_ext = os.path.splitext(output_base) + if source_ext in ('.py', '.rst', '.txt'): + output_base = base + else: + source_ext = '' + + # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames + output_base = output_base.replace('.', '-') + + # is it in doctest format? 
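The check relies on ``contains_doctest()``, defined further down in this
module: it first tries to compile the text as plain Python and only then
scans for ``>>>`` prompts. A self-contained sketch of the same test (the
``looks_like_doctest`` name is illustrative)::

    import re

    def looks_like_doctest(text):
        # Text that compiles as plain Python is never doctest input.
        try:
            compile(text, '<string>', 'exec')
            return False
        except SyntaxError:
            pass
        # Otherwise a leading '>>>' prompt on any line marks a doctest.
        return bool(re.search(r'^\s*>>>', text, re.M))

    # looks_like_doctest("plt.plot([1, 2])")      -> False
    # looks_like_doctest(">>> plt.plot([1, 2])")  -> True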
+ is_doctest = contains_doctest(code) + if 'format' in options: + if options['format'] == 'python': + is_doctest = False + else: + is_doctest = True + + # determine output directory name fragment + source_rel_name = relpath(source_file_name, setup.confdir) + source_rel_dir = os.path.dirname(source_rel_name) + while source_rel_dir.startswith(os.path.sep): + source_rel_dir = source_rel_dir[1:] + + # build_dir: where to place output files (temporarily) + build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), + 'plot_directive', + source_rel_dir) + if not os.path.exists(build_dir): + os.makedirs(build_dir) + + # output_dir: final location in the builder's directory + dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, + source_rel_dir)) + + # how to link to files from the RST file + dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), + source_rel_dir).replace(os.path.sep, '/') + build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') + source_link = dest_dir_link + '/' + output_base + source_ext + + # make figures + try: + results = makefig(code, source_file_name, build_dir, output_base, + config) + errors = [] + except PlotError as err: + reporter = state.memo.reporter + sm = reporter.system_message( + 2, "Exception occurred in plotting %s: %s" % (output_base, err), + line=lineno) + results = [(code, [])] + errors = [sm] + + # generate output restructuredtext + total_lines = [] + for j, (code_piece, images) in enumerate(results): + if options['include-source']: + if is_doctest: + lines = [''] + lines += [row.rstrip() for row in code_piece.split('\n')] + else: + lines = ['.. code-block:: python', ''] + lines += [' %s' % row.rstrip() + for row in code_piece.split('\n')] + source_code = "\n".join(lines) + else: + source_code = "" + + opts = [':%s: %s' % (key, val) for key, val in list(options.items()) + if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] + + only_html = ".. only:: html" + only_latex = ".. 
only:: latex" + + if j == 0: + src_link = source_link + else: + src_link = None + + result = format_template( + TEMPLATE, + dest_dir=dest_dir_link, + build_dir=build_dir_link, + source_link=src_link, + multi_image=len(images) > 1, + only_html=only_html, + only_latex=only_latex, + options=opts, + images=images, + source_code=source_code, + html_show_formats=config.plot_html_show_formats) + + total_lines.extend(result.split("\n")) + total_lines.extend("\n") + + if total_lines: + state_machine.insert_input(total_lines, source=source_file_name) + + # copy image files to builder's output directory + if not os.path.exists(dest_dir): + os.makedirs(dest_dir) + + for code_piece, images in results: + for img in images: + for fn in img.filenames(): + shutil.copyfile(fn, os.path.join(dest_dir, + os.path.basename(fn))) + + # copy script (if necessary) + if source_file_name == rst_file: + target_name = os.path.join(dest_dir, output_base + source_ext) + f = open(target_name, 'w') + f.write(unescape_doctest(code)) + f.close() + + return errors + + +#------------------------------------------------------------------------------ +# Run code and capture figures +#------------------------------------------------------------------------------ + +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import matplotlib.image as image +from matplotlib import _pylab_helpers + +import exceptions + +def contains_doctest(text): + try: + # check if it's valid Python as-is + compile(text, '', 'exec') + return False + except SyntaxError: + pass + r = re.compile(r'^\s*>>>', re.M) + m = r.search(text) + return bool(m) + +def unescape_doctest(text): + """ + Extract code from a piece of text, which contains either Python code + or doctests. + + """ + if not contains_doctest(text): + return text + + code = "" + for line in text.split("\n"): + m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line) + if m: + code += m.group(2) + "\n" + elif line.strip(): + code += "# " + line.strip() + "\n" + else: + code += "\n" + return code + +def split_code_at_show(text): + """ + Split code at plt.show() + + """ + + parts = [] + is_doctest = contains_doctest(text) + + part = [] + for line in text.split("\n"): + if (not is_doctest and line.strip() == 'plt.show()') or \ + (is_doctest and line.strip() == '>>> plt.show()'): + part.append(line) + parts.append("\n".join(part)) + part = [] + else: + part.append(line) + if "\n".join(part).strip(): + parts.append("\n".join(part)) + return parts + +class PlotError(RuntimeError): + pass + +def run_code(code, code_path, ns=None): + # Change the working directory to the directory of the example, so + # it can get at its data files, if any. 
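Before executing anything, ``run_code()`` also normalizes doctest-formatted
snippets through ``unescape_doctest()`` above, which keeps the payload of
prompt lines and comments out narrative lines. An illustrative round trip,
assuming that helper is in scope::

    text = "A plotting example:\n>>> x = 1\n>>> x + 1\n"
    print(unescape_doctest(text))
    # prints:
    #   # A plotting example:
    #   x = 1
    #   x + 1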
+ pwd = os.getcwd() + old_sys_path = list(sys.path) + if code_path is not None: + dirname = os.path.abspath(os.path.dirname(code_path)) + os.chdir(dirname) + sys.path.insert(0, dirname) + + # Redirect stdout + stdout = sys.stdout + sys.stdout = StringIO() + + # Reset sys.argv + old_sys_argv = sys.argv + sys.argv = [code_path] + + try: + try: + code = unescape_doctest(code) + if ns is None: + ns = {} + if not ns: + exec(setup.config.plot_pre_code, ns) + exec(code, ns) + except (Exception, SystemExit) as err: + raise PlotError(traceback.format_exc()) + finally: + os.chdir(pwd) + sys.argv = old_sys_argv + sys.path[:] = old_sys_path + sys.stdout = stdout + return ns + + +#------------------------------------------------------------------------------ +# Generating figures +#------------------------------------------------------------------------------ + +def out_of_date(original, derived): + """ + Returns True if derivative is out-of-date wrt original, + both of which are full file paths. + """ + return (not os.path.exists(derived) + or os.stat(derived).st_mtime < os.stat(original).st_mtime) + + +def makefig(code, code_path, output_dir, output_base, config): + """ + Run a pyplot script *code* and save the images under *output_dir* + with file names derived from *output_base* + + """ + + # -- Parse format list + default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50} + formats = [] + for fmt in config.plot_formats: + if isinstance(fmt, str): + formats.append((fmt, default_dpi.get(fmt, 80))) + elif type(fmt) in (tuple, list) and len(fmt)==2: + formats.append((str(fmt[0]), int(fmt[1]))) + else: + raise PlotError('invalid image format "%r" in plot_formats' % fmt) + + # -- Try to determine if all images already exist + + code_pieces = split_code_at_show(code) + + # Look for single-figure output files first + all_exists = True + img = ImageFile(output_base, output_dir) + for format, dpi in formats: + if out_of_date(code_path, img.filename(format)): + all_exists = False + break + img.formats.append(format) + + if all_exists: + return [(code, [img])] + + # Then look for multi-figure output files + results = [] + all_exists = True + for i, code_piece in enumerate(code_pieces): + images = [] + for j in range(1000): + img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) + for format, dpi in formats: + if out_of_date(code_path, img.filename(format)): + all_exists = False + break + img.formats.append(format) + + # assume that if we have one, we have them all + if not all_exists: + all_exists = (j > 0) + break + images.append(img) + if not all_exists: + break + results.append((code_piece, images)) + + if all_exists: + return results + + # -- We didn't find the files, so build them + + results = [] + ns = {} + + for i, code_piece in enumerate(code_pieces): + # Clear between runs + plt.close('all') + + # Run code + run_code(code_piece, code_path, ns) + + # Collect images + images = [] + fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() + for j, figman in enumerate(fig_managers): + if len(fig_managers) == 1 and len(code_pieces) == 1: + img = ImageFile(output_base, output_dir) + else: + img = ImageFile("%s_%02d_%02d" % (output_base, i, j), + output_dir) + images.append(img) + for format, dpi in formats: + try: + figman.canvas.figure.savefig(img.filename(format), dpi=dpi) + except exceptions.BaseException as err: + raise PlotError(traceback.format_exc()) + img.formats.append(format) + + # Results + results.append((code_piece, images)) + + return results + + 
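A note on the flow above: ``makefig()`` first asks ``out_of_date()`` whether
every derived image is newer than the source, and skips the rebuild entirely
when so; otherwise the code is split at each ``plt.show()`` and executed
piece by piece. A small illustration of the splitting step, using
``split_code_at_show()`` defined earlier in this module::

    code = ("import matplotlib.pyplot as plt\n"
            "plt.plot([1, 2, 3])\n"
            "plt.show()\n"
            "plt.hist([1, 1, 2])\n")
    pieces = split_code_at_show(code)
    # len(pieces) == 2: everything up to and including plt.show(),
    # then the remainder.  When there is more than one piece or more
    # than one open figure, makefig() names the saved files
    # '%s_%02d_%02d.%s' % (output_base, piece, figure, suffix).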
+#------------------------------------------------------------------------------ +# Relative pathnames +#------------------------------------------------------------------------------ + +try: + from os.path import relpath +except ImportError: + # Copied from Python 2.7 + if 'posix' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir + + if not path: + raise ValueError("no path specified") + + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) + + # Work out how much of the filepath is shared by start and path. + i = len(commonprefix([start_list, path_list])) + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + elif 'nt' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir, splitunc + + if not path: + raise ValueError("no path specified") + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) + if start_list[0].lower() != path_list[0].lower(): + unc_path, rest = splitunc(path) + unc_start, rest = splitunc(start) + if bool(unc_path) ^ bool(unc_start): + raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" + % (path, start)) + else: + raise ValueError("path is on drive %s, start on drive %s" + % (path_list[0], start_list[0])) + # Work out how much of the filepath is shared by start and path. + for i in range(min(len(start_list), len(path_list))): + if start_list[i].lower() != path_list[i].lower(): + break + else: + i += 1 + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + else: + raise RuntimeError("Unsupported platform (no relpath available!)") diff --git a/doc/sphinxext/numpydoc/traitsdoc.py b/doc/sphinxext/numpydoc/traitsdoc.py new file mode 100644 index 0000000000..596c54eb38 --- /dev/null +++ b/doc/sphinxext/numpydoc/traitsdoc.py @@ -0,0 +1,142 @@ +""" +========= +traitsdoc +========= + +Sphinx extension that handles docstrings in the Numpy standard format, [1] +and support Traits [2]. + +This extension can be used as a replacement for ``numpydoc`` when support +for Traits is required. + +.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard +.. [2] http://code.enthought.com/projects/traits/ + +""" +from __future__ import division, absolute_import, print_function + +import inspect +import os +import pydoc +import collections + +from . import docscrape +from . import docscrape_sphinx +from .docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString + +from . import numpydoc + +from . import comment_eater + +class SphinxTraitsDoc(SphinxClassDoc): + def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): + if not inspect.isclass(cls): + raise ValueError("Initialise using a class. Got %r" % cls) + self._cls = cls + + if modulename and not modulename.endswith('.'): + modulename += '.' 
+ self._mod = modulename + self._name = cls.__name__ + self._func_doc = func_doc + + docstring = pydoc.getdoc(cls) + docstring = docstring.split('\n') + + # De-indent paragraph + try: + indent = min(len(s) - len(s.lstrip()) for s in docstring + if s.strip()) + except ValueError: + indent = 0 + + for n,line in enumerate(docstring): + docstring[n] = docstring[n][indent:] + + self._doc = docscrape.Reader(docstring) + self._parsed_data = { + 'Signature': '', + 'Summary': '', + 'Description': [], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Traits': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'References': '', + 'Example': '', + 'Examples': '', + 'index': {} + } + + self._parse() + + def _str_summary(self): + return self['Summary'] + [''] + + def _str_extended_summary(self): + return self['Description'] + self['Extended Summary'] + [''] + + def __str__(self, indent=0, func_role="func"): + out = [] + out += self._str_signature() + out += self._str_index() + [''] + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Traits', 'Methods', + 'Returns','Raises'): + out += self._str_param_list(param_list) + out += self._str_see_also("obj") + out += self._str_section('Notes') + out += self._str_references() + out += self._str_section('Example') + out += self._str_section('Examples') + out = self._str_indent(out,indent) + return '\n'.join(out) + +def looks_like_issubclass(obj, classname): + """ Return True if the object has a class or superclass with the given class + name. + + Ignores old-style classes. + """ + t = obj + if t.__name__ == classname: + return True + for klass in t.__mro__: + if klass.__name__ == classname: + return True + return False + +def get_doc_object(obj, what=None, config=None): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif isinstance(obj, collections.Callable): + what = 'function' + else: + what = 'object' + if what == 'class': + doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) + if looks_like_issubclass(obj, 'HasTraits'): + for name, trait, comment in comment_eater.get_class_traits(obj): + # Exclude private traits. 
+ if not name.startswith('_'): + doc['Traits'].append((name, trait, comment.splitlines())) + return doc + elif what in ('function', 'method'): + return SphinxFunctionDoc(obj, '', config=config) + else: + return SphinxDocString(pydoc.getdoc(obj), config=config) + +def setup(app): + # init numpydoc + numpydoc.setup(app, get_doc_object) + From 792f3c9412eb5435b681655f1ad0e7365b5bd50f Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Thu, 17 Dec 2015 11:28:35 +0100 Subject: [PATCH 0020/1925] Fix failing test --- nilearn/datasets/tests/test_atlas.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index 30bc90a506..4cfd5c169e 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -121,7 +121,7 @@ def test_fail_fetch_atlas_harvard_oxford(): data_dir=tst.tmpdir) assert_true(isinstance(nibabel.load(ho.maps), nibabel.Nifti1Image)) - assert_true(isinstance(ho.labels, np.ndarray)) + assert_true(isinstance(ho.labels, list)) assert_true(len(ho.labels) > 0) From 12c7dc85f3a2633c49a572a73082402d9145b5b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= Date: Thu, 17 Dec 2015 15:07:07 +0100 Subject: [PATCH 0021/1925] Remove warnings due to interaction between autosummary and numpydoc --- doc/conf.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/conf.py b/doc/conf.py index 0838e6dc97..ed8b32ed0a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -287,6 +287,12 @@ 'sklearn': 'http://scikit-learn.org/stable'} } +# Get rid of spurious warnings due to some interaction between +# autosummary and numpydoc. See +# https://github.com/phn/pytpm/issues/3#issuecomment-12133978 for more +# details +numpydoc_show_class_members = False + def touch_example_backreferences(app, what, name, obj, options, lines): # generate empty examples files, so that we don't get From 059aa04e5db961c820fcf1e120ef73be232b6f51 Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Fri, 18 Dec 2015 08:09:48 +0100 Subject: [PATCH 0022/1925] BUG: a test that should fail on recent numpy?
Commit to test on travis --- nilearn/tests/test_signal.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nilearn/tests/test_signal.py b/nilearn/tests/test_signal.py index 572d51b5b9..c1294e6f44 100644 --- a/nilearn/tests/test_signal.py +++ b/nilearn/tests/test_signal.py @@ -215,6 +215,9 @@ def test_detrend(): np.testing.assert_array_equal(length_1_signal, nisignal._detrend(length_1_signal)) + # Mean removal only (out-of-place) + detrended = nisignal._detrend(x.astype(np.int), inplace=True, + type="constant") def test_mean_of_squares(): """Test _mean_of_squares.""" From 8d4841efb07343efd94c2e56c710a1f125e80a2a Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Fri, 18 Dec 2015 08:19:21 +0100 Subject: [PATCH 0023/1925] FIX: convert to float in detrend Fixes #896 --- nilearn/signal.py | 5 ++--- nilearn/tests/test_signal.py | 4 +++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/nilearn/signal.py b/nilearn/signal.py index ca8428e730..be1522035d 100644 --- a/nilearn/signal.py +++ b/nilearn/signal.py @@ -13,7 +13,7 @@ import numpy as np import scipy from scipy import signal, stats, linalg -from sklearn.utils import gen_even_slices +from sklearn.utils import gen_even_slices, as_float_array from distutils.version import LooseVersion from ._utils.compat import _basestring @@ -135,8 +135,7 @@ def _detrend(signals, inplace=False, type="linear", n_batches=10): If a signal of lenght 1 is given, it is returned unchanged. """ - if not inplace: - signals = signals.copy() + signals = as_float_array(signals, copy=not inplace) if signals.shape[0] == 1: warnings.warn('Detrending of 3D signal has been requested but ' 'would lead to zero values. Skipping.') diff --git a/nilearn/tests/test_signal.py b/nilearn/tests/test_signal.py index c1294e6f44..cca642c81b 100644 --- a/nilearn/tests/test_signal.py +++ b/nilearn/tests/test_signal.py @@ -215,9 +215,11 @@ def test_detrend(): np.testing.assert_array_equal(length_1_signal, nisignal._detrend(length_1_signal)) - # Mean removal only (out-of-place) + # Mean removal on integers detrended = nisignal._detrend(x.astype(np.int), inplace=True, type="constant") + assert_true(abs(detrended.mean(axis=0)).max() + < 15. * np.finfo(np.float).eps) def test_mean_of_squares(): """Test _mean_of_squares.""" From 1e9386352c773fa163e027a3488f2057aa8b0e1f Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Fri, 18 Dec 2015 13:58:01 +0100 Subject: [PATCH 0024/1925] Fixing error message for display mode ortho and cut coords integer --- nilearn/plotting/img_plotting.py | 5 +++++ nilearn/plotting/tests/test_img_plotting.py | 13 +++++++++++++ 2 files changed, 18 insertions(+) diff --git a/nilearn/plotting/img_plotting.py b/nilearn/plotting/img_plotting.py index e69ead2c07..4abd23bcab 100644 --- a/nilearn/plotting/img_plotting.py +++ b/nilearn/plotting/img_plotting.py @@ -139,6 +139,11 @@ def _plot_img_with_bg(img, bg_img=None, cut_coords=None, 'Tip: Use np.nan_max() instead of np.max().') warnings.warn(nan_msg) + if isinstance(cut_coords, numbers.Number) and display_mode == 'ortho': + raise ValueError("The input given for display_mode='ortho' needs to be " + "a list of 3d world coordinates in (x, y, z). 
" + "You provided single cut, cut_coords={0}".format(cut_coords)) + if img is not False and img is not None: img = _utils.check_niimg_3d(img, dtype='auto') data = img.get_data() diff --git a/nilearn/plotting/tests/test_img_plotting.py b/nilearn/plotting/tests/test_img_plotting.py index bc420347a5..2c3e52e08e 100644 --- a/nilearn/plotting/tests/test_img_plotting.py +++ b/nilearn/plotting/tests/test_img_plotting.py @@ -88,6 +88,7 @@ def test_plot_anat(): finally: os.remove(filename) + def test_plot_functions(): img = _generate_img() @@ -787,3 +788,15 @@ def test_get_colorbar_and_data_ranges_masked_array(): assert_equal(vmax, 2) assert_equal(cbar_vmin, None) assert_equal(cbar_vmax, None) + + +def test_invalid_in_display_mode_cut_coords_all_plots(): + img = _generate_img() + + for plot_func in [plot_img, plot_anat, plot_roi, plot_epi, + plot_stat_map, plot_prob_atlas, plot_glass_brain]: + assert_raises_regex(ValueError, + "The input given for display_mode='ortho' needs to " + "be a list of 3d world coordinates.", + plot_func, + img, display_mode='ortho', cut_coords=2) From 2ee156f915fa823dd14caabc8d5de00a998200d3 Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Fri, 18 Dec 2015 19:29:10 +0100 Subject: [PATCH 0025/1925] FIX: adapt numerical precision for windows This might make the tests pass under AppVeyor --- nilearn/tests/test_signal.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nilearn/tests/test_signal.py b/nilearn/tests/test_signal.py index cca642c81b..044e18b525 100644 --- a/nilearn/tests/test_signal.py +++ b/nilearn/tests/test_signal.py @@ -8,6 +8,7 @@ import numpy as np from nose.tools import assert_true, assert_false, assert_raises +from sklearn.utils.testing import assert_less # Use nisignal here to avoid name collisions (using nilearn.signal is # not possible) @@ -216,10 +217,10 @@ def test_detrend(): nisignal._detrend(length_1_signal)) # Mean removal on integers - detrended = nisignal._detrend(x.astype(np.int), inplace=True, + detrended = nisignal._detrend(x.astype(np.int64), inplace=True, type="constant") - assert_true(abs(detrended.mean(axis=0)).max() - < 15. * np.finfo(np.float).eps) + assert_less(abs(detrended.mean(axis=0)).max(), + 20. * np.finfo(np.float).eps) def test_mean_of_squares(): """Test _mean_of_squares.""" From cb9d60c7fa9269c17910b18bf95e5a23aa41a8d2 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Mon, 21 Dec 2015 17:31:02 +0100 Subject: [PATCH 0026/1925] Changed design layout. Now accepts only one input parameter. --- .../plot_visualize_megatrawls_netmats.py | 41 ++--- nilearn/_utils/param_validation.py | 30 +-- nilearn/datasets/description/Megatrawls.rst | 25 ++- nilearn/datasets/func.py | 174 ++++++------------ nilearn/datasets/tests/test_func.py | 87 ++++++--- nilearn/tests/test_param_validation.py | 44 +---- 6 files changed, 144 insertions(+), 257 deletions(-) diff --git a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py index fdff8d31d1..8ee1b2acff 100644 --- a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py +++ b/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py @@ -7,34 +7,17 @@ See :func:`nilearn.datasets.fetch_megatrawls_netmats` documentation for more details. 
""" - - -def plot_matrix(matrix, title): - plt.figure() - plt.imshow(matrix, interpolation="nearest", cmap=plotting.cm.bwr) - plt.colorbar() - plt.title(title) - ################################################################################ -# Fetch the network matrices data of dimensionalities d=100 and d=300 for +# Fetching the partial correlation matrices of dimensionality d=300 with # timeseries method 'eigen regression' by importing datasets module from nilearn import datasets -netmats = datasets.fetch_megatrawls_netmats(dimensionality=[300, 100], - timeseries=['eigen_regression'], - matrices=['partial_correlation']) - -# Output matrices are returned according to the sequence of the given inputs. -# Partial correlation matrix arrays: array 1 has matrix with dimensionality=300 -# and array 2 has matrix with dimensionality=100 -correlation_matrices = netmats.partial_correlation - -# Array of given dimensions -dimensions_partial = netmats.dimensions_partial - -# Array of timeseries method repeated for total number of given dimensions -timeseries_partial = netmats.timeseries_partial - +netmats = datasets.fetch_megatrawls_netmats(dimensionality=300, + timeseries='eigen_regression', + matrices='partial_correlation') +# Partial correlation matrices array of size (300, 300) are stored in the name +# of 'correlation_matrices' +partial_correlation = netmats.correlation_matrices ################################################################################ # Visualization @@ -43,9 +26,9 @@ def plot_matrix(matrix, title): import matplotlib.pyplot as plt from nilearn import plotting -for matrix, dim, tserie in zip(correlation_matrices, dimensions_partial, - timeseries_partial): - title = 'Partial correlation matrices of d=%d & timeseries=%s' % (dim, tserie) - plot_matrix(matrix, title) - +title = "Partial correlation matrices of d=300 with timeseries='eigen_regression'" +plt.figure() +plt.imshow(partial_correlation, interpolation="nearest", cmap=plotting.cm.bwr) +plt.colorbar() +plt.title(title) plt.show() diff --git a/nilearn/_utils/param_validation.py b/nilearn/_utils/param_validation.py index dade1da5c7..48709567ce 100644 --- a/nilearn/_utils/param_validation.py +++ b/nilearn/_utils/param_validation.py @@ -5,7 +5,7 @@ import numbers import warnings -from .compat import _basestring, izip +from .compat import _basestring def check_threshold(threshold, data, percentile_func, name='threshold'): @@ -63,31 +63,3 @@ def check_threshold(threshold, data, percentile_func, name='threshold'): raise TypeError('%s should be either a number ' 'or a string finishing with a percent sign' % (name, )) return threshold - - -def check_parameters_megatrawls_datasets(inputs, standards, name): - """ Checks given inputs against standards for megatrawls datasets parameters. - - If parameters are valid, then no error message is raised else error message - will be raised. - - Parameters - ---------- - inputs: list of str - list to be checked. - standards: list of str - The given inputs will be checked against this given standards. - name: str - Used for precise naming in error message - """ - message = ("Invalid {0} name is given: {1}. " - "Please choose either of them {2}") - - if isinstance(inputs, _basestring): - raise TypeError("Input given for {0} should be in list. 
" - "You have given as single variable: {1}".format(name, inputs)) - else: - for each_input in inputs: - if each_input not in standards: - raise ValueError(message.format(name, each_input, str(standards))) - return inputs diff --git a/nilearn/datasets/description/Megatrawls.rst b/nilearn/datasets/description/Megatrawls.rst index 88718012d2..62a3283f31 100644 --- a/nilearn/datasets/description/Megatrawls.rst +++ b/nilearn/datasets/description/Megatrawls.rst @@ -3,27 +3,32 @@ MegaTrawls Network Matrices HCP Notes ----- -Network Matrices data of two types, Full correlation and Partial correlation -matrices estimated by a timeseries signals extracted from a nodes of Group ICA -parcellations. In total, 461 functional connectivity datasets were used. +Contains network matrices data of two types, full correlation and partial +correlation which were estimated using each subject specific timeseries +signals extracted from group of ICA nodes or parcellations. In total, +461 functional connectivity datasets were used to obtain these matrices +and is part of HCP Megatrawls release. -Data is available for all dimensionalities (d=25, d=50, d=100, d=200, d=300) -of Group ICA and for all timeseries methods multiple spatial regression and -eigen regression (ts2 and ts3). +The number of nodes available for download are 25, 50, 100, 200, 300 +with combination of two variants of timeseries extraction methods, +multiple spatial regression (ts2) and eigen regression (ts3). These matrices can be used to predict the relationships between subjects -functional connectivity and subjects behavioural measures. +functional connectivity datasets and their behavioural measures. Both can be +downloaded from HCP connectome website under conditions. See disclaimer below. Content ------- - :'Correlation': an array of matrices containing full correlation values - :'Partialcorrelation': an array of matrices containing partial correlation values + :'dimensions': contains given input in dimensions used in fetching data. + :'timeseries': contains given specific timeseries method used in fetching data. + :'matrices': contains given specific type of matrices name. + :'correlation_matrices': contains correlation network matrices data. References ---------- -For more information about technical details about predicting the measures: +For more technical details about predicting the measures, refer to: Stephen Smith et al, HCP beta-release of the Functional Connectivity MegaTrawl. April 2015 "HCP500-MegaTrawl" release. 
https://db.humanconnectome.org/megatrawl/ diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 18ae31451f..ea1d9e1f82 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -4,7 +4,6 @@ import warnings import os import re -import itertools import numpy as np import nibabel from sklearn.datasets.base import Bunch @@ -14,7 +13,6 @@ from .._utils.compat import BytesIO, _basestring, _urllib from .._utils.numpy_conversions import csv_to_array -from .._utils.param_validation import check_parameters_megatrawls_datasets def fetch_haxby_simple(data_dir=None, url=None, resume=True, verbose=1): @@ -1284,8 +1282,9 @@ def fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True, return data -def fetch_megatrawls_netmats(data_dir=None, dimensionality=None, timeseries=None, - matrices=None, resume=True, verbose=1): +def fetch_megatrawls_netmats(dimensionality=100, timeseries='eigen_regression', + matrices='partial_correlation', data_dir=None, + resume=True, verbose=1): """Downloads and returns Network Matrices data from MegaTrawls release in HCP. This data can be used to predict relationships between imaging data and @@ -1297,34 +1296,28 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=None, timeseries=None Parameters ---------- + dimensionality: int, optional + Valid inputs are 25, 50, 100, 200, 300. By default, network matrices + estimated using Group ICA brain parcellations of 100 components/dimensions + will be returned. + + timeseries: str, optional + Valid inputs are 'multiple_spatial_regression' or 'eigen_regression'. By + default 'eigen_regression', matrices estimated using first principal + eigen component timeseries signals extracted from each subject data + parcellations will be returned. Otherwise, 'multiple_spatial_regression' + matrices estimated using spatial regressor based timeseries signals + extracted from each subject data parcellations will be returned. + + matrices: str, optional + Valid inputs are 'full_correlation' or 'partial_correlation'. By default, + partial correlation matrices will be returned otherwise if selected + full correlation matrices will be returned. + data_dir: str, default is None, optional Path of the data directory. Used to force data storage in a specified location. - dimensionality: list of int in [25, 50, 100, 200, 300], optional - By default, network matrices data estimated from brain parcellations - of all dimensionalities are returned each in a separate dimensional - array (n, n). If set to specific dimension, then data of its - particular dimension will be returned. For example, if set as - [25] only data corresponding to dimensions 25 of array (25, 25) - in size will be returned. - - timeseries: list of str in ['multiple_spatial_regression', 'eigen_regression'] - By default, network matrices data extimated using both methods will be - returned each in a separate array. If ['multiple_spatial_regression'], - then correlation matrices estimated using spatial regressor based - extraction of subject specific timeseries signals will be returned. - If ['eigen_regression'], then correlation matrices estimated using - first principal eigen component based extraction of subject specific - timeseries signals will be returned. For full technical details - about each method, refer to [3] [4] [5]. - - matrices: list of str in ['correlation', 'partial_correlation'], optional - By default, matrices of both types will be returned. 
If ['correlation'], - then only full correlation matrices will be returned otherwise if - set as ['partial_correlation'], only partial correlation matrices - will be returned. - resume: bool, default is True This parameter is required if a partially downloaded file is needed to be resumed to download again. @@ -1337,24 +1330,18 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=None, timeseries=None Returns ------- data: Bunch - Dictionary-like object, the attributes are : - - - 'correlation': list of arrays - contains full correlation matrices. - - 'partial_correlation': list of arrays - contains partial correlation matrices. - - 'dimensions_correlation': array - consists of given input in dimensions used in fetching its full - correlation matrices. - - 'dimensions_partial': array - consists of given input in dimensions used in fetching its partial - correlation matrices. - - 'timeseries_correlation': array - consists of given input in timeseries methods used in fetching its - full correlation matrices. - - 'timeseries_partial': array - consists of given input in timeseries methods used in fetching its - partial correlation matrices. + dictionary-like object, the attributes are : + + - 'dimensions': int, the requested number of dimensions. + + - 'timeseries': str, the requested timeseries extraction method. + + - 'matrices': str, the requested type of matrices. + + - 'correlation_matrices': ndarray, the fetched correlation matrices of + the requested type; the (n, n) array size depends on the requested + dimensions. + - 'description': data description References ---------- @@ -1376,95 +1363,46 @@ def fetch_megatrawls_netmats(data_dir=None, dimensionality=None, timeseries=None cerebellum defined by resting state functional connectivity. Cerebral Cortex, 2009. - Disclaimer - ---------- - IMPORTANT: This is open access data. You must agree to Terms and conditions - of using this data before using it, available at - http://humanconnectome.org/data/data-use-terms/open-access.html. - Open Access Data (all imaging data and most of the behavioral data) is - available to those who register an account at ConnectomeDB and agree to the - Open Access Data Use Terms. This includes agreement to comply with - institutional rules and regulations. This means you may need the approval - of your IRB or Ethics Committee to use the data. The released HCP data are - not considered de-identified, since certain combinations of HCP Restricted - Data (available through a separate process) might allow identification of - individuals. Different national, state and local laws may apply and be - interpreted differently, so it is important that you consult with your IRB - or Ethics Committee before beginning your research. If needed and upon - request, the HCP will provide a certificate stating that you have accepted - the HCP Open Access Data Use Terms. Please note that everyone who works with - HCP open access data must review and agree to these terms, including those - who are accessing shared copies of this data. If you are sharing HCP Open - Access data, please advise your co-researchers that they must register with - ConnectomeDB and agree to these terms. - Register and sign the Open Access Data Use Terms at - ConnectomeDB: https://db.humanconnectome.org/ + Note: See the description for the terms & conditions on data usage.
+ """ url = "http://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz" opts = {'uncompress': True} + error_message = "Invalid {0} input is provided: {1}, choose one of them {2}" # standard dataset terms dimensionalities = [25, 50, 100, 200, 300] + if dimensionality not in dimensionalities: + raise ValueError(error_message.format('dimensionality', dimensionality, + dimensionalities)) timeseries_methods = ['multiple_spatial_regression', 'eigen_regression'] - output_matrices_names = ['correlation', 'partial_correlation'] - - if dimensionality is not None: - dimensionality = check_parameters_megatrawls_datasets( - dimensionality, dimensionalities, 'dimensionality') - else: - dimensionality = dimensionalities - - if timeseries is not None: - timeseries = check_parameters_megatrawls_datasets( - timeseries, timeseries_methods, 'timeseries') - else: - timeseries = timeseries_methods - - if matrices is not None: - matrices = check_parameters_megatrawls_datasets( - matrices, output_matrices_names, 'matrices') - else: - matrices = output_matrices_names + if timeseries not in timeseries_methods: + raise ValueError(error_message.format('timeseries', timeseries, + timeseries_methods)) + output_matrices_names = ['full_correlation', 'partial_correlation'] + if matrices not in output_matrices_names: + raise ValueError(error_message.format('matrices', matrices, + output_matrices_names)) dataset_name = 'Megatrawls' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) description = _get_dataset_descr(dataset_name) - # Generate all combinations - dims, tseries, mats = list(zip(*list(itertools.product(dimensionality, timeseries, matrices)))) - files = [] - ids_correlation = [] - ids_partial = [] timeseries_map = dict(multiple_spatial_regression='ts2', eigen_regression='ts3') - matrices_map = dict(correlation='Znet1.txt', partial_correlation='Znet2.txt') - for index, (dim, tserie, mat) in enumerate(zip(dims, tseries, mats)): - if mat == 'correlation': - ids_correlation.append(index) - elif mat == 'partial_correlation': - ids_partial.append(index) - filepath = os.path.join( - '3T_Q1-Q6related468_MSMsulc_d%d_%s' % (dim, timeseries_map[tserie]), matrices_map[mat]) - files.append((filepath, url, opts)) + matrices_map = dict(full_correlation='Znet1.txt', partial_correlation='Znet2.txt') + filepath = [(os.path.join( + '3T_Q1-Q6related468_MSMsulc_d%d_%s' % (dimensionality, timeseries_map[timeseries]), + matrices_map[matrices]), url, opts)] # Fetch all the files - files = _fetch_files(data_dir, files, resume=resume, verbose=verbose) + files = _fetch_files(data_dir, filepath, resume=resume, verbose=verbose) # Load the files into arrays - correlation = [csv_to_array(files[id_c]) for id_c in ids_correlation] - partial = [csv_to_array(files[id_p]) for id_p in ids_partial] - # Taking the account of all the given dimensions & timeseries - # methods to give the end users to identify themselves about the - # matrices which are fetched. 
- dimensions_correlation = [dims[id_c] for id_c in ids_correlation] - dimensions_partial = [dims[id_p] for id_p in ids_partial] - timeseries_correlation = [tseries[id_c] for id_c in ids_correlation] - timeseries_partial = [tseries[id_p] for id_p in ids_partial] + correlation_matrices = csv_to_array(files[0]) return Bunch( - correlation=correlation, - partial_correlation=partial, - dimensions_correlation=dimensions_correlation, - dimensions_partial=dimensions_partial, - timeseries_correlation=timeseries_correlation, - timeseries_partial=timeseries_partial, + dimensions=dimensionality, + timeseries=timeseries, + matrices=matrices, + correlation_matrices=correlation_matrices, description=description) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index 41bab06fa4..3259993f50 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -365,35 +365,66 @@ def test_fetch_mixed_gambles(): assert_equal(len(mgambles["zmaps"]), n_subjects) +def test_check_parameters_megatrawls_datasets(): + # testing whether the function raises the same error message + # if invalid input parameters are provided + message = "Invalid {0} input is provided: {1}." + + for invalid_input_dim in [1, 5, 30]: + assert_raises_regex(ValueError, + message.format('dimensionality', invalid_input_dim), + func.fetch_megatrawls_netmats, + dimensionality=invalid_input_dim) + + for invalid_input_timeserie in ['asdf', 'time', 'st2']: + assert_raises_regex(ValueError, + message.format('timeseries', invalid_input_timeserie), + func.fetch_megatrawls_netmats, + timeseries=invalid_input_timeserie) + + for invalid_output_name in ['net1', 'net2']: + assert_raises_regex(ValueError, + message.format('matrices', invalid_output_name), + func.fetch_megatrawls_netmats, + matrices=invalid_output_name) + + @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_megatrawls_netmats(): # smoke test to see that files are fetched and read properly - # since we are loading information present in it and returning - # the same - dimensionality = [25, 100, 200, 300] - for dim in dimensionality: - files_dir = os.path.join(tst.tmpdir, 'Megatrawls', '3T_Q1-Q6related468_MSMsulc_d%d_ts3' % dim) - os.makedirs(files_dir) - with open(os.path.join(files_dir, 'Znet2.txt'), 'w') as net_file: - net_file.write("1") - - timeseries = ['eigen_regression'] - megatrawl_netmats_data = func.fetch_megatrawls_netmats( - data_dir=tst.tmpdir, dimensionality=dimensionality, - timeseries=timeseries, matrices=['partial_correlation']) - - # expected number of returns sitting in output name correlations should be equal - assert_equal(len(megatrawl_netmats_data), 7) - - # dimensions given to fetch partial correlation should be same meaning we - # check if same array is returned as given - assert_equal(megatrawl_netmats_data.dimensions_partial, dimensionality) - - # same timeseries method is repeated for each dimension meaning we check - # multiplying by 4 since given dimensionalities are of 4 types - assert_equal(megatrawl_netmats_data.timeseries_partial, timeseries * 4) - - # check length of output matrices should be equal - assert_equal(len(megatrawl_netmats_data.partial_correlation), 4) - # check if description is not empty + # since we are loading data present in it + files_dir = os.path.join(tst.tmpdir, 'Megatrawls', '3T_Q1-Q6related468_MSMsulc_d100_ts3') + os.makedirs(files_dir) + with open(os.path.join(files_dir, 'Znet2.txt'), 'w') as net_file: + net_file.write("1") + + files_dir2 = 
os.path.join(tst.tmpdir, 'Megatrawls', '3T_Q1-Q6related468_MSMsulc_d300_ts2') + os.makedirs(files_dir2) + with open(os.path.join(files_dir2, 'Znet1.txt'), 'w') as net_file2: + net_file2.write("1") + + megatrawl_netmats_data = func.fetch_megatrawls_netmats(data_dir=tst.tmpdir) + + # the returned bunch should contain the expected number of fields + assert_equal(len(megatrawl_netmats_data), 5) + # none of the returned attributes should be empty + # dimensions + assert_not_equal(megatrawl_netmats_data.dimensions, '') + # timeseries + assert_not_equal(megatrawl_netmats_data.timeseries, '') + # matrices + assert_not_equal(megatrawl_netmats_data.matrices, '') + # correlation matrices + assert_not_equal(megatrawl_netmats_data.correlation_matrices, '') + # description assert_not_equal(megatrawl_netmats_data.description, '') + + # the returned dimensions, timeseries and matrices attributes should + # match the user-provided settings + netmats_data = func.fetch_megatrawls_netmats(data_dir=tst.tmpdir, + dimensionality=300, + timeseries='multiple_spatial_regression', + matrices='full_correlation') + assert_equal(netmats_data.dimensions, 300) + assert_equal(netmats_data.timeseries, 'multiple_spatial_regression') + assert_equal(netmats_data.matrices, 'full_correlation') diff --git a/nilearn/tests/test_param_validation.py b/nilearn/tests/test_param_validation.py index 24554b15db..2a649522e7 100644 --- a/nilearn/tests/test_param_validation.py +++ b/nilearn/tests/test_param_validation.py @@ -2,7 +2,6 @@ Test the _utils.param_validation module """ -import warnings import numpy as np from nose.tools import assert_true, assert_equal @@ -10,8 +9,7 @@ from nilearn._utils.testing import assert_raises_regex, assert_warns from nilearn._utils.extmath import fast_abs_percentile -from nilearn._utils.param_validation import (check_threshold, - check_parameters_megatrawls_datasets) +from nilearn._utils.param_validation import check_threshold def test_check_threshold(): @@ -56,43 +54,3 @@ def test_check_threshold(): assert_true(1. < check_threshold("50%", matrix, percentile_func=fast_abs_percentile, name=name) <= 2.) - - -def test_check_parameters_megatrawls_datasets(): - # testing whether the function raises the same error message as in - # main function if wrong input parameters are given - # parameters are dimensionality, timeseries, matrices - message = "Invalid {0} name is given: {1}" - - invalid_inputs_dimensionality = [1, 5, 30] - valid_inputs_dimensionality = [25, 50, 100, 200, 300] - assert_raises_regex(ValueError, - message.format('dimensionality', invalid_inputs_dimensionality), - check_parameters_megatrawls_datasets, - invalid_inputs_dimensionality, valid_inputs_dimensionality, - 'dimensionality') - - invalid_inputs_timeseries = ['asdf', 'time', 'st2'] - valid_inputs_timeseries = ['multiple_spatial_regression', 'eigen_regression'] - assert_raises_regex(ValueError, - message.format('timeseries', invalid_inputs_timeseries), - check_parameters_megatrawls_datasets, - invalid_inputs_timeseries, valid_inputs_timeseries, - 'timeseries') - - invalid_output_names = ['net1', 'net2'] - valid_output_names = ['correlation', 'partial_correlation'] - assert_raises_regex(ValueError, - message.format('matrices', invalid_output_names), - check_parameters_megatrawls_datasets, - invalid_output_names, valid_output_names, 'matrices') - - # giving a valid input as a single element but not as a list to test - # if it raises same error message - message = ("Input given for {0} should be in list.
" - "You have given as single variable: {1}") - valid_matrix_name = 'correlation' - assert_raises_regex(TypeError, - message.format('matrices', valid_matrix_name), - check_parameters_megatrawls_datasets, - valid_matrix_name, valid_output_names, 'matrices') From b2f3a4d69b04ac27e39e2e62c95935293319ccf0 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Wed, 6 Jan 2016 16:49:55 +0100 Subject: [PATCH 0027/1925] fixing overlapping text and wrong example --- .../manipulating_images.rst | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/doc/manipulating_visualizing/manipulating_images.rst b/doc/manipulating_visualizing/manipulating_images.rst index 476c6b5b28..112159fd66 100644 --- a/doc/manipulating_visualizing/manipulating_images.rst +++ b/doc/manipulating_visualizing/manipulating_images.rst @@ -32,21 +32,25 @@ datasets and atlases. Dataset fetching functions can be imported from :mod:`nilearn.datasets`:: >>> from nilearn import datasets - >>> haxby_files = datasets.fetch_haxby(n_subjects=1) # doctest: +SKIP + >>> haxby_dataset = datasets.fetch_haxby(n_subjects=1) They return a data structure that contains different pieces of information on the retrieved dataset, including the file names on hard disk:: >>> # The different files - >>> print(sorted(list(haxby_files.keys()))) # doctest: +SKIP - ['anat', 'description', 'func', 'mask_face', 'mask_face_little', 'mask_house', 'mask_house_little', 'mask_vt', 'session_target'] + >>> print(sorted(list(haxby_dataset.keys()))) # doctest: +SKIP + ['anat', 'description', 'func', 'mask_face', 'mask_face_little', + 'mask_house', 'mask_house_little', 'mask_vt', 'session_target'] >>> # Path to first functional file - >>> print(haxby_files.func[0]) # doctest: +ELLIPSIS +SKIP + >>> print(haxby_dataset.func[0]) # doctest: +ELLIPSIS /.../nilearn_data/haxby2001/subj1/bold.nii.gz - >>> # Provide information on the dataset - >>> print(haxby_files.description) # doctest: +ELLIPSIS +SKIP - Haxby 2001 results + +Explanation and further resources of the dataset at hand can be retrieved as +follows:: + + >>> print(haxby_dataset.description) # doctest: +ELLIPSIS + Haxby 2001 results... Notes @@ -55,10 +59,6 @@ file names on hard disk:: | -Explanation and further resources of the dataset at hand can be -retrieved as follows: - - >>> print haxby_dataset['description'] # doctest: +SKIP For a list of all the data fetching functions in nilearn, see :ref:`datasets_ref`. From 434d385401abfdbf5f3a1ec645a059de595e1dad Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Wed, 6 Jan 2016 17:14:02 +0100 Subject: [PATCH 0028/1925] skip doc test in order to avoid too much download. --- doc/manipulating_visualizing/manipulating_images.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/manipulating_visualizing/manipulating_images.rst b/doc/manipulating_visualizing/manipulating_images.rst index 112159fd66..20c7276999 100644 --- a/doc/manipulating_visualizing/manipulating_images.rst +++ b/doc/manipulating_visualizing/manipulating_images.rst @@ -32,7 +32,7 @@ datasets and atlases. 
Dataset fetching functions can be imported from :mod:`nilearn.datasets`:: >>> from nilearn import datasets - >>> haxby_dataset = datasets.fetch_haxby(n_subjects=1) + >>> haxby_dataset = datasets.fetch_haxby(n_subjects=1) # doctest: +SKIP They return a data structure that contains different pieces of information on the retrieved dataset, including the @@ -43,13 +43,13 @@ file names on hard disk:: ['anat', 'description', 'func', 'mask_face', 'mask_face_little', 'mask_house', 'mask_house_little', 'mask_vt', 'session_target'] >>> # Path to first functional file - >>> print(haxby_dataset.func[0]) # doctest: +ELLIPSIS + >>> print(haxby_dataset.func[0]) # doctest: +ELLIPSIS +SKIP /.../nilearn_data/haxby2001/subj1/bold.nii.gz Explanation and further resources of the dataset at hand can be retrieved as follows:: - >>> print(haxby_dataset.description) # doctest: +ELLIPSIS + >>> print(haxby_dataset.description) # doctest: +ELLIPSIS +SKIP Haxby 2001 results... From 137c705565ef1b99877198f93b986129fe4474e7 Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Wed, 6 Jan 2016 19:17:16 +0100 Subject: [PATCH 0029/1925] DOC: make things a tiny bit simpler Simpler is always better --- doc/connectivity/functional_connectomes.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/connectivity/functional_connectomes.rst b/doc/connectivity/functional_connectomes.rst index d31714f8ae..8525a8a2eb 100644 --- a/doc/connectivity/functional_connectomes.rst +++ b/doc/connectivity/functional_connectomes.rst @@ -42,7 +42,8 @@ at 2mm, and with a threshold of a probability of .25:: from nilearn import datasets dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm') - atlas_filename, labels = dataset.maps, dataset.labels + atlas_filename = dataset.maps + labels = dataset.labels Plotting can then be done as:: From 93e2123a174b50dffccd2d068f6347b1fb7a1677 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Wed, 6 Jan 2016 22:03:05 +0100 Subject: [PATCH 0030/1925] using mock version of haxby fetcher with doctest --- .../manipulating_images.rst | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/doc/manipulating_visualizing/manipulating_images.rst b/doc/manipulating_visualizing/manipulating_images.rst index 20c7276999..049cfdf249 100644 --- a/doc/manipulating_visualizing/manipulating_images.rst +++ b/doc/manipulating_visualizing/manipulating_images.rst @@ -23,6 +23,11 @@ Loading data .. _datasets: +.. directive:: .. testsetup:: * + +>>> from nilearn.datasets.tests.test_utils import setup_mock +>>> setup_mock() + Fetching open datasets ---------------------- @@ -32,27 +37,27 @@ datasets and atlases. 
Dataset fetching functions can be imported from :mod:`nilearn.datasets`:: >>> from nilearn import datasets - >>> haxby_dataset = datasets.fetch_haxby(n_subjects=1) # doctest: +SKIP + >>> haxby_dataset = datasets.fetch_haxby(n_subjects=1) They return a data structure that contains different pieces of information on the retrieved dataset, including the file names on hard disk:: >>> # The different files - >>> print(sorted(list(haxby_dataset.keys()))) # doctest: +SKIP - ['anat', 'description', 'func', 'mask_face', 'mask_face_little', + >>> print(sorted(list(haxby_dataset.keys()))) # doctest: +NORMALIZE_WHITESPACE + ['anat', 'description', 'func', 'mask', 'mask_face', 'mask_face_little', 'mask_house', 'mask_house_little', 'mask_vt', 'session_target'] >>> # Path to first functional file - >>> print(haxby_dataset.func[0]) # doctest: +ELLIPSIS +SKIP + >>> print(haxby_dataset.func[0]) # doctest: +ELLIPSIS /.../nilearn_data/haxby2001/subj1/bold.nii.gz Explanation and further resources of the dataset at hand can be retrieved as follows:: - >>> print(haxby_dataset.description) # doctest: +ELLIPSIS +SKIP - Haxby 2001 results... - - + >>> print(haxby_dataset.description) # doctest: +ELLIPSIS + Haxby 2001 results + + Notes ----- Results from a classical fMRI study that... From c515f4e020b6ed6dcb58c731c40ea5e396512a91 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 7 Jan 2016 10:44:59 +0100 Subject: [PATCH 0031/1925] reverting mock and skip test --- .../manipulating_images.rst | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/doc/manipulating_visualizing/manipulating_images.rst b/doc/manipulating_visualizing/manipulating_images.rst index 049cfdf249..7d44db9d41 100644 --- a/doc/manipulating_visualizing/manipulating_images.rst +++ b/doc/manipulating_visualizing/manipulating_images.rst @@ -23,11 +23,6 @@ Loading data .. _datasets: -.. directive:: .. testsetup:: * - ->>> from nilearn.datasets.tests.test_utils import setup_mock ->>> setup_mock() - Fetching open datasets ---------------------- @@ -37,27 +32,27 @@ datasets and atlases. Dataset fetching functions can be imported from :mod:`nilearn.datasets`:: >>> from nilearn import datasets - >>> haxby_dataset = datasets.fetch_haxby(n_subjects=1) + >>> haxby_dataset = datasets.fetch_haxby(n_subjects=1) # doctest: +SKIP They return a data structure that contains different pieces of information on the retrieved dataset, including the file names on hard disk:: >>> # The different files - >>> print(sorted(list(haxby_dataset.keys()))) # doctest: +NORMALIZE_WHITESPACE + >>> print(sorted(list(haxby_dataset.keys()))) # doctest: +SKIP ['anat', 'description', 'func', 'mask', 'mask_face', 'mask_face_little', 'mask_house', 'mask_house_little', 'mask_vt', 'session_target'] >>> # Path to first functional file - >>> print(haxby_dataset.func[0]) # doctest: +ELLIPSIS + >>> print(haxby_dataset.func[0]) # doctest: +SKIP /.../nilearn_data/haxby2001/subj1/bold.nii.gz Explanation and further resources of the dataset at hand can be retrieved as follows:: - >>> print(haxby_dataset.description) # doctest: +ELLIPSIS + >>> print(haxby_dataset.description) # doctest: +SKIP Haxby 2001 results - - + + Notes ----- Results from a classical fMRI study that... 
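The back-and-forth across the four documentation commits above turns on standard doctest option flags: `+SKIP` excludes a statement from doctest runs entirely (used here so that building the documentation does not download data), `+ELLIPSIS` lets `...` in the expected output match arbitrary text, and `+NORMALIZE_WHITESPACE` relaxes whitespace comparison (used in the short-lived mock-based attempt). A condensed illustration of how the flags combine, mirroring the snippet being edited::

    >>> from nilearn import datasets
    >>> haxby_dataset = datasets.fetch_haxby(n_subjects=1)  # doctest: +SKIP
    >>> print(haxby_dataset.func[0])  # doctest: +ELLIPSIS +SKIP
    /.../nilearn_data/haxby2001/subj1/bold.nii.gz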
From 64f26cbfa44400374de90409f95f9cc8f5be1fb1 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Thu, 7 Jan 2016 13:48:32 +0100 Subject: [PATCH 0032/1925] BUG: fix the filled_contours view type in the probabilistic atlas plotting function --- nilearn/plotting/displays.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nilearn/plotting/displays.py b/nilearn/plotting/displays.py index 7e39ddad45..b794f20215 100644 --- a/nilearn/plotting/displays.py +++ b/nilearn/plotting/displays.py @@ -539,8 +539,11 @@ def add_contours(self, img, filled=False, **kwargs): if filled: colors = kwargs['colors'] levels = kwargs['levels'] - # Append lower boundary value to '0' for contour fillings - levels.append(0.) + # Contour-filling levels should be given as (lower, upper) bounds. + # We therefore append np.inf as the upper level rather than zero; + # appending zero would break the contour filling. + # See nilearn issue #931, which references the underlying matplotlib problem. + levels.append(np.inf) alpha = kwargs['alpha'] self._map_show(img, type='contourf', levels=levels, alpha=alpha, colors=colors[:3]) From 526eba22d57ada7552756d85a0591fd3044db459 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Thu, 7 Jan 2016 14:12:03 +0100 Subject: [PATCH 0033/1925] Comment addressed --- nilearn/plotting/displays.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nilearn/plotting/displays.py b/nilearn/plotting/displays.py index b794f20215..413a501208 100644 --- a/nilearn/plotting/displays.py +++ b/nilearn/plotting/displays.py @@ -540,9 +540,6 @@ def add_contours(self, img, filled=False, **kwargs): colors = kwargs['colors'] levels = kwargs['levels'] # Contour-filling levels should be given as (lower, upper) bounds. - # We therefore append np.inf as the upper level rather than zero; - # appending zero would break the contour filling. - # See nilearn issue #931, which references the underlying matplotlib problem.
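The matplotlib behaviour behind the fix retained above: `contourf` fills the bands between consecutive levels, so a single threshold level fills nothing until an explicit upper bound is appended. A self-contained sketch on synthetic data (illustrative only, not part of the patch; whether `np.inf` is accepted as a level depends on the matplotlib version, which is what issue #931 discusses)::

    import numpy as np
    import matplotlib
    matplotlib.use('Agg')  # non-interactive backend for the sketch
    import matplotlib.pyplot as plt

    rng = np.random.RandomState(0)
    data = rng.rand(10, 10)
    levels = [0.5]           # a single threshold, as passed to add_contours
    levels.append(np.inf)    # upper bound: fill everything above the threshold
    plt.contourf(data, levels=levels, colors=['red'])
    plt.savefig('filled_contours.png')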
levels.append(np.inf) alpha = kwargs['alpha'] self._map_show(img, type='contourf', levels=levels, alpha=alpha, From 5ff76db267e2b7d25784842fcd1fad9caeddaa1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= Date: Fri, 8 Jan 2016 16:55:26 +0100 Subject: [PATCH 0034/1925] Add comment about ignored PEP8 error messages in setup.cfg --- setup.cfg | 3 +++ 1 file changed, 3 insertions(+) diff --git a/setup.cfg b/setup.cfg index b25c893f07..48ddd82a97 100644 --- a/setup.cfg +++ b/setup.cfg @@ -18,4 +18,7 @@ ignore-files=(plot_.*.py|conf\.py) universal=1 [flake8] +# For PEP8 error codes see +# http://pep8.readthedocs.org/en/latest/intro.html#error-codes +# E402: module level import not at top of file ignore=E402 From add5f6e07710c3aa035346e426d9e76277f6a401 Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Sat, 9 Jan 2016 12:42:48 +0100 Subject: [PATCH 0035/1925] ENH: plotting: more robust backend choice Under MacOSX, using the matplotlib MacOSX backend works even without X Should fix #929 --- nilearn/plotting/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py index e7cafda82f..009d8ecfa3 100644 --- a/nilearn/plotting/__init__.py +++ b/nilearn/plotting/__init__.py @@ -11,8 +11,12 @@ def _set_mpl_backend(): # We are doing local imports here to avoid poluting our namespace import matplotlib import os + import sys # Set the backend to a non-interactive one for unices without X - if os.name == 'posix' and 'DISPLAY' not in os.environ: + if (os.name == 'posix' and 'DISPLAY' not in os.environ + and not (sys.platform == 'darwin' + and matplotlib.get_backend() == 'MacOSX' + )): matplotlib.use('Agg') except ImportError: from .._utils.testing import skip_if_running_nose From 54f042223002796b343e727928f00ed554918b07 Mon Sep 17 00:00:00 2001 From: banilo Date: Sun, 10 Jan 2016 13:28:58 +0100 Subject: [PATCH 0036/1925] find_coords: allow 4D input --- nilearn/plotting/find_cuts.py | 5 +++++ nilearn/plotting/tests/test_find_cuts.py | 8 ++++++++ 2 files changed, 13 insertions(+) diff --git a/nilearn/plotting/find_cuts.py b/nilearn/plotting/find_cuts.py index 3c36d262d4..ed0f180434 100644 --- a/nilearn/plotting/find_cuts.py +++ b/nilearn/plotting/find_cuts.py @@ -50,6 +50,11 @@ def find_xyz_cut_coords(img, mask=None, activation_threshold=None): # To speed up computations, we work with partial views of the array, # and keep track of the offset offset = np.zeros(3) + + # if a pseudo-4D image or several images were passed (cf. #922), + # we reduce to a single 3D image to find the coordinates + if len(data.shape) > 3: + data = data[:, :, :, 0] # Deal with masked arrays: if hasattr(data, 'mask'): diff --git a/nilearn/plotting/tests/test_find_cuts.py b/nilearn/plotting/tests/test_find_cuts.py index a587fef832..3f1293e226 100644 --- a/nilearn/plotting/tests/test_find_cuts.py +++ b/nilearn/plotting/tests/test_find_cuts.py @@ -41,6 +41,14 @@ def test_find_cut_coords(): np.testing.assert_array_equal( np.array([x, y, z]), 0.5 * np.array(data.shape).astype(np.float)) + + # regression test (cf.
#922) + # pseudo-4D images as input (i.e., X, Y, Z, 1) + # previously raised "ValueError: too many values to unpack" + data = np.ones((36, 43, 36))[..., np.newaxis] + affine = np.eye(4) + img = nibabel.Nifti1Image(data, affine) + x, y, z = find_xyz_cut_coords(img, activation_threshold=1.1) def test_find_cut_slices(): From 5d1c8a923bdc9c7d3153bedc58f4aa01e20439ab Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Fri, 8 Jan 2016 13:06:59 +0100 Subject: [PATCH 0037/1925] Breaking down examples to plotting and manipulating and advanced --- doc/connectivity/functional_connectomes.rst | 10 +-- doc/decoding/decoding_tutorial.rst | 8 +-- doc/decoding/searchlight.rst | 6 +- doc/index.rst | 4 +- doc/introduction.rst | 6 +- .../data_preparation.rst | 40 +++++------ doc/manipulating_images/index.rst | 21 ++++++ .../manipulating_images.rst | 54 +++++++------- doc/manipulating_visualizing/index.rst | 23 ------ doc/plotting/index.rst | 20 ++++++ .../plotting.rst | 70 +++++++++---------- doc/user_guide.rst | 3 +- examples/advanced/README.txt | 2 + .../plot_haxby_mass_univariate.py | 0 .../plot_localizer_mass_univariate_methods.py | 0 examples/manipulating_images/README.txt | 4 ++ .../plot_affine_transformation.py | 0 .../plot_extract_rois_smith_atlas.py | 0 .../plot_extract_rois_statistical_maps.py | 0 .../plot_mask_computation.py | 0 .../plot_nifti_simple.py | 0 .../plot_roi_extraction.py | 0 .../plot_smooth_mean_image.py | 0 examples/manipulating_visualizing/README.txt | 4 -- examples/plotting/README.txt | 4 ++ .../plot_atlas.py | 0 .../plot_demo_glass_brain.py | 0 .../plot_demo_glass_brain_extensive.py | 0 .../plot_demo_more_plotting.py | 0 .../plot_demo_plotting.py | 0 .../plot_dim_plotting.py | 0 .../plot_haxby_masks.py | 0 .../plot_overlay.py | 0 .../plot_prob_atlas.py | 0 .../plot_visualization.py | 0 35 files changed, 152 insertions(+), 127 deletions(-) rename doc/{manipulating_visualizing => manipulating_images}/data_preparation.rst (91%) create mode 100644 doc/manipulating_images/index.rst rename doc/{manipulating_visualizing => manipulating_images}/manipulating_images.rst (86%) delete mode 100644 doc/manipulating_visualizing/index.rst create mode 100644 doc/plotting/index.rst rename doc/{manipulating_visualizing => plotting}/plotting.rst (68%) create mode 100644 examples/advanced/README.txt rename examples/{manipulating_visualizing => advanced}/plot_haxby_mass_univariate.py (100%) rename examples/{manipulating_visualizing => advanced}/plot_localizer_mass_univariate_methods.py (100%) create mode 100644 examples/manipulating_images/README.txt rename examples/{manipulating_visualizing => manipulating_images}/plot_affine_transformation.py (100%) rename examples/{manipulating_visualizing => manipulating_images}/plot_extract_rois_smith_atlas.py (100%) rename examples/{manipulating_visualizing => manipulating_images}/plot_extract_rois_statistical_maps.py (100%) rename examples/{manipulating_visualizing => manipulating_images}/plot_mask_computation.py (100%) rename examples/{manipulating_visualizing => manipulating_images}/plot_nifti_simple.py (100%) rename examples/{manipulating_visualizing => manipulating_images}/plot_roi_extraction.py (100%) rename examples/{manipulating_visualizing => manipulating_images}/plot_smooth_mean_image.py (100%) delete mode 100644 examples/manipulating_visualizing/README.txt create mode 100644 examples/plotting/README.txt rename examples/{manipulating_visualizing => plotting}/plot_atlas.py (100%) rename examples/{manipulating_visualizing => 
plotting}/plot_demo_glass_brain.py (100%) rename examples/{manipulating_visualizing => plotting}/plot_demo_glass_brain_extensive.py (100%) rename examples/{manipulating_visualizing => plotting}/plot_demo_more_plotting.py (100%) rename examples/{manipulating_visualizing => plotting}/plot_demo_plotting.py (100%) rename examples/{manipulating_visualizing => plotting}/plot_dim_plotting.py (100%) rename examples/{manipulating_visualizing => plotting}/plot_haxby_masks.py (100%) rename examples/{manipulating_visualizing => plotting}/plot_overlay.py (100%) rename examples/{manipulating_visualizing => plotting}/plot_prob_atlas.py (100%) rename examples/{manipulating_visualizing => plotting}/plot_visualization.py (100%) diff --git a/doc/connectivity/functional_connectomes.rst b/doc/connectivity/functional_connectomes.rst index 8525a8a2eb..998d18f0ea 100644 --- a/doc/connectivity/functional_connectomes.rst +++ b/doc/connectivity/functional_connectomes.rst @@ -50,8 +50,8 @@ Plotting can then be done as:: from nilearn import plotting plotting.plot_roi(atlas_filename) -.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_atlas_001.png - :target: ../auto_examples/manipulating_visualizing/plot_atlas.html +.. image:: ../auto_examples/plotting/images/sphx_glr_plot_atlas_001.png + :target: ../auto_examples/plotting/plot_atlas.html :scale: 60 .. seealso:: @@ -136,10 +136,10 @@ atlas well suited to resting-state data analysis is the `MSDL atlas Probabilistic atlases are represented as a set of continuous maps, in a 4D nifti image. Visualization the atlas thus requires to visualize each of these maps, which requires accessing them with -:func:`nilearn.image.index_img` (see the :ref:`corresponding example `). +:func:`nilearn.image.index_img` (see the :ref:`corresponding example `). -.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_overlay_001.png - :target: ../auto_examples/manipulating_visualizing/plot_overlay.html +.. image:: ../auto_examples/plotting/images/sphx_glr_plot_overlay_001.png + :target: ../auto_examples/plotting/plot_overlay.html :scale: 60 diff --git a/doc/decoding/decoding_tutorial.rst b/doc/decoding/decoding_tutorial.rst index 72b434cb83..51edb95218 100644 --- a/doc/decoding/decoding_tutorial.rst +++ b/doc/decoding/decoding_tutorial.rst @@ -50,8 +50,8 @@ corresponding category. Cat stimuli -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_haxby_masks_001.png - :target: ../auto_examples/manipulating_visualizing/plot_haxby_masks.html +.. figure:: ../auto_examples/plotting/images/sphx_glr_plot_haxby_masks_001.png + :target: ../auto_examples/plotting/plot_haxby_masks.html :scale: 30 :align: left @@ -328,8 +328,8 @@ permutation testing on the labels, with scores. -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_haxby_masks_001.png - :target: ../auto_examples/manipulating_visualizing/plot_haxby_masks.html +.. figure:: ../auto_examples/plotting/images/sphx_glr_plot_haxby_masks_001.png + :target: ../auto_examples/plotting/plot_haxby_masks.html :scale: 55 :align: left diff --git a/doc/decoding/searchlight.rst b/doc/decoding/searchlight.rst index bc4267ab95..438241312b 100644 --- a/doc/decoding/searchlight.rst +++ b/doc/decoding/searchlight.rst @@ -185,7 +185,7 @@ is its associated p-value. The :func:`nilearn.mass_univariate.permuted_ols` function returns the p-values computed with a permutation test. -.. literalinclude:: ../../examples/manipulating_visualizing/plot_haxby_mass_univariate.py +.. 
literalinclude:: ../../examples/advanced/plot_haxby_mass_univariate.py :start-after: # Perform massively univariate analysis with permuted OLS :end-before: neg_log_pvals_unmasked @@ -206,8 +206,8 @@ every voxel so that the F-statistics are comparable. This correction strategy is applied in nilearn :func:`nilearn.mass_univariate.permuted_ols` function. -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_haxby_mass_univariate_001.png - :target: ../auto_examples/manipulating_visualizing/plot_haxby_searchlight.html +.. figure:: ../auto_examples/advanced/images/sphx_glr_plot_haxby_mass_univariate_001.png + :target: ../auto_examples/advanced/plot_haxby_mass_univariate.html :align: center :scale: 60 diff --git a/doc/index.rst b/doc/index.rst index 69004cdfe0..ead7dfb534 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -15,8 +15,8 @@ .. Here we are building the carrousel -.. |glass_brain| image:: auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_glass_brain_002.png - :target: auto_examples/manipulating_visualizing/plot_demo_glass_brain.html +.. |glass_brain| image:: auto_examples/plotting/images/sphx_glr_plot_demo_glass_brain_002.png + :target: auto_examples/plotting/plot_demo_glass_brain.html .. |connectome| image:: auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png :target: auto_examples/connectivity/plot_inverse_covariance_connectome.html diff --git a/doc/introduction.rst b/doc/introduction.rst index aa3a46b248..7e57222896 100644 --- a/doc/introduction.rst +++ b/doc/introduction.rst @@ -204,8 +204,8 @@ the file name:: :ref:`See more on file name matchings `. -.. image:: auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_glass_brain_001.png - :target: auto_examples/manipulating_visualizing/plot_demo_glass_brain.html +.. image:: auto_examples/plotting/images/sphx_glr_plot_demo_glass_brain_001.png + :target: auto_examples/plotting/plot_demo_glass_brain.html :align: center :scale: 60 @@ -264,7 +264,7 @@ To loop over each individual volume of a 4D image, use :func:`image.iter_img`:: * To perform a for loop in Python, you can use the "range" function * The solution can be found :ref:`here - ` + ` | diff --git a/doc/manipulating_visualizing/data_preparation.rst b/doc/manipulating_images/data_preparation.rst similarity index 91% rename from doc/manipulating_visualizing/data_preparation.rst rename to doc/manipulating_images/data_preparation.rst index 7ea3dc1081..009ffa29a9 100644 --- a/doc/manipulating_visualizing/data_preparation.rst +++ b/doc/manipulating_images/data_preparation.rst @@ -120,7 +120,7 @@ possible, there is no need to save your data to a file to pass it to a in memory: -.. literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py +.. literalinclude:: ../../examples/manipulating_images/plot_mask_computation.py :start-after: Load NYU resting-state dataset :end-before: # To display the background @@ -140,9 +140,9 @@ Computing the mask .. note:: The full example described in this section can be found here: - :doc:`plot_mask_computation.py <../auto_examples/manipulating_visualizing/plot_mask_computation>`. + :doc:`plot_mask_computation.py <../auto_examples/manipulating_images/plot_mask_computation>`. It is also related to this example: - :doc:`plot_nifti_simple.py <../auto_examples/manipulating_visualizing/plot_nifti_simple>`. + :doc:`plot_nifti_simple.py <../auto_examples/manipulating_images/plot_nifti_simple>`. 
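In code, the mask-computation workflow that this part of the guide walks through reduces to a few lines. A rough sketch (the `mask_args` values below are illustrative placeholders echoing the parameters discussed in the text, and `epi_filename` stands for any 4D EPI image path)::

    from nilearn.input_data import NiftiMasker

    # A stronger opening erodes stray voxels (such as residual skull) from
    # the mask border; a higher lower_cutoff keeps only the brightest voxels.
    masker = NiftiMasker(mask_args=dict(opening=10, lower_cutoff=0.8))
    masker.fit(epi_filename)
    computed_mask = masker.mask_img_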
If a mask is not specified as an argument, :class:`NiftiMasker` will try to compute @@ -160,13 +160,13 @@ we can compare the data-derived mask against. The first step is to generate a mask with default parameters and visualize it. -.. literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py +.. literalinclude:: ../../examples/manipulating_images/plot_mask_computation.py :start-after: # Simple mask extraction from EPI images :end-before: # Generate mask with strong opening -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_mask_computation_002.png - :target: ../auto_examples/plot_mask_computation.html +.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_mask_computation_002.png + :target: ../auto_examples/manipulating_images/plot_mask_computation.html :scale: 50% @@ -176,13 +176,13 @@ opening steps (*opening=10*) using the `mask_args` argument of the on the outer voxel layers of the mask, which can for example remove remaining skull parts in the image. -.. literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py +.. literalinclude:: ../../examples/manipulating_images/plot_mask_computation.py :start-after: # Generate mask with strong opening :end-before: # Generate mask with a high lower cutoff -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_mask_computation_003.png - :target: ../auto_examples/plot_mask_computation.html +.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_mask_computation_003.png + :target: ../auto_examples/manipulating_images/plot_mask_computation.html :scale: 50% @@ -195,13 +195,13 @@ the lower cutoff to enforce selection of those voxels that appear as bright in the EPI image. -.. literalinclude:: ../../examples/manipulating_visualizing/plot_mask_computation.py +.. literalinclude:: ../../examples/manipulating_images/plot_mask_computation.py :start-after: # Generate mask with a high lower cutoff :end-before: ################################################################################ -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_mask_computation_004.png - :target: ../auto_examples/plot_mask_computation.html +.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_mask_computation_004.png + :target: ../auto_examples/manipulating_images/plot_mask_computation.html :scale: 50% @@ -245,14 +245,14 @@ In this case, nilearn computes automatically the translation part of the transformation matrix (i.e., affine). -.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_affine_transformation_002.png - :target: ../auto_examples/plot_affine_transformation.html +.. image:: ../auto_examples/manipulating_images/images/sphx_glr_plot_affine_transformation_002.png + :target: ../auto_examples/manipulating_images/plot_affine_transformation.html :scale: 33% -.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_affine_transformation_004.png - :target: ../auto_examples/plot_affine_transformation.html +.. image:: ../auto_examples/manipulating_images/images/sphx_glr_plot_affine_transformation_004.png + :target: ../auto_examples/manipulating_images/plot_affine_transformation.html :scale: 33% -.. image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_affine_transformation_003.png - :target: ../auto_examples/plot_affine_transformation.html +.. 
image:: ../auto_examples/manipulating_images/images/sphx_glr_plot_affine_transformation_003.png + :target: ../auto_examples/manipulating_images/plot_affine_transformation.html :scale: 33% @@ -349,9 +349,9 @@ an excerpt of :ref:`the example performing Anova-SVM on the Haxby data .. topic:: **Examples to better understand the NiftiMasker** - * :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_nifti_simple.py` + * :ref:`sphx_glr_auto_examples_manipulating_images_plot_nifti_simple.py` - * :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_mask_computation.py` + * :ref:`sphx_glr_auto_examples_manipulating_images_plot_mask_computation.py` .. _region: diff --git a/doc/manipulating_images/index.rst b/doc/manipulating_images/index.rst new file mode 100644 index 0000000000..3709f44c6c --- /dev/null +++ b/doc/manipulating_images/index.rst @@ -0,0 +1,21 @@ +.. include:: ../tune_toc.rst + + +.. _manipulating_images: + +================================= +Image manipulation +================================= + +In this section, we detail the general tools for manipulating +brain images with nilearn. + +| + +.. include:: ../includes/big_toc_css.rst + + +.. toctree:: + + data_preparation.rst + manipulating_images.rst diff --git a/doc/manipulating_visualizing/manipulating_images.rst b/doc/manipulating_images/manipulating_images.rst similarity index 86% rename from doc/manipulating_visualizing/manipulating_images.rst rename to doc/manipulating_images/manipulating_images.rst index 7d44db9d41..c63834ed37 100644 --- a/doc/manipulating_visualizing/manipulating_images.rst +++ b/doc/manipulating_images/manipulating_images.rst @@ -143,7 +143,7 @@ Nifti and Analyze files Neuroimaging data can be loaded in a simple way thanks to nibabel_. A Nifti file on disk can be loaded with a single line. -.. literalinclude:: ../../examples/plotting/plot_visualization.py :start-after: # Fetch data :end-before: # Visualization @@ -221,12 +221,12 @@ If we do not have a spatial mask of the target regions, a brain mask can be easily extracted from the fMRI data by the :func:`nilearn.masking.compute_epi_mask` function: -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_visualization_002.png - :target: ../auto_examples/manipulating_visualizing/plot_visualization.html :align: right :scale: 50% -.. literalinclude:: ../../examples/manipulating_visualizing/plot_visualization.py +.. literalinclude:: ../../examples/plotting/plot_visualization.py :start-after: # Extracting a brain mask :end-before: # Applying the mask to extract the corresponding time series @@ -248,12 +248,12 @@ brain. It is thus convenient to apply a brain mask in order to convert the :width: 100% -.. literalinclude:: ../../examples/manipulating_visualizing/plot_visualization.py +.. literalinclude:: ../../examples/plotting/plot_visualization.py :start-after: # Applying the mask to extract the corresponding time series :end-before: # Find voxels of interest -..
figure:: ../auto_examples/plotting/images/sphx_glr_plot_visualization_003.png + :target: ../auto_examples/plotting/plot_visualization.html :align: center :scale: 50 @@ -271,7 +271,7 @@ set up your own data preparation procedure: .. currentmodule:: nilearn * Resampling: :func:`nilearn.image.resample_img`. See the example - :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_affine_transformation.py` to + :ref:`sphx_glr_auto_examples_manipulating_images_plot_affine_transformation.py` to see the effect of affine transforms on data and bounding boxes. * Computing the mean of images (along the time/4th dimension): :func:`nilearn.image.mean_img` @@ -321,12 +321,12 @@ the three dimensions). Analogous to the majority of nilearn functions, it can also use file names as input parameters. -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py +.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py :start-after: # Smooth the data :end-before: # Run a T-test for face and houses -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_001.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html +.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_001.png + :target: ../auto_examples/manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -352,12 +352,12 @@ This test returns p-values that represent probabilities that the two time-series had been drawn from the same distribution. The lower is the p-value, the more discriminative is the voxel in distinguishing the two conditions. -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py +.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py :start-after: # Run a T-test for face and houses :end-before: # Build a mask from this statistical map -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_002.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html +.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_002.png + :target: ../auto_examples/manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -373,12 +373,12 @@ Voxels with better p-values are kept as voxels of interest. Applying a threshold to an array is easy thanks to numpy indexing à la Matlab. -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py +.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py :start-after: # Thresholding :end-before: # Binarization and intersection with VT mask -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_003.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html +.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_003.png + :target: ../auto_examples/manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -395,12 +395,12 @@ nibabel's **nibabel.load**. We can then use a logical "and" operation that have been selected in both masks. In neuroimaging jargon, this is called an "AND conjunction." -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py +.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py :start-after: # Binarization and intersection with VT mask :end-before: # Dilation -.. 
figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_004.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html +.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_004.png + :target: ../auto_examples/manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -415,12 +415,12 @@ not to forget voxels located on the edge of a ROI. Put differently, such operations can fill "holes" in masked voxel representations. -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py +.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py :start-after: # Dilation :end-before: # Identification of connected components -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_005.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html +.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_005.png + :target: ../auto_examples/manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -432,12 +432,12 @@ identifies immediately neighboring voxels in our voxels mask. It assigns a separate integer label to each one of them. -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py +.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py :start-after: # Identification of connected components :end-before: # Use the new ROIs to extract data maps in both ROIs -.. figure:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_roi_extraction_006.png - :target: ../auto_examples/manipulating_visualizing/plot_roi_extraction.html +.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_006.png + :target: ../auto_examples/manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -447,7 +447,7 @@ Saving the result The final voxel mask is saved using nibabel for further inspection with a software such as FSLView. -.. literalinclude:: ../../examples/manipulating_visualizing/plot_roi_extraction.py +.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py :start-after: # save the ROI 'atlas' to a single output Nifti .. _nibabel: http://nipy.sourceforge.net/nibabel/ diff --git a/doc/manipulating_visualizing/index.rst b/doc/manipulating_visualizing/index.rst deleted file mode 100644 index e06891257e..0000000000 --- a/doc/manipulating_visualizing/index.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. include:: ../tune_toc.rst - - -.. _manipulation_visualization: - -============================================ -Image manipulation and visualization -============================================ - -In this section, we detail the general tools to manipulation and -visualize neuroimaging volume with nilearn. - -| - -.. include:: ../includes/big_toc_css.rst - - -.. toctree:: - - plotting.rst - data_preparation.rst - manipulating_images.rst - diff --git a/doc/plotting/index.rst b/doc/plotting/index.rst new file mode 100644 index 0000000000..b24155030a --- /dev/null +++ b/doc/plotting/index.rst @@ -0,0 +1,20 @@ +.. include:: ../tune_toc.rst + + +.. _plotting: + +========================= +Visualization +========================= + +In this section, we detail the general tools to visualize +neuroimaging volumes with nilearn. + +| + +.. include:: ../includes/big_toc_css.rst + + +.. 
toctree:: + + plotting.rst diff --git a/doc/manipulating_visualizing/plotting.rst b/doc/plotting/plotting.rst similarity index 68% rename from doc/manipulating_visualizing/plotting.rst rename to doc/plotting/plotting.rst index 09227e452e..31ca3ffdf8 100644 --- a/doc/manipulating_visualizing/plotting.rst +++ b/doc/plotting/plotting.rst @@ -16,32 +16,32 @@ Nilearn has a set of plotting functions to plot brain volumes that are fined tuned to specific applications. Amongst other things, they use different heuristics to find cutting coordinates. -.. |plot_stat_map| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_001.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html +.. |plot_stat_map| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_001.png + :target: ../auto_examples/plotting/plot_demo_plotting.html :scale: 50 -.. |plot_glass_brain| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_glass_brain_extensive_001.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_glass_brain_extensive.html +.. |plot_glass_brain| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_glass_brain_extensive_001.png + :target: ../auto_examples/plotting/plot_demo_glass_brain_extensive.html :scale: 50 .. |plot_connectome| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html :scale: 50 -.. |plot_anat| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_003.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html +.. |plot_anat| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_003.png + :target: ../auto_examples/plotting/plot_demo_plotting.html :scale: 50 -.. |plot_roi| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_004.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html +.. |plot_roi| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_004.png + :target: ../auto_examples/plotting/plot_demo_plotting.html :scale: 50 -.. |plot_epi| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_plotting_005.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_plotting.html +.. |plot_epi| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_005.png + :target: ../auto_examples/plotting/plot_demo_plotting.html :scale: 50 -.. |plot_prob_atlas| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_prob_atlas_003.png - :target: ../auto_examples/manipulating_visualizing/plot_prob_atlas.html +.. |plot_prob_atlas| image:: ../auto_examples/plotting/images/sphx_glr_plot_prob_atlas_003.png + :target: ../auto_examples/plotting/plot_prob_atlas.html :scale: 50 .. A temporary hack to avoid a sphinx bug @@ -105,41 +105,41 @@ different heuristics to find cutting coordinates. .. seealso:: - :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_dim_plotting.py` + :ref:`sphx_glr_auto_examples_plotting_plot_dim_plotting.py` Different display modes ======================== -.. |plot_ortho| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_001.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html +.. 
|plot_ortho| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_001.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_z_many| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_002.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html +.. |plot_z_many| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_002.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html :scale: 30 -.. |plot_x| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_003.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html +.. |plot_x| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_003.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_x_small| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_004.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html +.. |plot_x_small| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_004.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_z_small| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_005.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html +.. |plot_z_small| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_005.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_xz| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_006.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html +.. |plot_xz| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_006.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_yx| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_007.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html +.. |plot_yx| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_007.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_yz| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_008.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html +.. |plot_yz| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_008.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html :scale: 50 @@ -197,12 +197,12 @@ plot, and has methods to add overlays, contours or edge maps:: display = plotting.plot_epi(...) -.. |plot_edges| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_009.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html +.. |plot_edges| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_009.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_contours| image:: ../auto_examples/manipulating_visualizing/images/sphx_glr_plot_demo_more_plotting_010.png - :target: ../auto_examples/manipulating_visualizing/plot_demo_more_plotting.html +.. 
|plot_contours| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_010.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html :scale: 50 ================= ========================================================= @@ -222,14 +222,14 @@ plot, and has methods to add overlays, contours or edge maps:: 'levels'. This is typically useful to outline a mask, or ROI on top of another map. |hack| - **Example:** :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_haxby_masks.py` + **Example:** :ref:`sphx_glr_auto_examples_plotting_plot_haxby_masks.py` **add_overlay** `display.add_overlay(img, cmap=plotting.cm.purple_green, threshold=3)` |hack| Add a new overlay on the existing figure |hack| - **Example:** :ref:`sphx_glr_auto_examples_manipulating_visualizing_plot_overlay.py` + **Example:** :ref:`sphx_glr_auto_examples_plotting_plot_overlay.py` ================= ========================================================= diff --git a/doc/user_guide.rst b/doc/user_guide.rst index 86a8d7da8a..1cb2be5cce 100644 --- a/doc/user_guide.rst +++ b/doc/user_guide.rst @@ -17,7 +17,8 @@ User guide: table of contents introduction.rst decoding/index.rst connectivity/index.rst - manipulating_visualizing/index.rst + plotting/index.rst + manipulating_images/index.rst building_blocks/index.rst modules/reference.rst diff --git a/examples/advanced/README.txt b/examples/advanced/README.txt new file mode 100644 index 0000000000..4d7571adc7 --- /dev/null +++ b/examples/advanced/README.txt @@ -0,0 +1,2 @@ +Advanced statistical analysis of brain images +--------------------------------------------- diff --git a/examples/manipulating_visualizing/plot_haxby_mass_univariate.py b/examples/advanced/plot_haxby_mass_univariate.py similarity index 100% rename from examples/manipulating_visualizing/plot_haxby_mass_univariate.py rename to examples/advanced/plot_haxby_mass_univariate.py diff --git a/examples/manipulating_visualizing/plot_localizer_mass_univariate_methods.py b/examples/advanced/plot_localizer_mass_univariate_methods.py similarity index 100% rename from examples/manipulating_visualizing/plot_localizer_mass_univariate_methods.py rename to examples/advanced/plot_localizer_mass_univariate_methods.py diff --git a/examples/manipulating_images/README.txt b/examples/manipulating_images/README.txt new file mode 100644 index 0000000000..3e9090f5fa --- /dev/null +++ b/examples/manipulating_images/README.txt @@ -0,0 +1,4 @@ +Manipulating brain image volumes +-------------------------------- + +See :ref:`data_manipulation` for more details. 
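As a rough sketch of the manipulations these renamed examples exercise — assuming the :mod:`nilearn.image` functions ``smooth_img`` and ``mean_img``, with ``func_img`` standing in for any 4D functional image — the core pattern looks roughly like::

    >>> from nilearn import image
    >>> # Gaussian-smooth each volume with a 6mm FWHM kernel
    >>> smoothed = image.smooth_img(func_img, fwhm=6)  # doctest: +SKIP
    >>> # Collapse the smoothed 4D image into a single mean 3D volume
    >>> mean_epi = image.mean_img(smoothed)  # doctest: +SKIP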
diff --git a/examples/manipulating_visualizing/plot_affine_transformation.py b/examples/manipulating_images/plot_affine_transformation.py similarity index 100% rename from examples/manipulating_visualizing/plot_affine_transformation.py rename to examples/manipulating_images/plot_affine_transformation.py diff --git a/examples/manipulating_visualizing/plot_extract_rois_smith_atlas.py b/examples/manipulating_images/plot_extract_rois_smith_atlas.py similarity index 100% rename from examples/manipulating_visualizing/plot_extract_rois_smith_atlas.py rename to examples/manipulating_images/plot_extract_rois_smith_atlas.py diff --git a/examples/manipulating_visualizing/plot_extract_rois_statistical_maps.py b/examples/manipulating_images/plot_extract_rois_statistical_maps.py similarity index 100% rename from examples/manipulating_visualizing/plot_extract_rois_statistical_maps.py rename to examples/manipulating_images/plot_extract_rois_statistical_maps.py diff --git a/examples/manipulating_visualizing/plot_mask_computation.py b/examples/manipulating_images/plot_mask_computation.py similarity index 100% rename from examples/manipulating_visualizing/plot_mask_computation.py rename to examples/manipulating_images/plot_mask_computation.py diff --git a/examples/manipulating_visualizing/plot_nifti_simple.py b/examples/manipulating_images/plot_nifti_simple.py similarity index 100% rename from examples/manipulating_visualizing/plot_nifti_simple.py rename to examples/manipulating_images/plot_nifti_simple.py diff --git a/examples/manipulating_visualizing/plot_roi_extraction.py b/examples/manipulating_images/plot_roi_extraction.py similarity index 100% rename from examples/manipulating_visualizing/plot_roi_extraction.py rename to examples/manipulating_images/plot_roi_extraction.py diff --git a/examples/manipulating_visualizing/plot_smooth_mean_image.py b/examples/manipulating_images/plot_smooth_mean_image.py similarity index 100% rename from examples/manipulating_visualizing/plot_smooth_mean_image.py rename to examples/manipulating_images/plot_smooth_mean_image.py diff --git a/examples/manipulating_visualizing/README.txt b/examples/manipulating_visualizing/README.txt deleted file mode 100644 index bea909c2b4..0000000000 --- a/examples/manipulating_visualizing/README.txt +++ /dev/null @@ -1,4 +0,0 @@ -Manipulating images and visualization -------------------------------------- - -See :ref:`plotting` and :ref:`data_manipulation` for more details. diff --git a/examples/plotting/README.txt b/examples/plotting/README.txt new file mode 100644 index 0000000000..eb0027b784 --- /dev/null +++ b/examples/plotting/README.txt @@ -0,0 +1,4 @@ +Visualization of brain images +----------------------------- + +See :ref:`plotting` for more details. 
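For orientation, these plotting examples share roughly the following pattern — ``stat_img`` and ``mask_img`` are stand-ins for any statistical map and binary mask image, and the calls are the ones documented in the plotting guide above::

    >>> from nilearn import plotting
    >>> # Five axial (z) cuts, positioned automatically
    >>> display = plotting.plot_stat_map(stat_img, display_mode='z',
    ...                                  cut_coords=5)  # doctest: +SKIP
    >>> # Outline the mask on top of the map, then save and close
    >>> display.add_contours(mask_img, levels=[.5], colors='r')  # doctest: +SKIP
    >>> display.savefig('stat_map.png')  # doctest: +SKIP
    >>> display.close()  # doctest: +SKIP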
diff --git a/examples/manipulating_visualizing/plot_atlas.py b/examples/plotting/plot_atlas.py similarity index 100% rename from examples/manipulating_visualizing/plot_atlas.py rename to examples/plotting/plot_atlas.py diff --git a/examples/manipulating_visualizing/plot_demo_glass_brain.py b/examples/plotting/plot_demo_glass_brain.py similarity index 100% rename from examples/manipulating_visualizing/plot_demo_glass_brain.py rename to examples/plotting/plot_demo_glass_brain.py diff --git a/examples/manipulating_visualizing/plot_demo_glass_brain_extensive.py b/examples/plotting/plot_demo_glass_brain_extensive.py similarity index 100% rename from examples/manipulating_visualizing/plot_demo_glass_brain_extensive.py rename to examples/plotting/plot_demo_glass_brain_extensive.py diff --git a/examples/manipulating_visualizing/plot_demo_more_plotting.py b/examples/plotting/plot_demo_more_plotting.py similarity index 100% rename from examples/manipulating_visualizing/plot_demo_more_plotting.py rename to examples/plotting/plot_demo_more_plotting.py diff --git a/examples/manipulating_visualizing/plot_demo_plotting.py b/examples/plotting/plot_demo_plotting.py similarity index 100% rename from examples/manipulating_visualizing/plot_demo_plotting.py rename to examples/plotting/plot_demo_plotting.py diff --git a/examples/manipulating_visualizing/plot_dim_plotting.py b/examples/plotting/plot_dim_plotting.py similarity index 100% rename from examples/manipulating_visualizing/plot_dim_plotting.py rename to examples/plotting/plot_dim_plotting.py diff --git a/examples/manipulating_visualizing/plot_haxby_masks.py b/examples/plotting/plot_haxby_masks.py similarity index 100% rename from examples/manipulating_visualizing/plot_haxby_masks.py rename to examples/plotting/plot_haxby_masks.py diff --git a/examples/manipulating_visualizing/plot_overlay.py b/examples/plotting/plot_overlay.py similarity index 100% rename from examples/manipulating_visualizing/plot_overlay.py rename to examples/plotting/plot_overlay.py diff --git a/examples/manipulating_visualizing/plot_prob_atlas.py b/examples/plotting/plot_prob_atlas.py similarity index 100% rename from examples/manipulating_visualizing/plot_prob_atlas.py rename to examples/plotting/plot_prob_atlas.py diff --git a/examples/manipulating_visualizing/plot_visualization.py b/examples/plotting/plot_visualization.py similarity index 100% rename from examples/manipulating_visualizing/plot_visualization.py rename to examples/plotting/plot_visualization.py From cba0d3a70c9dc7fb70eb350b8fcf25a877d5e6e8 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Fri, 8 Jan 2016 15:04:36 +0100 Subject: [PATCH 0038/1925] Fixing sphinx failure --- doc/plotting/plotting.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/plotting/plotting.rst b/doc/plotting/plotting.rst index 31ca3ffdf8..cfdf97d6a6 100644 --- a/doc/plotting/plotting.rst +++ b/doc/plotting/plotting.rst @@ -1,4 +1,4 @@ -.. _plotting: +.. 
_plotting_images: ====================== Plotting brain images ====================== From 185759b4fa8c99e399575ccb528cb5ccfcd9e0cf Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Sun, 10 Jan 2016 18:48:48 +0100 Subject: [PATCH 0039/1925] Moved low level ICA example from connectivity to advanced --- examples/{connectivity => advanced}/plot_ica_resting_state.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{connectivity => advanced}/plot_ica_resting_state.py (100%) diff --git a/examples/connectivity/plot_ica_resting_state.py b/examples/advanced/plot_ica_resting_state.py similarity index 100% rename from examples/connectivity/plot_ica_resting_state.py rename to examples/advanced/plot_ica_resting_state.py From c1d7b4d4a1ae34f90e45f1f4785d8a80f8fc99f6 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Sun, 10 Jan 2016 21:01:15 +0100 Subject: [PATCH 0040/1925] Included contents of plotting.rst in index.rst and removed plotting.rst --- doc/plotting/index.rst | 268 ++++++++++++++++++++++++++++++++++++-- doc/plotting/plotting.rst | 267 ------------------------------------- 2 files changed, 259 insertions(+), 276 deletions(-) delete mode 100644 doc/plotting/plotting.rst diff --git a/doc/plotting/index.rst b/doc/plotting/index.rst index b24155030a..956abc8c27 100644 --- a/doc/plotting/index.rst +++ b/doc/plotting/index.rst @@ -1,20 +1,270 @@ -.. include:: ../tune_toc.rst - - .. _plotting: -========================= -Visualization -========================= +====================== +Plotting brain images +====================== In this section, we detail the general tools to visualize neuroimaging volumes with nilearn. +Nilearn comes with plotting functions to display brain maps coming from +Nifti-like images, in the :mod:`nilearn.plotting` module. + +.. currentmodule:: nilearn.plotting + +Different plotting functions +============================= + +Nilearn has a set of plotting functions to plot brain volumes that are +fine-tuned to specific applications. Amongst other things, they use +different heuristics to find cutting coordinates. + +.. |plot_stat_map| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_001.png + :target: ../auto_examples/plotting/plot_demo_plotting.html + :scale: 50 + +.. |plot_glass_brain| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_glass_brain_extensive_001.png + :target: ../auto_examples/plotting/plot_demo_glass_brain_extensive.html + :scale: 50 + +.. |plot_connectome| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png + :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html + :scale: 50 + +.. |plot_anat| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_003.png + :target: ../auto_examples/plotting/plot_demo_plotting.html + :scale: 50 + +.. |plot_roi| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_004.png + :target: ../auto_examples/plotting/plot_demo_plotting.html + :scale: 50 + +.. |plot_epi| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_005.png + :target: ../auto_examples/plotting/plot_demo_plotting.html + :scale: 50 + +.. |plot_prob_atlas| image:: ../auto_examples/plotting/images/sphx_glr_plot_prob_atlas_003.png + :target: ../auto_examples/plotting/plot_prob_atlas.html + :scale: 50 + +.. A temporary hack to avoid a sphinx bug +.. |hack| raw:: html + + <br>
+ + +=================== ========================================================= +=================== ========================================================= +|plot_anat| :func:`plot_anat` + |hack| + Plotting an anatomical image + +|plot_epi| :func:`plot_epi` + |hack| + Plotting an EPI, or T2* image + +|plot_glass_brain| :func:`plot_glass_brain` + |hack| + Glass brain visualization. By default plots maximum + intensity projection of the absolute values. To plot + positive and negative values set plot_abs parameter to + False. + +|plot_stat_map| :func:`plot_stat_map` + |hack| + Plotting a statistical map, like a T-map, a Z-map, or + an ICA, with an optional background + +|plot_roi| :func:`plot_roi` + |hack| + Plotting ROIs, or a mask, with an optional background + +|plot_connectome| :func:`plot_connectome` + |hack| + Plotting a connectome + +|plot_prob_atlas| :func:`plot_prob_atlas` + |hack| + Plotting 4D probabilistic atlas maps + +**plot_img** :func:`plot_img` + |hack| + General-purpose function, with no specific presets +=================== ========================================================= + + +.. warning:: **Opening too many figures without closing** + + Each call to a plotting function creates a new figure by default. When + used in non-interactive settings, such as a script or a program, these + are not displayed, but still accumulate and eventually lead to slowing + the execution and running out of memory. + + To avoid this, you must close the plot as follows:: + + >>> from nilearn import plotting + >>> display = plotting.plot_stat_map(img) # doctest: +SKIP + >>> display.close() # doctest: +SKIP + +.. seealso:: + + :ref:`sphx_glr_auto_examples_plotting_plot_dim_plotting.py` + +Different display modes +======================== + +.. |plot_ortho| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_001.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_z_many| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_002.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html + :scale: 30 + +.. |plot_x| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_003.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_x_small| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_004.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_z_small| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_005.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_xz| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_006.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_yx| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_007.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html + :scale: 50 + +..
|plot_yz| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_008.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html + :scale: 50 + + +================= ========================================================= +================= ========================================================= +|plot_ortho| `display_mode='ortho', cut_coords=(36, -27, 60)` + |hack| + Ortho slicer: 3 cuts along the x, y, z directions + +|plot_z_many| `display_mode='z', cut_coords=5` + |hack| + Cutting in the z direction, specifying the number of + cuts + +|plot_x| `display_mode='x', cut_coords=(-36, 36)` + |hack| + Cutting in the x direction, specifying the exact + cuts + +|plot_x_small| `display_mode='x', cut_coords=1` + |hack| + Cutting in the x direction, with only 1 cut, that is + automatically positioned + +|plot_z_small| `display_mode='z', cut_coords=1, colorbar=False` + |hack| + Cutting in the z direction, with only 1 cut, that is + automatically positioned + +|plot_xz| `display_mode='xz', cut_coords=(36, 60)` + |hack| + Cutting in the x and z direction, with cuts manually + positioned + +|plot_yx| `display_mode='yx', cut_coords=(-27, 36)` + |hack| + Cutting in the y and x direction, with cuts manually + positioned + +|plot_yz| `display_mode='yz', cut_coords=(-27, 60)` + |hack| + Cutting in the y and z direction, with cuts manually + positioned + + +================= ========================================================= + +Adding overlays, edges and contours +==================================== + +To add overlays, contours, or edges, use the return value of the plotting +functions. Indeed, these return a display object, such as the +:class:`nilearn.plotting.displays.OrthoSlicer`. This object represents the +plot, and has methods to add overlays, contours or edge maps:: + + display = plotting.plot_epi(...) + +.. |plot_edges| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_009.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html + :scale: 50 + +.. |plot_contours| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_010.png + :target: ../auto_examples/plotting/plot_demo_more_plotting.html + :scale: 50 + +================= ========================================================= +================= ========================================================= +|plot_edges| `display.add_edges(img)` + |hack| + Add a plot of the edges of `img`, where edges are + extracted using a Canny edge-detection routine. This + is typically useful to check registration. Note that + `img` should have some visible sharp edges. Typically + an EPI img does not, but a T1 does. + +|plot_contours| `display.add_contours(img, levels=[.5], colors='r')` + |hack| + Add a plot of the contours of `img`, where contours + are computed for constant values, specified in + 'levels'. This is typically useful to outline a mask, + or ROI on top of another map.
+ |hack| + **Example:** :ref:`sphx_glr_auto_examples_plotting_plot_haxby_masks.py` + + +**add_overlay** `display.add_overlay(img, cmap=plotting.cm.purple_green, threshold=3)` + |hack| + Add a new overlay on the existing figure + |hack| + **Example:** :ref:`sphx_glr_auto_examples_plotting_plot_overlay.py` + + +================= ========================================================= + +Displaying or saving to an image file +===================================== + +To display the figure when running a script, you need to call +:func:`nilearn.plotting.show`: (this is just an alias to +:func:`matplotlib.pyplot.show`):: + + >>> from nilearn import plotting + >>> plotting.show() # doctest: +SKIP + +The simplest way to output an image file from the plotting functions is +to specify the `output_file` argument:: + + >>> from nilearn import plotting + >>> plotting.plot_stat_map(img, output_file='pretty_brain.png') # doctest: +SKIP + +In this case, the display is closed automatically and the plotting +function returns None. + | -.. include:: ../includes/big_toc_css.rst +The display object returned by the plotting function has a savefig method +that can be used to save the plot to an image file:: + >>> from nilearn import plotting + >>> display = plotting.plot_stat_map(img) # doctest: +SKIP + >>> display.savefig('pretty_brain.png') # doctest: +SKIP + # Don't forget to close the display + >>> display.close() # doctest: +SKIP -.. toctree:: - plotting.rst diff --git a/doc/plotting/plotting.rst b/doc/plotting/plotting.rst deleted file mode 100644 index cfdf97d6a6..0000000000 --- a/doc/plotting/plotting.rst +++ /dev/null @@ -1,267 +0,0 @@ -.. _plotting_images: - -====================== -Plotting brain images -====================== - -Nilearn comes with plotting function to display brain maps coming from -Nifti-like images, in the :mod:`nilearn.plotting` module. - -.. currentmodule:: nilearn.plotting - -Different plotting functions -============================= - -Nilearn has a set of plotting functions to plot brain volumes that are -fined tuned to specific applications. Amongst other things, they use -different heuristics to find cutting coordinates. - -.. |plot_stat_map| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_001.png - :target: ../auto_examples/plotting/plot_demo_plotting.html - :scale: 50 - -.. |plot_glass_brain| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_glass_brain_extensive_001.png - :target: ../auto_examples/plotting/plot_demo_glass_brain_extensive.html - :scale: 50 - -.. |plot_connectome| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png - :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html - :scale: 50 - -.. |plot_anat| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_003.png - :target: ../auto_examples/plotting/plot_demo_plotting.html - :scale: 50 - -.. |plot_roi| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_004.png - :target: ../auto_examples/plotting/plot_demo_plotting.html - :scale: 50 - -.. |plot_epi| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_005.png - :target: ../auto_examples/plotting/plot_demo_plotting.html - :scale: 50 - -.. |plot_prob_atlas| image:: ../auto_examples/plotting/images/sphx_glr_plot_prob_atlas_003.png - :target: ../auto_examples/plotting/plot_prob_atlas.html - :scale: 50 - -.. A temporary hack to avoid a sphinx bug -.. |hack| raw:: html - -
- - -=================== ========================================================= -=================== ========================================================= -|plot_anat| :func:`plot_anat` - |hack| - Plotting an anatomical image - -|plot_epi| :func:`plot_epi` - |hack| - Plotting an EPI, or T2* image - -|plot_glass_brain| :func:`plot_glass_brain` - |hack| - Glass brain visualization. By default plots maximum - intensity projection of the absolute values. To plot - positive and negative values set plot_abs parameter to - False. - -|plot_stat_map| :func:`plot_stat_map` - |hack| - Plotting a statistical map, like a T-map, a Z-map, or - an ICA, with an optional background - -|plot_roi| :func:`plot_roi` - |hack| - Plotting ROIs, or a mask, with an optional background - -|plot_connectome| :func:`plot_connectome` - |hack| - Plotting a connectome - -|plot_prob_atlas| :func:`plot_prob_atlas` - |hack| - Plotting 4D probabilistic atlas maps - -**plot_img** :func:`plot_img` - |hack| - General-purpose function, with no specific presets -=================== ========================================================= - - -.. warning:: **Opening too many figures without closing** - - Each call to a plotting function creates a new figure by default. When - used in non-interactive settings, such as a script or a program, these - are not displayed, but still accumulate and eventually lead to slowing - the execution and running out of memory. - - To avoid this, you must close the plot as follow:: - - >>> from nilearn import plotting - >>> display = plotting.plot_stat_map(img) # doctest: +SKIP - >>> display.close() # doctest: +SKIP - -.. seealso:: - - :ref:`sphx_glr_auto_examples_plotting_plot_dim_plotting.py` - -Different display modes -======================== - -.. |plot_ortho| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_001.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_z_many| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_002.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html - :scale: 30 - -.. |plot_x| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_003.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_x_small| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_004.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_z_small| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_005.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_xz| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_006.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_yx| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_007.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html - :scale: 50 - -.. 
|plot_yz| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_008.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html - :scale: 50 - - -================= ========================================================= -================= ========================================================= -|plot_ortho| `display_mode='ortho', cut_coords=(36, -27, 60)` - |hack| - Ortho slicer: 3 cuts along the x, y, z directions - -|plot_z_many| `display_mode='z', cut_coords=5` - |hack| - Cutting in the z direction, specifying the number of - cuts - -|plot_x| `display_mode='x', cut_coords=(-36, 36)` - |hack| - Cutting in the x direction, specifying the exact - cuts - -|plot_x_small| `display_mode='x', cut_coords=1` - |hack| - Cutting in the x direction, with only 1 cut, that is - automatically positionned - -|plot_z_small| `display_mode='z', cut_coords=1, colorbar=False` - |hack| - Cutting in the z direction, with only 1 cut, that is - automatically positionned - -|plot_xz| `display_mode='xz', cut_coords=(36, 60)` - |hack| - Cutting in the x and z direction, with cuts manually - positionned - -|plot_yx| `display_mode='yx', cut_coords=(-27, 36)` - |hack| - Cutting in the y and x direction, with cuts manually - positionned - -|plot_yz| `display_mode='yz', cut_coords=(-27, 60)` - |hack| - Cutting in the y and z direction, with cuts manually - positionned - - -================= ========================================================= - -Adding overlays, edges and contours -==================================== - -To add overlays, contours, or edges, use the return value of the plotting -functions. Indeed, these return a display object, such as the -:class:`nilearn.plotting.displays.OrthoSlicer`. This object represents the -plot, and has methods to add overlays, contours or edge maps:: - - display = plotting.plot_epi(...) - -.. |plot_edges| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_009.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html - :scale: 50 - -.. |plot_contours| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_010.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html - :scale: 50 - -================= ========================================================= -================= ========================================================= -|plot_edges| `display.add_edges(img)` - |hack| - Add a plot of the edges of `img`, where edges are - extracted using a Canny edge-detection routine. This - is typically useful to check registration. Note that - `img` should have some visible sharp edges. Typically - an EPI img does not, but a T1 does. - -|plot_contours| `display.add_contours(img, levels=[.5], colors='r')` - |hack| - Add a plot of the contours of `img`, where contours - are computed for constant values, specified in - 'levels'. This is typically useful to outline a mask, - or ROI on top of another map. 
- |hack| - **Example:** :ref:`sphx_glr_auto_examples_plotting_plot_haxby_masks.py` - - -**add_overlay** `display.add_overlay(img, cmap=plotting.cm.purple_green, threshold=3)` - |hack| - Add a new overlay on the existing figure - |hack| - **Example:** :ref:`sphx_glr_auto_examples_plotting_plot_overlay.py` - - -================= ========================================================= - -Displaying or saving to an image file -===================================== - -To display the figure when running a script, you need to call -:func:`nilearn.plotting.show`: (this is just an alias to -:func:`matplotlib.pyplot.show`):: - - >>> from nilearn import plotting - >>> plotting.show() # doctest: +SKIP - -The simplest way to output an image file from the plotting functions is -to specify the `output_file` argument:: - - >>> from nilearn import plotting - >>> plotting.plot_stat_map(img, output_file='pretty_brain.png') # doctest: +SKIP - -In this case, the display is closed automatically and the plotting -function returns None. - -| - -The display object returned by the plotting function has a savefig method -that can be used to save the plot to an image file:: - - >>> from nilearn import plotting - >>> display = plotting.plot_stat_map(img) # doctest: +SKIP - >>> display.savefig('pretty_brain.png') # doctest: +SKIP - # Don't forget to close the display - >>> display.close() # doctest: +SKIP - - From 3c7b1f8a818c35eef8a6c4736893cc381e486613 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Mon, 11 Jan 2016 13:18:36 +0100 Subject: [PATCH 0041/1925] Changed format from foobar to 01_foobar --- doc/{plotting => 01_plotting}/index.rst | 74 +++++++++---------- .../decoding_simulated.rst | 26 +++---- .../decoding_tutorial.rst | 36 ++++----- .../estimator_choice.rst | 58 +++++++-------- doc/{decoding => 02_decoding}/index.rst | 0 doc/{decoding => 02_decoding}/searchlight.rst | 28 +++---- doc/{decoding => 02_decoding}/space_net.rst | 14 ++-- .../connectome_extraction.rst | 28 +++---- .../functional_connectomes.rst | 34 ++++----- .../index.rst | 0 .../parcellating.rst | 26 +++---- .../region_extraction.rst | 42 +++++------ .../resting_state_networks.rst | 42 +++++------ .../data_preparation.rst | 50 ++++++------- .../index.rst | 4 +- .../manipulating_images.rst | 54 +++++++------- doc/index.rst | 28 +++---- doc/introduction.rst | 8 +- doc/user_guide.rst | 8 +- 19 files changed, 280 insertions(+), 280 deletions(-) rename doc/{plotting => 01_plotting}/index.rst (70%) rename doc/{decoding => 02_decoding}/decoding_simulated.rst (73%) rename doc/{decoding => 02_decoding}/decoding_tutorial.rst (91%) rename doc/{decoding => 02_decoding}/estimator_choice.rst (71%) rename doc/{decoding => 02_decoding}/index.rst (100%) rename doc/{decoding => 02_decoding}/searchlight.rst (90%) rename doc/{decoding => 02_decoding}/space_net.rst (85%) rename doc/{connectivity => 03_connectivity}/connectome_extraction.rst (88%) rename doc/{connectivity => 03_connectivity}/functional_connectomes.rst (84%) rename doc/{connectivity => 03_connectivity}/index.rst (100%) rename doc/{connectivity => 03_connectivity}/parcellating.rst (81%) rename doc/{connectivity => 03_connectivity}/region_extraction.rst (77%) rename doc/{connectivity => 03_connectivity}/resting_state_networks.rst (69%) rename doc/{manipulating_images => 04_manipulating_images}/data_preparation.rst (88%) rename doc/{manipulating_images => 04_manipulating_images}/index.rst (74%) rename doc/{manipulating_images => 04_manipulating_images}/manipulating_images.rst (86%) diff --git 
a/doc/plotting/index.rst b/doc/01_plotting/index.rst similarity index 70% rename from doc/plotting/index.rst rename to doc/01_plotting/index.rst index 956abc8c27..631f9c391e 100644 --- a/doc/plotting/index.rst +++ b/doc/01_plotting/index.rst @@ -19,32 +19,32 @@ Nilearn has a set of plotting functions to plot brain volumes that are fined tuned to specific applications. Amongst other things, they use different heuristics to find cutting coordinates. -.. |plot_stat_map| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_001.png - :target: ../auto_examples/plotting/plot_demo_plotting.html +.. |plot_stat_map| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_001.png + :target: ../auto_examples/01_plotting/plot_demo_plotting.html :scale: 50 -.. |plot_glass_brain| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_glass_brain_extensive_001.png - :target: ../auto_examples/plotting/plot_demo_glass_brain_extensive.html +.. |plot_glass_brain| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_extensive_001.png + :target: ../auto_examples/01_plotting/plot_demo_glass_brain_extensive.html :scale: 50 -.. |plot_connectome| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png - :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html +.. |plot_connectome| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png + :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html :scale: 50 -.. |plot_anat| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_003.png - :target: ../auto_examples/plotting/plot_demo_plotting.html +.. |plot_anat| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_003.png + :target: ../auto_examples/01_plotting/plot_demo_plotting.html :scale: 50 -.. |plot_roi| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_004.png - :target: ../auto_examples/plotting/plot_demo_plotting.html +.. |plot_roi| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_004.png + :target: ../auto_examples/01_plotting/plot_demo_plotting.html :scale: 50 -.. |plot_epi| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_plotting_005.png - :target: ../auto_examples/plotting/plot_demo_plotting.html +.. |plot_epi| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_plotting_005.png + :target: ../auto_examples/01_plotting/plot_demo_plotting.html :scale: 50 -.. |plot_prob_atlas| image:: ../auto_examples/plotting/images/sphx_glr_plot_prob_atlas_003.png - :target: ../auto_examples/plotting/plot_prob_atlas.html +.. |plot_prob_atlas| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_prob_atlas_003.png + :target: ../auto_examples/01_plotting/plot_prob_atlas.html :scale: 50 .. A temporary hack to avoid a sphinx bug @@ -108,41 +108,41 @@ different heuristics to find cutting coordinates. .. seealso:: - :ref:`sphx_glr_auto_examples_plotting_plot_dim_plotting.py` + :ref:`sphx_glr_auto_examples_01_plotting_plot_dim_plotting.py` Different display modes ======================== -.. |plot_ortho| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_001.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html +.. |plot_ortho| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_001.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html :scale: 50 -.. 
|plot_z_many| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_002.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html +.. |plot_z_many| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_002.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html :scale: 30 -.. |plot_x| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_003.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html +.. |plot_x| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_003.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_x_small| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_004.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html +.. |plot_x_small| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_004.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_z_small| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_005.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html +.. |plot_z_small| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_005.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_xz| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_006.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html +.. |plot_xz| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_006.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_yx| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_007.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html +.. |plot_yx| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_007.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_yz| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_008.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html +.. |plot_yz| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_008.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html :scale: 50 @@ -200,12 +200,12 @@ plot, and has methods to add overlays, contours or edge maps:: display = plotting.plot_epi(...) -.. |plot_edges| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_009.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html +.. |plot_edges| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_009.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html :scale: 50 -.. |plot_contours| image:: ../auto_examples/plotting/images/sphx_glr_plot_demo_more_plotting_010.png - :target: ../auto_examples/plotting/plot_demo_more_plotting.html +.. |plot_contours| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_010.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html :scale: 50 ================= ========================================================= @@ -225,14 +225,14 @@ plot, and has methods to add overlays, contours or edge maps:: 'levels'. This is typically useful to outline a mask, or ROI on top of another map. 
|hack| - **Example:** :ref:`sphx_glr_auto_examples_plotting_plot_haxby_masks.py` + **Example:** :ref:`sphx_glr_auto_examples_01_plotting_plot_haxby_masks.py` **add_overlay** `display.add_overlay(img, cmap=plotting.cm.purple_green, threshold=3)` |hack| Add a new overlay on the existing figure |hack| - **Example:** :ref:`sphx_glr_auto_examples_plotting_plot_overlay.py` + **Example:** :ref:`sphx_glr_auto_examples_01_plotting_plot_overlay.py` ================= ========================================================= diff --git a/doc/decoding/decoding_simulated.rst b/doc/02_decoding/decoding_simulated.rst similarity index 73% rename from doc/decoding/decoding_simulated.rst rename to doc/02_decoding/decoding_simulated.rst index 3377824fcd..89e37ac024 100644 --- a/doc/decoding/decoding_simulated.rst +++ b/doc/02_decoding/decoding_simulated.rst @@ -24,8 +24,8 @@ a linear model with a random design matrix **X**: brain regions. Here, in the simulations, they form a 3D image with 5, four of which in opposite corners and one in the middle. -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_001.png - :target: auto_examples/decoding/plot_simulated_data.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_simulated_data_001.png + :target: auto_examples/02_decoding/plot_simulated_data.html :align: center :scale: 90 @@ -36,7 +36,7 @@ a linear model with a random design matrix **X**: * **e** is random normal noise. We provide a black-box function to create the data in the -:ref:`example script `. +:ref:`example script `. Running various estimators @@ -65,24 +65,24 @@ a for loop: they all have a `fit` method for fitting the data, a `score` method to retrieve the prediction score, and because they are all linear models, a `coef_` attribute that stores the coefficients **w** estimated (see the :ref:`code of the simulation -`). +`). .. note:: All parameters estimated from the data end with an underscore -.. |estimator1| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_002.png - :target: ../auto_examples/decoding/plot_simulated_data.html +.. |estimator1| image:: ../auto_examples/02_decoding/images/sphx_glr_plot_simulated_data_002.png + :target: ../auto_examples/02_decoding/plot_simulated_data.html :scale: 60 -.. |estimator2| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_003.png - :target: ../auto_examples/decoding/plot_simulated_data.html +.. |estimator2| image:: ../auto_examples/02_decoding/images/sphx_glr_plot_simulated_data_003.png + :target: ../auto_examples/02_decoding/plot_simulated_data.html :scale: 60 -.. |estimator3| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_004.png - :target: ../auto_examples/decoding/plot_simulated_data.html +.. |estimator3| image:: ../auto_examples/02_decoding/images/sphx_glr_plot_simulated_data_004.png + :target: ../auto_examples/02_decoding/plot_simulated_data.html :scale: 60 -.. |estimator4| image:: ../auto_examples/decoding/images/sphx_glr_plot_simulated_data_005.png - :target: ../auto_examples/decoding/plot_simulated_data.html +.. |estimator4| image:: ../auto_examples/02_decoding/images/sphx_glr_plot_simulated_data_005.png + :target: ../auto_examples/02_decoding/plot_simulated_data.html :scale: 60 |estimator1| |estimator2| |estimator3| |estimator4| @@ -103,7 +103,7 @@ models, a `coef_` attribute that stores the coefficients **w** estimated .. 
topic:: **Source code to run the simulation** The full file to run the simulation can be found in - :ref:`sphx_glr_auto_examples_decoding_plot_simulated_data.py` + :ref:`sphx_glr_auto_examples_02_decoding_plot_simulated_data.py` .. seealso:: diff --git a/doc/decoding/decoding_tutorial.rst b/doc/02_decoding/decoding_tutorial.rst similarity index 91% rename from doc/decoding/decoding_tutorial.rst rename to doc/02_decoding/decoding_tutorial.rst index 51edb95218..7671e4e1b0 100644 --- a/doc/decoding/decoding_tutorial.rst +++ b/doc/02_decoding/decoding_tutorial.rst @@ -36,29 +36,29 @@ activity recorded in masks of the ventral stream. Significant prediction shows that the signal in the region contains information on the corresponding category. -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_stimuli_004.png - :target: ../auto_examples/decoding/plot_haxby_stimuli.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_stimuli_004.png + :target: ../auto_examples/02_decoding/plot_haxby_stimuli.html :scale: 30 :align: left Face stimuli -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_stimuli_002.png - :target: ../auto_examples/decoding/plot_haxby_stimuli.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_stimuli_002.png + :target: ../auto_examples/02_decoding/plot_haxby_stimuli.html :scale: 30 :align: left Cat stimuli -.. figure:: ../auto_examples/plotting/images/sphx_glr_plot_haxby_masks_001.png - :target: ../auto_examples/plotting/plot_haxby_masks.html +.. figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_haxby_masks_001.png + :target: ../auto_examples/01_plotting/plot_haxby_masks.html :scale: 30 :align: left Masks -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_full_analysis_001.png - :target: ../auto_examples/decoding/plot_haxby_full_analysis.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_full_analysis_001.png + :target: ../auto_examples/02_decoding/plot_haxby_full_analysis.html :scale: 35 :align: left @@ -320,7 +320,7 @@ permutation testing on the labels, with .. topic:: **Putting it all together** The :ref:`ROI-based decoding example - ` does a decoding analysis per + ` does a decoding analysis per mask, giving the f1-score of the prediction for each object. It uses all the notions presented above, with ``for`` loop to iterate @@ -328,16 +328,16 @@ permutation testing on the labels, with scores. -.. figure:: ../auto_examples/plotting/images/sphx_glr_plot_haxby_masks_001.png - :target: ../auto_examples/plotting/plot_haxby_masks.html +.. figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_haxby_masks_001.png + :target: ../auto_examples/01_plotting/plot_haxby_masks.html :scale: 55 :align: left Masks -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_full_analysis_001.png - :target: ../auto_examples/decoding/plot_haxby_full_analysis.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_full_analysis_001.png + :target: ../auto_examples/02_decoding/plot_haxby_full_analysis.html :scale: 70 :align: left @@ -387,7 +387,7 @@ based feature selection (a.k.a. that we will put before the SVC in a `pipeline` (:class:`sklearn.pipeline.Pipeline`): -.. literalinclude:: ../../examples/decoding/plot_haxby_anova_svm.py +.. 
literalinclude:: ../../examples/02_decoding/plot_haxby_anova_svm.py :start-after: # Build the decoder :end-before: # Visualize the results @@ -406,12 +406,12 @@ To visualize the results, we need to: - then, as before, inverse the masking process to retrieve the weights and plot them. -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_anova_svm_001.png - :target: ../auto_examples/decoding/plot_haxby_anova_svm.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_anova_svm_001.png + :target: ../auto_examples/02_decoding/plot_haxby_anova_svm.html :align: right :scale: 65 -.. literalinclude:: ../../examples/decoding/plot_haxby_anova_svm.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_anova_svm.py :start-after: # Visualize the results :end-before: # Obtain prediction scores via cross validation @@ -423,7 +423,7 @@ To visualize the results, we need to: .. topic:: **Final script** The complete script to do an SVM-Anova analysis can be found as - :ref:`an example `. + :ref:`an example `. .. seealso:: diff --git a/doc/decoding/estimator_choice.rst b/doc/02_decoding/estimator_choice.rst similarity index 71% rename from doc/decoding/estimator_choice.rst rename to doc/02_decoding/estimator_choice.rst index 23181b794b..853871c489 100644 --- a/doc/decoding/estimator_choice.rst +++ b/doc/02_decoding/estimator_choice.rst @@ -81,18 +81,18 @@ whereas the former is linear with the number of classes. :func:`sklearn.metrics.confusion_matrix` is a useful tool to understand the classifier's errors in a multiclass problem. -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_multiclass_001.png - :target: ../auto_examples/decoding/plot_haxby_multiclass.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_multiclass_001.png + :target: ../auto_examples/02_decoding/plot_haxby_multiclass.html :align: left :scale: 60 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_multiclass_002.png - :target: ../auto_examples/decoding/plot_haxby_multiclass.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_multiclass_002.png + :target: ../auto_examples/02_decoding/plot_haxby_multiclass.html :align: left :scale: 40 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_multiclass_003.png - :target: ../auto_examples/decoding/plot_haxby_multiclass.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_multiclass_003.png + :target: ../auto_examples/02_decoding/plot_haxby_multiclass.html :align: left :scale: 40 @@ -109,8 +109,8 @@ will have bumps and peaks due to this noise. These will not generalize to new data and chances are that the corresponding choice of parameter will not perform as well on new data. -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_grid_search_001.png - :target: ../auto_examples/decoding/plot_haxby_grid_search.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_grid_search_001.png + :target: ../auto_examples/02_decoding/plot_haxby_grid_search.html :align: center :scale: 60 @@ -125,7 +125,7 @@ CPUs. * `The scikit-learn documentation on parameter selection `_ - * The example :ref:`sphx_glr_auto_examples_decoding_plot_haxby_grid_search.py` + * The example :ref:`sphx_glr_auto_examples_02_decoding_plot_haxby_grid_search.py` Different linear models ======================= @@ -163,8 +163,8 @@ Here we apply a few linear models to fMRI data: in every situation. -.. 
figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_001.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_001.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :align: center :scale: 80 @@ -181,48 +181,48 @@ the other, although the prediction scores are fairly similar. In other terms, a well-performing estimator in terms of prediction error gives us little guarantee on the brain maps. -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_007.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_007.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :align: left :scale: 70 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_008.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_008.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :align: left :scale: 70 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_005.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_005.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :align: left :scale: 70 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_006.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_006.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :align: left :scale: 70 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_004.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_004.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :align: left :scale: 70 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_002.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_002.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :align: left :scale: 70 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_003.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_003.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :align: left :scale: 70 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_009.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_009.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :align: left :scale: 70 -.. 
figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_different_estimators_010.png - :target: ../auto_examples/decoding/plot_haxby_different_estimators.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_different_estimators_010.png + :target: ../auto_examples/02_decoding/plot_haxby_different_estimators.html :align: left :scale: 70 diff --git a/doc/decoding/index.rst b/doc/02_decoding/index.rst similarity index 100% rename from doc/decoding/index.rst rename to doc/02_decoding/index.rst diff --git a/doc/decoding/searchlight.rst b/doc/02_decoding/searchlight.rst similarity index 90% rename from doc/decoding/searchlight.rst rename to doc/02_decoding/searchlight.rst index 438241312b..3d2d255201 100644 --- a/doc/decoding/searchlight.rst +++ b/doc/02_decoding/searchlight.rst @@ -25,7 +25,7 @@ Loading Fetching the data from internet and loading it can be done with the provided functions (see :ref:`loading_data`): -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # Load Haxby dataset :end-before: # Restrict to faces and houses @@ -39,7 +39,7 @@ For this example we need: - limit our analysis to the `face` and `house` conditions (like in the :ref:`decoding tutorial `) -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # Restrict to faces and houses :end-before: # Prepare masks @@ -62,7 +62,7 @@ be used here : back of the brain. *mask_img* will ensure that no value outside the brain is taken into account when iterating with the sphere. -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # brain to speed up computation) :end-before: # Searchlight computation @@ -99,7 +99,7 @@ validation method that does not take too much time. *K*-Fold along with *K* = 4 is a good compromise between running time and quality. -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # set once and the others as learning sets :end-before: import nilearn.decoding @@ -112,7 +112,7 @@ parameter left is the radius of the ball that will run through the data. Kriegskorte et al. use a 4mm radius because it yielded the best detection performance in their simulation. -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: import nilearn.decoding :end-before: # F-scores computation @@ -127,12 +127,12 @@ background. We can see here that voxels in the visual cortex contains information to distinguish pictures showed to the volunteers, which was the expected result. -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # Visualization :end-before: # F_score results -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_searchlight_001.png - :target: ../auto_examples/decoding/plot_haxby_searchlight.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_searchlight_001.png + :target: ../auto_examples/02_decoding/plot_haxby_searchlight.html :align: center :scale: 80 @@ -149,11 +149,11 @@ parametric tests (F-tests ot t-tests). 
Here we compute the *p-values* of the voxels [1]_. To display the results, we use the negative log of the p-value. -.. literalinclude:: ../../examples/decoding/plot_haxby_searchlight.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py :start-after: # F_score results -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_searchlight_002.png - :target: ../auto_examples/decoding/plot_haxby_searchlight.html +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_searchlight_002.png + :target: ../auto_examples/02_decoding/plot_haxby_searchlight.html :align: center :scale: 80 @@ -185,7 +185,7 @@ is its associated p-value. The :func:`nilearn.mass_univariate.permuted_ols` function returns the p-values computed with a permutation test. -.. literalinclude:: ../../examples/advanced/plot_haxby_mass_univariate.py +.. literalinclude:: ../../examples/05_advanced/plot_haxby_mass_univariate.py :start-after: # Perform massively univariate analysis with permuted OLS :end-before: neg_log_pvals_unmasked @@ -206,8 +206,8 @@ every voxel so that the F-statistics are comparable. This correction strategy is applied in nilearn :func:`nilearn.mass_univariate.permuted_ols` function. -.. figure:: ../auto_examples/advanced/images/sphx_glr_plot_haxby_mass_univariate_001.png - :target: ../auto_examples/advanced/plot_haxby_mass_univariate.html +.. figure:: ../auto_examples/05_advanced/images/sphx_glr_plot_haxby_mass_univariate_001.png + :target: ../auto_examples/05_advanced/plot_haxby_mass_univariate.html :align: center :scale: 60 diff --git a/doc/decoding/space_net.rst b/doc/02_decoding/space_net.rst similarity index 85% rename from doc/decoding/space_net.rst rename to doc/02_decoding/space_net.rst index 5f6e7a3660..cfcbf758f2 100644 --- a/doc/decoding/space_net.rst +++ b/doc/02_decoding/space_net.rst @@ -66,37 +66,37 @@ technical details regarding the implementation of SpaceNet. Mixed gambles ............. -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_mixed_gambles_space_net_001.png +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_mixed_gambles_space_net_001.png :align: right :scale: 60 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_mixed_gambles_space_net_002.png +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_mixed_gambles_space_net_002.png :scale: 60 .. topic:: **Code** The complete script can be found - :ref:`here `. + :ref:`here `. Haxby ..... -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_space_net_001.png +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_space_net_001.png :align: right :scale: 60 -.. figure:: ../auto_examples/decoding/images/sphx_glr_plot_haxby_space_net_002.png +.. figure:: ../auto_examples/02_decoding/images/sphx_glr_plot_haxby_space_net_002.png :scale: 60 .. topic:: **Code** The complete script can be found - :ref:`here `. + :ref:`here `. .. seealso:: - * :ref:`Age prediction on OASIS dataset with SpaceNet `. + * :ref:`Age prediction on OASIS dataset with SpaceNet `. 
* The `scikit-learn documentation `_ has very detailed explanations on a large variety of estimators and diff --git a/doc/connectivity/connectome_extraction.rst b/doc/03_connectivity/connectome_extraction.rst similarity index 88% rename from doc/connectivity/connectome_extraction.rst rename to doc/03_connectivity/connectome_extraction.rst index 1c79848eee..61a32f1836 100644 --- a/doc/connectivity/connectome_extraction.rst +++ b/doc/03_connectivity/connectome_extraction.rst @@ -68,19 +68,19 @@ of the estimator:: >>> estimator.precision_ # doctest: +SKIP -.. |covariance| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_001.png - :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html +.. |covariance| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_001.png + :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html :scale: 40 -.. |precision| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_003.png - :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html +.. |precision| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_003.png + :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html :scale: 40 -.. |covariance_graph| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png - :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html +.. |covariance_graph| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_002.png + :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html :scale: 55 -.. |precision_graph| image:: ../auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png - :target: ../auto_examples/connectivity/plot_inverse_covariance_connectome.html +.. |precision_graph| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png + :target: ../auto_examples/03_connectivity/plot_inverse_covariance_connectome.html :scale: 55 .. centered:: |covariance| |precision| @@ -99,7 +99,7 @@ of the estimator:: .. topic:: **Full example** See the following example for a full file running the analysis: - :ref:`sphx_glr_auto_examples_connectivity_plot_inverse_covariance_connectome.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_inverse_covariance_connectome.py` .. topic:: **Exercise: computing sparse inverse covariance** :class: green @@ -157,7 +157,7 @@ group analysis only on the non zero coefficients. .. topic:: **Full example** See the following example for a full file running the analysis: - :ref:`sphx_glr_auto_examples_connectivity_plot_multi_subject_connectome.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_multi_subject_connectome.py` .. topic:: **Exercise: computing the correlation matrix of rest fmri** @@ -196,8 +196,8 @@ Finally, we use the The results are the following: -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_simulated_connectome_001.png - :target: ../auto_examples/connectivity/plot_simulated_connectome.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_simulated_connectome_001.png + :target: ../auto_examples/03_connectivity/plot_simulated_connectome.html :scale: 60 The group-sparse estimation outputs matrices with @@ -211,7 +211,7 @@ information. .. 
topic:: **Full Example** The complete source code for this example can be found here: - :ref:`sphx_glr_auto_examples_connectivity_plot_simulated_connectome.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_simulated_connectome.py` .. [#] A lot of technical details on the algorithm used for group-sparse @@ -250,7 +250,7 @@ Deviations from this mean in the tangent space are provided in the connectivitie .. topic:: **Full example** See the following example for a full file running the analysis: - :ref:`sphx_glr_auto_examples_connectivity_plot_connectivity_measures.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_connectivity_measures.py` .. topic:: **Exercise: computing connectivity in tangent space** :class: green diff --git a/doc/connectivity/functional_connectomes.rst b/doc/03_connectivity/functional_connectomes.rst similarity index 84% rename from doc/connectivity/functional_connectomes.rst rename to doc/03_connectivity/functional_connectomes.rst index 998d18f0ea..e70b80f45d 100644 --- a/doc/connectivity/functional_connectomes.rst +++ b/doc/03_connectivity/functional_connectomes.rst @@ -50,8 +50,8 @@ Plotting can then be done as:: from nilearn import plotting plotting.plot_roi(atlas_filename) -.. image:: ../auto_examples/plotting/images/sphx_glr_plot_atlas_001.png - :target: ../auto_examples/plotting/plot_atlas.html +.. image:: ../auto_examples/01_plotting/images/sphx_glr_plot_atlas_001.png + :target: ../auto_examples/01_plotting/plot_atlas.html :scale: 60 .. seealso:: @@ -87,17 +87,17 @@ obtain time series that capture well the functional interactions between regions, regressing out noise sources is indeed very important `[Varoquaux & Craddock 2013] `_. -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_signal_extraction_001.png - :target: ../auto_examples/connectivity/plot_signal_extraction.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_signal_extraction_001.png + :target: ../auto_examples/03_connectivity/plot_signal_extraction.html :scale: 40 -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_signal_extraction_002.png - :target: ../auto_examples/connectivity/plot_signal_extraction.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_signal_extraction_002.png + :target: ../auto_examples/03_connectivity/plot_signal_extraction.html :scale: 40 .. topic:: **Full example** See the following example for a full file running the analysis: - :ref:`sphx_glr_auto_examples_connectivity_plot_signal_extraction.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_signal_extraction.py` .. topic:: **Exercise: computing the correlation matrix of rest fmri** @@ -136,10 +136,10 @@ atlas well suited to resting-state data analysis is the `MSDL atlas Probabilistic atlases are represented as a set of continuous maps, in a 4D nifti image. Visualization the atlas thus requires to visualize each of these maps, which requires accessing them with -:func:`nilearn.image.index_img` (see the :ref:`corresponding example `). +:func:`nilearn.image.index_img` (see the :ref:`corresponding example `). -.. image:: ../auto_examples/plotting/images/sphx_glr_plot_overlay_001.png - :target: ../auto_examples/plotting/plot_overlay.html +.. 
image:: ../auto_examples/01_plotting/images/sphx_glr_plot_overlay_001.png + :target: ../auto_examples/01_plotting/plot_overlay.html :scale: 60 @@ -165,15 +165,15 @@ The procedure is the same as with `brain parcellations `_ but using the :class:`NiftiMapsMasker`, and the same considerations on using confounds regressors apply. -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_001.png - :target: ../auto_examples/connectivity/plot_probabilistic_atlas_extraction.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_001.png + :target: ../auto_examples/03_connectivity/plot_probabilistic_atlas_extraction.html :scale: 30 .. topic:: **Full example** A full example of extracting signals on a probabilistic: - :ref:`sphx_glr_auto_examples_connectivity_plot_probabilistic_atlas_extraction.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_probabilistic_atlas_extraction.py` .. topic:: **Exercise: correlation matrix of rest fmri on probabilistic atlas** @@ -201,7 +201,7 @@ function that take the matrix, and coordinates of the nodes in MNI space. In the case of the MSDL atlas (:func:`nilearn.datasets.fetch_atlas_msdl`), the CSV file readily comes with MNI coordinates for each region (see for instance example: -:ref:`sphx_glr_auto_examples_connectivity_plot_probabilistic_atlas_extraction.py`). +:ref:`sphx_glr_auto_examples_03_connectivity_plot_probabilistic_atlas_extraction.py`). .. For doctesting @@ -212,15 +212,15 @@ with MNI coordinates for each region (see for instance example: For another atlas this information can be computed for each region with the :func:`nilearn.plotting.find_xyz_cut_coords` function (see example: -:ref:`sphx_glr_auto_examples_connectivity_plot_multi_subject_connectome.py`):: +:ref:`sphx_glr_auto_examples_03_connectivity_plot_multi_subject_connectome.py`):: >>> from nilearn import image, plotting >>> atlas_region_coords = [plotting.find_xyz_cut_coords(img) for img in image.iter_img(atlas_filename)] # doctest: +SKIP -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_002.png - :target: ../auto_examples/connectivity/plot_probabilistic_atlas_extraction.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_002.png + :target: ../auto_examples/03_connectivity/plot_probabilistic_atlas_extraction.html As you can see, the correlation matrix gives a very "full" graph: every node is connected to every other one. This is because is also captures diff --git a/doc/connectivity/index.rst b/doc/03_connectivity/index.rst similarity index 100% rename from doc/connectivity/index.rst rename to doc/03_connectivity/index.rst diff --git a/doc/connectivity/parcellating.rst b/doc/03_connectivity/parcellating.rst similarity index 81% rename from doc/connectivity/parcellating.rst rename to doc/03_connectivity/parcellating.rst index cdcd732481..03c323306a 100644 --- a/doc/connectivity/parcellating.rst +++ b/doc/03_connectivity/parcellating.rst @@ -26,7 +26,7 @@ Preprocessing: loading and masking We fetch the data from Internet and load it with a dedicated function (see :ref:`loading_data`): -.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py +.. 
literalinclude:: ../../examples/03_connectivity/plot_rest_clustering.py :start-after: ### Load nyu_rest dataset ##################################################### :end-before: ### Ward ###################################################################### @@ -43,7 +43,7 @@ aka connectivity matrix. This is useful to constrain clusters to form contiguous parcels (see `the scikit-learn documentation `_) -.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py +.. literalinclude:: ../../examples/03_connectivity/plot_rest_clustering.py :start-after: # Compute connectivity matrix: which voxel is connected to which :end-before: # Computing the ward for the first time, this is long... @@ -67,7 +67,7 @@ Running the Ward algorithm Here we simply launch Ward's algorithm to find 1000 clusters and we time it. -.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py +.. literalinclude:: ../../examples/03_connectivity/plot_rest_clustering.py :start-after: # Computing the ward for the first time, this is long... :end-before: # Compute the ward with more clusters, should be faster @@ -75,7 +75,7 @@ This runs in about 10 seconds (depending on your computer configuration). Now, we are not satisfied of the result and we want to cluster the picture in 2000 elements. -.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py +.. literalinclude:: ../../examples/03_connectivity/plot_rest_clustering.py :start-after: # Compute the ward with more clusters, should be faster :end-before: ### Show result ############################################################### @@ -90,7 +90,7 @@ Unmasking After applying the ward, we must unmask the data. This can be done simply : -.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py +.. literalinclude:: ../../examples/03_connectivity/plot_rest_clustering.py :start-after: # Unmask data :end-before: # Display the labels @@ -105,13 +105,13 @@ Label visualization To visualize the clusters, we assign random colors to each cluster for the labels visualization. -.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py +.. literalinclude:: ../../examples/03_connectivity/plot_rest_clustering.py :start-after: ### Show result ############################################################### :end-before: # Display the original data -.. figure:: ../auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_001.png - :target: ../auto_examples/connectivity/plot_rest_clustering.html +.. figure:: ../auto_examples/03_connectivity/images/sphx_glr_plot_rest_clustering_001.png + :target: ../auto_examples/03_connectivity/plot_rest_clustering.html :align: center :scale: 80 @@ -128,15 +128,15 @@ representation thanks to a two-step procedure : - call *ward.inverse_transform* on the previous result to turn it back into the masked picture shape -.. literalinclude:: ../../examples/connectivity/plot_rest_clustering.py +.. literalinclude:: ../../examples/03_connectivity/plot_rest_clustering.py :start-after: # Display the original data -.. |left_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_002.png - :target: ../auto_examples/connectivity/plot_rest_clustering.html +.. |left_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_rest_clustering_002.png + :target: ../auto_examples/03_connectivity/plot_rest_clustering.html :width: 49% -.. 
|right_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_003.png - :target: ../auto_examples/connectivity/plot_rest_clustering.html +.. |right_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_rest_clustering_003.png + :target: ../auto_examples/03_connectivity/plot_rest_clustering.html :width: 49% |left_img| |right_img| diff --git a/doc/connectivity/region_extraction.rst b/doc/03_connectivity/region_extraction.rst similarity index 77% rename from doc/connectivity/region_extraction.rst rename to doc/03_connectivity/region_extraction.rst index 3b5ccf9881..6f5ca2ae34 100644 --- a/doc/connectivity/region_extraction.rst +++ b/doc/03_connectivity/region_extraction.rst @@ -34,7 +34,7 @@ which is already preprocessed and publicly available at datasets. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py :start-after: # utilities :end-before: ################################################################################ @@ -49,7 +49,7 @@ object and calling fit on the functional filenames without necessarily converting each filename to Nifti1Image object. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py :start-after: # decomposition module :end-before: # Visualization @@ -63,12 +63,12 @@ Here, we use :func:`plot_prob_atlas` for easy visualization of 4D atlas maps onto the anatomical standard template. Each ICA map is displayed in different color and colors are random and automatically picked. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py :start-after: # Show ICA maps by using plotting utilities :end-before: ################################################################################ -.. image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_001.png - :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_001.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html :scale: 60 .. currentmodule:: nilearn.regions @@ -93,7 +93,7 @@ regions. We control the small spurious regions size by thresholding in voxel uni to adapt well to the resolution of the image. Please see the documentation of nilearn.regions.connected_regions for more details. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py :start-after: # regions, both can be done by importing Region Extractor from regions module :end-before: # Visualization @@ -107,12 +107,12 @@ for visualizing extracted regions on a standard template. Each extracted brain region is assigned a color and as you can see that visual cortex area is extracted quite nicely into each hemisphere. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py :start-after: # Show region extraction results :end-before: ################################################################################ -.. 
image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_002.png - :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_002.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html :scale: 60 .. currentmodule:: nilearn.connectome @@ -133,7 +133,7 @@ shape=(176, 23) where 176 is the length of time series and 23 is the number of extracted regions. Likewise, we have a total of 20 subject specific time series signals. The third step, we compute the mean correlation across all subjects. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py :start-after: # To estimate correlation matrices we import connectome utilities from nilearn :end-before: # Visualization @@ -148,16 +148,16 @@ automatically the coordinates required, for plotting connectome relations. Left image is the correlations in a matrix form and right image is the connectivity relations to brain regions plotted using :func:`plot_connectome` -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py :start-after: # Import image utilities in utilising to operate on 4th dimension :end-before: ################################################################################ -.. |matrix| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_003.png - :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html +.. |matrix| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_003.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html :scale: 60 -.. |connectome| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_004.png - :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html +.. |connectome| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_004.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html :scale: 60 .. centered:: |matrix| |connectome| @@ -172,15 +172,15 @@ Left image displays the DMN regions without region extraction and right image displays the DMN regions after region extraction. Here, we can validate that the DMN regions are nicely separated displaying each extracted region in different color. -.. literalinclude:: ../../examples/connectivity/plot_extract_regions_canica_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py :start-after: # First we plot DMN without region extraction, interested in only index=[3] -.. |dmn| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_005.png - :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html +.. |dmn| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_005.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html :scale: 50 -.. |dmn_reg| image:: ../auto_examples/connectivity/images/sphx_glr_plot_extract_regions_canica_maps_006.png - :target: ../auto_examples/connectivity/plot_extract_regions_canica_maps.html +.. 
|dmn_reg| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_006.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html :scale: 50 .. centered:: |dmn| |dmn_reg| @@ -188,4 +188,4 @@ the DMN regions are nicely separated displaying each extracted region in differe .. seealso:: The full code can be found as an example: - :ref:`sphx_glr_auto_examples_connectivity_plot_extract_regions_canica_maps.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_extract_regions_canica_maps.py` diff --git a/doc/connectivity/resting_state_networks.rst b/doc/03_connectivity/resting_state_networks.rst similarity index 69% rename from doc/connectivity/resting_state_networks.rst rename to doc/03_connectivity/resting_state_networks.rst index b3ce2208d9..ea9c75457d 100644 --- a/doc/connectivity/resting_state_networks.rst +++ b/doc/03_connectivity/resting_state_networks.rst @@ -34,7 +34,7 @@ functions to fetch data from Internet and get the filenames (:ref:`more on data loading `): -.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py +.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py :start-after: # First we load the ADHD200 data :end-before: # Here we apply CanICA on the data @@ -47,7 +47,7 @@ perform a multi-subject ICA decomposition following the CanICA model. As with every object in nilearn, we give its parameters at construction, and then fit it on the data. -.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py +.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py :start-after: # Here we apply CanICA on the data :end-before: # To visualize we plot the outline of all components on one figure @@ -61,23 +61,23 @@ We can visualize the components as in the previous examples. The first plot shows a map generated from all the components. Then we plot an axial cut for each component separately. -.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py +.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py :start-after: # To visualize we plot the outline of all components on one figure :end-before: # Finally, we plot the map for each ICA component separately -.. figure:: ../auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_001.png +.. figure:: ../auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_001.png :align: center - :target: ../auto_examples/connectivity/plot_canica_resting_state.html + :target: ../auto_examples/03_connectivity/plot_canica_resting_state.html Finally, we can plot the map for different ICA components separately: -.. literalinclude:: ../../examples/connectivity/plot_canica_resting_state.py +.. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py :start-after: # Finally, we plot the map for each ICA component separately -.. |left_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_003.png +.. |left_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_003.png :width: 23% -.. |right_img| image:: ../auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_004.png +.. |right_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_004.png :width: 23% .. centered:: |left_img| |right_img| @@ -85,7 +85,7 @@ Finally, we can plot the map for different ICA components separately: .. 
seealso:: The full code can be found as an example: - :ref:`sphx_glr_auto_examples_connectivity_plot_canica_resting_state.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_canica_resting_state.py` .. note:: @@ -117,13 +117,13 @@ Applying DictLearning Sparsity of output map is controlled by a parameter alpha: using a larger alpha yields sparser maps. -.. literalinclude:: ../../examples/connectivity/plot_compare_resting_state_decomposition.py +.. literalinclude:: ../../examples/03_connectivity/plot_compare_resting_state_decomposition.py :start-after: # Dictionary learning :end-before: # CanICA We can fit both estimators to compare them -.. literalinclude:: ../../examples/connectivity/plot_compare_resting_state_decomposition.py +.. literalinclude:: ../../examples/03_connectivity/plot_compare_resting_state_decomposition.py :start-after: # Fit both estimators :end-before: # Visualize the results @@ -132,21 +132,21 @@ Visualizing the results 4D plotting offers an efficient way to compare both resulting outputs -.. literalinclude:: ../../examples/connectivity/plot_compare_resting_state_decomposition.py +.. literalinclude:: ../../examples/03_connectivity/plot_compare_resting_state_decomposition.py :start-after: # Visualize the results -.. |left_img_decomp| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_001.png - :target: ../auto_examples/connectivity/plot_compare_resting_state_decomposition.html +.. |left_img_decomp| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_001.png + :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html :width: 50% -.. |right_img_decomp| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_003.png - :target: ../auto_examples/connectivity/plot_compare_resting_state_decomposition.html +.. |right_img_decomp| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_003.png + :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html :width: 50% -.. |left_img_decomp_single| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_002.png - :target: ../auto_examples/connectivity/plot_compare_resting_state_decomposition.html +.. |left_img_decomp_single| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_002.png + :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html :width: 50% -.. |right_img_decomp_single| image:: ../auto_examples/connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_004.png - :target: ../auto_examples/connectivity/plot_compare_resting_state_decomposition.html +.. |right_img_decomp_single| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_resting_state_decomposition_004.png + :target: ../auto_examples/03_connectivity/plot_compare_resting_state_decomposition.html :width: 50% @@ -163,4 +163,4 @@ classification tasks. .. 
seealso:: The full code can be found as an example: - :ref:`sphx_glr_auto_examples_connectivity_plot_compare_resting_state_decomposition.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_compare_resting_state_decomposition.py` diff --git a/doc/manipulating_images/data_preparation.rst b/doc/04_manipulating_images/data_preparation.rst similarity index 88% rename from doc/manipulating_images/data_preparation.rst rename to doc/04_manipulating_images/data_preparation.rst index 009ffa29a9..80431e8d95 100644 --- a/doc/manipulating_images/data_preparation.rst +++ b/doc/04_manipulating_images/data_preparation.rst @@ -120,7 +120,7 @@ possible, there is no need to save your data to a file to pass it to a in memory: -.. literalinclude:: ../../examples/manipulating_images/plot_mask_computation.py +.. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py :start-after: Load NYU resting-state dataset :end-before: # To display the background @@ -140,9 +140,9 @@ Computing the mask .. note:: The full example described in this section can be found here: - :doc:`plot_mask_computation.py <../auto_examples/manipulating_images/plot_mask_computation>`. + :doc:`plot_mask_computation.py <../auto_examples/04_manipulating_images/plot_mask_computation>`. It is also related to this example: - :doc:`plot_nifti_simple.py <../auto_examples/manipulating_images/plot_nifti_simple>`. + :doc:`plot_nifti_simple.py <../auto_examples/04_manipulating_images/plot_nifti_simple>`. If a mask is not specified as an argument, :class:`NiftiMasker` will try to compute @@ -160,13 +160,13 @@ we can compare the data-derived mask against. The first step is to generate a mask with default parameters and visualize it. -.. literalinclude:: ../../examples/manipulating_images/plot_mask_computation.py +.. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py :start-after: # Simple mask extraction from EPI images :end-before: # Generate mask with strong opening -.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_mask_computation_002.png - :target: ../auto_examples/manipulating_images/plot_mask_computation.html +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_002.png + :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html :scale: 50% @@ -176,13 +176,13 @@ opening steps (*opening=10*) using the `mask_args` argument of the on the outer voxel layers of the mask, which can for example remove remaining skull parts in the image. -.. literalinclude:: ../../examples/manipulating_images/plot_mask_computation.py +.. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py :start-after: # Generate mask with strong opening :end-before: # Generate mask with a high lower cutoff -.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_mask_computation_003.png - :target: ../auto_examples/manipulating_images/plot_mask_computation.html +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_003.png + :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html :scale: 50% @@ -195,13 +195,13 @@ the lower cutoff to enforce selection of those voxels that appear as bright in the EPI image. -.. literalinclude:: ../../examples/manipulating_images/plot_mask_computation.py +.. 
literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py :start-after: # Generate mask with a high lower cutoff :end-before: ################################################################################ -.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_mask_computation_004.png - :target: ../auto_examples/manipulating_images/plot_mask_computation.html +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_004.png + :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html :scale: 50% @@ -245,14 +245,14 @@ In this case, nilearn computes automatically the translation part of the transformation matrix (i.e., affine). -.. image:: ../auto_examples/manipulating_images/images/sphx_glr_plot_affine_transformation_002.png - :target: ../auto_examples/manipulating_images/plot_affine_transformation.html +.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_affine_transformation_002.png + :target: ../auto_examples/04_manipulating_images/plot_affine_transformation.html :scale: 33% -.. image:: ../auto_examples/manipulating_images/images/sphx_glr_plot_affine_transformation_004.png - :target: ../auto_examples/manipulating_images/plot_affine_transformation.html +.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_affine_transformation_004.png + :target: ../auto_examples/04_manipulating_images/plot_affine_transformation.html :scale: 33% -.. image:: ../auto_examples/manipulating_images/images/sphx_glr_plot_affine_transformation_003.png - :target: ../auto_examples/manipulating_images/plot_affine_transformation.html +.. image:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_affine_transformation_003.png + :target: ../auto_examples/04_manipulating_images/plot_affine_transformation.html :scale: 33% @@ -339,9 +339,9 @@ images after unmasking (masked-reduced data transformed back into the original whole-brain space). This step is present in almost all the :ref:`examples ` provided in nilearn. Below you will find an excerpt of :ref:`the example performing Anova-SVM on the Haxby data -`): +`): -.. literalinclude:: ../../examples/decoding/plot_haxby_anova_svm.py +.. literalinclude:: ../../examples/02_decoding/plot_haxby_anova_svm.py :start-after: # Look at the SVC's discriminating weights :end-before: # Create the figure @@ -349,9 +349,9 @@ an excerpt of :ref:`the example performing Anova-SVM on the Haxby data .. topic:: **Examples to better understand the NiftiMasker** - * :ref:`sphx_glr_auto_examples_manipulating_images_plot_nifti_simple.py` + * :ref:`sphx_glr_auto_examples_04_manipulating_images_plot_nifti_simple.py` - * :ref:`sphx_glr_auto_examples_manipulating_images_plot_mask_computation.py` + * :ref:`sphx_glr_auto_examples_04_manipulating_images_plot_mask_computation.py` .. _region: @@ -415,7 +415,7 @@ keyword. .. topic:: **Examples** - * :ref:`sphx_glr_auto_examples_connectivity_plot_signal_extraction.py` + * :ref:`sphx_glr_auto_examples_03_connectivity_plot_signal_extraction.py` :class:`NiftiMapsMasker` Usage ------------------------------ @@ -434,7 +434,7 @@ possible option. .. topic:: **Examples** - * :ref:`sphx_glr_auto_examples_connectivity_plot_probabilistic_atlas_extraction.py` + * :ref:`sphx_glr_auto_examples_03_connectivity_plot_probabilistic_atlas_extraction.py` Extraction of signals from seeds:\ :class:`NiftiSpheresMasker`. ================================================================== @@ -460,4 +460,4 @@ seed position is used. .. 
topic:: **Examples** - * :ref:`sphx_glr_auto_examples_connectivity_plot_adhd_spheres.py` + * :ref:`sphx_glr_auto_examples_03_connectivity_plot_adhd_spheres.py` diff --git a/doc/manipulating_images/index.rst b/doc/04_manipulating_images/index.rst similarity index 74% rename from doc/manipulating_images/index.rst rename to doc/04_manipulating_images/index.rst index 3709f44c6c..63fc89f338 100644 --- a/doc/manipulating_images/index.rst +++ b/doc/04_manipulating_images/index.rst @@ -1,13 +1,13 @@ .. include:: ../tune_toc.rst -.. _manipulating_images: +.. _image_manipulation: ================================= Image manipulation ================================= -In this section, we detail the general tools to manipulation of +In this section, we detail the general tools to manipulate brain images with nilearn. | diff --git a/doc/manipulating_images/manipulating_images.rst b/doc/04_manipulating_images/manipulating_images.rst similarity index 86% rename from doc/manipulating_images/manipulating_images.rst rename to doc/04_manipulating_images/manipulating_images.rst index c63834ed37..8cb62f0027 100644 --- a/doc/manipulating_images/manipulating_images.rst +++ b/doc/04_manipulating_images/manipulating_images.rst @@ -143,7 +143,7 @@ Nifti and Analyze files Neuroimaging data can be loaded in a simple way thanks to nibabel_. A Nifti file on disk can be loaded with a single line. -.. literalinclude:: ../../examples/plotting/plot_visualization.py +..
* Computing the mean of images (along the time/4th dimension): :func:`nilearn.image.mean_img` @@ -321,12 +321,12 @@ the three dimensions). Analogous to the majority of nilearn functions, it can also use file names as input parameters. -.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py +.. literalinclude:: ../../examples/04_manipulating_images/plot_roi_extraction.py :start-after: # Smooth the data :end-before: # Run a T-test for face and houses -.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_001.png - :target: ../auto_examples/manipulating_images/plot_roi_extraction.html +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_roi_extraction_001.png + :target: ../auto_examples/04_manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -352,12 +352,12 @@ This test returns p-values that represent probabilities that the two time-series had been drawn from the same distribution. The lower is the p-value, the more discriminative is the voxel in distinguishing the two conditions. -.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py +.. literalinclude:: ../../examples/04_manipulating_images/plot_roi_extraction.py :start-after: # Run a T-test for face and houses :end-before: # Build a mask from this statistical map -.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_002.png - :target: ../auto_examples/manipulating_images/plot_roi_extraction.html +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_roi_extraction_002.png + :target: ../auto_examples/04_manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -373,12 +373,12 @@ Voxels with better p-values are kept as voxels of interest. Applying a threshold to an array is easy thanks to numpy indexing à la Matlab. -.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py +.. literalinclude:: ../../examples/04_manipulating_images/plot_roi_extraction.py :start-after: # Thresholding :end-before: # Binarization and intersection with VT mask -.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_003.png - :target: ../auto_examples/manipulating_images/plot_roi_extraction.html +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_roi_extraction_003.png + :target: ../auto_examples/04_manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -395,12 +395,12 @@ nibabel's **nibabel.load**. We can then use a logical "and" operation that have been selected in both masks. In neuroimaging jargon, this is called an "AND conjunction." -.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py +.. literalinclude:: ../../examples/04_manipulating_images/plot_roi_extraction.py :start-after: # Binarization and intersection with VT mask :end-before: # Dilation -.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_004.png - :target: ../auto_examples/manipulating_images/plot_roi_extraction.html +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_roi_extraction_004.png + :target: ../auto_examples/04_manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -415,12 +415,12 @@ not to forget voxels located on the edge of a ROI. Put differently, such operations can fill "holes" in masked voxel representations. -.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py +.. 
literalinclude:: ../../examples/04_manipulating_images/plot_roi_extraction.py :start-after: # Dilation :end-before: # Identification of connected components -.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_005.png - :target: ../auto_examples/manipulating_images/plot_roi_extraction.html +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_roi_extraction_005.png + :target: ../auto_examples/04_manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -432,12 +432,12 @@ identifies immediately neighboring voxels in our voxels mask. It assigns a separate integer label to each one of them. -.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py +.. literalinclude:: ../../examples/04_manipulating_images/plot_roi_extraction.py :start-after: # Identification of connected components :end-before: # Use the new ROIs to extract data maps in both ROIs -.. figure:: ../auto_examples/manipulating_images/images/sphx_glr_plot_roi_extraction_006.png - :target: ../auto_examples/manipulating_images/plot_roi_extraction.html +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_roi_extraction_006.png + :target: ../auto_examples/04_manipulating_images/plot_roi_extraction.html :align: center :scale: 50% @@ -447,7 +447,7 @@ Saving the result The final voxel mask is saved using nibabel for further inspection with a software such as FSLView. -.. literalinclude:: ../../examples/manipulating_images/plot_roi_extraction.py +.. literalinclude:: ../../examples/04_manipulating_images/plot_roi_extraction.py :start-after: # save the ROI 'atlas' to a single output Nifti .. _nibabel: http://nipy.sourceforge.net/nibabel/ diff --git a/doc/index.rst b/doc/index.rst index ead7dfb534..d5b62a6c91 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -15,29 +15,29 @@ .. Here we are building the carrousel -.. |glass_brain| image:: auto_examples/plotting/images/sphx_glr_plot_demo_glass_brain_002.png - :target: auto_examples/plotting/plot_demo_glass_brain.html +.. |glass_brain| image:: auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_002.png + :target: auto_examples/01_plotting/plot_demo_glass_brain.html -.. |connectome| image:: auto_examples/connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png - :target: auto_examples/connectivity/plot_inverse_covariance_connectome.html +.. |connectome| image:: auto_examples/03_connectivity/images/sphx_glr_plot_inverse_covariance_connectome_004.png + :target: auto_examples/03_connectivity/plot_inverse_covariance_connectome.html .. |haxby_weights| image:: auto_examples/images/sphx_glr_plot_haxby_simple_001.png :target: auto_examples/plot_haxby_simple.html -.. |oasis_weights| image:: auto_examples/decoding/images/sphx_glr_plot_oasis_vbm_002.png - :target: auto_examples/decoding/plot_oasis_vbm.html +.. |oasis_weights| image:: auto_examples/02_decoding/images/sphx_glr_plot_oasis_vbm_002.png + :target: auto_examples/02_decoding/plot_oasis_vbm.html -.. |rest_clustering| image:: auto_examples/connectivity/images/sphx_glr_plot_rest_clustering_001.png - :target: auto_examples/connectivity/plot_rest_clustering.html +.. |rest_clustering| image:: auto_examples/03_connectivity/images/sphx_glr_plot_rest_clustering_001.png + :target: auto_examples/03_connectivity/plot_rest_clustering.html -.. |canica| image:: auto_examples/connectivity/images/sphx_glr_plot_canica_resting_state_011.png - :target: auto_examples/connectivity/plot_canica_resting_state.html +.. 
|canica| image:: auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_011.png + :target: auto_examples/03_connectivity/plot_canica_resting_state.html -.. |tvl1_haxby| image:: auto_examples/decoding/images/sphx_glr_plot_haxby_space_net_002.png - :target: auto_examples/decoding/plot_haxby_space_net.html +.. |tvl1_haxby| image:: auto_examples/02_decoding/images/sphx_glr_plot_haxby_space_net_002.png + :target: auto_examples/02_decoding/plot_haxby_space_net.html -.. |searchlight| image:: auto_examples/decoding/images/sphx_glr_plot_haxby_searchlight_001.png - :target: auto_examples/decoding/plot_haxby_searchlight.html +.. |searchlight| image:: auto_examples/02_decoding/images/sphx_glr_plot_haxby_searchlight_001.png + :target: auto_examples/02_decoding/plot_haxby_searchlight.html .. raw:: html diff --git a/doc/introduction.rst b/doc/introduction.rst index 7e57222896..0e396c6632 100644 --- a/doc/introduction.rst +++ b/doc/introduction.rst @@ -24,7 +24,7 @@ What is nilearn: MVPA, decoding, predictive models, functional connectivity Nilearn can readily be used on :ref:`task fMRI `, :ref:`resting-state `, or - :ref:`VBM ` data. + :ref:`VBM ` data. For a machine-learning expert, the value of nilearn can be seen as domain-specific **feature engineering** construction, that is, shaping @@ -204,8 +204,8 @@ the file name:: :ref:`See more on file name matchings `. -.. image:: auto_examples/plotting/images/sphx_glr_plot_demo_glass_brain_001.png - :target: auto_examples/plotting/plot_demo_glass_brain.html +.. image:: auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_001.png + :target: auto_examples/01_plotting/plot_demo_glass_brain.html :align: center :scale: 60 @@ -264,7 +264,7 @@ To loop over each individual volume of a 4D image, use :func:`image.iter_img`:: * To perform a for loop in Python, you can use the "range" function * The solution can be found :ref:`here - ` + ` | diff --git a/doc/user_guide.rst b/doc/user_guide.rst index 1cb2be5cce..50ca03f7b7 100644 --- a/doc/user_guide.rst +++ b/doc/user_guide.rst @@ -15,10 +15,10 @@ User guide: table of contents :numbered: introduction.rst - decoding/index.rst - connectivity/index.rst - plotting/index.rst - manipulating_images/index.rst + 02_decoding/index.rst + 03_connectivity/index.rst + 01_plotting/index.rst + 04_manipulating_images/index.rst building_blocks/index.rst modules/reference.rst From 8be5de9f88a5e0073992300d046c1a3776fa1f88 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Mon, 11 Jan 2016 15:05:27 +0100 Subject: [PATCH 0042/1925] More files format --- examples/01_plotting/README.txt | 4 + examples/01_plotting/plot_atlas.py | 17 ++ examples/01_plotting/plot_demo_glass_brain.py | 29 +++ .../plot_demo_glass_brain_extensive.py | 38 ++++ .../01_plotting/plot_demo_more_plotting.py | 94 ++++++++++ examples/01_plotting/plot_demo_plotting.py | 62 +++++++ examples/01_plotting/plot_dim_plotting.py | 54 ++++++ examples/01_plotting/plot_haxby_masks.py | 55 ++++++ examples/01_plotting/plot_overlay.py | 41 +++++ examples/01_plotting/plot_prob_atlas.py | 54 ++++++ examples/01_plotting/plot_visualization.py | 60 ++++++ examples/{decoding => 02_decoding}/README.txt | 0 .../plot_haxby_anova_svm.py | 0 .../plot_haxby_different_estimators.py | 0 .../plot_haxby_full_analysis.py | 0 .../plot_haxby_grid_search.py | 0 .../plot_haxby_multiclass.py | 0 .../plot_haxby_searchlight.py | 0 .../plot_haxby_space_net.py | 0 .../plot_haxby_stimuli.py | 0 .../plot_mixed_gambles_space_net.py | 0 .../plot_miyawaki_reconstruction.py | 0 
.../plot_oasis_vbm.py | 0 .../plot_oasis_vbm_space_net.py | 0 .../plot_simulated_data.py | 0 .../README.txt | 0 .../plot_adhd_spheres.py | 0 .../plot_canica_resting_state.py | 0 ...lot_compare_resting_state_decomposition.py | 0 .../plot_connectivity_measures.py | 0 .../plot_extract_regions_canica_maps.py | 149 +++++++++++++++ .../plot_inverse_covariance_connectome.py | 0 .../plot_multi_subject_connectome.py | 0 .../plot_probabilistic_atlas_extraction.py | 0 .../plot_rest_clustering.py | 0 .../plot_signal_extraction.py | 0 .../plot_simulated_connectome.py | 0 examples/04_manipulating_images/README.txt | 4 + .../plot_affine_transformation.py | 128 +++++++++++++ .../plot_extract_rois_smith_atlas.py | 57 ++++++ .../plot_extract_rois_statistical_maps.py | 74 ++++++++ .../plot_mask_computation.py | 101 ++++++++++ .../plot_nifti_simple.py | 66 +++++++ .../plot_roi_extraction.py | 151 +++++++++++++++ .../plot_smooth_mean_image.py | 35 ++++ examples/05_advanced/README.txt | 2 + .../05_advanced/plot_haxby_mass_univariate.py | 173 ++++++++++++++++++ .../05_advanced/plot_ica_resting_state.py | 81 ++++++++ .../plot_localizer_mass_univariate_methods.py | 122 ++++++++++++ 49 files changed, 1651 insertions(+) create mode 100644 examples/01_plotting/README.txt create mode 100644 examples/01_plotting/plot_atlas.py create mode 100644 examples/01_plotting/plot_demo_glass_brain.py create mode 100644 examples/01_plotting/plot_demo_glass_brain_extensive.py create mode 100644 examples/01_plotting/plot_demo_more_plotting.py create mode 100644 examples/01_plotting/plot_demo_plotting.py create mode 100644 examples/01_plotting/plot_dim_plotting.py create mode 100644 examples/01_plotting/plot_haxby_masks.py create mode 100644 examples/01_plotting/plot_overlay.py create mode 100644 examples/01_plotting/plot_prob_atlas.py create mode 100644 examples/01_plotting/plot_visualization.py rename examples/{decoding => 02_decoding}/README.txt (100%) rename examples/{decoding => 02_decoding}/plot_haxby_anova_svm.py (100%) rename examples/{decoding => 02_decoding}/plot_haxby_different_estimators.py (100%) rename examples/{decoding => 02_decoding}/plot_haxby_full_analysis.py (100%) rename examples/{decoding => 02_decoding}/plot_haxby_grid_search.py (100%) rename examples/{decoding => 02_decoding}/plot_haxby_multiclass.py (100%) rename examples/{decoding => 02_decoding}/plot_haxby_searchlight.py (100%) rename examples/{decoding => 02_decoding}/plot_haxby_space_net.py (100%) rename examples/{decoding => 02_decoding}/plot_haxby_stimuli.py (100%) rename examples/{decoding => 02_decoding}/plot_mixed_gambles_space_net.py (100%) rename examples/{decoding => 02_decoding}/plot_miyawaki_reconstruction.py (100%) rename examples/{decoding => 02_decoding}/plot_oasis_vbm.py (100%) rename examples/{decoding => 02_decoding}/plot_oasis_vbm_space_net.py (100%) rename examples/{decoding => 02_decoding}/plot_simulated_data.py (100%) rename examples/{connectivity => 03_connectivity}/README.txt (100%) rename examples/{connectivity => 03_connectivity}/plot_adhd_spheres.py (100%) rename examples/{connectivity => 03_connectivity}/plot_canica_resting_state.py (100%) rename examples/{connectivity => 03_connectivity}/plot_compare_resting_state_decomposition.py (100%) rename examples/{connectivity => 03_connectivity}/plot_connectivity_measures.py (100%) create mode 100644 examples/03_connectivity/plot_extract_regions_canica_maps.py rename examples/{connectivity => 03_connectivity}/plot_inverse_covariance_connectome.py (100%) rename examples/{connectivity => 
03_connectivity}/plot_multi_subject_connectome.py (100%) rename examples/{connectivity => 03_connectivity}/plot_probabilistic_atlas_extraction.py (100%) rename examples/{connectivity => 03_connectivity}/plot_rest_clustering.py (100%) rename examples/{connectivity => 03_connectivity}/plot_signal_extraction.py (100%) rename examples/{connectivity => 03_connectivity}/plot_simulated_connectome.py (100%) create mode 100644 examples/04_manipulating_images/README.txt create mode 100644 examples/04_manipulating_images/plot_affine_transformation.py create mode 100644 examples/04_manipulating_images/plot_extract_rois_smith_atlas.py create mode 100644 examples/04_manipulating_images/plot_extract_rois_statistical_maps.py create mode 100644 examples/04_manipulating_images/plot_mask_computation.py create mode 100644 examples/04_manipulating_images/plot_nifti_simple.py create mode 100644 examples/04_manipulating_images/plot_roi_extraction.py create mode 100644 examples/04_manipulating_images/plot_smooth_mean_image.py create mode 100644 examples/05_advanced/README.txt create mode 100644 examples/05_advanced/plot_haxby_mass_univariate.py create mode 100644 examples/05_advanced/plot_ica_resting_state.py create mode 100644 examples/05_advanced/plot_localizer_mass_univariate_methods.py diff --git a/examples/01_plotting/README.txt b/examples/01_plotting/README.txt new file mode 100644 index 0000000000..eb0027b784 --- /dev/null +++ b/examples/01_plotting/README.txt @@ -0,0 +1,4 @@ +Visualization of brain images +----------------------------- + +See :ref:`plotting` for more details. diff --git a/examples/01_plotting/plot_atlas.py b/examples/01_plotting/plot_atlas.py new file mode 100644 index 0000000000..0f3c944efc --- /dev/null +++ b/examples/01_plotting/plot_atlas.py @@ -0,0 +1,17 @@ +""" +Basic Atlas plotting +======================= + +Plot the regions of a reference atlas (here the Harvard-Oxford atlas). +""" + +from nilearn import datasets +from nilearn import plotting + +dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm') +atlas_filename = dataset.maps + +print('Atlas ROIs are located at: %s' % atlas_filename) + +plotting.plot_roi(atlas_filename, title="Harvard Oxford atlas") +plotting.show() diff --git a/examples/01_plotting/plot_demo_glass_brain.py b/examples/01_plotting/plot_demo_glass_brain.py new file mode 100644 index 0000000000..1784935073 --- /dev/null +++ b/examples/01_plotting/plot_demo_glass_brain.py @@ -0,0 +1,29 @@ +""" +Glass brain plotting in nilearn +=============================== + +See :ref:`plotting` for more plotting functionalities. 
+""" + + +############################################################################### +# Retrieve the data +from nilearn import datasets + +localizer_dataset = datasets.fetch_localizer_contrasts( + ["left vs right button press"], + n_subjects=2, + get_tmaps=True) +localizer_tmap_filename = localizer_dataset.tmaps[1] + +############################################################################### +# demo glass brain plotting +from nilearn import plotting + +plotting.plot_glass_brain(localizer_tmap_filename, threshold=3) + +plotting.plot_glass_brain( + localizer_tmap_filename, title='plot_glass_brain', + black_bg=True, display_mode='xz', threshold=3) + +plotting.show() diff --git a/examples/01_plotting/plot_demo_glass_brain_extensive.py b/examples/01_plotting/plot_demo_glass_brain_extensive.py new file mode 100644 index 0000000000..51881ccb80 --- /dev/null +++ b/examples/01_plotting/plot_demo_glass_brain_extensive.py @@ -0,0 +1,38 @@ +""" +Glass brain plotting in nilearn (all options) +============================================= + +This example goes through different options of the :func:`nilearn.plotting.plot_glass_brain` function +(including plotting negative values). +See :ref:`plotting` for more plotting functionalities. +""" + + +############################################################################### +# Retrieve the data +from nilearn import datasets + +localizer_dataset = datasets.fetch_localizer_contrasts( + ["left vs right button press"], + n_subjects=2, + get_tmaps=True) +localizer_tmap_filename = localizer_dataset.tmaps[1] + +############################################################################### +# demo glass brain plotting +from nilearn import plotting + +plotting.plot_glass_brain(localizer_tmap_filename, threshold=3) + +plotting.plot_glass_brain(localizer_tmap_filename, threshold=3, colorbar=True) + +plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain', + black_bg=True, display_mode='xz', threshold=3) + +plotting.plot_glass_brain(localizer_tmap_filename, threshold=0, colorbar=True, + plot_abs=False) + +plotting.plot_glass_brain(localizer_tmap_filename, threshold=3, + colorbar=True, plot_abs=False) + +plotting.show() diff --git a/examples/01_plotting/plot_demo_more_plotting.py b/examples/01_plotting/plot_demo_more_plotting.py new file mode 100644 index 0000000000..84f005d2d5 --- /dev/null +++ b/examples/01_plotting/plot_demo_more_plotting.py @@ -0,0 +1,94 @@ +""" +More nilearn plotting +===================== + +See :ref:`plotting` for more details. 
+""" + +# The imports from nilearn plotting and image processing +from nilearn import plotting, image + +############################################################################### +# Retrieve the data: haxby dataset to have EPI images and masks, and +# localizer dataset to have contrast maps + +from nilearn import datasets +haxby_dataset = datasets.fetch_haxby(n_subjects=1) +haxby_anat_filename = haxby_dataset.anat[0] +haxby_mask_filename = haxby_dataset.mask_vt[0] +haxby_func_filename = haxby_dataset.func[0] + +localizer_dataset = datasets.fetch_localizer_contrasts( + ["left vs right button press"], + n_subjects=2, + get_anats=True) +localizer_anat_filename = localizer_dataset.anats[1] +localizer_cmap_filename = localizer_dataset.cmaps[1] + +######################################## +plotting.plot_stat_map(localizer_cmap_filename, display_mode='ortho', + cut_coords=(36, -27, 60), + title="display_mode='ortho', cut_coords=(36, -27, 60)") + +######################################## +plotting.plot_stat_map(localizer_cmap_filename, display_mode='z', cut_coords=5, + title="display_mode='z', cut_coords=5") + +######################################## +plotting.plot_stat_map(localizer_cmap_filename, display_mode='x', + cut_coords=(-36, 36), + title="display_mode='x', cut_coords=(-36, 36)") + +######################################## +plotting.plot_stat_map(localizer_cmap_filename, display_mode='y', cut_coords=1, + title="display_mode='x', cut_coords=(-36, 36)") + +######################################## +plotting.plot_stat_map(localizer_cmap_filename, display_mode='z', + cut_coords=1, colorbar=False, + title="display_mode='z', cut_coords=1, colorbar=False") + +######################################## +plotting.plot_stat_map(localizer_cmap_filename, display_mode='xz', + cut_coords=(36, 60), + title="display_mode='xz', cut_coords=(36, 60)") + +######################################## +plotting.plot_stat_map(localizer_cmap_filename, display_mode='yx', + cut_coords=(-27, 36), + title="display_mode='yx', cut_coords=(-27, 36)") + +######################################## +plotting.plot_stat_map(localizer_cmap_filename, display_mode='yz', + cut_coords=(-27, 60), + title="display_mode='yz', cut_coords=(-27, 60)") + +############################################################################### +# demo display objects with add_* methods +mean_haxby_img = image.mean_img(haxby_func_filename) + +# Plot T1 outline on top of the mean EPI (useful for checking coregistration) +display = plotting.plot_anat(mean_haxby_img, title="add_edges") +display.add_edges(haxby_anat_filename) + +######################################## +# Plotting outline of the mask on top of the EPI +display = plotting.plot_anat(mean_haxby_img, title="add_contours", + cut_coords=(28, -34, -22)) +display.add_contours(haxby_mask_filename, levels=[0.5], colors='r') + +############################################################################### +# demo saving plots to file + +plotting.plot_stat_map(localizer_cmap_filename, + title='Using plot_stat_map output_file', + output_file='plot_stat_map.png') + +######################################## +display = plotting.plot_stat_map(localizer_cmap_filename, + title='Using display savefig') +display.savefig('plot_stat_map_from_display.png') +# In non-interactive settings make sure you close your displays +display.close() + +plotting.show() diff --git a/examples/01_plotting/plot_demo_plotting.py b/examples/01_plotting/plot_demo_plotting.py new file mode 100644 index 0000000000..c92d168c3d --- 
/dev/null
+++ b/examples/01_plotting/plot_demo_plotting.py
@@ -0,0 +1,62 @@
+"""
+Plotting in nilearn
+===================
+
+Nilearn comes with a set of plotting functions for Nifti-like images,
+see :ref:`plotting` for more details.
+"""
+
+# Import plotting and image processing tools
+from nilearn import plotting, image
+
+###############################################################################
+# Retrieve the data: haxby dataset to have EPI images and masks, and
+# localizer dataset to have contrast maps
+
+from nilearn import datasets
+haxby_dataset = datasets.fetch_haxby(n_subjects=1)
+
+# print basic information on the dataset
+print('First subject anatomical nifti image (3D) is at: %s' %
+      haxby_dataset.anat[0])
+print('First subject functional nifti image (4D) is at: %s' %
+      haxby_dataset.func[0])  # 4D data
+
+haxby_anat_filename = haxby_dataset.anat[0]
+haxby_mask_filename = haxby_dataset.mask_vt[0]
+haxby_func_filename = haxby_dataset.func[0]
+
+localizer_dataset = datasets.fetch_localizer_contrasts(
+    ["left vs right button press"],
+    n_subjects=2,
+    get_anats=True,
+    get_tmaps=True)
+localizer_anat_filename = localizer_dataset.anats[1]
+localizer_tmap_filename = localizer_dataset.tmaps[1]
+
+###############################################################################
+# Plotting statistical maps
+plotting.plot_stat_map(localizer_tmap_filename, bg_img=localizer_anat_filename,
+                       threshold=3, title="plot_stat_map",
+                       cut_coords=(36, -27, 66))
+
+###############################################################################
+# Plotting glass brain
+plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain',
+                          threshold=3)
+
+###############################################################################
+# Plotting anatomical maps
+plotting.plot_anat(haxby_anat_filename, title="plot_anat")
+
+###############################################################################
+# Plotting ROIs (here the mask)
+plotting.plot_roi(haxby_mask_filename, bg_img=haxby_anat_filename,
+                  title="plot_roi")
+
+###############################################################################
+# Plotting EPI haxby
+mean_haxby_img = image.mean_img(haxby_func_filename)
+plotting.plot_epi(mean_haxby_img, title="plot_epi")
+
+plotting.show()
diff --git a/examples/01_plotting/plot_dim_plotting.py b/examples/01_plotting/plot_dim_plotting.py
new file mode 100644
index 0000000000..13439bcf58
--- /dev/null
+++ b/examples/01_plotting/plot_dim_plotting.py
@@ -0,0 +1,54 @@
+"""
+Controlling the contrast of the background when plotting
+========================================================
+
+The *dim* argument controls the contrast of the background.
+
+*dim* modifies the contrast of this image: dim=0 leaves the image
+unchanged, negative values of *dim* enhance it, and positive values
+decrease it (dim the background).
+
+This *dim* argument may also be useful for the plot_roi function, used to
+display ROIs on top of a background image.
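+
+Below, the same statistical map is plotted with *dim* varying from -.5 to 1,
+so that the effect on the background contrast is easy to compare.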
+""" + +# Retrieve the data: the localizer dataset with contrast maps + +from nilearn import datasets + +localizer_dataset = datasets.fetch_localizer_contrasts( + ["left vs right button press"], + n_subjects=2, + get_anats=True, + get_tmaps=True) +localizer_anat_filename = localizer_dataset.anats[1] +localizer_tmap_filename = localizer_dataset.tmaps[1] + +# Plotting: vary the 'dim' of the background +from nilearn import plotting + +plotting.plot_stat_map(localizer_tmap_filename, + bg_img=localizer_anat_filename, + cut_coords=(36, -27, 66), + threshold=3, title="dim=-.5", + dim=-.5) + +plotting.plot_stat_map(localizer_tmap_filename, + bg_img=localizer_anat_filename, + cut_coords=(36, -27, 66), + threshold=3, title="dim=0", + dim=0) + +plotting.plot_stat_map(localizer_tmap_filename, + bg_img=localizer_anat_filename, + cut_coords=(36, -27, 66), + threshold=3, title="dim=.5", + dim=.5) + +plotting.plot_stat_map(localizer_tmap_filename, + bg_img=localizer_anat_filename, + cut_coords=(36, -27, 66), + threshold=3, title="dim=1", + dim=1) + +plotting.show() diff --git a/examples/01_plotting/plot_haxby_masks.py b/examples/01_plotting/plot_haxby_masks.py new file mode 100644 index 0000000000..8930d595c0 --- /dev/null +++ b/examples/01_plotting/plot_haxby_masks.py @@ -0,0 +1,55 @@ +""" +Plot Haxby masks +================= + +Small script to plot the masks of the Haxby dataset. +""" +import numpy as np +from scipy import linalg +import matplotlib.pyplot as plt + +from nilearn import datasets +haxby_dataset = datasets.fetch_haxby() + +# print basic information on the dataset +print('First subject anatomical nifti image (3D) is at: %s' % + haxby_dataset.anat[0]) +print('First subject functional nifti image (4D) is at: %s' % + haxby_dataset.func[0]) # 4D data + +# Build the mean image because we have no anatomic data +from nilearn import image +func_filename = haxby_dataset.func[0] +mean_img = image.mean_img(func_filename) + +z_slice = -24 +from nilearn.image.resampling import coord_transform +affine = mean_img.get_affine() +_, _, k_slice = coord_transform(0, 0, z_slice, + linalg.inv(affine)) +k_slice = np.round(k_slice) + +fig = plt.figure(figsize=(4, 5.4), facecolor='k') + +from nilearn.plotting import plot_anat, show +display = plot_anat(mean_img, display_mode='z', cut_coords=[z_slice], + figure=fig) +mask_vt_filename = haxby_dataset.mask_vt[0] +mask_house_filename = haxby_dataset.mask_house[0] +mask_face_filename = haxby_dataset.mask_face[0] +display.add_contours(mask_vt_filename, contours=1, antialiased=False, + linewidths=4., levels=[0], colors=['red']) +display.add_contours(mask_house_filename, contours=1, antialiased=False, + linewidths=4., levels=[0], colors=['blue']) +display.add_contours(mask_face_filename, contours=1, antialiased=False, + linewidths=4., levels=[0], colors=['limegreen']) + +# We generate a legend using the trick described on +# http://matplotlib.sourceforge.net/users/legend_guide.httpml#using-proxy-artist +from matplotlib.patches import Rectangle +p_v = Rectangle((0, 0), 1, 1, fc="red") +p_h = Rectangle((0, 0), 1, 1, fc="blue") +p_f = Rectangle((0, 0), 1, 1, fc="limegreen") +plt.legend([p_v, p_h, p_f], ["vt", "house", "face"]) + +show() diff --git a/examples/01_plotting/plot_overlay.py b/examples/01_plotting/plot_overlay.py new file mode 100644 index 0000000000..2701d91391 --- /dev/null +++ b/examples/01_plotting/plot_overlay.py @@ -0,0 +1,41 @@ +""" +Visualizing a probablistic atlas: the default mode in the MSDL atlas 
+=====================================================================
+
+Visualizing a probabilistic atlas requires visualizing the different
+maps that compose it.
+
+Here we represent the nodes constituting the default mode network in the
+`MSDL atlas
+`_.
+
+The tools that we need to leverage are:
+
+ * :func:`nilearn.image.index_img` to retrieve the various maps composing
+   the atlas
+
+ * Adding overlays on an existing brain display, to plot each of these
+   maps
+
+"""
+
+from nilearn import datasets, plotting, image
+
+atlas_data = datasets.fetch_atlas_msdl()
+atlas_filename = atlas_data.maps
+
+# First plot the map for the PCC: index 4 in the atlas
+display = plotting.plot_stat_map(image.index_img(atlas_filename, 4),
+                                 colorbar=False,
+                                 title="DMN nodes in MSDL atlas")
+
+# Now add as an overlay the maps for the ACC and the left and right
+# parietal nodes
+display.add_overlay(image.index_img(atlas_filename, 5),
+                    cmap=plotting.cm.black_blue)
+display.add_overlay(image.index_img(atlas_filename, 6),
+                    cmap=plotting.cm.black_green)
+display.add_overlay(image.index_img(atlas_filename, 3),
+                    cmap=plotting.cm.black_pink)
+
+plotting.show()
diff --git a/examples/01_plotting/plot_prob_atlas.py b/examples/01_plotting/plot_prob_atlas.py
new file mode 100644
index 0000000000..e3c76c0a7c
--- /dev/null
+++ b/examples/01_plotting/plot_prob_atlas.py
@@ -0,0 +1,54 @@
+"""
+Visualizing 4D probabilistic atlas maps
+=======================================
+
+This example shows how to visualize probabilistic atlases made of 4D images.
+There are 3 different display types:
+
+1. "contours", in which maps or ROIs are shown as contours delineated by \
+   colored lines.
+
+2. "filled_contours", in which maps are shown as contours, as above, but \
+   filled inside.
+
+3. "continuous", in which maps are shown as plain color overlays.
+
+The :func:`nilearn.plotting.plot_prob_atlas` function displays each map
+in a different color, picked randomly from a predefined colormap.
+
+See :ref:`plotting` for more information on how to tune the parameters.
+"""
+# Load 4D probabilistic atlases
+from nilearn import datasets
+
+# Harvard Oxford Atlas
+harvard_oxford = datasets.fetch_atlas_harvard_oxford('cort-prob-2mm')
+harvard_oxford_sub = datasets.fetch_atlas_harvard_oxford('sub-prob-2mm')
+
+# Multi Subject Dictionary Learning Atlas
+msdl = datasets.fetch_atlas_msdl()
+
+# Smith ICA Atlas and Brain Maps 2009
+smith = datasets.fetch_atlas_smith_2009()
+
+# ICBM tissue probability
+icbm = datasets.fetch_icbm152_2009()
+
+# Visualization
+from nilearn import plotting
+
+atlas_types = {'Harvard_Oxford': harvard_oxford.maps,
+               'Harvard_Oxford sub': harvard_oxford_sub.maps,
+               'MSDL': msdl.maps, 'Smith 2009 10 RSNs': smith.rsn10,
+               'Smith2009 20 RSNs': smith.rsn20,
+               'Smith2009 70 RSNs': smith.rsn70,
+               'Smith2009 10 Brainmap': smith.bm10,
+               'Smith2009 20 Brainmap': smith.bm20,
+               'Smith2009 70 Brainmap': smith.bm70,
+               'ICBM tissues': (icbm['wm'], icbm['gm'], icbm['csf'])}
+
+for name, atlas in sorted(atlas_types.items()):
+    plotting.plot_prob_atlas(atlas, title=name)
+
+plotting.show()
diff --git a/examples/01_plotting/plot_visualization.py b/examples/01_plotting/plot_visualization.py
new file mode 100644
index 0000000000..2076202b0c
--- /dev/null
+++ b/examples/01_plotting/plot_visualization.py
@@ -0,0 +1,60 @@
+"""
+NeuroImaging volumes visualization
+==================================
+
+Simple example to show Nifti data visualization.
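+
+The example also computes a brain mask from the EPI data and uses it to
+extract and plot voxel time series.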
+""" + +############################################################################## +# Fetch data +from nilearn import datasets + +haxby_dataset = datasets.fetch_haxby(n_subjects=1) + +# print basic information on the dataset +print('First anatomical nifti image (3D) located is at: %s' % + haxby_dataset.anat[0]) +print('First functional nifti image (4D) is located at: %s' % + haxby_dataset.func[0]) + +############################################################################## +# Visualization +from nilearn.image.image import mean_img + +# Compute the mean EPI: we do the mean along the axis 3, which is time +func_filename = haxby_dataset.func[0] +mean_haxby = mean_img(func_filename) + +from nilearn.plotting import plot_epi, show +plot_epi(mean_haxby) + +############################################################################## +# Extracting a brain mask + +# Simple computation of a mask from the fMRI data +from nilearn.masking import compute_epi_mask +mask_img = compute_epi_mask(func_filename) + +# Visualize it as an ROI +from nilearn.plotting import plot_roi +plot_roi(mask_img, mean_haxby) + +############################################################################## +# Applying the mask to extract the corresponding time series + +from nilearn.masking import apply_mask +masked_data = apply_mask(func_filename, mask_img) + +# masked_data shape is (timepoints, voxels). We can plot the first 150 +# timepoints from two voxels + +# And now plot a few of these +import matplotlib.pyplot as plt +plt.figure(figsize=(7, 5)) +plt.plot(masked_data[:2, :150].T) +plt.xlabel('Time [TRs]', fontsize=16) +plt.ylabel('Intensity', fontsize=16) +plt.xlim(0, 150) +plt.subplots_adjust(bottom=.12, top=.95, right=.95, left=.12) + +show() diff --git a/examples/decoding/README.txt b/examples/02_decoding/README.txt similarity index 100% rename from examples/decoding/README.txt rename to examples/02_decoding/README.txt diff --git a/examples/decoding/plot_haxby_anova_svm.py b/examples/02_decoding/plot_haxby_anova_svm.py similarity index 100% rename from examples/decoding/plot_haxby_anova_svm.py rename to examples/02_decoding/plot_haxby_anova_svm.py diff --git a/examples/decoding/plot_haxby_different_estimators.py b/examples/02_decoding/plot_haxby_different_estimators.py similarity index 100% rename from examples/decoding/plot_haxby_different_estimators.py rename to examples/02_decoding/plot_haxby_different_estimators.py diff --git a/examples/decoding/plot_haxby_full_analysis.py b/examples/02_decoding/plot_haxby_full_analysis.py similarity index 100% rename from examples/decoding/plot_haxby_full_analysis.py rename to examples/02_decoding/plot_haxby_full_analysis.py diff --git a/examples/decoding/plot_haxby_grid_search.py b/examples/02_decoding/plot_haxby_grid_search.py similarity index 100% rename from examples/decoding/plot_haxby_grid_search.py rename to examples/02_decoding/plot_haxby_grid_search.py diff --git a/examples/decoding/plot_haxby_multiclass.py b/examples/02_decoding/plot_haxby_multiclass.py similarity index 100% rename from examples/decoding/plot_haxby_multiclass.py rename to examples/02_decoding/plot_haxby_multiclass.py diff --git a/examples/decoding/plot_haxby_searchlight.py b/examples/02_decoding/plot_haxby_searchlight.py similarity index 100% rename from examples/decoding/plot_haxby_searchlight.py rename to examples/02_decoding/plot_haxby_searchlight.py diff --git a/examples/decoding/plot_haxby_space_net.py b/examples/02_decoding/plot_haxby_space_net.py similarity index 100% rename from 
examples/decoding/plot_haxby_space_net.py rename to examples/02_decoding/plot_haxby_space_net.py diff --git a/examples/decoding/plot_haxby_stimuli.py b/examples/02_decoding/plot_haxby_stimuli.py similarity index 100% rename from examples/decoding/plot_haxby_stimuli.py rename to examples/02_decoding/plot_haxby_stimuli.py diff --git a/examples/decoding/plot_mixed_gambles_space_net.py b/examples/02_decoding/plot_mixed_gambles_space_net.py similarity index 100% rename from examples/decoding/plot_mixed_gambles_space_net.py rename to examples/02_decoding/plot_mixed_gambles_space_net.py diff --git a/examples/decoding/plot_miyawaki_reconstruction.py b/examples/02_decoding/plot_miyawaki_reconstruction.py similarity index 100% rename from examples/decoding/plot_miyawaki_reconstruction.py rename to examples/02_decoding/plot_miyawaki_reconstruction.py diff --git a/examples/decoding/plot_oasis_vbm.py b/examples/02_decoding/plot_oasis_vbm.py similarity index 100% rename from examples/decoding/plot_oasis_vbm.py rename to examples/02_decoding/plot_oasis_vbm.py diff --git a/examples/decoding/plot_oasis_vbm_space_net.py b/examples/02_decoding/plot_oasis_vbm_space_net.py similarity index 100% rename from examples/decoding/plot_oasis_vbm_space_net.py rename to examples/02_decoding/plot_oasis_vbm_space_net.py diff --git a/examples/decoding/plot_simulated_data.py b/examples/02_decoding/plot_simulated_data.py similarity index 100% rename from examples/decoding/plot_simulated_data.py rename to examples/02_decoding/plot_simulated_data.py diff --git a/examples/connectivity/README.txt b/examples/03_connectivity/README.txt similarity index 100% rename from examples/connectivity/README.txt rename to examples/03_connectivity/README.txt diff --git a/examples/connectivity/plot_adhd_spheres.py b/examples/03_connectivity/plot_adhd_spheres.py similarity index 100% rename from examples/connectivity/plot_adhd_spheres.py rename to examples/03_connectivity/plot_adhd_spheres.py diff --git a/examples/connectivity/plot_canica_resting_state.py b/examples/03_connectivity/plot_canica_resting_state.py similarity index 100% rename from examples/connectivity/plot_canica_resting_state.py rename to examples/03_connectivity/plot_canica_resting_state.py diff --git a/examples/connectivity/plot_compare_resting_state_decomposition.py b/examples/03_connectivity/plot_compare_resting_state_decomposition.py similarity index 100% rename from examples/connectivity/plot_compare_resting_state_decomposition.py rename to examples/03_connectivity/plot_compare_resting_state_decomposition.py diff --git a/examples/connectivity/plot_connectivity_measures.py b/examples/03_connectivity/plot_connectivity_measures.py similarity index 100% rename from examples/connectivity/plot_connectivity_measures.py rename to examples/03_connectivity/plot_connectivity_measures.py diff --git a/examples/03_connectivity/plot_extract_regions_canica_maps.py b/examples/03_connectivity/plot_extract_regions_canica_maps.py new file mode 100644 index 0000000000..023131c453 --- /dev/null +++ b/examples/03_connectivity/plot_extract_regions_canica_maps.py @@ -0,0 +1,149 @@ +""" +Regions extraction using Canonical ICA maps and functional connectomes +====================================================================== + +This example shows how to use :class:`nilearn.regions.RegionExtractor` +to extract connected brain regions from whole brain ICA maps and +use them to estimate a connectome. 
+
+We use 20 resting-state ADHD functional datasets from :func:`nilearn.datasets.fetch_adhd`
+and :class:`nilearn.decomposition.CanICA` for whole brain ICA maps.
+
+Please see the related documentation of :class:`nilearn.regions.RegionExtractor`
+for more details.
+"""
+
+################################################################################
+# Fetch the ADHD resting-state functional datasets from the datasets
+# utilities
+from nilearn import datasets
+
+adhd_dataset = datasets.fetch_adhd(n_subjects=20)
+func_filenames = adhd_dataset.func
+confounds = adhd_dataset.confounds
+
+################################################################################
+# Canonical ICA decomposition of the functional datasets, using CanICA from
+# the decomposition module
+from nilearn.decomposition import CanICA
+
+# Initialize CanICA parameters
+canica = CanICA(n_components=5, smoothing_fwhm=6.,
+                memory="nilearn_cache", memory_level=2,
+                random_state=0)
+# Fit to the data
+canica.fit(func_filenames)
+# ICA maps
+components_img = canica.masker_.inverse_transform(canica.components_)
+
+# Visualization
+# Show ICA maps by using plotting utilities
+from nilearn import plotting
+
+plotting.plot_prob_atlas(components_img, view_type='filled_contours',
+                         title='ICA components')
+
+################################################################################
+# Extract regions from the ICA maps, then extract timeseries signals from
+# those regions; both are done with the RegionExtractor class from the
+# regions module.
+# With thresholding_strategy='ratio_n_voxels', threshold=0.5 controls the
+# fraction of the most intense non-zero voxels that are kept across all
+# maps: lowering the threshold retains fewer, more intense voxels.
+from nilearn.regions import RegionExtractor
+
+extractor = RegionExtractor(components_img, threshold=0.5,
+                            thresholding_strategy='ratio_n_voxels',
+                            extractor='local_regions',
+                            standardize=True, min_region_size=1350)
+# Just call fit() to run the region extraction
+extractor.fit()
+# Extracted regions are stored in regions_img_
+regions_extracted_img = extractor.regions_img_
+# Each region index is stored in index_
+regions_index = extractor.index_
+# Total number of regions extracted
+n_regions_extracted = regions_extracted_img.shape[-1]
+
+# Visualization
+# Show the region extraction results
+title = ('%d regions are extracted from %d ICA components.'
+         '\nEach separate color indicates a distinct extracted region'
+         % (n_regions_extracted, 5))
+plotting.plot_prob_atlas(regions_extracted_img, view_type='filled_contours',
+                         title=title)
+
+################################################################################
+# Computing correlation coefficients
+# First we extract each subject's timeseries signals, then estimate
+# correlation matrices on those signals.
+# To extract timeseries signals, we call transform() from the RegionExtractor
+# object on each subject's functional data stored in func_filenames.
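+# (For each subject, transform() is expected to return a 2D array of shape
+# (n_timepoints, n_regions).)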
+# To estimate correlation matrices, we import the connectome utilities from
+# nilearn
+from nilearn.connectome import ConnectivityMeasure
+
+correlations = []
+# Initializing ConnectivityMeasure object with kind='correlation'
+connectome_measure = ConnectivityMeasure(kind='correlation')
+for filename, confound in zip(func_filenames, confounds):
+    # call transform from RegionExtractor object to extract timeseries signals
+    timeseries_each_subject = extractor.transform(filename, confounds=confound)
+    # call fit_transform from ConnectivityMeasure object
+    correlation = connectome_measure.fit_transform([timeseries_each_subject])
+    # saving each subject correlation to correlations
+    correlations.append(correlation)
+
+# Mean of all correlations
+import numpy as np
+
+mean_correlations = np.mean(correlations, axis=0).reshape(n_regions_extracted,
+                                                          n_regions_extracted)
+
+# Visualization
+# Show the mean correlation results
+# Import image utilities to operate on the 4th dimension
+import matplotlib.pyplot as plt
+from nilearn import image
+
+regions_imgs = image.iter_img(regions_extracted_img)
+coords_connectome = [plotting.find_xyz_cut_coords(img) for img in regions_imgs]
+title = 'Correlation interactions between %d regions' % n_regions_extracted
+plt.figure()
+plt.imshow(mean_correlations, interpolation="nearest",
+           vmax=1, vmin=-1, cmap=plt.cm.bwr)
+plt.colorbar()
+plt.title(title)
+plotting.plot_connectome(mean_correlations, coords_connectome,
+                         edge_threshold='90%', title=title)
+
+################################################################################
+# Show the Default Mode Network (DMN) regions before and after region
+# extraction, by manually identifying the index of the DMN in the ICA
+# decomposed components
+from nilearn._utils.compat import izip
+
+# First we plot the DMN without region extraction; we are interested only in
+# index 3
+img = image.index_img(components_img, 3)
+coords = plotting.find_xyz_cut_coords(img)
+display = plotting.plot_stat_map(img, cut_coords=((0, -52, 29)),
+                                 colorbar=False, title='ICA map: DMN mode')
+
+# Now, we plot the DMN after region extraction to show that connected regions
+# are nicely separated. Each extracted brain region is shown in a separate
+# color.
+
+# For this, we take the indices of all the extracted regions related to the
+# original ICA map 3.
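+# (np.where returns a tuple of index arrays; its first element, used in the
+# loop below, holds those region indices.)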
+regions_indices_of_map3 = np.where(np.array(regions_index) == 3) + +display = plotting.plot_anat(cut_coords=((0, -52, 29)), title='Extracted regions in DMN mode') + +# Now add as an overlay by looping over all the regions for right +# temporoparietal function, posterior cingulate cortex, medial prefrontal +# cortex, left temporoparietal junction +color_list = [[0., 1., 0.29, 1.], [0., 1., 0.54, 1.], + [0., 1., 0.78, 1.], [0., 0.96, 1., 1.], + [0., 0.73, 1., 1.], [0., 0.47, 1., 1.], + [0., 0.22, 1., 1.], [0.01, 0., 1., 1.], + [0.26, 0., 1., 1.]] +for each_index_of_map3, color in izip(regions_indices_of_map3[0], color_list): + display.add_overlay(image.index_img(regions_extracted_img, each_index_of_map3), + cmap=plotting.cm.alpha_cmap(color)) + +plotting.show() diff --git a/examples/connectivity/plot_inverse_covariance_connectome.py b/examples/03_connectivity/plot_inverse_covariance_connectome.py similarity index 100% rename from examples/connectivity/plot_inverse_covariance_connectome.py rename to examples/03_connectivity/plot_inverse_covariance_connectome.py diff --git a/examples/connectivity/plot_multi_subject_connectome.py b/examples/03_connectivity/plot_multi_subject_connectome.py similarity index 100% rename from examples/connectivity/plot_multi_subject_connectome.py rename to examples/03_connectivity/plot_multi_subject_connectome.py diff --git a/examples/connectivity/plot_probabilistic_atlas_extraction.py b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py similarity index 100% rename from examples/connectivity/plot_probabilistic_atlas_extraction.py rename to examples/03_connectivity/plot_probabilistic_atlas_extraction.py diff --git a/examples/connectivity/plot_rest_clustering.py b/examples/03_connectivity/plot_rest_clustering.py similarity index 100% rename from examples/connectivity/plot_rest_clustering.py rename to examples/03_connectivity/plot_rest_clustering.py diff --git a/examples/connectivity/plot_signal_extraction.py b/examples/03_connectivity/plot_signal_extraction.py similarity index 100% rename from examples/connectivity/plot_signal_extraction.py rename to examples/03_connectivity/plot_signal_extraction.py diff --git a/examples/connectivity/plot_simulated_connectome.py b/examples/03_connectivity/plot_simulated_connectome.py similarity index 100% rename from examples/connectivity/plot_simulated_connectome.py rename to examples/03_connectivity/plot_simulated_connectome.py diff --git a/examples/04_manipulating_images/README.txt b/examples/04_manipulating_images/README.txt new file mode 100644 index 0000000000..3e9090f5fa --- /dev/null +++ b/examples/04_manipulating_images/README.txt @@ -0,0 +1,4 @@ +Manipulating brain image volumes +-------------------------------- + +See :ref:`data_manipulation` for more details. diff --git a/examples/04_manipulating_images/plot_affine_transformation.py b/examples/04_manipulating_images/plot_affine_transformation.py new file mode 100644 index 0000000000..d3cd88d2e0 --- /dev/null +++ b/examples/04_manipulating_images/plot_affine_transformation.py @@ -0,0 +1,128 @@ +""" +Visualization of affine resamplings +=================================== + +This example shows how an affine resampling works. + +A Nifti image contains, along with its 3D or 4D data content, a 4x4 matrix +encoding and affine transformation that maps the data array into millimeter +space. 
If (i, j, k) encodes an integer position (voxel) with the data array, +then adding 1 as a fourth entry, (i, j, k, 1), and multiplying by the affine +matrix yields (x, y, z, 1), a 4-vector containing the millimeter position of +the voxel. + +The resampling procedure in `resample_img` can attribute a new affine matrix +and a new shape to your Nifti image while keeping its representation in +millimeter space exactly the same (up to sampling error and possible +clipping). + +This example shows a 2D image in voxel space, and the position of the data in +millimeter space, as encoded by the affine matrix. The image is the resampled +in 3 ways and displayed in in millimeter space. + +1) 4x4 affine matrix and target shape given +2) 3x3 transformation matrix (only new voxel axes, no offset) + given and no shape given +3) 4x4 affine matrix given and no shape given + +While 1) needs no further explanation (it returns an image exactly as +specified, with a new view on the data in millimeter space), 2) and 3) are +missing some specification, which is subsequently inferred by `resample_img`: +If the affine offset is missing (3x3 transformation, case 2), then the new +image will be the closest bounding box possible around the data along the +new axes. If the affine offset is given, but no shape provided, the +resulting image will be the closest bounding box around the union of the +data points and the affine offset. + +Note that specifying a shape without specifying a 3x3 transformation matrix +causes an error message, because `resample_img` will not know where to start +the bounding box (there is no intelligent way of inferring this given the +bounding box shape). +""" + + +############################################################################# +# First make a simple synthetic image + +# Create the data with numpy +import numpy as np +grid = np.mgrid[0:192, 0:128] +circle = np.sum( + (grid - np.array([32, 32])[:, np.newaxis, np.newaxis]) ** 2, + axis=0) < 256 +diamond = np.sum( + np.abs(grid - np.array([128, 80])[:, np.newaxis, np.newaxis]), + axis=0) < 16 +rectangle = np.max(np.abs( + grid - np.array([64, 96])[:, np.newaxis, np.newaxis]), axis=0) < 16 + +image = np.zeros_like(circle) +image[16:160, 16:120] = 1. +image = image + 2 * circle + 3 * rectangle + 4 * diamond + 1 + +vmax = image.max() + +source_affine = np.eye(4) +# Use canonical vectors for affine +# Give the affine an offset +source_affine[:2, 3] = np.array([96, 64]) + +# Rotate it slightly +angle = np.pi / 180 * 15 +rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], + [np.sin(angle), np.cos(angle)]]) +source_affine[:2, :2] = rotation_matrix * 2.0 # 2.0mm voxel size + +# We need to turn this data into a nibabel image +import nibabel +img = nibabel.Nifti1Image(image[:, :, np.newaxis], affine=source_affine) + +############################################################################# +# Now resample the image +from nilearn.image import resample_img +img_in_mm_space = resample_img(img, target_affine=np.eye(4), + target_shape=(512, 512, 1)) + +target_affine_3x3 = np.eye(3) * 2 +target_affine_4x4 = np.eye(4) * 2 +target_affine_4x4[3, 3] = 1. 
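+
+# A quick illustration of the voxel-to-millimeter mapping described in the
+# docstring above (this sanity check is an addition, not part of the
+# original resampling demo): append 1 to the voxel indices and apply the
+# affine to get homogeneous millimeter coordinates.
+i, j, k = 10, 20, 0
+x, y, z, _ = source_affine.dot(np.array([i, j, k, 1]))
+print("voxel (%d, %d, %d) -> (%.1f, %.1f, %.1f) mm" % (i, j, k, x, y, z))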
+img_3d_affine = resample_img(img, target_affine=target_affine_3x3) +img_4d_affine = resample_img(img, target_affine=target_affine_4x4) +target_affine_mm_space_offset_changed = np.eye(4) +target_affine_mm_space_offset_changed[:3, 3] = \ + img_3d_affine.get_affine()[:3, 3] + +img_3d_affine_in_mm_space = resample_img( + img_3d_affine, + target_affine=target_affine_mm_space_offset_changed, + target_shape=(np.array(img_3d_affine.shape) * 2).astype(int)) + +img_4d_affine_in_mm_space = resample_img( + img_4d_affine, + target_affine=np.eye(4), + target_shape=(np.array(img_4d_affine.shape) * 2).astype(int)) + +############################################################################# +# Finally, visualize +import matplotlib.pyplot as plt +plt.figure() +plt.imshow(image, interpolation="nearest", vmin=0, vmax=vmax) +plt.title("The original data in voxel space") + +plt.figure() +plt.imshow(img_in_mm_space.get_data()[:, :, 0], vmin=0, vmax=vmax) +plt.title("The original data in mm space") + +plt.figure() +plt.imshow(img_3d_affine_in_mm_space.get_data()[:, :, 0], + vmin=0, vmax=vmax) +plt.title("Transformed using a 3x3 affine -\n leads to " + "re-estimation of bounding box") + +plt.figure() +plt.imshow(img_4d_affine_in_mm_space.get_data()[:, :, 0], + vmin=0, vmax=vmax) +plt.title("Transformed using a 4x4 affine -\n Uses affine anchor " + "and estimates bounding box size") + +plt.show() diff --git a/examples/04_manipulating_images/plot_extract_rois_smith_atlas.py b/examples/04_manipulating_images/plot_extract_rois_smith_atlas.py new file mode 100644 index 0000000000..d8914a4bf8 --- /dev/null +++ b/examples/04_manipulating_images/plot_extract_rois_smith_atlas.py @@ -0,0 +1,57 @@ +""" +Regions Extraction of Default Mode Networks using Smith Atlas +============================================================= + +This simple example shows how to extract regions from Smith atlas +resting state networks. 
+ +In particular, we show how Default Mode Network regions are extracted +using :class:`nilearn.regions.RegionExtractor` from regions module +""" + +################################################################################ +# Fetching the smith ICA 10 RSN by importing datasets utilities +from nilearn import datasets + +smith_atlas = datasets.fetch_atlas_smith_2009() +atlas_networks = smith_atlas.rsn10 + +################################################################################ +# Import region extractor to extract atlas networks +from nilearn.regions import RegionExtractor + +# min_region_size in voxel volume mm^3 +extraction = RegionExtractor(atlas_networks, min_region_size=800, + threshold=98, thresholding_strategy='percentile') + +# Just call fit() to execute region extraction procedure +extraction.fit() +regions_img = extraction.regions_img_ + +################################################################################ +# Visualization +# Show region extraction results by importing image & plotting utilities +from nilearn import plotting +from nilearn.image import index_img +from nilearn.plotting import find_xyz_cut_coords + +# Showing region extraction results using 4D maps visualization tool +plotting.plot_prob_atlas(regions_img, display_mode='z', cut_coords=1, + view_type='contours', title="Regions extracted.") + +# To reduce the complexity, we choose to display all the regions +# extracted from network 3 +import numpy as np + +DMN_network = index_img(atlas_networks, 3) +plotting.plot_roi(DMN_network, display_mode='z', cut_coords=1, + title='Network 3') + +regions_indices_network3 = np.where(np.array(extraction.index_) == 3) +for index in regions_indices_network3[0]: + cur_img = index_img(extraction.regions_img_, index) + coords = find_xyz_cut_coords(cur_img) + plotting.plot_roi(cur_img, display_mode='z', cut_coords=coords[2:3], + title="Blob of network3") + +plotting.show() diff --git a/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py b/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py new file mode 100644 index 0000000000..3b57f30469 --- /dev/null +++ b/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py @@ -0,0 +1,74 @@ +""" +Region Extraction using a t-statistical map (3D) +================================================ + +This example shows how to extract regions or separate the regions +from a statistical map. + +We use localizer t-statistic maps from :func:`nilearn.datasets.fetch_localizer_contrasts` +as an input image. + +The idea is to threshold an image to get foreground objects using a +function :func:`nilearn.image.threshold_img` and extract objects using a function +:func:`nilearn.regions.connected_regions`. 
+""" + +################################################################################ +# Fetching t-statistic image of localizer constrasts by loading from datasets +# utilities +from nilearn import datasets + +n_subjects = 3 +localizer_path = datasets.fetch_localizer_contrasts( + ['calculation (auditory cue)'], n_subjects=n_subjects, get_tmaps=True) +tmap_filename = localizer_path.tmaps[0] + +################################################################################ +# Threshold the t-statistic image by importing threshold function +from nilearn.image import threshold_img + +# Two types of strategies can be used from this threshold function +# Type 1: strategy used will be based on scoreatpercentile +threshold_percentile_img = threshold_img(tmap_filename, threshold='97%') + + +# Type 2: threshold strategy used will be based on image intensity +# Here, threshold value should be within the limits i.e. less than max value. +threshold_value_img = threshold_img(tmap_filename, threshold=4.) + +################################################################################ +# Visualization +# Showing thresholding results by importing plotting modules and its utilities +from nilearn import plotting + +# Showing percentile threshold image +plotting.plot_stat_map(threshold_percentile_img, display_mode='z', cut_coords=5, + title='Threshold image with string percentile', colorbar=False) + +# Showing intensity threshold image +plotting.plot_stat_map(threshold_value_img, display_mode='z', cut_coords=5, + title='Threshold image with intensity value', colorbar=False) + +################################################################################ +# Extracting the regions by importing connected regions function +from nilearn.regions import connected_regions + +regions_percentile_img, index = connected_regions(threshold_percentile_img, + min_region_size=1500) + +regions_value_img, index = connected_regions(threshold_value_img, + min_region_size=1500) + +################################################################################ +# Visualizing region extraction results +title = ("ROIs using percentile thresholding. " + "\n Each ROI in same color is an extracted region") +plotting.plot_prob_atlas(regions_percentile_img, anat_img=tmap_filename, + view_type='contours', display_mode='z', + cut_coords=5, title=title) +title = ("ROIs using image intensity thresholding. " + "\n Each ROI in same color is an extracted region") +plotting.plot_prob_atlas(regions_value_img, anat_img=tmap_filename, + view_type='contours', display_mode='z', + cut_coords=5, title=title) +plotting.show() diff --git a/examples/04_manipulating_images/plot_mask_computation.py b/examples/04_manipulating_images/plot_mask_computation.py new file mode 100644 index 0000000000..afd2f019e7 --- /dev/null +++ b/examples/04_manipulating_images/plot_mask_computation.py @@ -0,0 +1,101 @@ +""" +Understanding NiftiMasker and mask computation +================================================== + +In this example, the Nifti masker is used to automatically compute a mask. + +For data that has already been masked, the default strategy works out of +the box. + +However, for raw EPI, as in resting-state time series, we need to use the +'epi' strategy of the NiftiMasker. + +In addition, we show here how to tweak the different parameters of the +underlying mask extraction routine +:func:`nilearn.masking.compute_epi_mask`. 
+ +""" + +import numpy as np + +import nibabel +from nilearn import datasets + + +############################################################################### +# From already masked data +from nilearn.input_data import NiftiMasker +import nilearn.image as image +from nilearn.plotting import plot_roi, show + +# Load Miyawaki dataset +miyawaki_dataset = datasets.fetch_miyawaki2008() + +# print basic information on the dataset +print('First functional nifti image (4D) is located at: %s' % + miyawaki_dataset.func[0]) # 4D data + +miyawaki_filename = miyawaki_dataset.func[0] +miyawaki_mean_img = image.mean_img(miyawaki_filename) + +# This time, we can use the NiftiMasker without changing the default mask +# strategy, as the data has already been masked, and thus lies on a +# homogeneous background + +masker = NiftiMasker() +masker.fit(miyawaki_filename) + +plot_roi(masker.mask_img_, miyawaki_mean_img, + title="Mask from already masked data") + + +############################################################################### +# From raw EPI data + +# Load NYU resting-state dataset +nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1) +nyu_filename = nyu_dataset.func[0] +nyu_img = nibabel.load(nyu_filename) + +# Restrict nyu to 100 frames to speed up computation +from nilearn.image import index_img +nyu_img = index_img(nyu_img, slice(0, 100)) + +# To display the background +nyu_mean_img = image.mean_img(nyu_img) + + +# Simple mask extraction from EPI images +# We need to specify an 'epi' mask_strategy, as this is raw EPI data +masker = NiftiMasker(mask_strategy='epi') +masker.fit(nyu_img) +plot_roi(masker.mask_img_, nyu_mean_img, title='EPI automatic mask') + +# Generate mask with strong opening +masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=10)) +masker.fit(nyu_img) +plot_roi(masker.mask_img_, nyu_mean_img, title='EPI Mask with strong opening') + +# Generate mask with a high lower cutoff +masker = NiftiMasker(mask_strategy='epi', + mask_args=dict(upper_cutoff=.9, lower_cutoff=.8, + opening=False)) +masker.fit(nyu_img) +plot_roi(masker.mask_img_, nyu_mean_img, + title='EPI Mask: high lower_cutoff') + +############################################################################### +# Extract time series + +# trended vs detrended +trended = NiftiMasker(mask_strategy='epi') +detrended = NiftiMasker(mask_strategy='epi', detrend=True) +trended_data = trended.fit_transform(nyu_img) +detrended_data = detrended.fit_transform(nyu_img) + +print("Trended: mean %.2f, std %.2f" % + (np.mean(trended_data), np.std(trended_data))) +print("Detrended: mean %.2f, std %.2f" % + (np.mean(detrended_data), np.std(detrended_data))) + +show() diff --git a/examples/04_manipulating_images/plot_nifti_simple.py b/examples/04_manipulating_images/plot_nifti_simple.py new file mode 100644 index 0000000000..b5ae2749c5 --- /dev/null +++ b/examples/04_manipulating_images/plot_nifti_simple.py @@ -0,0 +1,66 @@ +""" +Simple example of NiftiMasker use +================================== + +Here is a simple example of automatic mask computation using the nifti masker. +The mask is computed and visualized. 
+""" + +########################################################################### +# Retrieve the NYU test-retest dataset + +from nilearn import datasets +nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1) +func_filename = nyu_dataset.func[0] + +# print basic information on the dataset +print('First anatomical nifti image (3D) is at: %s' % nyu_dataset.anat_anon[0]) +print('First functional nifti image (4D) is at: %s' % func_filename) + +########################################################################### +# Compute the mask +from nilearn.input_data import NiftiMasker + +# As this is raw resting-state EPI, the background is noisy and we cannot +# rely on the 'background' masking strategy. We need to use the 'epi' one +nifti_masker = NiftiMasker(standardize=False, mask_strategy='epi', + memory="nilearn_cache", memory_level=2) +nifti_masker.fit(func_filename) +mask_img = nifti_masker.mask_img_ + +########################################################################### +# Visualize the mask +from nilearn.plotting import plot_roi +from nilearn.image.image import mean_img + +# calculate mean image for the background +mean_func_img = mean_img(func_filename) + +plot_roi(mask_img, mean_func_img, display_mode='y', cut_coords=4, title="Mask") + + +########################################################################### +# Preprocess data with the NiftiMasker +nifti_masker.fit(func_filename) +fmri_masked = nifti_masker.transform(func_filename) +# fmri_masked is now a 2D matrix, (n_voxels x n_time_points) + +########################################################################### +# Run an algorithm +from sklearn.decomposition import FastICA +n_components = 20 +ica = FastICA(n_components=n_components, random_state=42) +components_masked = ica.fit_transform(fmri_masked.T).T + +########################################################################### +# Reverse masking, and display the corresponding map +components = nifti_masker.inverse_transform(components_masked) + +# Visualize results +from nilearn.plotting import plot_stat_map, show +from nilearn.image import index_img + +plot_stat_map(index_img(components, 0), mean_func_img, + display_mode='y', cut_coords=4, title="Component 0") + +show() diff --git a/examples/04_manipulating_images/plot_roi_extraction.py b/examples/04_manipulating_images/plot_roi_extraction.py new file mode 100644 index 0000000000..d0068bbff8 --- /dev/null +++ b/examples/04_manipulating_images/plot_roi_extraction.py @@ -0,0 +1,151 @@ +""" +Computing an ROI mask +======================= + +Example showing how a T-test can be performed to compute an ROI +mask, and how simple operations can improve the quality of the mask +obtained. 
+""" + +############################################################################## +# Coordinates of the slice we will be displaying + +coronal = -24 +sagittal = -33 +axial = -17 +cut_coords = (coronal, sagittal, axial) + +############################################################################## +# Load the data + +# Fetch the data files from Internet +from nilearn import datasets +from nilearn.image import new_img_like + + +haxby_dataset = datasets.fetch_haxby(n_subjects=1) + +# print basic information on the dataset +print('First subject anatomical nifti image (3D) located is at: %s' % + haxby_dataset.anat[0]) +print('First subject functional nifti image (4D) is located at: %s' % + haxby_dataset.func[0]) + +# Second, load the labels +import numpy as np + +session_target = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ") +haxby_labels = session_target['labels'] + +import matplotlib.pyplot as plt +from nilearn.input_data import NiftiLabelsMasker + +############################################################################## +# Build a statistical test to find voxels of interest + +# Smooth the data +from nilearn import image +fmri_filename = haxby_dataset.func[0] +fmri_img = image.smooth_img(fmri_filename, fwhm=6) + +# Plot the mean image +from nilearn.plotting import plot_epi +mean_img = image.mean_img(fmri_img) +plot_epi(mean_img, title='Smoothed mean EPI', cut_coords=cut_coords) + +############################################################################## +# Run a T-test for face and houses +from scipy import stats +fmri_data = fmri_img.get_data() +_, p_values = stats.ttest_ind(fmri_data[..., haxby_labels == b'face'], + fmri_data[..., haxby_labels == b'house'], + axis=-1) + +# Use a log scale for p-values +log_p_values = -np.log10(p_values) +log_p_values[np.isnan(log_p_values)] = 0. +log_p_values[log_p_values > 10.] = 10. 
+from nilearn.plotting import plot_stat_map +plot_stat_map(new_img_like(fmri_img, log_p_values), + mean_img, title="p-values", cut_coords=cut_coords) + +############################################################################## +# Build a mask from this statistical map + +# Thresholding +log_p_values[log_p_values < 5] = 0 +plot_stat_map(new_img_like(fmri_img, log_p_values), + mean_img, title='Thresholded p-values', annotate=False, + colorbar=False, cut_coords=cut_coords) + +############################################################################## +# Binarization and intersection with VT mask +# (intersection corresponds to an "AND conjunction") +bin_p_values = (log_p_values != 0) +mask_vt_filename = haxby_dataset.mask_vt[0] +import nibabel +vt = nibabel.load(mask_vt_filename).get_data().astype(bool) +bin_p_values_and_vt = np.logical_and(bin_p_values, vt) + +from nilearn.plotting import plot_roi, show +plot_roi(new_img_like(fmri_img, bin_p_values_and_vt.astype(np.int)), + mean_img, title='Intersection with ventral temporal mask', + cut_coords=cut_coords) + +############################################################################## +# Dilation +from scipy import ndimage +dil_bin_p_values_and_vt = ndimage.binary_dilation(bin_p_values_and_vt) +plot_roi(new_img_like(fmri_img, dil_bin_p_values_and_vt.astype(np.int)), + mean_img, title='Dilated mask', cut_coords=cut_coords, + annotate=False) + + +############################################################################## +# Identification of connected components +labels, n_labels = ndimage.label(dil_bin_p_values_and_vt) +first_roi_data = (labels == 1).astype(np.int) +second_roi_data = (labels == 2).astype(np.int) +plot_roi(new_img_like(fmri_img, first_roi_data), + mean_img, title='Connected components: first ROI') + +plot_roi(new_img_like(fmri_img, second_roi_data), + mean_img, title='Connected components: second ROI') + + +############################################################################## +# Use the new ROIs to extract data maps in both ROIs +masker = NiftiLabelsMasker( + labels_img=new_img_like(fmri_img, labels), + resampling_target=None, + standardize=False, + detrend=False) +masker.fit() +condition_names = list(set(haxby_labels)) +n_cond_img = fmri_data[..., haxby_labels == b'house'].shape[-1] +n_conds = len(condition_names) + +X1, X2 = np.zeros((n_cond_img, n_conds)), np.zeros((n_cond_img, n_conds)) +for i, cond in enumerate(condition_names): + cond_maps = new_img_like( + fmri_img, fmri_data[..., haxby_labels == cond][..., :n_cond_img]) + mask_data = masker.transform(cond_maps) + X1[:, i], X2[:, i] = mask_data[:, 0], mask_data[:, 1] +condition_names[condition_names.index(b'scrambledpix')] = b'scrambled' + + +############################################################################## +# Plot the average in the different condition names +plt.figure(figsize=(15, 7)) +for i in np.arange(2): + plt.subplot(1, 2, i + 1) + plt.boxplot(X1 if i == 0 else X2) + plt.xticks(np.arange(len(condition_names)) + 1, condition_names, + rotation=25) + plt.title('Boxplots of data in ROI%i per condition' % (i + 1)) + +show() + +# save the ROI 'atlas' to a single output Nifti +nibabel.save(new_img_like(fmri_img, labels), + 'mask_atlas.nii') diff --git a/examples/04_manipulating_images/plot_smooth_mean_image.py b/examples/04_manipulating_images/plot_smooth_mean_image.py new file mode 100644 index 0000000000..1ff7dfc5e8 --- /dev/null +++ b/examples/04_manipulating_images/plot_smooth_mean_image.py @@ -0,0 +1,35 @@ +""" +Smoothing an 
image
+==================
+
+Here we smooth a mean EPI image and plot the result.
+
+As we vary the smoothing FWHM, note how we decrease the amount of noise,
+but also lose spatial details. In general, the best amount of smoothing
+for a given analysis depends on the spatial extent of the effects that
+are expected.
+
+"""
+
+from nilearn import datasets, plotting, image
+
+data = datasets.fetch_adhd(n_subjects=1)
+
+# Print basic information on the dataset
+print('First subject functional nifti image (4D) is located at: %s' %
+      data.func[0])
+
+first_epi_file = data.func[0]
+
+# First compute the mean image, from the 4D series of images
+mean_func = image.mean_img(first_epi_file)
+
+# Then we smooth, with a varying amount of smoothing, from none to 20mm
+# by increments of 5mm
+for smoothing in range(0, 25, 5):
+    smoothed_img = image.smooth_img(mean_func, smoothing)
+    plotting.plot_epi(smoothed_img,
+                      title="Smoothing %imm" % smoothing)
+
+
+plotting.show()
diff --git a/examples/05_advanced/README.txt b/examples/05_advanced/README.txt
new file mode 100644
index 0000000000..4d7571adc7
--- /dev/null
+++ b/examples/05_advanced/README.txt
@@ -0,0 +1,2 @@
+Advanced statistical analysis of brain images
+---------------------------------------------
diff --git a/examples/05_advanced/plot_haxby_mass_univariate.py b/examples/05_advanced/plot_haxby_mass_univariate.py
new file mode 100644
index 0000000000..0deee4d027
--- /dev/null
+++ b/examples/05_advanced/plot_haxby_mass_univariate.py
@@ -0,0 +1,173 @@
+"""
+Massively univariate analysis of face vs house recognition
+==========================================================
+
+A permuted Ordinary Least Squares algorithm is run at each voxel in
+order to determine whether or not it behaves differently under a "face
+viewing" condition and a "house viewing" condition.
+We consider the mean image per session and per condition.
+Otherwise, the observations cannot be exchanged at random because
+a time dependence exists between observations within a same session
+(see [1] for more detailed explanations).
+
+The example shows the small differences that exist between
+Bonferroni-corrected p-values and family-wise corrected p-values obtained
+from a permutation test combined with a max-type procedure [2].
+Bonferroni correction is a bit conservative, as revealed by the presence of
+a few false negatives.
+
+References
+----------
+[1] Winkler, A. M. et al. (2014).
+    Permutation inference for the general linear model. Neuroimage.
+
+[2] Anderson, M. J. & Robinson, J. (2001).
+    Permutation tests for linear models.
+    Australian & New Zealand Journal of Statistics, 43(1), 75-88.
+    (http://avesbiodiv.mncn.csic.es/estadistica/permut2.pdf)
+
+"""
+# Author: Virgile Fritsch, , Feb.
2014 + +############################################################################## +# Load Haxby dataset +from nilearn import datasets +haxby_dataset = datasets.fetch_haxby_simple() + +# print basic information on the dataset +print('Mask nifti image (3D) is located at: %s' % haxby_dataset.mask) +print('Functional nifti image (4D) is located at: %s' % haxby_dataset.func[0]) + +############################################################################## +# Mask data +mask_filename = haxby_dataset.mask +from nilearn.input_data import NiftiMasker +nifti_masker = NiftiMasker( + mask_img=mask_filename, + memory='nilearn_cache', memory_level=1) # cache options +func_filename = haxby_dataset.func[0] +fmri_masked = nifti_masker.fit_transform(func_filename) + +############################################################################## +# Restrict to faces and houses +import numpy as np +conditions_encoded, sessions = np.loadtxt( + haxby_dataset.session_target[0]).astype("int").T +conditions = np.recfromtxt(haxby_dataset.conditions_target[0])['f0'] +condition_mask = np.logical_or(conditions == b'face', conditions == b'house') +conditions_encoded = conditions_encoded[condition_mask] +fmri_masked = fmri_masked[condition_mask] + +# We consider the mean image per session and per condition. +# Otherwise, the observations cannot be exchanged at random because +# a time dependence exists between observations within a same session. +n_sessions = np.unique(sessions).size +grouped_fmri_masked = np.empty((2 * n_sessions, # two conditions per session + fmri_masked.shape[1])) +grouped_conditions_encoded = np.empty((2 * n_sessions, 1)) + +for s in range(n_sessions): + session_mask = sessions[condition_mask] == s + session_house_mask = np.logical_and(session_mask, + conditions[condition_mask] == b'house') + session_face_mask = np.logical_and(session_mask, + conditions[condition_mask] == b'face') + grouped_fmri_masked[2 * s] = fmri_masked[session_house_mask].mean(0) + grouped_fmri_masked[2 * s + 1] = fmri_masked[session_face_mask].mean(0) + grouped_conditions_encoded[2 * s] = conditions_encoded[ + session_house_mask][0] + grouped_conditions_encoded[2 * s + 1] = conditions_encoded[ + session_face_mask][0] + +############################################################################## +# Perform massively univariate analysis with permuted OLS +# +# We use a two-sided t-test to compute p-values, but we keep trace of the +# effect sign to add it back at the end and thus observe the signed effect +from nilearn.mass_univariate import permuted_ols +neg_log_pvals, t_scores_original_data, _ = permuted_ols( + grouped_conditions_encoded, grouped_fmri_masked, + # + intercept as a covariate by default + n_perm=10000, two_sided_test=True, + n_jobs=1) # can be changed to use more CPUs +signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data) +signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform( + signed_neg_log_pvals) + +############################################################################## +# scikit-learn F-scores for comparison +# +# F-test does not allow to observe the effect sign (pure two-sided test) +from nilearn._utils.fixes import f_regression +_, pvals_bonferroni = f_regression( + grouped_fmri_masked, + grouped_conditions_encoded) # f_regression implicitly adds intercept +pvals_bonferroni *= fmri_masked.shape[1] +pvals_bonferroni[np.isnan(pvals_bonferroni)] = 1 +pvals_bonferroni[pvals_bonferroni > 1] = 1 +neg_log_pvals_bonferroni = -np.log10(pvals_bonferroni) 
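+# (Illustration, not from the original example: Bonferroni correction just
+# multiplies each p-value by the number of tests, so with 20000 voxels an
+# uncorrected p = 1e-6 becomes a corrected p = 0.02.)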
+neg_log_pvals_bonferroni_unmasked = nifti_masker.inverse_transform( + neg_log_pvals_bonferroni) + +############################################################################## +# Visualization +import matplotlib.pyplot as plt +from nilearn.plotting import plot_stat_map, show + +# Use the fmri mean image as a surrogate of anatomical data +from nilearn import image +mean_fmri_img = image.mean_img(func_filename) + +# Various plotting parameters +z_slice = -17 # plotted slice +from nilearn.image.resampling import coord_transform +affine = signed_neg_log_pvals_unmasked.get_affine() +from scipy import linalg +_, _, k_slice = coord_transform(0, 0, z_slice, + linalg.inv(affine)) +k_slice = np.round(k_slice) + +threshold = -np.log10(0.1) # 10% corrected + +vmax = min(signed_neg_log_pvals.max(), + neg_log_pvals_bonferroni.max()) + +# Plot thresholded p-values map corresponding to F-scores +fig = plt.figure(figsize=(4, 5.5), facecolor='k') + +display = plot_stat_map(neg_log_pvals_bonferroni_unmasked, mean_fmri_img, + threshold=threshold, cmap=plt.cm.RdBu_r, + display_mode='z', cut_coords=[z_slice], + figure=fig, vmax=vmax) + +neg_log_pvals_bonferroni_data = neg_log_pvals_bonferroni_unmasked.get_data() +neg_log_pvals_bonferroni_slice_data = \ + neg_log_pvals_bonferroni_data[..., k_slice] +n_detections = (neg_log_pvals_bonferroni_slice_data > threshold).sum() +title = ('Negative $\log_{10}$ p-values' + '\n(Parametric two-sided F-test' + '\n+ Bonferroni correction)' + '\n%d detections') % n_detections + +display.title(title, y=1.1) + +# Plot permutation p-values map +fig = plt.figure(figsize=(4, 5.5), facecolor='k') + +display = plot_stat_map(signed_neg_log_pvals_unmasked, mean_fmri_img, + threshold=threshold, cmap=plt.cm.RdBu_r, + display_mode='z', cut_coords=[z_slice], + figure=fig, vmax=vmax) + +signed_neg_log_pvals_data = signed_neg_log_pvals_unmasked.get_data() +signed_neg_log_pvals_slice_data = \ + signed_neg_log_pvals_data[..., k_slice, 0] +n_detections = (np.abs(signed_neg_log_pvals_slice_data) > threshold).sum() +title = ('Negative $\log_{10}$ p-values' + '\n(Non-parametric two-sided test' + '\n+ max-type correction)' + '\n%d detections') % n_detections + +display.title(title, y=1.1) + +show() diff --git a/examples/05_advanced/plot_ica_resting_state.py b/examples/05_advanced/plot_ica_resting_state.py new file mode 100644 index 0000000000..0aa7f501c4 --- /dev/null +++ b/examples/05_advanced/plot_ica_resting_state.py @@ -0,0 +1,81 @@ +""" +Multivariate decompositions: Independent component analysis of fMRI +=================================================================== + + +This example is meant to demonstrate nilearn as a low-level tools used to +combine feature extraction with a multivariate decomposition algorithm +for resting state. + +This example is a toy. To apply ICA to resting-state data, it is advised +to look at the example +:ref:`sphx_glr_auto_examples_03_connectivity_plot_canica_resting_state.py`. + +The example here applies the scikit-learn ICA to resting-state data. +Note that following the code in the example, any unsupervised +decomposition model, or other latent-factor models, can be applied to +the data, as the scikit-learn API enables to exchange them as almost +black box (though the relevant parameter for brain maps might no longer +be given by a call to fit_transform). + +""" + +### Load nyu_rest dataset ##################################################### +from nilearn import datasets +# Here we use only 3 subjects to get faster-running code. 
For better
# results, simply increase this number
# XXX: must get the code to run for more than 1 subject
nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1)
func_filename = nyu_dataset.func[0]

# print basic information on the dataset
print('First subject anatomical nifti image (3D) is at: %s' %
      nyu_dataset.anat_anon[0])
print('First subject functional nifti image (4D) is at: %s' %
      nyu_dataset.func[0])  # 4D data

### Preprocess ################################################################
from nilearn.input_data import NiftiMasker

# This is resting-state data: the background has not been removed yet,
# thus we need to use mask_strategy='epi' to compute the mask from the
# EPI images
masker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1,
                     mask_strategy='epi', standardize=False)
data_masked = masker.fit_transform(func_filename)

# Concatenate all the subjects (a no-op here, as only one subject is loaded)
# fmri_data = np.concatenate(data_masked, axis=1)
fmri_data = data_masked


### Apply ICA #################################################################

from sklearn.decomposition import FastICA
n_components = 20
ica = FastICA(n_components=n_components, random_state=42)
components_masked = ica.fit_transform(data_masked.T).T

# Normalize estimated components, for thresholding to make sense
components_masked -= components_masked.mean(axis=0)
components_masked /= components_masked.std(axis=0)
# Threshold
components_masked[components_masked < .8] = 0

# Now invert the masking operation, going back to a full 3D
# representation
component_img = masker.inverse_transform(components_masked)

### Visualize the results #####################################################
# Show some interesting components
from nilearn import image
from nilearn.plotting import plot_stat_map, show

# Use the mean as a background
mean_img = image.mean_img(func_filename)

plot_stat_map(image.index_img(component_img, 5), mean_img)

plot_stat_map(image.index_img(component_img, 12), mean_img)

show()
diff --git a/examples/05_advanced/plot_localizer_mass_univariate_methods.py b/examples/05_advanced/plot_localizer_mass_univariate_methods.py
new file mode 100644
index 0000000000..51631af342
--- /dev/null
+++ b/examples/05_advanced/plot_localizer_mass_univariate_methods.py
@@ -0,0 +1,122 @@
"""
Massively univariate analysis of a motor task from the Localizer dataset
========================================================================

This example shows the results obtained in a massively univariate
analysis performed at the inter-subject level with various methods.
We use the [left button press (auditory cue)] task from the Localizer
dataset and seek an association between the contrast values and a variate
that measures the speed of pseudo-word reading. No confounding variate
is included in the model.

1. A standard Anova is performed. Data smoothed at 5mm FWHM are used.

2. A permuted Ordinary Least Squares algorithm is run at each voxel. Data
   smoothed at 5mm FWHM are used.


"""
# Author: Virgile Fritsch, , May.
2014 +import numpy as np +from scipy import linalg +import matplotlib.pyplot as plt +from nilearn import datasets +from nilearn.input_data import NiftiMasker +from nilearn.mass_univariate import permuted_ols + +### Load Localizer contrast ################################################### +n_samples = 94 +localizer_dataset = datasets.fetch_localizer_contrasts( + ['left button press (auditory cue)'], n_subjects=n_samples) + +# print basic information on the dataset +print('First contrast nifti image (3D) is located at: %s' % + localizer_dataset.cmaps[0]) + +tested_var = localizer_dataset.ext_vars['pseudo'] +# Quality check / Remove subjects with bad tested variate +mask_quality_check = np.where(tested_var != b'None')[0] +n_samples = mask_quality_check.size +contrast_map_filenames = [localizer_dataset.cmaps[i] + for i in mask_quality_check] +tested_var = tested_var[mask_quality_check].astype(float).reshape((-1, 1)) +print("Actual number of subjects after quality check: %d" % n_samples) + +### Mask data ################################################################# +nifti_masker = NiftiMasker( + smoothing_fwhm=5, + memory='nilearn_cache', memory_level=1) # cache options +fmri_masked = nifti_masker.fit_transform(contrast_map_filenames) + +### Anova (parametric F-scores) ############################################### +from nilearn._utils.fixes import f_regression +_, pvals_anova = f_regression(fmri_masked, tested_var, center=True) +pvals_anova *= fmri_masked.shape[1] +pvals_anova[np.isnan(pvals_anova)] = 1 +pvals_anova[pvals_anova > 1] = 1 +neg_log_pvals_anova = - np.log10(pvals_anova) +neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform( + neg_log_pvals_anova) + +### Perform massively univariate analysis with permuted OLS ################### +neg_log_pvals_permuted_ols, _, _ = permuted_ols( + tested_var, fmri_masked, + model_intercept=True, + n_perm=5000, # 5,000 for the sake of time. 
Ideally, this should be 10,000
    n_jobs=1)  # can be changed to use more CPUs
neg_log_pvals_permuted_ols_unmasked = nifti_masker.inverse_transform(
    np.ravel(neg_log_pvals_permuted_ols))

### Visualization #############################################################
from nilearn.plotting import plot_stat_map, show

# Various plotting parameters
z_slice = 12  # plotted slice
from nilearn.image.resampling import coord_transform
affine = neg_log_pvals_anova_unmasked.get_affine()
_, _, k_slice = coord_transform(0, 0, z_slice,
                                linalg.inv(affine))
k_slice = np.round(k_slice)

threshold = - np.log10(0.1)  # 10% corrected
vmax = min(np.amax(neg_log_pvals_permuted_ols),
           np.amax(neg_log_pvals_anova))

# Plot Anova p-values
fig = plt.figure(figsize=(5, 7), facecolor='k')

display = plot_stat_map(neg_log_pvals_anova_unmasked,
                        threshold=threshold, cmap=plt.cm.autumn,
                        display_mode='z', cut_coords=[z_slice],
                        figure=fig, vmax=vmax, black_bg=True)

neg_log_pvals_anova_data = neg_log_pvals_anova_unmasked.get_data()
neg_log_pvals_anova_slice_data = \
    neg_log_pvals_anova_data[..., k_slice]
n_detections = (neg_log_pvals_anova_slice_data > threshold).sum()
title = ('Negative $\log_{10}$ p-values'
         '\n(Parametric + Bonferroni correction)'
         '\n%d detections') % n_detections

display.title(title, y=1.2)

# Plot permuted OLS p-values
fig = plt.figure(figsize=(5, 7), facecolor='k')

display = plot_stat_map(neg_log_pvals_permuted_ols_unmasked,
                        threshold=threshold, cmap=plt.cm.autumn,
                        display_mode='z', cut_coords=[z_slice],
                        figure=fig, vmax=vmax, black_bg=True)

neg_log_pvals_permuted_ols_data = \
    neg_log_pvals_permuted_ols_unmasked.get_data()
neg_log_pvals_permuted_ols_slice_data = \
    neg_log_pvals_permuted_ols_data[..., k_slice]
n_detections = (neg_log_pvals_permuted_ols_slice_data > threshold).sum()
title = ('Negative $\log_{10}$ p-values'
         '\n(Non-parametric + max-type correction)'
         '\n%d detections') % n_detections

display.title(title, y=1.2)

show()
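
[Editor's note: the permuted OLS analysis above can be hard to picture from
the nilearn call alone. Below is a minimal numpy-only sketch of the
underlying permutation scheme on synthetic data; it only illustrates the
idea, it is not nilearn's implementation, and every name in it is invented.]

import numpy as np

rng = np.random.RandomState(0)
tested_var_toy = rng.randn(40, 1)                      # explanatory variate
target_toy = 0.5 * tested_var_toy + rng.randn(40, 1)   # one "voxel"

def t_stat_toy(x, y):
    # t statistic of the slope of y regressed on x (intercept included)
    x = x - x.mean()
    beta = (x * y).sum() / (x ** 2).sum()
    resid = y - y.mean() - beta * x
    sigma2 = (resid ** 2).sum() / (len(y) - 2)
    return beta / np.sqrt(sigma2 / (x ** 2).sum())

t_orig = t_stat_toy(tested_var_toy, target_toy)
# Shuffling the rows of the tested variate builds the null distribution
t_null = np.array([t_stat_toy(rng.permutation(tested_var_toy), target_toy)
                   for _ in range(1000)])
p_perm = ((np.abs(t_null) >= np.abs(t_orig)).sum() + 1.) / (1000 + 1.)
print('permutation -log10 p-value: %.2f' % -np.log10(p_perm))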
From 92eac759cb88af60b26bf49b89033137276bf0a0 Mon Sep 17 00:00:00 2001
From: KamalakerDadi
Date: Mon, 11 Jan 2016 16:06:09 +0100
Subject: [PATCH 0043/1925] Removing files according to the 'change
 directories' review comment

---
 examples/advanced/README.txt                  |   2 -
 .../advanced/plot_haxby_mass_univariate.py    | 173 ------------------
 examples/advanced/plot_ica_resting_state.py   |  81 --------
 .../plot_localizer_mass_univariate_methods.py | 122 ------------
 .../plot_extract_regions_canica_maps.py       | 149 ---------------
 examples/manipulating_images/README.txt       |   4 -
 .../plot_affine_transformation.py             | 128 -------------
 .../plot_extract_rois_smith_atlas.py          |  57 ------
 .../plot_extract_rois_statistical_maps.py     |  74 --------
 .../plot_mask_computation.py                  | 101 ----------
 .../manipulating_images/plot_nifti_simple.py  |  66 -------
 .../plot_roi_extraction.py                    | 151 ---------------
 .../plot_smooth_mean_image.py                 |  35 ----
 examples/plotting/README.txt                  |   4 -
 examples/plotting/plot_atlas.py               |  17 --
 examples/plotting/plot_demo_glass_brain.py    |  29 ---
 .../plot_demo_glass_brain_extensive.py        |  38 ----
 examples/plotting/plot_demo_more_plotting.py  |  94 ----------
 examples/plotting/plot_demo_plotting.py       |  62 -------
 examples/plotting/plot_dim_plotting.py        |  54 ------
 examples/plotting/plot_haxby_masks.py         |  55 ------
 examples/plotting/plot_overlay.py             |  41 -----
 examples/plotting/plot_prob_atlas.py          |  54 ------
 examples/plotting/plot_visualization.py       |  60 ------
 24 files changed, 1651 deletions(-)
 delete mode 100644 examples/advanced/README.txt
 delete mode 100644 examples/advanced/plot_haxby_mass_univariate.py
 delete mode 100644 examples/advanced/plot_ica_resting_state.py
 delete mode 100644 examples/advanced/plot_localizer_mass_univariate_methods.py
 delete mode 100644 examples/connectivity/plot_extract_regions_canica_maps.py
 delete mode 100644 examples/manipulating_images/README.txt
 delete mode 100644 examples/manipulating_images/plot_affine_transformation.py
 delete mode 100644 examples/manipulating_images/plot_extract_rois_smith_atlas.py
 delete mode 100644 examples/manipulating_images/plot_extract_rois_statistical_maps.py
 delete mode 100644 examples/manipulating_images/plot_mask_computation.py
 delete mode 100644 examples/manipulating_images/plot_nifti_simple.py
 delete mode 100644 examples/manipulating_images/plot_roi_extraction.py
 delete mode 100644 examples/manipulating_images/plot_smooth_mean_image.py
 delete mode 100644 examples/plotting/README.txt
 delete mode 100644 examples/plotting/plot_atlas.py
 delete mode 100644 examples/plotting/plot_demo_glass_brain.py
 delete mode 100644 examples/plotting/plot_demo_glass_brain_extensive.py
 delete mode 100644 examples/plotting/plot_demo_more_plotting.py
 delete mode 100644 examples/plotting/plot_demo_plotting.py
 delete mode 100644 examples/plotting/plot_dim_plotting.py
 delete mode 100644 examples/plotting/plot_haxby_masks.py
 delete mode 100644 examples/plotting/plot_overlay.py
 delete mode 100644 examples/plotting/plot_prob_atlas.py
 delete mode 100644 examples/plotting/plot_visualization.py
diff --git a/examples/advanced/README.txt b/examples/advanced/README.txt
deleted file mode 100644
index 4d7571adc7..0000000000
--- a/examples/advanced/README.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Advanced statistical analysis of brain images
----------------------------------------------
diff --git a/examples/advanced/plot_haxby_mass_univariate.py b/examples/advanced/plot_haxby_mass_univariate.py
deleted file mode 100644
index 0deee4d027..0000000000
--- a/examples/advanced/plot_haxby_mass_univariate.py
+++ /dev/null
@@ -1,173 +0,0 @@
-"""
-Massively univariate analysis of face vs house recognition
-==========================================================
-
-A permuted Ordinary Least Squares algorithm is run at each voxel in
-order to determine whether or not it behaves differently under a "face
-viewing" condition and a "house viewing" condition.
-We consider the mean image per session and per condition.
-Otherwise, the observations cannot be exchanged at random because
-a time dependence exists between observations within the same session
-(see [1] for more detailed explanations).
-
-The example shows the small differences that exist between
-Bonferroni-corrected p-values and family-wise corrected p-values obtained
-from a permutation test combined with a max-type procedure [2].
-Bonferroni correction is a bit conservative, as revealed by the presence of
-a few false negatives.
-
-References
-----------
-[1] Winkler, A. M. et al. (2014).
-    Permutation inference for the general linear model. Neuroimage.
-
-[2] Anderson, M. J. & Robinson, J. (2001).
-    Permutation tests for linear models.
-    Australian & New Zealand Journal of Statistics, 43(1), 75-88.
-    (http://avesbiodiv.mncn.csic.es/estadistica/permut2.pdf)
-
-"""
-# Author: Virgile Fritsch, , Feb.
2014 - -############################################################################## -# Load Haxby dataset -from nilearn import datasets -haxby_dataset = datasets.fetch_haxby_simple() - -# print basic information on the dataset -print('Mask nifti image (3D) is located at: %s' % haxby_dataset.mask) -print('Functional nifti image (4D) is located at: %s' % haxby_dataset.func[0]) - -############################################################################## -# Mask data -mask_filename = haxby_dataset.mask -from nilearn.input_data import NiftiMasker -nifti_masker = NiftiMasker( - mask_img=mask_filename, - memory='nilearn_cache', memory_level=1) # cache options -func_filename = haxby_dataset.func[0] -fmri_masked = nifti_masker.fit_transform(func_filename) - -############################################################################## -# Restrict to faces and houses -import numpy as np -conditions_encoded, sessions = np.loadtxt( - haxby_dataset.session_target[0]).astype("int").T -conditions = np.recfromtxt(haxby_dataset.conditions_target[0])['f0'] -condition_mask = np.logical_or(conditions == b'face', conditions == b'house') -conditions_encoded = conditions_encoded[condition_mask] -fmri_masked = fmri_masked[condition_mask] - -# We consider the mean image per session and per condition. -# Otherwise, the observations cannot be exchanged at random because -# a time dependence exists between observations within a same session. -n_sessions = np.unique(sessions).size -grouped_fmri_masked = np.empty((2 * n_sessions, # two conditions per session - fmri_masked.shape[1])) -grouped_conditions_encoded = np.empty((2 * n_sessions, 1)) - -for s in range(n_sessions): - session_mask = sessions[condition_mask] == s - session_house_mask = np.logical_and(session_mask, - conditions[condition_mask] == b'house') - session_face_mask = np.logical_and(session_mask, - conditions[condition_mask] == b'face') - grouped_fmri_masked[2 * s] = fmri_masked[session_house_mask].mean(0) - grouped_fmri_masked[2 * s + 1] = fmri_masked[session_face_mask].mean(0) - grouped_conditions_encoded[2 * s] = conditions_encoded[ - session_house_mask][0] - grouped_conditions_encoded[2 * s + 1] = conditions_encoded[ - session_face_mask][0] - -############################################################################## -# Perform massively univariate analysis with permuted OLS -# -# We use a two-sided t-test to compute p-values, but we keep trace of the -# effect sign to add it back at the end and thus observe the signed effect -from nilearn.mass_univariate import permuted_ols -neg_log_pvals, t_scores_original_data, _ = permuted_ols( - grouped_conditions_encoded, grouped_fmri_masked, - # + intercept as a covariate by default - n_perm=10000, two_sided_test=True, - n_jobs=1) # can be changed to use more CPUs -signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data) -signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform( - signed_neg_log_pvals) - -############################################################################## -# scikit-learn F-scores for comparison -# -# F-test does not allow to observe the effect sign (pure two-sided test) -from nilearn._utils.fixes import f_regression -_, pvals_bonferroni = f_regression( - grouped_fmri_masked, - grouped_conditions_encoded) # f_regression implicitly adds intercept -pvals_bonferroni *= fmri_masked.shape[1] -pvals_bonferroni[np.isnan(pvals_bonferroni)] = 1 -pvals_bonferroni[pvals_bonferroni > 1] = 1 -neg_log_pvals_bonferroni = -np.log10(pvals_bonferroni) 
-neg_log_pvals_bonferroni_unmasked = nifti_masker.inverse_transform( - neg_log_pvals_bonferroni) - -############################################################################## -# Visualization -import matplotlib.pyplot as plt -from nilearn.plotting import plot_stat_map, show - -# Use the fmri mean image as a surrogate of anatomical data -from nilearn import image -mean_fmri_img = image.mean_img(func_filename) - -# Various plotting parameters -z_slice = -17 # plotted slice -from nilearn.image.resampling import coord_transform -affine = signed_neg_log_pvals_unmasked.get_affine() -from scipy import linalg -_, _, k_slice = coord_transform(0, 0, z_slice, - linalg.inv(affine)) -k_slice = np.round(k_slice) - -threshold = -np.log10(0.1) # 10% corrected - -vmax = min(signed_neg_log_pvals.max(), - neg_log_pvals_bonferroni.max()) - -# Plot thresholded p-values map corresponding to F-scores -fig = plt.figure(figsize=(4, 5.5), facecolor='k') - -display = plot_stat_map(neg_log_pvals_bonferroni_unmasked, mean_fmri_img, - threshold=threshold, cmap=plt.cm.RdBu_r, - display_mode='z', cut_coords=[z_slice], - figure=fig, vmax=vmax) - -neg_log_pvals_bonferroni_data = neg_log_pvals_bonferroni_unmasked.get_data() -neg_log_pvals_bonferroni_slice_data = \ - neg_log_pvals_bonferroni_data[..., k_slice] -n_detections = (neg_log_pvals_bonferroni_slice_data > threshold).sum() -title = ('Negative $\log_{10}$ p-values' - '\n(Parametric two-sided F-test' - '\n+ Bonferroni correction)' - '\n%d detections') % n_detections - -display.title(title, y=1.1) - -# Plot permutation p-values map -fig = plt.figure(figsize=(4, 5.5), facecolor='k') - -display = plot_stat_map(signed_neg_log_pvals_unmasked, mean_fmri_img, - threshold=threshold, cmap=plt.cm.RdBu_r, - display_mode='z', cut_coords=[z_slice], - figure=fig, vmax=vmax) - -signed_neg_log_pvals_data = signed_neg_log_pvals_unmasked.get_data() -signed_neg_log_pvals_slice_data = \ - signed_neg_log_pvals_data[..., k_slice, 0] -n_detections = (np.abs(signed_neg_log_pvals_slice_data) > threshold).sum() -title = ('Negative $\log_{10}$ p-values' - '\n(Non-parametric two-sided test' - '\n+ max-type correction)' - '\n%d detections') % n_detections - -display.title(title, y=1.1) - -show() diff --git a/examples/advanced/plot_ica_resting_state.py b/examples/advanced/plot_ica_resting_state.py deleted file mode 100644 index 69a994df73..0000000000 --- a/examples/advanced/plot_ica_resting_state.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -Multivariate decompositions: Independent component analysis of fMRI -=================================================================== - - -This example is meant to demonstrate nilearn as a low-level tools used to -combine feature extraction with a multivariate decomposition algorithm -for resting state. - -This example is a toy. To apply ICA to resting-state data, it is advised -to look at the example -:ref:`sphx_glr_auto_examples_connectivity_plot_canica_resting_state.py`. - -The example here applies the scikit-learn ICA to resting-state data. -Note that following the code in the example, any unsupervised -decomposition model, or other latent-factor models, can be applied to -the data, as the scikit-learn API enables to exchange them as almost -black box (though the relevant parameter for brain maps might no longer -be given by a call to fit_transform). - -""" - -### Load nyu_rest dataset ##################################################### -from nilearn import datasets -# Here we use only 3 subjects to get faster-running code. 
For better -# results, simply increase this number -# XXX: must get the code to run for more than 1 subject -nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1) -func_filename = nyu_dataset.func[0] - -# print basic information on the dataset -print('First subject anatomical nifti image (3D) is at: %s' % - nyu_dataset.anat_anon[0]) -print('First subject functional nifti image (4D) is at: %s' % - nyu_dataset.func[0]) # 4D data - -### Preprocess ################################################################ -from nilearn.input_data import NiftiMasker - -# This is resting-state data: the background has not been removed yet, -# thus we need to use mask_strategy='epi' to compute the mask from the -# EPI images -masker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1, - mask_strategy='epi', standardize=False) -data_masked = masker.fit_transform(func_filename) - -# Concatenate all the subjects -# fmri_data = np.concatenate(data_masked, axis=1) -fmri_data = data_masked - - -### Apply ICA ################################################################# - -from sklearn.decomposition import FastICA -n_components = 20 -ica = FastICA(n_components=n_components, random_state=42) -components_masked = ica.fit_transform(data_masked.T).T - -# Normalize estimated components, for thresholding to make sense -components_masked -= components_masked.mean(axis=0) -components_masked /= components_masked.std(axis=0) -# Threshold -components_masked[components_masked < .8] = 0 - -# Now invert the masking operation, going back to a full 3D -# representation -component_img = masker.inverse_transform(components_masked) - -### Visualize the results ##################################################### -# Show some interesting components -from nilearn import image -from nilearn.plotting import plot_stat_map, show - -# Use the mean as a background -mean_img = image.mean_img(func_filename) - -plot_stat_map(image.index_img(component_img, 5), mean_img) - -plot_stat_map(image.index_img(component_img, 12), mean_img) - -show() diff --git a/examples/advanced/plot_localizer_mass_univariate_methods.py b/examples/advanced/plot_localizer_mass_univariate_methods.py deleted file mode 100644 index 51631af342..0000000000 --- a/examples/advanced/plot_localizer_mass_univariate_methods.py +++ /dev/null @@ -1,122 +0,0 @@ -""" -Massively univariate analysis of a motor task from the Localizer dataset -======================================================================== - -This example shows the results obtained in a massively univariate -analysis performed at the inter-subject level with various methods. -We use the [left button press (auditory cue)] task from the Localizer -dataset and seek association between the contrast values and a variate -that measures the speed of pseudo-word reading. No confounding variate -is included in the model. - -1. A standard Anova is performed. Data smoothed at 5 voxels FWHM are used. - -2. A permuted Ordinary Least Squares algorithm is run at each voxel. Data - smoothed at 5 voxels FWHM are used. - - -""" -# Author: Virgile Fritsch, , May. 
2014 -import numpy as np -from scipy import linalg -import matplotlib.pyplot as plt -from nilearn import datasets -from nilearn.input_data import NiftiMasker -from nilearn.mass_univariate import permuted_ols - -### Load Localizer contrast ################################################### -n_samples = 94 -localizer_dataset = datasets.fetch_localizer_contrasts( - ['left button press (auditory cue)'], n_subjects=n_samples) - -# print basic information on the dataset -print('First contrast nifti image (3D) is located at: %s' % - localizer_dataset.cmaps[0]) - -tested_var = localizer_dataset.ext_vars['pseudo'] -# Quality check / Remove subjects with bad tested variate -mask_quality_check = np.where(tested_var != b'None')[0] -n_samples = mask_quality_check.size -contrast_map_filenames = [localizer_dataset.cmaps[i] - for i in mask_quality_check] -tested_var = tested_var[mask_quality_check].astype(float).reshape((-1, 1)) -print("Actual number of subjects after quality check: %d" % n_samples) - -### Mask data ################################################################# -nifti_masker = NiftiMasker( - smoothing_fwhm=5, - memory='nilearn_cache', memory_level=1) # cache options -fmri_masked = nifti_masker.fit_transform(contrast_map_filenames) - -### Anova (parametric F-scores) ############################################### -from nilearn._utils.fixes import f_regression -_, pvals_anova = f_regression(fmri_masked, tested_var, center=True) -pvals_anova *= fmri_masked.shape[1] -pvals_anova[np.isnan(pvals_anova)] = 1 -pvals_anova[pvals_anova > 1] = 1 -neg_log_pvals_anova = - np.log10(pvals_anova) -neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform( - neg_log_pvals_anova) - -### Perform massively univariate analysis with permuted OLS ################### -neg_log_pvals_permuted_ols, _, _ = permuted_ols( - tested_var, fmri_masked, - model_intercept=True, - n_perm=5000, # 5,000 for the sake of time. 
Ideally, this should be 10,000
-    n_jobs=1)  # can be changed to use more CPUs
-neg_log_pvals_permuted_ols_unmasked = nifti_masker.inverse_transform(
-    np.ravel(neg_log_pvals_permuted_ols))
-
-### Visualization #############################################################
-from nilearn.plotting import plot_stat_map, show
-
-# Various plotting parameters
-z_slice = 12  # plotted slice
-from nilearn.image.resampling import coord_transform
-affine = neg_log_pvals_anova_unmasked.get_affine()
-_, _, k_slice = coord_transform(0, 0, z_slice,
-                                linalg.inv(affine))
-k_slice = np.round(k_slice)
-
-threshold = - np.log10(0.1)  # 10% corrected
-vmax = min(np.amax(neg_log_pvals_permuted_ols),
-           np.amax(neg_log_pvals_anova))
-
-# Plot Anova p-values
-fig = plt.figure(figsize=(5, 7), facecolor='k')
-
-display = plot_stat_map(neg_log_pvals_anova_unmasked,
-                        threshold=threshold, cmap=plt.cm.autumn,
-                        display_mode='z', cut_coords=[z_slice],
-                        figure=fig, vmax=vmax, black_bg=True)
-
-neg_log_pvals_anova_data = neg_log_pvals_anova_unmasked.get_data()
-neg_log_pvals_anova_slice_data = \
-    neg_log_pvals_anova_data[..., k_slice]
-n_detections = (neg_log_pvals_anova_slice_data > threshold).sum()
-title = ('Negative $\log_{10}$ p-values'
-         '\n(Parametric + Bonferroni correction)'
-         '\n%d detections') % n_detections
-
-display.title(title, y=1.2)
-
-# Plot permuted OLS p-values
-fig = plt.figure(figsize=(5, 7), facecolor='k')
-
-display = plot_stat_map(neg_log_pvals_permuted_ols_unmasked,
-                        threshold=threshold, cmap=plt.cm.autumn,
-                        display_mode='z', cut_coords=[z_slice],
-                        figure=fig, vmax=vmax, black_bg=True)
-
-neg_log_pvals_permuted_ols_data = \
-    neg_log_pvals_permuted_ols_unmasked.get_data()
-neg_log_pvals_permuted_ols_slice_data = \
-    neg_log_pvals_permuted_ols_data[..., k_slice]
-n_detections = (neg_log_pvals_permuted_ols_slice_data > threshold).sum()
-title = ('Negative $\log_{10}$ p-values'
-         '\n(Non-parametric + max-type correction)'
-         '\n%d detections') % n_detections
-
-display.title(title, y=1.2)
-
-show()
diff --git a/examples/connectivity/plot_extract_regions_canica_maps.py b/examples/connectivity/plot_extract_regions_canica_maps.py
deleted file mode 100644
index 023131c453..0000000000
--- a/examples/connectivity/plot_extract_regions_canica_maps.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""
-Regions extraction using Canonical ICA maps and functional connectomes
-======================================================================
-
-This example shows how to use :class:`nilearn.regions.RegionExtractor`
-to extract connected brain regions from whole brain ICA maps and
-use them to estimate a connectome.
-
-We use 20 resting-state ADHD functional datasets from :func:`nilearn.datasets.fetch_adhd`
-and :class:`nilearn.decomposition.CanICA` for whole brain ICA maps.
-
-Please see the related documentation of :class:`nilearn.regions.RegionExtractor`
-for more details.
-"""
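
[Editor's note: the "connectome" estimated in the example below ultimately
boils down to correlating region time series. A numpy-only sketch on
synthetic data (illustrative only, with invented names; nilearn's
ConnectivityMeasure additionally supports other kinds of connectivity):]

import numpy as np

rng = np.random.RandomState(0)
region_series = rng.randn(150, 6)          # 150 timepoints for 6 toy regions
connectome = np.corrcoef(region_series.T)  # 6 x 6 correlation matrix
print(connectome.shape, connectome.diagonal())  # the diagonal is all ones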
-
-################################################################################
-# Fetching ADHD resting state functional datasets by loading from datasets
-# utilities
-from nilearn import datasets
-
-adhd_dataset = datasets.fetch_adhd(n_subjects=20)
-func_filenames = adhd_dataset.func
-confounds = adhd_dataset.confounds
-
-################################################################################
-# Canonical ICA decomposition of functional datasets by importing CanICA from
-# decomposition module
-from nilearn.decomposition import CanICA
-
-# Initialize canica parameters
-canica = CanICA(n_components=5, smoothing_fwhm=6.,
-                memory="nilearn_cache", memory_level=2,
-                random_state=0)
-# Fit to the data
-canica.fit(func_filenames)
-# ICA maps
-components_img = canica.masker_.inverse_transform(canica.components_)
-
-# Visualization
-# Show ICA maps by using plotting utilities
-from nilearn import plotting
-
-plotting.plot_prob_atlas(components_img, view_type='filled_contours',
-                         title='ICA components')
-
-################################################################################
-# Regions from the ICA maps, and then time-series signals from those regions,
-# can both be extracted with RegionExtractor from the regions module.
-# threshold=0.5 keeps a nominal amount of the nonzero voxels across all
-# maps; lowering the threshold keeps fewer, more intense voxels.
-from nilearn.regions import RegionExtractor
-
-extractor = RegionExtractor(components_img, threshold=0.5,
-                            thresholding_strategy='ratio_n_voxels',
-                            extractor='local_regions',
-                            standardize=True, min_region_size=1350)
-# Just call fit() to process for regions extraction
-extractor.fit()
-# Extracted regions are stored in regions_img_
-regions_extracted_img = extractor.regions_img_
-# Each region index is stored in index_
-regions_index = extractor.index_
-# Total number of regions extracted
-n_regions_extracted = regions_extracted_img.shape[-1]
-
-# Visualization
-# Show region extraction results
-title = ('%d regions are extracted from %d ICA components.'
-         '\nEach separate color indicates one extracted region'
-         % (n_regions_extracted, 5))
-plotting.plot_prob_atlas(regions_extracted_img, view_type='filled_contours',
-                         title=title)
-
-################################################################################
-# Computing correlation coefficients
-# First we extract each subject's time-series signals, then estimate
-# correlation matrices on those signals.
-# To extract time-series signals, we call transform() on the RegionExtractor
-# object for each subject's functional data stored in func_filenames.
-# To estimate correlation matrices we import connectome utilities from nilearn -from nilearn.connectome import ConnectivityMeasure - -correlations = [] -# Initializing ConnectivityMeasure object with kind='correlation' -connectome_measure = ConnectivityMeasure(kind='correlation') -for filename, confound in zip(func_filenames, confounds): - # call transform from RegionExtractor object to extract timeseries signals - timeseries_each_subject = extractor.transform(filename, confounds=confound) - # call fit_transform from ConnectivityMeasure object - correlation = connectome_measure.fit_transform([timeseries_each_subject]) - # saving each subject correlation to correlations - correlations.append(correlation) - -# Mean of all correlations -import numpy as np - -mean_correlations = np.mean(correlations, axis=0).reshape(n_regions_extracted, - n_regions_extracted) - -# Visualization -# Showing mean correlation results -# Import image utilities in utilising to operate on 4th dimension -import matplotlib.pyplot as plt -from nilearn import image - -regions_imgs = image.iter_img(regions_extracted_img) -coords_connectome = [plotting.find_xyz_cut_coords(img) for img in regions_imgs] -title = 'Correlation interactions between %d regions' % n_regions_extracted -plt.figure() -plt.imshow(mean_correlations, interpolation="nearest", - vmax=1, vmin=-1, cmap=plt.cm.bwr) -plt.colorbar() -plt.title(title) -plotting.plot_connectome(mean_correlations, coords_connectome, - edge_threshold='90%', title=title) - -################################################################################ -# Showing Default Mode Network (DMN) regions before and after region extraction -# by manually identifying the index of DMN in ICA decomposed components -from nilearn._utils.compat import izip - -# First we plot DMN without region extraction, interested in only index=[3] -img = image.index_img(components_img, 3) -coords = plotting.find_xyz_cut_coords(img) -display = plotting.plot_stat_map(img, cut_coords=((0, -52, 29)), - colorbar=False, title='ICA map: DMN mode') - -# Now, we plot DMN after region extraction to show that connected regions are -# nicely separated. Each brain extracted region is indicated with separate color - -# For this, we take the indices of the all regions extracted related to original -# ICA map 3. -regions_indices_of_map3 = np.where(np.array(regions_index) == 3) - -display = plotting.plot_anat(cut_coords=((0, -52, 29)), title='Extracted regions in DMN mode') - -# Now add as an overlay by looping over all the regions for right -# temporoparietal function, posterior cingulate cortex, medial prefrontal -# cortex, left temporoparietal junction -color_list = [[0., 1., 0.29, 1.], [0., 1., 0.54, 1.], - [0., 1., 0.78, 1.], [0., 0.96, 1., 1.], - [0., 0.73, 1., 1.], [0., 0.47, 1., 1.], - [0., 0.22, 1., 1.], [0.01, 0., 1., 1.], - [0.26, 0., 1., 1.]] -for each_index_of_map3, color in izip(regions_indices_of_map3[0], color_list): - display.add_overlay(image.index_img(regions_extracted_img, each_index_of_map3), - cmap=plotting.cm.alpha_cmap(color)) - -plotting.show() diff --git a/examples/manipulating_images/README.txt b/examples/manipulating_images/README.txt deleted file mode 100644 index 3e9090f5fa..0000000000 --- a/examples/manipulating_images/README.txt +++ /dev/null @@ -1,4 +0,0 @@ -Manipulating brain image volumes --------------------------------- - -See :ref:`data_manipulation` for more details. 
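
[Editor's note: the next removed example, plot_affine_transformation.py,
explains how a Nifti affine maps voxel indices to millimeter coordinates.
A minimal numpy sketch of exactly that mapping, with invented values:]

import numpy as np

affine = np.eye(4)
affine[0, 0] = 2.                # 2mm voxels along the first axis
affine[:3, 3] = [10., -5., 0.]   # offset of the grid origin, in mm
ijk1 = np.array([3, 4, 5, 1])    # voxel (i, j, k) in homogeneous coordinates
x, y, z, _ = affine.dot(ijk1)    # millimeter position of that voxel
print(x, y, z)                   # 16.0 -1.0 5.0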
diff --git a/examples/manipulating_images/plot_affine_transformation.py b/examples/manipulating_images/plot_affine_transformation.py
deleted file mode 100644
index d3cd88d2e0..0000000000
--- a/examples/manipulating_images/plot_affine_transformation.py
+++ /dev/null
@@ -1,128 +0,0 @@
-"""
-Visualization of affine resamplings
-===================================
-
-This example shows how an affine resampling works.
-
-A Nifti image contains, along with its 3D or 4D data content, a 4x4 matrix
-encoding an affine transformation that maps the data array into millimeter
-space. If (i, j, k) encodes an integer position (voxel) within the data array,
-then adding 1 as a fourth entry, (i, j, k, 1), and multiplying by the affine
-matrix yields (x, y, z, 1), a 4-vector containing the millimeter position of
-the voxel.
-
-The resampling procedure in `resample_img` can attribute a new affine matrix
-and a new shape to your Nifti image while keeping its representation in
-millimeter space exactly the same (up to sampling error and possible
-clipping).
-
-This example shows a 2D image in voxel space, and the position of the data in
-millimeter space, as encoded by the affine matrix. The image is then resampled
-in 3 ways and displayed in millimeter space.
-
-1) 4x4 affine matrix and target shape given
-2) 3x3 transformation matrix (only new voxel axes, no offset)
-   given and no shape given
-3) 4x4 affine matrix given and no shape given
-
-While 1) needs no further explanation (it returns an image exactly as
-specified, with a new view on the data in millimeter space), 2) and 3) are
-missing some specification, which is subsequently inferred by `resample_img`:
-If the affine offset is missing (3x3 transformation, case 2), then the new
-image will be the closest bounding box possible around the data along the
-new axes. If the affine offset is given, but no shape provided, the
-resulting image will be the closest bounding box around the union of the
-data points and the affine offset.
-
-Note that specifying a shape without specifying a 3x3 transformation matrix
-causes an error message, because `resample_img` will not know where to start
-the bounding box (there is no intelligent way of inferring this given the
-bounding box shape).
-"""
-
-
-#############################################################################
-# First make a simple synthetic image
-
-# Create the data with numpy
-import numpy as np
-grid = np.mgrid[0:192, 0:128]
-circle = np.sum(
-    (grid - np.array([32, 32])[:, np.newaxis, np.newaxis]) ** 2,
-    axis=0) < 256
-diamond = np.sum(
-    np.abs(grid - np.array([128, 80])[:, np.newaxis, np.newaxis]),
-    axis=0) < 16
-rectangle = np.max(np.abs(
-    grid - np.array([64, 96])[:, np.newaxis, np.newaxis]), axis=0) < 16
-
-image = np.zeros_like(circle)
-image[16:160, 16:120] = 1.
-image = image + 2 * circle + 3 * rectangle + 4 * diamond + 1 - -vmax = image.max() - -source_affine = np.eye(4) -# Use canonical vectors for affine -# Give the affine an offset -source_affine[:2, 3] = np.array([96, 64]) - -# Rotate it slightly -angle = np.pi / 180 * 15 -rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], - [np.sin(angle), np.cos(angle)]]) -source_affine[:2, :2] = rotation_matrix * 2.0 # 2.0mm voxel size - -# We need to turn this data into a nibabel image -import nibabel -img = nibabel.Nifti1Image(image[:, :, np.newaxis], affine=source_affine) - -############################################################################# -# Now resample the image -from nilearn.image import resample_img -img_in_mm_space = resample_img(img, target_affine=np.eye(4), - target_shape=(512, 512, 1)) - -target_affine_3x3 = np.eye(3) * 2 -target_affine_4x4 = np.eye(4) * 2 -target_affine_4x4[3, 3] = 1. -img_3d_affine = resample_img(img, target_affine=target_affine_3x3) -img_4d_affine = resample_img(img, target_affine=target_affine_4x4) -target_affine_mm_space_offset_changed = np.eye(4) -target_affine_mm_space_offset_changed[:3, 3] = \ - img_3d_affine.get_affine()[:3, 3] - -img_3d_affine_in_mm_space = resample_img( - img_3d_affine, - target_affine=target_affine_mm_space_offset_changed, - target_shape=(np.array(img_3d_affine.shape) * 2).astype(int)) - -img_4d_affine_in_mm_space = resample_img( - img_4d_affine, - target_affine=np.eye(4), - target_shape=(np.array(img_4d_affine.shape) * 2).astype(int)) - -############################################################################# -# Finally, visualize -import matplotlib.pyplot as plt -plt.figure() -plt.imshow(image, interpolation="nearest", vmin=0, vmax=vmax) -plt.title("The original data in voxel space") - -plt.figure() -plt.imshow(img_in_mm_space.get_data()[:, :, 0], vmin=0, vmax=vmax) -plt.title("The original data in mm space") - -plt.figure() -plt.imshow(img_3d_affine_in_mm_space.get_data()[:, :, 0], - vmin=0, vmax=vmax) -plt.title("Transformed using a 3x3 affine -\n leads to " - "re-estimation of bounding box") - -plt.figure() -plt.imshow(img_4d_affine_in_mm_space.get_data()[:, :, 0], - vmin=0, vmax=vmax) -plt.title("Transformed using a 4x4 affine -\n Uses affine anchor " - "and estimates bounding box size") - -plt.show() diff --git a/examples/manipulating_images/plot_extract_rois_smith_atlas.py b/examples/manipulating_images/plot_extract_rois_smith_atlas.py deleted file mode 100644 index d8914a4bf8..0000000000 --- a/examples/manipulating_images/plot_extract_rois_smith_atlas.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -Regions Extraction of Default Mode Networks using Smith Atlas -============================================================= - -This simple example shows how to extract regions from Smith atlas -resting state networks. 
- -In particular, we show how Default Mode Network regions are extracted -using :class:`nilearn.regions.RegionExtractor` from regions module -""" - -################################################################################ -# Fetching the smith ICA 10 RSN by importing datasets utilities -from nilearn import datasets - -smith_atlas = datasets.fetch_atlas_smith_2009() -atlas_networks = smith_atlas.rsn10 - -################################################################################ -# Import region extractor to extract atlas networks -from nilearn.regions import RegionExtractor - -# min_region_size in voxel volume mm^3 -extraction = RegionExtractor(atlas_networks, min_region_size=800, - threshold=98, thresholding_strategy='percentile') - -# Just call fit() to execute region extraction procedure -extraction.fit() -regions_img = extraction.regions_img_ - -################################################################################ -# Visualization -# Show region extraction results by importing image & plotting utilities -from nilearn import plotting -from nilearn.image import index_img -from nilearn.plotting import find_xyz_cut_coords - -# Showing region extraction results using 4D maps visualization tool -plotting.plot_prob_atlas(regions_img, display_mode='z', cut_coords=1, - view_type='contours', title="Regions extracted.") - -# To reduce the complexity, we choose to display all the regions -# extracted from network 3 -import numpy as np - -DMN_network = index_img(atlas_networks, 3) -plotting.plot_roi(DMN_network, display_mode='z', cut_coords=1, - title='Network 3') - -regions_indices_network3 = np.where(np.array(extraction.index_) == 3) -for index in regions_indices_network3[0]: - cur_img = index_img(extraction.regions_img_, index) - coords = find_xyz_cut_coords(cur_img) - plotting.plot_roi(cur_img, display_mode='z', cut_coords=coords[2:3], - title="Blob of network3") - -plotting.show() diff --git a/examples/manipulating_images/plot_extract_rois_statistical_maps.py b/examples/manipulating_images/plot_extract_rois_statistical_maps.py deleted file mode 100644 index 3b57f30469..0000000000 --- a/examples/manipulating_images/plot_extract_rois_statistical_maps.py +++ /dev/null @@ -1,74 +0,0 @@ -""" -Region Extraction using a t-statistical map (3D) -================================================ - -This example shows how to extract regions or separate the regions -from a statistical map. - -We use localizer t-statistic maps from :func:`nilearn.datasets.fetch_localizer_contrasts` -as an input image. - -The idea is to threshold an image to get foreground objects using a -function :func:`nilearn.image.threshold_img` and extract objects using a function -:func:`nilearn.regions.connected_regions`. 
-"""
-
-################################################################################
-# Fetching t-statistic image of localizer contrasts by loading from datasets
-# utilities
-from nilearn import datasets
-
-n_subjects = 3
-localizer_path = datasets.fetch_localizer_contrasts(
-    ['calculation (auditory cue)'], n_subjects=n_subjects, get_tmaps=True)
-tmap_filename = localizer_path.tmaps[0]
-
-################################################################################
-# Threshold the t-statistic image by importing threshold function
-from nilearn.image import threshold_img
-
-# Two types of strategy can be used with this threshold function
-# Type 1: the strategy is based on scoreatpercentile
-threshold_percentile_img = threshold_img(tmap_filename, threshold='97%')
-
-
-# Type 2: the strategy is based on image intensity
-# Here, the threshold value must lie within the image limits, i.e. be less
-# than the maximum value.
-threshold_value_img = threshold_img(tmap_filename, threshold=4.)
-
-################################################################################
-# Visualization
-# Show the thresholding results using the plotting utilities
-from nilearn import plotting
-
-# Showing percentile threshold image
-plotting.plot_stat_map(threshold_percentile_img, display_mode='z', cut_coords=5,
-                       title='Threshold image with string percentile', colorbar=False)
-
-# Showing intensity threshold image
-plotting.plot_stat_map(threshold_value_img, display_mode='z', cut_coords=5,
-                       title='Threshold image with intensity value', colorbar=False)
-
-################################################################################
-# Extracting the regions by importing connected regions function
-from nilearn.regions import connected_regions
-
-regions_percentile_img, index = connected_regions(threshold_percentile_img,
-                                                  min_region_size=1500)
-
-regions_value_img, index = connected_regions(threshold_value_img,
-                                             min_region_size=1500)
-
-################################################################################
-# Visualizing region extraction results
-title = ("ROIs using percentile thresholding. "
-         "\n Each ROI of the same color is one extracted region")
-plotting.plot_prob_atlas(regions_percentile_img, anat_img=tmap_filename,
-                         view_type='contours', display_mode='z',
-                         cut_coords=5, title=title)
-title = ("ROIs using image intensity thresholding. "
-         "\n Each ROI of the same color is one extracted region")
-plotting.plot_prob_atlas(regions_value_img, anat_img=tmap_filename,
-                         view_type='contours', display_mode='z',
-                         cut_coords=5, title=title)
-plotting.show()
diff --git a/examples/manipulating_images/plot_mask_computation.py b/examples/manipulating_images/plot_mask_computation.py
deleted file mode 100644
index afd2f019e7..0000000000
--- a/examples/manipulating_images/plot_mask_computation.py
+++ /dev/null
@@ -1,101 +0,0 @@
-"""
-Understanding NiftiMasker and mask computation
-==============================================
-
-In this example, the Nifti masker is used to automatically compute a mask.
-
-For data that has already been masked, the default strategy works out of
-the box.
-
-However, for raw EPI, as in resting-state time series, we need to use the
-'epi' strategy of the NiftiMasker.
-
-In addition, we show here how to tweak the different parameters of the
-underlying mask extraction routine
-:func:`nilearn.masking.compute_epi_mask`.
- -""" - -import numpy as np - -import nibabel -from nilearn import datasets - - -############################################################################### -# From already masked data -from nilearn.input_data import NiftiMasker -import nilearn.image as image -from nilearn.plotting import plot_roi, show - -# Load Miyawaki dataset -miyawaki_dataset = datasets.fetch_miyawaki2008() - -# print basic information on the dataset -print('First functional nifti image (4D) is located at: %s' % - miyawaki_dataset.func[0]) # 4D data - -miyawaki_filename = miyawaki_dataset.func[0] -miyawaki_mean_img = image.mean_img(miyawaki_filename) - -# This time, we can use the NiftiMasker without changing the default mask -# strategy, as the data has already been masked, and thus lies on a -# homogeneous background - -masker = NiftiMasker() -masker.fit(miyawaki_filename) - -plot_roi(masker.mask_img_, miyawaki_mean_img, - title="Mask from already masked data") - - -############################################################################### -# From raw EPI data - -# Load NYU resting-state dataset -nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1) -nyu_filename = nyu_dataset.func[0] -nyu_img = nibabel.load(nyu_filename) - -# Restrict nyu to 100 frames to speed up computation -from nilearn.image import index_img -nyu_img = index_img(nyu_img, slice(0, 100)) - -# To display the background -nyu_mean_img = image.mean_img(nyu_img) - - -# Simple mask extraction from EPI images -# We need to specify an 'epi' mask_strategy, as this is raw EPI data -masker = NiftiMasker(mask_strategy='epi') -masker.fit(nyu_img) -plot_roi(masker.mask_img_, nyu_mean_img, title='EPI automatic mask') - -# Generate mask with strong opening -masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=10)) -masker.fit(nyu_img) -plot_roi(masker.mask_img_, nyu_mean_img, title='EPI Mask with strong opening') - -# Generate mask with a high lower cutoff -masker = NiftiMasker(mask_strategy='epi', - mask_args=dict(upper_cutoff=.9, lower_cutoff=.8, - opening=False)) -masker.fit(nyu_img) -plot_roi(masker.mask_img_, nyu_mean_img, - title='EPI Mask: high lower_cutoff') - -############################################################################### -# Extract time series - -# trended vs detrended -trended = NiftiMasker(mask_strategy='epi') -detrended = NiftiMasker(mask_strategy='epi', detrend=True) -trended_data = trended.fit_transform(nyu_img) -detrended_data = detrended.fit_transform(nyu_img) - -print("Trended: mean %.2f, std %.2f" % - (np.mean(trended_data), np.std(trended_data))) -print("Detrended: mean %.2f, std %.2f" % - (np.mean(detrended_data), np.std(detrended_data))) - -show() diff --git a/examples/manipulating_images/plot_nifti_simple.py b/examples/manipulating_images/plot_nifti_simple.py deleted file mode 100644 index b5ae2749c5..0000000000 --- a/examples/manipulating_images/plot_nifti_simple.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -Simple example of NiftiMasker use -================================== - -Here is a simple example of automatic mask computation using the nifti masker. -The mask is computed and visualized. 
-""" - -########################################################################### -# Retrieve the NYU test-retest dataset - -from nilearn import datasets -nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1) -func_filename = nyu_dataset.func[0] - -# print basic information on the dataset -print('First anatomical nifti image (3D) is at: %s' % nyu_dataset.anat_anon[0]) -print('First functional nifti image (4D) is at: %s' % func_filename) - -########################################################################### -# Compute the mask -from nilearn.input_data import NiftiMasker - -# As this is raw resting-state EPI, the background is noisy and we cannot -# rely on the 'background' masking strategy. We need to use the 'epi' one -nifti_masker = NiftiMasker(standardize=False, mask_strategy='epi', - memory="nilearn_cache", memory_level=2) -nifti_masker.fit(func_filename) -mask_img = nifti_masker.mask_img_ - -########################################################################### -# Visualize the mask -from nilearn.plotting import plot_roi -from nilearn.image.image import mean_img - -# calculate mean image for the background -mean_func_img = mean_img(func_filename) - -plot_roi(mask_img, mean_func_img, display_mode='y', cut_coords=4, title="Mask") - - -########################################################################### -# Preprocess data with the NiftiMasker -nifti_masker.fit(func_filename) -fmri_masked = nifti_masker.transform(func_filename) -# fmri_masked is now a 2D matrix, (n_voxels x n_time_points) - -########################################################################### -# Run an algorithm -from sklearn.decomposition import FastICA -n_components = 20 -ica = FastICA(n_components=n_components, random_state=42) -components_masked = ica.fit_transform(fmri_masked.T).T - -########################################################################### -# Reverse masking, and display the corresponding map -components = nifti_masker.inverse_transform(components_masked) - -# Visualize results -from nilearn.plotting import plot_stat_map, show -from nilearn.image import index_img - -plot_stat_map(index_img(components, 0), mean_func_img, - display_mode='y', cut_coords=4, title="Component 0") - -show() diff --git a/examples/manipulating_images/plot_roi_extraction.py b/examples/manipulating_images/plot_roi_extraction.py deleted file mode 100644 index d0068bbff8..0000000000 --- a/examples/manipulating_images/plot_roi_extraction.py +++ /dev/null @@ -1,151 +0,0 @@ -""" -Computing an ROI mask -======================= - -Example showing how a T-test can be performed to compute an ROI -mask, and how simple operations can improve the quality of the mask -obtained. 
-""" - -############################################################################## -# Coordinates of the slice we will be displaying - -coronal = -24 -sagittal = -33 -axial = -17 -cut_coords = (coronal, sagittal, axial) - -############################################################################## -# Load the data - -# Fetch the data files from Internet -from nilearn import datasets -from nilearn.image import new_img_like - - -haxby_dataset = datasets.fetch_haxby(n_subjects=1) - -# print basic information on the dataset -print('First subject anatomical nifti image (3D) located is at: %s' % - haxby_dataset.anat[0]) -print('First subject functional nifti image (4D) is located at: %s' % - haxby_dataset.func[0]) - -# Second, load the labels -import numpy as np - -session_target = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ") -haxby_labels = session_target['labels'] - -import matplotlib.pyplot as plt -from nilearn.input_data import NiftiLabelsMasker - -############################################################################## -# Build a statistical test to find voxels of interest - -# Smooth the data -from nilearn import image -fmri_filename = haxby_dataset.func[0] -fmri_img = image.smooth_img(fmri_filename, fwhm=6) - -# Plot the mean image -from nilearn.plotting import plot_epi -mean_img = image.mean_img(fmri_img) -plot_epi(mean_img, title='Smoothed mean EPI', cut_coords=cut_coords) - -############################################################################## -# Run a T-test for face and houses -from scipy import stats -fmri_data = fmri_img.get_data() -_, p_values = stats.ttest_ind(fmri_data[..., haxby_labels == b'face'], - fmri_data[..., haxby_labels == b'house'], - axis=-1) - -# Use a log scale for p-values -log_p_values = -np.log10(p_values) -log_p_values[np.isnan(log_p_values)] = 0. -log_p_values[log_p_values > 10.] = 10. 
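
[Editor's note: the statistic used above is simply a voxel-wise two-sample
t-test turned into a -log10 p-value map. A standalone sketch of that step on
synthetic volumes (illustrative only; all names are invented):]

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
face_like = rng.randn(8, 8, 8, 20) + 0.5  # toy "face" volumes, shifted mean
house_like = rng.randn(8, 8, 8, 20)       # toy "house" volumes
_, p_values_toy = stats.ttest_ind(face_like, house_like, axis=-1)
log_p_toy = -np.log10(p_values_toy)
log_p_toy[np.isnan(log_p_toy)] = 0.
log_p_toy[log_p_toy > 10.] = 10.          # cap, as in the example above
print(log_p_toy.shape, round(log_p_toy.max(), 2))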
-from nilearn.plotting import plot_stat_map -plot_stat_map(new_img_like(fmri_img, log_p_values), - mean_img, title="p-values", cut_coords=cut_coords) - -############################################################################## -# Build a mask from this statistical map - -# Thresholding -log_p_values[log_p_values < 5] = 0 -plot_stat_map(new_img_like(fmri_img, log_p_values), - mean_img, title='Thresholded p-values', annotate=False, - colorbar=False, cut_coords=cut_coords) - -############################################################################## -# Binarization and intersection with VT mask -# (intersection corresponds to an "AND conjunction") -bin_p_values = (log_p_values != 0) -mask_vt_filename = haxby_dataset.mask_vt[0] -import nibabel -vt = nibabel.load(mask_vt_filename).get_data().astype(bool) -bin_p_values_and_vt = np.logical_and(bin_p_values, vt) - -from nilearn.plotting import plot_roi, show -plot_roi(new_img_like(fmri_img, bin_p_values_and_vt.astype(np.int)), - mean_img, title='Intersection with ventral temporal mask', - cut_coords=cut_coords) - -############################################################################## -# Dilation -from scipy import ndimage -dil_bin_p_values_and_vt = ndimage.binary_dilation(bin_p_values_and_vt) -plot_roi(new_img_like(fmri_img, dil_bin_p_values_and_vt.astype(np.int)), - mean_img, title='Dilated mask', cut_coords=cut_coords, - annotate=False) - - -############################################################################## -# Identification of connected components -labels, n_labels = ndimage.label(dil_bin_p_values_and_vt) -first_roi_data = (labels == 1).astype(np.int) -second_roi_data = (labels == 2).astype(np.int) -plot_roi(new_img_like(fmri_img, first_roi_data), - mean_img, title='Connected components: first ROI') - -plot_roi(new_img_like(fmri_img, second_roi_data), - mean_img, title='Connected components: second ROI') - - -############################################################################## -# Use the new ROIs to extract data maps in both ROIs -masker = NiftiLabelsMasker( - labels_img=new_img_like(fmri_img, labels), - resampling_target=None, - standardize=False, - detrend=False) -masker.fit() -condition_names = list(set(haxby_labels)) -n_cond_img = fmri_data[..., haxby_labels == b'house'].shape[-1] -n_conds = len(condition_names) - -X1, X2 = np.zeros((n_cond_img, n_conds)), np.zeros((n_cond_img, n_conds)) -for i, cond in enumerate(condition_names): - cond_maps = new_img_like( - fmri_img, fmri_data[..., haxby_labels == cond][..., :n_cond_img]) - mask_data = masker.transform(cond_maps) - X1[:, i], X2[:, i] = mask_data[:, 0], mask_data[:, 1] -condition_names[condition_names.index(b'scrambledpix')] = b'scrambled' - - -############################################################################## -# Plot the average in the different condition names -plt.figure(figsize=(15, 7)) -for i in np.arange(2): - plt.subplot(1, 2, i + 1) - plt.boxplot(X1 if i == 0 else X2) - plt.xticks(np.arange(len(condition_names)) + 1, condition_names, - rotation=25) - plt.title('Boxplots of data in ROI%i per condition' % (i + 1)) - -show() - -# save the ROI 'atlas' to a single output Nifti -nibabel.save(new_img_like(fmri_img, labels), - 'mask_atlas.nii') diff --git a/examples/manipulating_images/plot_smooth_mean_image.py b/examples/manipulating_images/plot_smooth_mean_image.py deleted file mode 100644 index 1ff7dfc5e8..0000000000 --- a/examples/manipulating_images/plot_smooth_mean_image.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Smoothing an image 
-===================
-
-Here we smooth a mean EPI image and plot the result
-
-As we vary the smoothing FWHM, note how we decrease the amount of noise,
-but also lose spatial details. In general, the best amount of smoothing
-for a given analysis depends on the spatial extent of the effects that
-are expected.
-
-"""
-
-from nilearn import datasets, plotting, image
-
-data = datasets.fetch_adhd(n_subjects=1)
-
-# Print basic information on the dataset
-print('First subject functional nifti image (4D) is located at: %s' %
-      data.func[0])
-
-first_epi_file = data.func[0]
-
-# First compute the mean image from the 4D series of images
-mean_func = image.mean_img(first_epi_file)
-
-# Then we smooth, with a varying amount of smoothing, from none to 20mm
-# by increments of 5mm
-for smoothing in range(0, 25, 5):
-    smoothed_img = image.smooth_img(mean_func, smoothing)
-    plotting.plot_epi(smoothed_img,
-                      title="Smoothing %imm" % smoothing)
-
-
-plotting.show()
diff --git a/examples/plotting/README.txt b/examples/plotting/README.txt
deleted file mode 100644
index eb0027b784..0000000000
--- a/examples/plotting/README.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-Visualization of brain images
------------------------------
-
-See :ref:`plotting` for more details.
diff --git a/examples/plotting/plot_atlas.py b/examples/plotting/plot_atlas.py
deleted file mode 100644
index 0f3c944efc..0000000000
--- a/examples/plotting/plot_atlas.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-Basic Atlas plotting
-=======================
-
-Plot the regions of a reference atlas (here the Harvard-Oxford atlas).
-"""
-
-from nilearn import datasets
-from nilearn import plotting
-
-dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
-atlas_filename = dataset.maps
-
-print('Atlas ROIs are located at: %s' % atlas_filename)
-
-plotting.plot_roi(atlas_filename, title="Harvard Oxford atlas")
-plotting.show()
diff --git a/examples/plotting/plot_demo_glass_brain.py b/examples/plotting/plot_demo_glass_brain.py
deleted file mode 100644
index 1784935073..0000000000
--- a/examples/plotting/plot_demo_glass_brain.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""
-Glass brain plotting in nilearn
-===============================
-
-See :ref:`plotting` for more plotting functionalities.
-"""
-
-
-###############################################################################
-# Retrieve the data
-from nilearn import datasets
-
-localizer_dataset = datasets.fetch_localizer_contrasts(
-    ["left vs right button press"],
-    n_subjects=2,
-    get_tmaps=True)
-localizer_tmap_filename = localizer_dataset.tmaps[1]
-
-###############################################################################
-# demo glass brain plotting
-from nilearn import plotting
-
-plotting.plot_glass_brain(localizer_tmap_filename, threshold=3)
-
-plotting.plot_glass_brain(
-    localizer_tmap_filename, title='plot_glass_brain',
-    black_bg=True, display_mode='xz', threshold=3)
-
-plotting.show()
diff --git a/examples/plotting/plot_demo_glass_brain_extensive.py b/examples/plotting/plot_demo_glass_brain_extensive.py
deleted file mode 100644
index 51881ccb80..0000000000
--- a/examples/plotting/plot_demo_glass_brain_extensive.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-Glass brain plotting in nilearn (all options)
-=============================================
-
-This example goes through different options of the :func:`nilearn.plotting.plot_glass_brain` function
-(including plotting negative values).
-See :ref:`plotting` for more plotting functionalities.
-""" - - -############################################################################### -# Retrieve the data -from nilearn import datasets - -localizer_dataset = datasets.fetch_localizer_contrasts( - ["left vs right button press"], - n_subjects=2, - get_tmaps=True) -localizer_tmap_filename = localizer_dataset.tmaps[1] - -############################################################################### -# demo glass brain plotting -from nilearn import plotting - -plotting.plot_glass_brain(localizer_tmap_filename, threshold=3) - -plotting.plot_glass_brain(localizer_tmap_filename, threshold=3, colorbar=True) - -plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain', - black_bg=True, display_mode='xz', threshold=3) - -plotting.plot_glass_brain(localizer_tmap_filename, threshold=0, colorbar=True, - plot_abs=False) - -plotting.plot_glass_brain(localizer_tmap_filename, threshold=3, - colorbar=True, plot_abs=False) - -plotting.show() diff --git a/examples/plotting/plot_demo_more_plotting.py b/examples/plotting/plot_demo_more_plotting.py deleted file mode 100644 index 84f005d2d5..0000000000 --- a/examples/plotting/plot_demo_more_plotting.py +++ /dev/null @@ -1,94 +0,0 @@ -""" -More nilearn plotting -===================== - -See :ref:`plotting` for more details. -""" - -# The imports from nilearn plotting and image processing -from nilearn import plotting, image - -############################################################################### -# Retrieve the data: haxby dataset to have EPI images and masks, and -# localizer dataset to have contrast maps - -from nilearn import datasets -haxby_dataset = datasets.fetch_haxby(n_subjects=1) -haxby_anat_filename = haxby_dataset.anat[0] -haxby_mask_filename = haxby_dataset.mask_vt[0] -haxby_func_filename = haxby_dataset.func[0] - -localizer_dataset = datasets.fetch_localizer_contrasts( - ["left vs right button press"], - n_subjects=2, - get_anats=True) -localizer_anat_filename = localizer_dataset.anats[1] -localizer_cmap_filename = localizer_dataset.cmaps[1] - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='ortho', - cut_coords=(36, -27, 60), - title="display_mode='ortho', cut_coords=(36, -27, 60)") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='z', cut_coords=5, - title="display_mode='z', cut_coords=5") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='x', - cut_coords=(-36, 36), - title="display_mode='x', cut_coords=(-36, 36)") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='y', cut_coords=1, - title="display_mode='x', cut_coords=(-36, 36)") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='z', - cut_coords=1, colorbar=False, - title="display_mode='z', cut_coords=1, colorbar=False") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='xz', - cut_coords=(36, 60), - title="display_mode='xz', cut_coords=(36, 60)") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='yx', - cut_coords=(-27, 36), - title="display_mode='yx', cut_coords=(-27, 36)") - -######################################## -plotting.plot_stat_map(localizer_cmap_filename, display_mode='yz', - cut_coords=(-27, 60), - title="display_mode='yz', cut_coords=(-27, 60)") - 
-############################################################################### -# demo display objects with add_* methods -mean_haxby_img = image.mean_img(haxby_func_filename) - -# Plot T1 outline on top of the mean EPI (useful for checking coregistration) -display = plotting.plot_anat(mean_haxby_img, title="add_edges") -display.add_edges(haxby_anat_filename) - -######################################## -# Plotting outline of the mask on top of the EPI -display = plotting.plot_anat(mean_haxby_img, title="add_contours", - cut_coords=(28, -34, -22)) -display.add_contours(haxby_mask_filename, levels=[0.5], colors='r') - -############################################################################### -# demo saving plots to file - -plotting.plot_stat_map(localizer_cmap_filename, - title='Using plot_stat_map output_file', - output_file='plot_stat_map.png') - -######################################## -display = plotting.plot_stat_map(localizer_cmap_filename, - title='Using display savefig') -display.savefig('plot_stat_map_from_display.png') -# In non-interactive settings make sure you close your displays -display.close() - -plotting.show() diff --git a/examples/plotting/plot_demo_plotting.py b/examples/plotting/plot_demo_plotting.py deleted file mode 100644 index c92d168c3d..0000000000 --- a/examples/plotting/plot_demo_plotting.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -Plotting in nilearn -========================== - -Nilearn comes with a set of plotting function for Nifti-like images, -see :ref:`plotting` for more details. -""" - -# Import plotting and image processing tools -from nilearn import plotting, image - -############################################################################### -# Retrieve the data: haxby dataset to have EPI images and masks, and -# localizer dataset to have contrast maps - -from nilearn import datasets -haxby_dataset = datasets.fetch_haxby(n_subjects=1) - -# print basic information on the dataset -print('First subject anatomical nifti image (3D) is at: %s' % - haxby_dataset.anat[0]) -print('First subject functional nifti image (4D) is at: %s' % - haxby_dataset.func[0]) # 4D data - -haxby_anat_filename = haxby_dataset.anat[0] -haxby_mask_filename = haxby_dataset.mask_vt[0] -haxby_func_filename = haxby_dataset.func[0] - -localizer_dataset = datasets.fetch_localizer_contrasts( - ["left vs right button press"], - n_subjects=2, - get_anats=True, - get_tmaps=True) -localizer_anat_filename = localizer_dataset.anats[1] -localizer_tmap_filename = localizer_dataset.tmaps[1] - -############################################################################### -# Plotting statistical maps -plotting.plot_stat_map(localizer_tmap_filename, bg_img=localizer_anat_filename, - threshold=3, title="plot_stat_map", - cut_coords=(36, -27, 66)) - -############################################################################### -# Plotting glass brain -plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain', - threshold=3) - -############################################################################### -# Plotting anatomical maps -plotting.plot_anat(haxby_anat_filename, title="plot_anat") - -############################################################################### -# Plotting ROIs (here the mask) -plotting.plot_roi(haxby_mask_filename, bg_img=haxby_anat_filename, - title="plot_roi") - -############################################################################### -# Plotting EPI haxby -mean_haxby_img = image.mean_img(haxby_func_filename) 
-plotting.plot_epi(mean_haxby_img, title="plot_epi") - -plotting.show() diff --git a/examples/plotting/plot_dim_plotting.py b/examples/plotting/plot_dim_plotting.py deleted file mode 100644 index 13439bcf58..0000000000 --- a/examples/plotting/plot_dim_plotting.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -Controling the contrast of the background when plotting -========================================================= - -The `dim` argument controls the contrast of the background. - -*dim* modifies the contrast of this image: dim=0 leaves the image -unchanged, negative values of *dim* enhance it, and positive values -decrease it (dim the background). - -This *dim* argument may also be useful for the plot_roi function used to -display ROIs on top of a background image. -""" - -# Retrieve the data: the localizer dataset with contrast maps - -from nilearn import datasets - -localizer_dataset = datasets.fetch_localizer_contrasts( - ["left vs right button press"], - n_subjects=2, - get_anats=True, - get_tmaps=True) -localizer_anat_filename = localizer_dataset.anats[1] -localizer_tmap_filename = localizer_dataset.tmaps[1] - -# Plotting: vary the 'dim' of the background -from nilearn import plotting - -plotting.plot_stat_map(localizer_tmap_filename, - bg_img=localizer_anat_filename, - cut_coords=(36, -27, 66), - threshold=3, title="dim=-.5", - dim=-.5) - -plotting.plot_stat_map(localizer_tmap_filename, - bg_img=localizer_anat_filename, - cut_coords=(36, -27, 66), - threshold=3, title="dim=0", - dim=0) - -plotting.plot_stat_map(localizer_tmap_filename, - bg_img=localizer_anat_filename, - cut_coords=(36, -27, 66), - threshold=3, title="dim=.5", - dim=.5) - -plotting.plot_stat_map(localizer_tmap_filename, - bg_img=localizer_anat_filename, - cut_coords=(36, -27, 66), - threshold=3, title="dim=1", - dim=1) - -plotting.show() diff --git a/examples/plotting/plot_haxby_masks.py b/examples/plotting/plot_haxby_masks.py deleted file mode 100644 index 8930d595c0..0000000000 --- a/examples/plotting/plot_haxby_masks.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Plot Haxby masks -================= - -Small script to plot the masks of the Haxby dataset. 
-""" -import numpy as np -from scipy import linalg -import matplotlib.pyplot as plt - -from nilearn import datasets -haxby_dataset = datasets.fetch_haxby() - -# print basic information on the dataset -print('First subject anatomical nifti image (3D) is at: %s' % - haxby_dataset.anat[0]) -print('First subject functional nifti image (4D) is at: %s' % - haxby_dataset.func[0]) # 4D data - -# Build the mean image because we have no anatomic data -from nilearn import image -func_filename = haxby_dataset.func[0] -mean_img = image.mean_img(func_filename) - -z_slice = -24 -from nilearn.image.resampling import coord_transform -affine = mean_img.get_affine() -_, _, k_slice = coord_transform(0, 0, z_slice, - linalg.inv(affine)) -k_slice = np.round(k_slice) - -fig = plt.figure(figsize=(4, 5.4), facecolor='k') - -from nilearn.plotting import plot_anat, show -display = plot_anat(mean_img, display_mode='z', cut_coords=[z_slice], - figure=fig) -mask_vt_filename = haxby_dataset.mask_vt[0] -mask_house_filename = haxby_dataset.mask_house[0] -mask_face_filename = haxby_dataset.mask_face[0] -display.add_contours(mask_vt_filename, contours=1, antialiased=False, - linewidths=4., levels=[0], colors=['red']) -display.add_contours(mask_house_filename, contours=1, antialiased=False, - linewidths=4., levels=[0], colors=['blue']) -display.add_contours(mask_face_filename, contours=1, antialiased=False, - linewidths=4., levels=[0], colors=['limegreen']) - -# We generate a legend using the trick described on -# http://matplotlib.sourceforge.net/users/legend_guide.httpml#using-proxy-artist -from matplotlib.patches import Rectangle -p_v = Rectangle((0, 0), 1, 1, fc="red") -p_h = Rectangle((0, 0), 1, 1, fc="blue") -p_f = Rectangle((0, 0), 1, 1, fc="limegreen") -plt.legend([p_v, p_h, p_f], ["vt", "house", "face"]) - -show() diff --git a/examples/plotting/plot_overlay.py b/examples/plotting/plot_overlay.py deleted file mode 100644 index 2701d91391..0000000000 --- a/examples/plotting/plot_overlay.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Visualizing a probablistic atlas: the default mode in the MSDL atlas -===================================================================== - -Visualizing a probablistic atlas requires visualizing the different -maps that compose it. - -Here we represent the nodes constituting the default mode network in the -`MSDL atlas -`_. 
- -The tools that we need to leverage are: - - * :func:`nilearn.image.index_img` to retrieve the various maps composing - the atlas - - * Adding overlays on an existing brain display, to plot each of these - maps - -""" - -from nilearn import datasets, plotting, image - -atlas_data = datasets.fetch_atlas_msdl() -atlas_filename = atlas_data.maps - -# First plot the map for the PCC: index 4 in the atlas -display = plotting.plot_stat_map(image.index_img(atlas_filename, 4), - colorbar=False, - title="DMN nodes in MSDL atlas") - -# Now add as an overlay the maps for the ACC and the left and right -# parietal nodes -display.add_overlay(image.index_img(atlas_filename, 5), - cmap=plotting.cm.black_blue) -display.add_overlay(image.index_img(atlas_filename, 6), - cmap=plotting.cm.black_green) -display.add_overlay(image.index_img(atlas_filename, 3), - cmap=plotting.cm.black_pink) - -plotting.show() diff --git a/examples/plotting/plot_prob_atlas.py b/examples/plotting/plot_prob_atlas.py deleted file mode 100644 index e3c76c0a7c..0000000000 --- a/examples/plotting/plot_prob_atlas.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -Visualizing 4D probabilistic atlas maps -======================================= - -This example shows how to visualize probabilistic atlases made of 4D images. -There are 3 different display types: - -1. "contours", which means maps or ROIs are shown as contours delineated by \ - colored lines. - -2. "filled_contours", maps are shown as contours same as above but with \ - fillings inside the contours. - -3. "continuous", maps are shown as just color overlays. - -The :func:`nilearn.plotting.plot_prob_atlas` function displays each map -with each different color which are picked randomly from the colormap -which is already defined. - -See :ref:`plotting` for more information to know how to tune the parameters. -""" -# Load 4D probabilistic atlases -from nilearn import datasets - -# Harvard Oxford Atlas -harvard_oxford = datasets.fetch_atlas_harvard_oxford('cort-prob-2mm') -harvard_oxford_sub = datasets.fetch_atlas_harvard_oxford('sub-prob-2mm') - -# Multi Subject Dictionary Learning Atlas -msdl = datasets.fetch_atlas_msdl() - -# Smith ICA Atlas and Brain Maps 2009 -smith = datasets.fetch_atlas_smith_2009() - -# ICBM tissue probability -icbm = datasets.fetch_icbm152_2009() - -# Visualization -from nilearn import plotting - -atlas_types = {'Harvard_Oxford': harvard_oxford.maps, - 'Harvard_Oxford sub': harvard_oxford_sub.maps, - 'MSDL': msdl.maps, 'Smith 2009 10 RSNs': smith.rsn10, - 'Smith2009 20 RSNs': smith.rsn20, - 'Smith2009 70 RSNs': smith.rsn70, - 'Smith2009 10 Brainmap': smith.bm10, - 'Smith2009 20 Brainmap': smith.bm20, - 'Smith2009 70 Brainmap': smith.bm70, - 'ICBM tissues': (icbm['wm'], icbm['gm'], icbm['csf'])} - -for name, atlas in sorted(atlas_types.items()): - plotting.plot_prob_atlas(atlas, title=name) - -plotting.show() diff --git a/examples/plotting/plot_visualization.py b/examples/plotting/plot_visualization.py deleted file mode 100644 index 2076202b0c..0000000000 --- a/examples/plotting/plot_visualization.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -NeuroImaging volumes visualization -================================== - -Simple example to show Nifti data visualization. 
-""" - -############################################################################## -# Fetch data -from nilearn import datasets - -haxby_dataset = datasets.fetch_haxby(n_subjects=1) - -# print basic information on the dataset -print('First anatomical nifti image (3D) located is at: %s' % - haxby_dataset.anat[0]) -print('First functional nifti image (4D) is located at: %s' % - haxby_dataset.func[0]) - -############################################################################## -# Visualization -from nilearn.image.image import mean_img - -# Compute the mean EPI: we do the mean along the axis 3, which is time -func_filename = haxby_dataset.func[0] -mean_haxby = mean_img(func_filename) - -from nilearn.plotting import plot_epi, show -plot_epi(mean_haxby) - -############################################################################## -# Extracting a brain mask - -# Simple computation of a mask from the fMRI data -from nilearn.masking import compute_epi_mask -mask_img = compute_epi_mask(func_filename) - -# Visualize it as an ROI -from nilearn.plotting import plot_roi -plot_roi(mask_img, mean_haxby) - -############################################################################## -# Applying the mask to extract the corresponding time series - -from nilearn.masking import apply_mask -masked_data = apply_mask(func_filename, mask_img) - -# masked_data shape is (timepoints, voxels). We can plot the first 150 -# timepoints from two voxels - -# And now plot a few of these -import matplotlib.pyplot as plt -plt.figure(figsize=(7, 5)) -plt.plot(masked_data[:2, :150].T) -plt.xlabel('Time [TRs]', fontsize=16) -plt.ylabel('Intensity', fontsize=16) -plt.xlim(0, 150) -plt.subplots_adjust(bottom=.12, top=.95, right=.95, left=.12) - -show() From e76dfbe2b35b8c952d9932dac8c292c6112b080d Mon Sep 17 00:00:00 2001 From: banilo Date: Tue, 12 Jan 2016 00:10:48 +0100 Subject: [PATCH 0044/1925] realize feedback --- nilearn/plotting/find_cuts.py | 13 +++++++------ nilearn/plotting/tests/test_find_cuts.py | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/nilearn/plotting/find_cuts.py b/nilearn/plotting/find_cuts.py index ed0f180434..be09217583 100644 --- a/nilearn/plotting/find_cuts.py +++ b/nilearn/plotting/find_cuts.py @@ -15,6 +15,8 @@ from ..image import new_img_like from .._utils.extmath import fast_abs_percentile from .._utils.numpy_conversions import as_ndarray +from .._utils import check_niimg_3d +from .._utils.niimg import _safe_get_data from ..image.resampling import get_mask_bounds, coord_transform from ..image.image import _smooth_array @@ -46,15 +48,14 @@ def find_xyz_cut_coords(img, mask=None, activation_threshold=None): z : float the z world coordinate. """ - data = img.get_data() + # if a pseudo-4D image or several images were passed (cf. #922), + # we reduce to a single 3D image to find the coordinates + img = check_niimg_3d(img) + data = _safe_get_data(img) + # To speed up computations, we work with partial views of the array, # and keep track of the offset offset = np.zeros(3) - - # if a pseudo-4D image or several images were passed (cf. 
#922), - # we reduce to a single 3D image to find the coordinates - if len(data.shape) > 3: - data = data[:, :, :, 0] # Deal with masked arrays: if hasattr(data, 'mask'): diff --git a/nilearn/plotting/tests/test_find_cuts.py b/nilearn/plotting/tests/test_find_cuts.py index 3f1293e226..a34a529b0a 100644 --- a/nilearn/plotting/tests/test_find_cuts.py +++ b/nilearn/plotting/tests/test_find_cuts.py @@ -41,7 +41,7 @@ def test_find_cut_coords(): np.testing.assert_array_equal( np.array([x, y, z]), 0.5 * np.array(data.shape).astype(np.float)) - + # regression test (cf. #922) # pseudo-4D images as input (i.e., X, Y, Z, 1) # previously raised "ValueError: too many values to unpack" From 2546c5eb9f96b3cbbde7b0fd77f16707e7429104 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Tue, 12 Jan 2016 11:14:39 +0100 Subject: [PATCH 0045/1925] Remove unused imports and unused variables --- nilearn/__init__.py | 2 +- nilearn/_utils/__init__.py | 5 ++++- nilearn/_utils/fixes/__init__.py | 2 ++ nilearn/_utils/niimg_conversions.py | 3 +-- nilearn/_utils/testing.py | 3 +-- nilearn/datasets/tests/test_atlas.py | 1 - nilearn/datasets/tests/test_func.py | 1 - nilearn/datasets/tests/test_struct.py | 1 - nilearn/decoding/__init__.py | 2 ++ nilearn/decoding/space_net.py | 2 +- nilearn/input_data/__init__.py | 3 +++ nilearn/mass_univariate/__init__.py | 2 ++ .../mass_univariate/tests/test_permuted_least_squares.py | 1 - nilearn/plotting/tests/test_displays.py | 2 -- nilearn/plotting/tests/test_find_cuts.py | 1 - nilearn/regions/tests/test_region_extractor.py | 6 +----- nilearn/tests/test_ndimage.py | 1 - nilearn/tests/test_niimg.py | 6 +++--- 18 files changed, 21 insertions(+), 23 deletions(-) diff --git a/nilearn/__init__.py b/nilearn/__init__.py index a861e8511d..fdf53c9e1d 100644 --- a/nilearn/__init__.py +++ b/nilearn/__init__.py @@ -39,7 +39,7 @@ 'image', 'input_data', 'masking', 'mass_univariate', 'plotting', 'region', 'signal'] -from .version import _check_module_dependencies, __version__ +from .version import _check_module_dependencies _check_module_dependencies() diff --git a/nilearn/_utils/__init__.py b/nilearn/_utils/__init__.py index 5616b0f7cd..59aae3afc4 100644 --- a/nilearn/_utils/__init__.py +++ b/nilearn/_utils/__init__.py @@ -1,4 +1,3 @@ - from .niimg_conversions import (check_niimg, check_niimg_3d, concat_niimgs, check_niimg_4d) @@ -9,3 +8,7 @@ from .cache_mixin import CacheMixin from .logger import _compose_err_msg + +__all__ = ['check_niimg', 'check_niimg_3d', 'concat_niimgs', 'check_niimg_4d', + '_repr_niimgs', 'copy_img', 'load_niimg', + 'as_ndarray', 'CacheMixin', '_compose_err_msg'] diff --git a/nilearn/_utils/fixes/__init__.py b/nilearn/_utils/fixes/__init__.py index e76bdd404b..c273410677 100644 --- a/nilearn/_utils/fixes/__init__.py +++ b/nilearn/_utils/fixes/__init__.py @@ -20,3 +20,5 @@ from sklearn.metrics import roc_auc_score except ImportError: from sklearn.metrics import auc as roc_auc_score + +__all__ = ['f_regression', 'atleast2d_or_csr', 'roc_auc_score'] diff --git a/nilearn/_utils/niimg_conversions.py b/nilearn/_utils/niimg_conversions.py index 24c0e3848d..1b7a9f2f09 100644 --- a/nilearn/_utils/niimg_conversions.py +++ b/nilearn/_utils/niimg_conversions.py @@ -18,6 +18,7 @@ from .exceptions import DimensionError + def _check_fov(img, affine, shape): """ Return True if img's field of view correspond to given shape and affine, False elsewhere. @@ -48,8 +49,6 @@ def _check_same_fov(*args, **kwargs): raise_error: boolean, optional If True, an error will be raised in case of error. 
""" - from ..image import new_img_like # avoid circular imports - raise_error = kwargs.pop('raise_error', False) for i, arg in enumerate(args): kwargs['img_#%i' % i] = arg diff --git a/nilearn/_utils/testing.py b/nilearn/_utils/testing.py index 54c860797d..310f273f38 100644 --- a/nilearn/_utils/testing.py +++ b/nilearn/_utils/testing.py @@ -623,8 +623,7 @@ def is_nose_running(): return False # Now check that we have the loader in the call stask stack = inspect.stack() - from nose import loader - loader_file_name = loader.__file__ + loader_file_name = nose.loader.__file__ if loader_file_name.endswith('.pyc'): loader_file_name = loader_file_name[:-1] for _, file_name, _, _, _, _ in stack: diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index 30bc90a506..b379a8117d 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -7,7 +7,6 @@ import os import shutil import numpy as np -from tempfile import mkdtemp import nibabel diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index 3259993f50..19127a7490 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -6,7 +6,6 @@ import os import numpy as np -from tempfile import mkdtemp import nibabel from sklearn.utils import check_random_state diff --git a/nilearn/datasets/tests/test_struct.py b/nilearn/datasets/tests/test_struct.py index 66833d092b..15e9e46048 100644 --- a/nilearn/datasets/tests/test_struct.py +++ b/nilearn/datasets/tests/test_struct.py @@ -7,7 +7,6 @@ import os import shutil import numpy as np -from tempfile import mkdtemp from nose import with_setup from nose.tools import assert_true, assert_equal, assert_not_equal diff --git a/nilearn/decoding/__init__.py b/nilearn/decoding/__init__.py index 50826353a2..caac41de05 100644 --- a/nilearn/decoding/__init__.py +++ b/nilearn/decoding/__init__.py @@ -4,3 +4,5 @@ from .searchlight import SearchLight from .space_net import SpaceNetClassifier, SpaceNetRegressor + +__all__ = ['SearchLight', 'SpaceNetClassifier', 'SpaceNetRegressor'] diff --git a/nilearn/decoding/space_net.py b/nilearn/decoding/space_net.py index 6c808a477d..fdaec85794 100644 --- a/nilearn/decoding/space_net.py +++ b/nilearn/decoding/space_net.py @@ -833,7 +833,7 @@ def fit(self, X, y): y = y[:, 0] # scores & mean weights map over all folds - self.cv_scores_ = [[] for _ in range(n_problems)] + self.cv_scores_ = [[] for i in range(n_problems)] w = np.zeros((n_problems, X.shape[1] + 1)) self.all_coef_ = np.ndarray((n_problems, n_folds, X.shape[1])) diff --git a/nilearn/input_data/__init__.py b/nilearn/input_data/__init__.py index ab5c9cfed8..4f635f2644 100644 --- a/nilearn/input_data/__init__.py +++ b/nilearn/input_data/__init__.py @@ -7,3 +7,6 @@ from .nifti_labels_masker import NiftiLabelsMasker from .nifti_maps_masker import NiftiMapsMasker from .nifti_spheres_masker import NiftiSpheresMasker + +__all__ = ['NiftiMasker', 'MultiNiftiMasker', 'NiftiLabelsMasker', + 'NiftiMapsMasker', 'NiftiSpheresMasker'] diff --git a/nilearn/mass_univariate/__init__.py b/nilearn/mass_univariate/__init__.py index 903cdaa668..201dc23f54 100644 --- a/nilearn/mass_univariate/__init__.py +++ b/nilearn/mass_univariate/__init__.py @@ -3,3 +3,5 @@ """ from .permuted_least_squares import permuted_ols + +__all__ = ['permuted_ols'] diff --git a/nilearn/mass_univariate/tests/test_permuted_least_squares.py b/nilearn/mass_univariate/tests/test_permuted_least_squares.py index b20269c13b..e873dc0a17 100644 
--- a/nilearn/mass_univariate/tests/test_permuted_least_squares.py +++ b/nilearn/mass_univariate/tests/test_permuted_least_squares.py @@ -3,7 +3,6 @@ """ # Author: Virgile Fritsch, , Feb. 2014 -import nose import numpy as np from scipy import stats from sklearn.utils import check_random_state diff --git a/nilearn/plotting/tests/test_displays.py b/nilearn/plotting/tests/test_displays.py index e19fbf0c77..2fa209490d 100644 --- a/nilearn/plotting/tests/test_displays.py +++ b/nilearn/plotting/tests/test_displays.py @@ -2,8 +2,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: import tempfile -import numpy as np - import matplotlib.pyplot as plt from nilearn.plotting.displays import OrthoSlicer, XSlicer, OrthoProjector diff --git a/nilearn/plotting/tests/test_find_cuts.py b/nilearn/plotting/tests/test_find_cuts.py index a587fef832..c80677f89e 100644 --- a/nilearn/plotting/tests/test_find_cuts.py +++ b/nilearn/plotting/tests/test_find_cuts.py @@ -4,7 +4,6 @@ from nilearn.plotting.find_cuts import (find_xyz_cut_coords, find_cut_slices, _transform_cut_coords) from nilearn._utils.testing import assert_raises_regex, assert_warns -from nilearn.plotting.find_cuts import find_xyz_cut_coords def test_find_cut_coords(): diff --git a/nilearn/regions/tests/test_region_extractor.py b/nilearn/regions/tests/test_region_extractor.py index 268c95f0e6..3add53e5e3 100644 --- a/nilearn/regions/tests/test_region_extractor.py +++ b/nilearn/regions/tests/test_region_extractor.py @@ -3,11 +3,10 @@ import numpy as np import nibabel -from nose.tools import assert_raises, assert_equal, assert_true, assert_not_equal +from nose.tools import assert_equal, assert_true, assert_not_equal from nilearn.regions import connected_regions, RegionExtractor from nilearn.regions.region_extractor import _threshold_maps_ratio -from nilearn.image import iter_img from nilearn._utils.testing import assert_raises_regex, generate_maps @@ -74,7 +73,6 @@ def test_connected_regions(): map_img = np.zeros((30, 30, 30)) + 0.1 * np.random.randn(30, 30, 30) map_img = nibabel.Nifti1Image(map_img, affine=np.eye(4)) - valid_extract_types = ['connected_components', 'local_regions'] # smoke test for function connected_regions and also to check # if the regions extracted should be equal or more than already present. 
# 4D image case @@ -135,8 +133,6 @@ def test_region_extractor_fit_and_transform(): assert_true(extractor.regions_img_.shape[-1] >= 9) n_regions_extracted = extractor.regions_img_.shape[-1] - imgs = [] - signals = [] shape = (91, 109, 91, 7) expected_signal_shape = (7, n_regions_extracted) for id_ in range(n_subjects): diff --git a/nilearn/tests/test_ndimage.py b/nilearn/tests/test_ndimage.py index 6ad18d8c49..291b9822b5 100644 --- a/nilearn/tests/test_ndimage.py +++ b/nilearn/tests/test_ndimage.py @@ -3,7 +3,6 @@ This test file is in nilearn/tests because nosetests ignores modules whose name starts with an underscore """ -from scipy import ndimage from nose.tools import assert_raises import numpy as np diff --git a/nilearn/tests/test_niimg.py b/nilearn/tests/test_niimg.py index ea081b29db..12f6af0197 100644 --- a/nilearn/tests/test_niimg.py +++ b/nilearn/tests/test_niimg.py @@ -11,7 +11,6 @@ from nilearn._utils.testing import assert_raises_regex - currdir = os.path.dirname(os.path.abspath(__file__)) @@ -23,7 +22,7 @@ def test_copy_img(): def test_copy_img_side_effect(): img1 = Nifti1Image(np.ones((2, 2, 2, 2)), affine=np.eye(4)) hash1 = joblib.hash(img1) - img2 = niimg.copy_img(img1) + niimg.copy_img(img1) hash2 = joblib.hash(img1) assert_equal(hash1, hash2) @@ -31,6 +30,7 @@ def test_copy_img_side_effect(): def test_new_img_like_side_effect(): img1 = Nifti1Image(np.ones((2, 2, 2, 2)), affine=np.eye(4)) hash1 = joblib.hash(img1) - img2 = new_img_like(img1, np.ones((2, 2, 2, 2)), img1.get_affine().copy(), copy_header=True) + new_img_like(img1, np.ones((2, 2, 2, 2)), img1.get_affine().copy(), + copy_header=True) hash2 = joblib.hash(img1) assert_equal(hash1, hash2) From 39b051c8e088dfa869d5450e8917d6db927526e0 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Tue, 12 Jan 2016 11:29:27 +0100 Subject: [PATCH 0046/1925] Revert removing __version__ --- nilearn/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nilearn/__init__.py b/nilearn/__init__.py index fdf53c9e1d..883527b899 100644 --- a/nilearn/__init__.py +++ b/nilearn/__init__.py @@ -39,7 +39,7 @@ 'image', 'input_data', 'masking', 'mass_univariate', 'plotting', 'region', 'signal'] -from .version import _check_module_dependencies +from .version import _check_module_dependencies, __version__ _check_module_dependencies() @@ -58,3 +58,5 @@ # structures # This is used in nilearn._utils.cache_mixin CHECK_CACHE_VERSION = True + +__all__ = ['__version__'] From 1c299705b1f43c8992c734ab377be681928ad1fc Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Tue, 12 Jan 2016 12:03:45 +0100 Subject: [PATCH 0047/1925] Integrate version in existing __all__ --- nilearn/__init__.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/nilearn/__init__.py b/nilearn/__init__.py index 883527b899..797382a7be 100644 --- a/nilearn/__init__.py +++ b/nilearn/__init__.py @@ -34,11 +34,6 @@ import gzip -# list all submodules available in nilearn -__all__ = ['datasets', 'decoding', 'decomposition', 'connectome', - 'image', 'input_data', 'masking', 'mass_univariate', 'plotting', - 'region', 'signal'] - from .version import _check_module_dependencies, __version__ _check_module_dependencies() @@ -59,4 +54,7 @@ # This is used in nilearn._utils.cache_mixin CHECK_CACHE_VERSION = True -__all__ = ['__version__'] +# list all submodules available in nilearn and version +__all__ = ['datasets', 'decoding', 'decomposition', 'connectome', + 'image', 'input_data', 'masking', 'mass_univariate', 'plotting', + 'region', 
'signal', '__version__'] From 63e16a933bcfc7576a4085750d10438f90d14819 Mon Sep 17 00:00:00 2001 From: banilo Date: Tue, 12 Jan 2016 22:36:49 +0100 Subject: [PATCH 0048/1925] added Loic's comment --- nilearn/plotting/tests/test_find_cuts.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nilearn/plotting/tests/test_find_cuts.py b/nilearn/plotting/tests/test_find_cuts.py index a34a529b0a..45cb8aa976 100644 --- a/nilearn/plotting/tests/test_find_cuts.py +++ b/nilearn/plotting/tests/test_find_cuts.py @@ -45,10 +45,13 @@ def test_find_cut_coords(): # regression test (cf. #922) # pseudo-4D images as input (i.e., X, Y, Z, 1) # previously raised "ValueError: too many values to unpack" - data = np.ones((36, 43, 36))[..., np.newaxis] + rng = np.random.RandomState(42) + data_3d = rng.randn(10, 10, 10) + data_4d = data_3d[..., np.newaxis] affine = np.eye(4) - img = nibabel.Nifti1Image(data, affine) - x, y, z = find_xyz_cut_coords(img, activation_threshold=1.1) + img_3d = nibabel.Nifti1Image(data_3d, affine) + img_4d = nibabel.Nifti1Image(data_4d, affine) + assert_equal(find_xyz_cut_coords(img_3d), find_xyz_cut_coords(img_4d)) def test_find_cut_slices(): From ce1056fa77ab4967130cfd924587d9b2bbdaa244 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= Date: Wed, 13 Jan 2016 09:52:11 +0100 Subject: [PATCH 0049/1925] Add -e option to echo to actually output a new line --- continuous_integration/flake8_diff.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/continuous_integration/flake8_diff.sh b/continuous_integration/flake8_diff.sh index 985355498e..a4b17f635f 100755 --- a/continuous_integration/flake8_diff.sh +++ b/continuous_integration/flake8_diff.sh @@ -28,7 +28,7 @@ echo Common ancestor is: git show $COMMIT --stat -echo '\nRunning flake8 on the diff in the range'\ +echo -e '\nRunning flake8 on the diff in the range'\ "$(git rev-parse --short $COMMIT)..$(git rev-parse --short @)" \ "($(git rev-list $COMMIT.. 
| wc -l) commit(s)):" echo '--------------------------------------------------------------------------------' From 8932c18ae4751fc1e675c410c577700ac4f20be0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= Date: Wed, 13 Jan 2016 13:27:24 +0100 Subject: [PATCH 0050/1925] Cosmetic tweaks to flake8_diff.sh --- continuous_integration/flake8_diff.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/continuous_integration/flake8_diff.sh b/continuous_integration/flake8_diff.sh index a4b17f635f..28857ba152 100755 --- a/continuous_integration/flake8_diff.sh +++ b/continuous_integration/flake8_diff.sh @@ -16,6 +16,10 @@ git remote set-branches --add $REMOTE master git fetch $REMOTE master REMOTE_MASTER_REF="$REMOTE/master" +echo -e '\nLast 2 commits:' +echo '--------------------------------------------------------------------------------' +git log -2 --pretty=short + # Find common ancestor between HEAD and remotes/$REMOTE/master COMMIT=$(git merge-base @ $REMOTE_MASTER_REF) || \ echo "No common ancestor found for $(git show @ -q) and $(git show $REMOTE_MASTER_REF -q)" @@ -24,9 +28,9 @@ if [ -z "$COMMIT" ]; then exit 1 fi -echo Common ancestor is: -git show $COMMIT --stat - +echo -e "\nCommon ancestor between HEAD and $REMOTE_MASTER_REF is:" +echo '--------------------------------------------------------------------------------' +git show --no-patch $COMMIT echo -e '\nRunning flake8 on the diff in the range'\ "$(git rev-parse --short $COMMIT)..$(git rev-parse --short @)" \ From 2bdbadde2c387a865f7904b45dbd2a904c219e08 Mon Sep 17 00:00:00 2001 From: Ben Cipollini Date: Mon, 16 Nov 2015 16:43:24 -0800 Subject: [PATCH 0051/1925] BF: Check that mask is fitted before accessing fitted data. --- nilearn/input_data/base_masker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nilearn/input_data/base_masker.py b/nilearn/input_data/base_masker.py index e52ee5eaae..086911274b 100644 --- a/nilearn/input_data/base_masker.py +++ b/nilearn/input_data/base_masker.py @@ -219,6 +219,7 @@ def fit_transform(self, X, y=None, confounds=None, **fit_params): def inverse_transform(self, X): """ Transform the 2D data matrix back to an image in brain space. 
""" + self._check_fitted() img = self._cache(masking.unmask)(X, self.mask_img_) # Be robust again memmapping that will create read-only arrays in # internal structures of the header: remove the memmaped array From 7863bb3f1e81579a81852cc3f4bd9fc0fbe77968 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Wed, 13 Jan 2016 21:41:22 +0100 Subject: [PATCH 0052/1925] Documentation directories changed from 01_folder to folder --- .../connectome_extraction.rst | 0 .../functional_connectomes.rst | 0 doc/{03_connectivity => connectivity}/index.rst | 0 doc/{03_connectivity => connectivity}/parcellating.rst | 0 .../region_extraction.rst | 0 .../resting_state_networks.rst | 0 doc/{02_decoding => decoding}/decoding_simulated.rst | 0 doc/{02_decoding => decoding}/decoding_tutorial.rst | 2 +- doc/{02_decoding => decoding}/estimator_choice.rst | 0 doc/{02_decoding => decoding}/index.rst | 0 doc/{02_decoding => decoding}/searchlight.rst | 0 doc/{02_decoding => decoding}/space_net.rst | 0 .../data_preparation.rst | 0 .../index.rst | 0 .../manipulating_images.rst | 0 doc/{01_plotting => plotting}/index.rst | 0 doc/user_guide.rst | 8 ++++---- 17 files changed, 5 insertions(+), 5 deletions(-) rename doc/{03_connectivity => connectivity}/connectome_extraction.rst (100%) rename doc/{03_connectivity => connectivity}/functional_connectomes.rst (100%) rename doc/{03_connectivity => connectivity}/index.rst (100%) rename doc/{03_connectivity => connectivity}/parcellating.rst (100%) rename doc/{03_connectivity => connectivity}/region_extraction.rst (100%) rename doc/{03_connectivity => connectivity}/resting_state_networks.rst (100%) rename doc/{02_decoding => decoding}/decoding_simulated.rst (100%) rename doc/{02_decoding => decoding}/decoding_tutorial.rst (99%) rename doc/{02_decoding => decoding}/estimator_choice.rst (100%) rename doc/{02_decoding => decoding}/index.rst (100%) rename doc/{02_decoding => decoding}/searchlight.rst (100%) rename doc/{02_decoding => decoding}/space_net.rst (100%) rename doc/{04_manipulating_images => manipulating_images}/data_preparation.rst (100%) rename doc/{04_manipulating_images => manipulating_images}/index.rst (100%) rename doc/{04_manipulating_images => manipulating_images}/manipulating_images.rst (100%) rename doc/{01_plotting => plotting}/index.rst (100%) diff --git a/doc/03_connectivity/connectome_extraction.rst b/doc/connectivity/connectome_extraction.rst similarity index 100% rename from doc/03_connectivity/connectome_extraction.rst rename to doc/connectivity/connectome_extraction.rst diff --git a/doc/03_connectivity/functional_connectomes.rst b/doc/connectivity/functional_connectomes.rst similarity index 100% rename from doc/03_connectivity/functional_connectomes.rst rename to doc/connectivity/functional_connectomes.rst diff --git a/doc/03_connectivity/index.rst b/doc/connectivity/index.rst similarity index 100% rename from doc/03_connectivity/index.rst rename to doc/connectivity/index.rst diff --git a/doc/03_connectivity/parcellating.rst b/doc/connectivity/parcellating.rst similarity index 100% rename from doc/03_connectivity/parcellating.rst rename to doc/connectivity/parcellating.rst diff --git a/doc/03_connectivity/region_extraction.rst b/doc/connectivity/region_extraction.rst similarity index 100% rename from doc/03_connectivity/region_extraction.rst rename to doc/connectivity/region_extraction.rst diff --git a/doc/03_connectivity/resting_state_networks.rst b/doc/connectivity/resting_state_networks.rst similarity index 100% rename from 
doc/03_connectivity/resting_state_networks.rst rename to doc/connectivity/resting_state_networks.rst diff --git a/doc/02_decoding/decoding_simulated.rst b/doc/decoding/decoding_simulated.rst similarity index 100% rename from doc/02_decoding/decoding_simulated.rst rename to doc/decoding/decoding_simulated.rst diff --git a/doc/02_decoding/decoding_tutorial.rst b/doc/decoding/decoding_tutorial.rst similarity index 99% rename from doc/02_decoding/decoding_tutorial.rst rename to doc/decoding/decoding_tutorial.rst index 7671e4e1b0..3940ceb128 100644 --- a/doc/02_decoding/decoding_tutorial.rst +++ b/doc/decoding/decoding_tutorial.rst @@ -320,7 +320,7 @@ permutation testing on the labels, with .. topic:: **Putting it all together** The :ref:`ROI-based decoding example - ` does a decoding analysis per + ` does a decoding analysis per mask, giving the f1-score of the prediction for each object. It uses all the notions presented above, with ``for`` loop to iterate diff --git a/doc/02_decoding/estimator_choice.rst b/doc/decoding/estimator_choice.rst similarity index 100% rename from doc/02_decoding/estimator_choice.rst rename to doc/decoding/estimator_choice.rst diff --git a/doc/02_decoding/index.rst b/doc/decoding/index.rst similarity index 100% rename from doc/02_decoding/index.rst rename to doc/decoding/index.rst diff --git a/doc/02_decoding/searchlight.rst b/doc/decoding/searchlight.rst similarity index 100% rename from doc/02_decoding/searchlight.rst rename to doc/decoding/searchlight.rst diff --git a/doc/02_decoding/space_net.rst b/doc/decoding/space_net.rst similarity index 100% rename from doc/02_decoding/space_net.rst rename to doc/decoding/space_net.rst diff --git a/doc/04_manipulating_images/data_preparation.rst b/doc/manipulating_images/data_preparation.rst similarity index 100% rename from doc/04_manipulating_images/data_preparation.rst rename to doc/manipulating_images/data_preparation.rst diff --git a/doc/04_manipulating_images/index.rst b/doc/manipulating_images/index.rst similarity index 100% rename from doc/04_manipulating_images/index.rst rename to doc/manipulating_images/index.rst diff --git a/doc/04_manipulating_images/manipulating_images.rst b/doc/manipulating_images/manipulating_images.rst similarity index 100% rename from doc/04_manipulating_images/manipulating_images.rst rename to doc/manipulating_images/manipulating_images.rst diff --git a/doc/01_plotting/index.rst b/doc/plotting/index.rst similarity index 100% rename from doc/01_plotting/index.rst rename to doc/plotting/index.rst diff --git a/doc/user_guide.rst b/doc/user_guide.rst index 50ca03f7b7..1cb2be5cce 100644 --- a/doc/user_guide.rst +++ b/doc/user_guide.rst @@ -15,10 +15,10 @@ User guide: table of contents :numbered: introduction.rst - 02_decoding/index.rst - 03_connectivity/index.rst - 01_plotting/index.rst - 04_manipulating_images/index.rst + decoding/index.rst + connectivity/index.rst + plotting/index.rst + manipulating_images/index.rst building_blocks/index.rst modules/reference.rst From 7afac2132bb4680a3f9900302bb2f78101efef29 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Wed, 13 Jan 2016 22:18:39 +0100 Subject: [PATCH 0053/1925] Small correction in DictLearn documentation --- doc/connectivity/resting_state_networks.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/connectivity/resting_state_networks.rst b/doc/connectivity/resting_state_networks.rst index ea9c75457d..57812421a4 100644 --- a/doc/connectivity/resting_state_networks.rst +++ b/doc/connectivity/resting_state_networks.rst 
@@ -113,7 +113,7 @@ good extracted maps. Applying DictLearning --------------------- -:class:'DictLearning' is a ready-to-use class with the same interface as CanICA. +:class:`DictLearning` is a ready-to-use class with the same interface as CanICA. Sparsity of output map is controlled by a parameter alpha: using a larger alpha yields sparser maps. From 8f98754bee50342d4ca519d0191c2c7b46964d64 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Thu, 14 Jan 2016 01:45:37 +0100 Subject: [PATCH 0054/1925] Fix problem of small radius --- nilearn/input_data/nifti_spheres_masker.py | 23 +++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/nilearn/input_data/nifti_spheres_masker.py b/nilearn/input_data/nifti_spheres_masker.py index 239b95930f..0a3aeaf565 100644 --- a/nilearn/input_data/nifti_spheres_masker.py +++ b/nilearn/input_data/nifti_spheres_masker.py @@ -31,13 +31,25 @@ def _apply_mask_and_get_affinity(seeds, niimg, radius, allow_overlap, target_shape=niimg.shape[:3], interpolation='nearest') mask, _ = masking._load_mask_img(mask_img) - mask_coords = list(np.where(mask != 0)) + mask_coords = list(zip(*np.where(mask != 0))) X = masking._apply_mask_fmri(niimg, mask_img) else: - mask_coords = list(zip(*np.ndindex(niimg.shape[:3]))) + mask_coords = list(np.ndindex(niimg.shape[:3])) X = niimg.get_data().reshape([-1, niimg.shape[3]]).T - mask_coords = np.asarray(mask_coords) + + # For each seed, get coordinates of nearest voxel + nearests = [] + for sx, sy, sz in seeds: + nearest = np.round(coord_transform(sx, sy, sz, np.linalg.inv(affine))) + nearest = nearest.astype(int) + nearest = (nearest[0], nearest[1], nearest[2]) + try: + nearests.append(mask_coords.index(nearest)) + except ValueError: + nearests.append(None) + + mask_coords = np.asarray(list(zip(*mask_coords))) mask_coords = coord_transform(mask_coords[0], mask_coords[1], mask_coords[2], affine) mask_coords = np.asarray(mask_coords).T @@ -50,8 +62,13 @@ def _apply_mask_and_get_affinity(seeds, niimg, radius, allow_overlap, clf = neighbors.NearestNeighbors(radius=radius) A = clf.fit(mask_coords).radius_neighbors_graph(seeds) + for i, nearest in enumerate(nearests): + if nearest is None: + continue + A[i, nearest] = True A = A.tolil() # Include selfs + mask_coords = mask_coords.astype(int).tolist() for i, seed in enumerate(seeds): try: From 945605fa3cc996adb26968fc7f2e2e8875c9ca83 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Fri, 15 Jan 2016 10:46:12 +0100 Subject: [PATCH 0055/1925] Corrected missing reference in decoding documentation file --- doc/decoding/decoding_tutorial.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/decoding/decoding_tutorial.rst b/doc/decoding/decoding_tutorial.rst index 3940ceb128..7671e4e1b0 100644 --- a/doc/decoding/decoding_tutorial.rst +++ b/doc/decoding/decoding_tutorial.rst @@ -320,7 +320,7 @@ permutation testing on the labels, with .. topic:: **Putting it all together** The :ref:`ROI-based decoding example - ` does a decoding analysis per + ` does a decoding analysis per mask, giving the f1-score of the prediction for each object. It uses all the notions presented above, with ``for`` loop to iterate From 80028d9b576be1247f9961a0095bc8f44debf62b Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Wed, 16 Dec 2015 22:04:25 +0100 Subject: [PATCH 0056/1925] Trying to address Issue #904. 
Rewrite example to use dict learning
---
 doc/connectivity/region_extraction.rst       |  32 ++--
 doc/connectivity/resting_state_networks.rst  |  10 +-
 .../plot_extract_regions_dictlearn_maps.py   | 150 ++++++++++++++++++
 3 files changed, 171 insertions(+), 21 deletions(-)
 create mode 100644 examples/connectivity/plot_extract_regions_dictlearn_maps.py

diff --git a/doc/connectivity/region_extraction.rst b/doc/connectivity/region_extraction.rst
index 6f5ca2ae34..9234b58aa9 100644
--- a/doc/connectivity/region_extraction.rst
+++ b/doc/connectivity/region_extraction.rst
@@ -7,9 +7,9 @@ Region Extraction for better brain parcellations
 .. topic:: **Page summary**
 
    This section shows how to use Region Extractor to extract each connected
-   brain regions/components into a separate brain activation regions and also
+   brain regions/components into a separate brain activation region and also
    shows how to learn functional connectivity interactions between each
-   separate regions.
+   separate region.
 
 .. contents:: **Contents**
     :local:
@@ -40,13 +40,13 @@ datasets.
 
 .. currentmodule:: nilearn.decomposition
 
-Data decomposition using Canonical ICA
-======================================
+Brain maps using Dictionary Learning
+====================================
 
-Here, we use :class:`CanICA`, a multi subject model to decompose previously
-fetched multi subjects datasets. We do this by setting the parameters in the
-object and calling fit on the functional filenames without necessarily
-converting each filename to Nifti1Image object.
+Here, we use :class:`DictLearning`, a multi-subject model to decompose previously
+fetched multi-subject datasets into functionally defined maps. We do this by setting
+the parameters in the object and calling fit on the functional filenames without
+necessarily converting each filename to a Nifti1Image object.
 
 .. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py
@@ -55,12 +55,12 @@ converting each filename to Nifti1Image object.
 
 .. currentmodule:: nilearn.plotting
 
-Visualization of Canonical ICA maps
-===================================
+Visualization of Dictionary Learning maps
+=========================================
 
-Showing ICA maps stored in components_img using nilearn plotting utilities.
+Showing maps stored in components_img using nilearn plotting utilities.
 Here, we use :func:`plot_prob_atlas` for easy visualization of 4D atlas maps
-onto the anatomical standard template. Each ICA map is displayed in different
+onto the anatomical standard template. Each map is displayed in a different
 color and colors are random and automatically picked.
 
 .. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py
@@ -73,11 +73,11 @@ color and colors are random and automatically picked.
 
 .. currentmodule:: nilearn.regions
 
-Region Extraction with CanICA maps
-==================================
+Region Extraction with Dictionary Learning maps
+===============================================
 
 We use object :class:`RegionExtractor` for extracting brain connected regions
-from ICA maps into separated brain activation regions with automatic
+from dictionary maps into separate brain activation regions with automatic
 thresholding strategy selected as thresholding_strategy='ratio_n_voxels'.
We use thresholding strategy to first get foreground information present in the maps and then followed by robust region extraction on foreground information using @@ -166,7 +166,7 @@ Validating results ================== Showing only Default Mode Network (DMN) regions before and after region -extraction by manually identifying the index of DMN in ICA decomposed maps. +extraction by manually identifying the index of DMN in decomposed maps. Left image displays the DMN regions without region extraction and right image displays the DMN regions after region extraction. Here, we can validate that diff --git a/doc/connectivity/resting_state_networks.rst b/doc/connectivity/resting_state_networks.rst index 57812421a4..7819cda1c1 100644 --- a/doc/connectivity/resting_state_networks.rst +++ b/doc/connectivity/resting_state_networks.rst @@ -36,7 +36,7 @@ on data loading `): .. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py :start-after: # First we load the ADHD200 data - :end-before: # Here we apply CanICA on the data + :end-before: #################################################################### Applying CanICA --------------- @@ -49,7 +49,7 @@ and then fit it on the data. .. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py :start-after: # Here we apply CanICA on the data - :end-before: # To visualize we plot the outline of all components on one figure + :end-before: #################################################################### The components estimated are found as the `components_` attribute of the object. @@ -63,7 +63,7 @@ each component separately. .. literalinclude:: ../../examples/03_connectivity/plot_canica_resting_state.py :start-after: # To visualize we plot the outline of all components on one figure - :end-before: # Finally, we plot the map for each ICA component separately + :end-before: #################################################################### .. figure:: ../auto_examples/03_connectivity/images/sphx_glr_plot_canica_resting_state_001.png :align: center @@ -119,13 +119,13 @@ larger alpha yields sparser maps. .. literalinclude:: ../../examples/03_connectivity/plot_compare_resting_state_decomposition.py :start-after: # Dictionary learning - :end-before: # CanICA + :end-before: ############################################################################### We can fit both estimators to compare them .. literalinclude:: ../../examples/03_connectivity/plot_compare_resting_state_decomposition.py :start-after: # Fit both estimators - :end-before: # Visualize the results + :end-before: ############################################################################### Visualizing the results ----------------------- diff --git a/examples/connectivity/plot_extract_regions_dictlearn_maps.py b/examples/connectivity/plot_extract_regions_dictlearn_maps.py new file mode 100644 index 0000000000..8f973302cc --- /dev/null +++ b/examples/connectivity/plot_extract_regions_dictlearn_maps.py @@ -0,0 +1,150 @@ +""" +Regions extraction using Dictionary Learning and functional connectomes +======================================================================= + +This example shows how to use :class:`nilearn.regions.RegionExtractor` +to extract connected brain regions from whole brain maps decomposed +using dictionary learning and use them to build a functional connectome. + +We used 20 resting state ADHD functional datasets from :func:`nilearn.datasets.fetch_adhd` +and :class:`nilearn.decomposition.DictLearning` for set of brain atlas maps. 
+ +Please see the related documentation of :class:`nilearn.regions.RegionExtractor` +for more details. +""" + +################################################################################ +# Fetching ADHD resting state functional datasets by loading from datasets +# utilities +from nilearn import datasets + +adhd_dataset = datasets.fetch_adhd(n_subjects=20) +func_filenames = adhd_dataset.func +confounds = adhd_dataset.confounds + +################################################################################ +# Import dictionary learning algorithm from decomposition module and call the +# object and fit the model to the functional datasets +from nilearn.decomposition import DictLearning + +# Initialize DictLearning object and parameters +dict_learn = DictLearning(n_components=5, smoothing_fwhm=6., + memory="nilearn_cache", memory_level=2, + random_state=0) +# Fit to the data +dict_learn.fit(func_filenames) +# Dictionary maps +components_img = dict_learn.masker_.inverse_transform(dict_learn.components_) + +# Visualization +# Show maps by using plotting utilities +from nilearn import plotting + +plotting.plot_prob_atlas(components_img, view_type='filled_contours', + title='Dictionary Learning maps') + +################################################################################ +# Extracting regions from dictionary maps and then timeseries signals from those +# regions, both can be done by importing Region Extractor from regions module. +# threshold=0.5 indicates that we keep nominal of amount nonzero voxels across all +# maps, less the threshold means that more intense non-voxels will be survived. +from nilearn.regions import RegionExtractor + +extractor = RegionExtractor(components_img, threshold=0.5, + thresholding_strategy='ratio_n_voxels', + extractor='local_regions', + standardize=True, min_region_size=1350) +# Just call fit() to process for regions extraction +extractor.fit() +# Extracted regions are stored in regions_img_ +regions_extracted_img = extractor.regions_img_ +# Each region index is stored in index_ +regions_index = extractor.index_ +# Total number of regions extracted +n_regions_extracted = regions_extracted_img.shape[-1] + +# Visualization +# Show region extraction results +title = ('%d regions are extracted from %d components.' + '\nEach separate color of region indicates extracted region' + % (n_regions_extracted, 5)) +plotting.plot_prob_atlas(regions_extracted_img, view_type='filled_contours', + title=title) + +################################################################################ +# Computing correlation coefficients. +# First we need to do subjects timeseries signals extraction and then estimating +# correlation matrices on those signals. +# To extract timeseries signals, we call transform() from RegionExtractor object +# onto each subject functional data stored in func_filenames. 
+# To estimate correlation matrices we import connectome utilities from nilearn
+from nilearn.connectome import ConnectivityMeasure
+
+correlations = []
+# Initializing ConnectivityMeasure object with kind='correlation'
+connectome_measure = ConnectivityMeasure(kind='correlation')
+for filename, confound in zip(func_filenames, confounds):
+    # call transform from RegionExtractor object to extract timeseries signals
+    timeseries_each_subject = extractor.transform(filename, confounds=confound)
+    # call fit_transform from ConnectivityMeasure object
+    correlation = connectome_measure.fit_transform([timeseries_each_subject])
+    # saving each subject's correlation matrix to correlations
+    correlations.append(correlation)
+
+# Mean of all correlations
+import numpy as np
+
+mean_correlations = np.mean(correlations, axis=0).reshape(n_regions_extracted,
+                                                          n_regions_extracted)
+
+# Visualization
+# Showing mean correlation results
+# Import image utilities for operating on the 4th dimension
+import matplotlib.pyplot as plt
+from nilearn import image
+
+regions_imgs = image.iter_img(regions_extracted_img)
+coords_connectome = [plotting.find_xyz_cut_coords(img) for img in regions_imgs]
+title = 'Correlation interactions between %d regions' % n_regions_extracted
+plt.figure()
+plt.imshow(mean_correlations, interpolation="nearest",
+           vmax=1, vmin=-1, cmap=plt.cm.bwr)
+plt.colorbar()
+plt.title(title)
+plotting.plot_connectome(mean_correlations, coords_connectome,
+                         edge_threshold='90%', title=title)
+
+################################################################################
+# Showing Default Mode Network (DMN) regions before and after region extraction
+# by manually identifying the index of DMN in Dictionary Learning decomposed
+# components
+from nilearn._utils.compat import izip
+
+# First we plot the DMN without region extraction, interested only in index=4
+img = image.index_img(components_img, 4)
+coords = plotting.find_xyz_cut_coords(img)
+display = plotting.plot_stat_map(img, cut_coords=((0, -52, 29)),
+                                 colorbar=False, title='Dictionary map: DMN mode')
+
+# Now, we plot DMN after region extraction to show that connected regions are
+# nicely separated. Each extracted brain region is shown in a separate color
+
+# For this, we take the indices of all the extracted regions that come from
+# the original dictionary map 4.
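+# A short note, assuming the RegionExtractor behavior described in its
+# documentation: index_ holds, for each extracted region, the index of the
+# map it was extracted from, so np.where(np.array(regions_index) == 4)
+# selects every region that originated from map number 4.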
+regions_indices_of_map3 = np.where(np.array(regions_index) == 4)

display = plotting.plot_anat(cut_coords=((0, -52, 29)), title='Extracted regions in DMN mode')

# Now add as an overlay by looping over all the regions for right
# temporoparietal function, posterior cingulate cortex, medial prefrontal
# cortex, left temporoparietal junction
color_list = [[0., 1., 0.29, 1.], [0., 1., 0.54, 1.],
              [0., 1., 0.78, 1.], [0., 0.96, 1., 1.],
              [0., 0.73, 1., 1.], [0., 0.47, 1., 1.],
              [0., 0.22, 1., 1.], [0.01, 0., 1., 1.],
              [0.26, 0., 1., 1.]]
for each_index_of_map3, color in izip(regions_indices_of_map3[0], color_list):
    display.add_overlay(image.index_img(regions_extracted_img, each_index_of_map3),
                        cmap=plotting.cm.alpha_cmap(color))

plotting.show()
From dff11b30a99066b556f92e600fe283429e045dd0 Mon Sep 17 00:00:00 2001
From: KamalakerDadi
Date: Thu, 17 Dec 2015 15:49:15 +0100
Subject: [PATCH 0057/1925] Added comment saying can also be used for CanICA
 maps

---
 examples/connectivity/plot_extract_regions_dictlearn_maps.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/examples/connectivity/plot_extract_regions_dictlearn_maps.py b/examples/connectivity/plot_extract_regions_dictlearn_maps.py
index 8f973302cc..d0d22baf2c 100644
--- a/examples/connectivity/plot_extract_regions_dictlearn_maps.py
+++ b/examples/connectivity/plot_extract_regions_dictlearn_maps.py
@@ -9,6 +9,10 @@
 We used 20 resting state ADHD functional datasets from :func:`nilearn.datasets.fetch_adhd`
 and :class:`nilearn.decomposition.DictLearning` for set of brain atlas maps.
 
+The same steps can also be applied to region extraction from ICA maps. In that
+case, the idea would be to replace dictionary learning with the canonical ICA
+decomposition, using :class:`nilearn.decomposition.CanICA`.
+
 Please see the related documentation of :class:`nilearn.regions.RegionExtractor`
 for more details.
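(A rough sketch of that substitution, assuming the same data and parameters as
the dictionary-learning code above; the call pattern mirrors the CanICA example
shown elsewhere in this series. Only the decomposition step changes:

    from nilearn.decomposition import CanICA

    canica = CanICA(n_components=5, smoothing_fwhm=6.,
                    memory="nilearn_cache", memory_level=2,
                    random_state=0)
    canica.fit(func_filenames)
    components_img = canica.masker_.inverse_transform(canica.components_)

The region extraction, connectome and plotting steps then apply unchanged.)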
""" From eba43befa23ec6a2555c1d0ca2467a4ee8b5c8de Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Tue, 12 Jan 2016 15:01:55 +0100 Subject: [PATCH 0058/1925] Addressed comments: restructured titles, DMN to one specific network --- .../plot_extract_regions_dictlearn_maps.py | 49 +++++++++---------- 1 file changed, 24 insertions(+), 25 deletions(-) diff --git a/examples/connectivity/plot_extract_regions_dictlearn_maps.py b/examples/connectivity/plot_extract_regions_dictlearn_maps.py index d0d22baf2c..058c43c19a 100644 --- a/examples/connectivity/plot_extract_regions_dictlearn_maps.py +++ b/examples/connectivity/plot_extract_regions_dictlearn_maps.py @@ -27,29 +27,32 @@ confounds = adhd_dataset.confounds ################################################################################ +# Extracting resting-state networks with DictionaryLearning + # Import dictionary learning algorithm from decomposition module and call the # object and fit the model to the functional datasets from nilearn.decomposition import DictLearning -# Initialize DictLearning object and parameters +# Initialize DictLearning object dict_learn = DictLearning(n_components=5, smoothing_fwhm=6., memory="nilearn_cache", memory_level=2, random_state=0) # Fit to the data dict_learn.fit(func_filenames) -# Dictionary maps +# Resting state networks/maps components_img = dict_learn.masker_.inverse_transform(dict_learn.components_) -# Visualization -# Show maps by using plotting utilities +# Visualization of resting state networks +# Show networks using plotting utilities from nilearn import plotting plotting.plot_prob_atlas(components_img, view_type='filled_contours', title='Dictionary Learning maps') ################################################################################ -# Extracting regions from dictionary maps and then timeseries signals from those -# regions, both can be done by importing Region Extractor from regions module. +# Extracting regions from networks + +# Import Region Extractor algorithm from regions module # threshold=0.5 indicates that we keep nominal of amount nonzero voxels across all # maps, less the threshold means that more intense non-voxels will be survived. from nilearn.regions import RegionExtractor @@ -67,8 +70,7 @@ # Total number of regions extracted n_regions_extracted = regions_extracted_img.shape[-1] -# Visualization -# Show region extraction results +# Visualization of region extraction results title = ('%d regions are extracted from %d components.' '\nEach separate color of region indicates extracted region' % (n_regions_extracted, 5)) @@ -76,7 +78,8 @@ title=title) ################################################################################ -# Computing correlation coefficients. +# Computing correlation coefficients and plotting a connectome + # First we need to do subjects timeseries signals extraction and then estimating # correlation matrices on those signals. 
# To extract timeseries signals, we call transform() from RegionExtractor object @@ -102,8 +105,7 @@ n_regions_extracted) # Visualization -# Showing mean correlation results -# Import image utilities in utilising to operate on 4th dimension +# Plotting connectome results import matplotlib.pyplot as plt from nilearn import image @@ -119,35 +121,32 @@ edge_threshold='90%', title=title) ################################################################################ -# Showing Default Mode Network (DMN) regions before and after region extraction -# by manually identifying the index of DMN in Dictionary Learning decomposed -# components -from nilearn._utils.compat import izip +# Plotting regions extracted for only one specific network -# First we plot DMN without region extraction, interested in only index=[4] +# First, we plot a network of index=4 without region extraction (left plot) img = image.index_img(components_img, 4) coords = plotting.find_xyz_cut_coords(img) -display = plotting.plot_stat_map(img, cut_coords=((0, -52, 29)), +display = plotting.plot_stat_map(img, cut_coords=coords, colorbar=False, title='Dictionary map: DMN mode') -# Now, we plot DMN after region extraction to show that connected regions are -# nicely separated. Each brain extracted region is indicated with separate color +# Now, we plot (right side) same network after region extraction to show that +# connected regions are nicely seperated. +# Each brain extracted region is identified as separate color. # For this, we take the indices of the all regions extracted related to original -# dictionary map 4. +# network given as 4. regions_indices_of_map3 = np.where(np.array(regions_index) == 4) -display = plotting.plot_anat(cut_coords=((0, -52, 29)), title='Extracted regions in DMN mode') +display = plotting.plot_anat(cut_coords=coords, title='Extracted regions in DMN mode') -# Now add as an overlay by looping over all the regions for right -# temporoparietal function, posterior cingulate cortex, medial prefrontal -# cortex, left temporoparietal junction +# Now add as an overlay by looping over all the regions of index 4 +# color list is random (you can choose your own color) color_list = [[0., 1., 0.29, 1.], [0., 1., 0.54, 1.], [0., 1., 0.78, 1.], [0., 0.96, 1., 1.], [0., 0.73, 1., 1.], [0., 0.47, 1., 1.], [0., 0.22, 1., 1.], [0.01, 0., 1., 1.], [0.26, 0., 1., 1.]] -for each_index_of_map3, color in izip(regions_indices_of_map3[0], color_list): +for each_index_of_map3, color in zip(regions_indices_of_map3[0], color_list): display.add_overlay(image.index_img(regions_extracted_img, each_index_of_map3), cmap=plotting.cm.alpha_cmap(color)) From 7992b3e26c271b7929800d600d0108585152f02c Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Wed, 13 Jan 2016 13:35:35 +0100 Subject: [PATCH 0059/1925] Documentation improvement in dictionary learning example --- doc/connectivity/region_extraction.rst | 12 ++++++------ .../plot_extract_regions_dictlearn_maps.py | 7 ++++--- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/doc/connectivity/region_extraction.rst b/doc/connectivity/region_extraction.rst index 9234b58aa9..27ec169e24 100644 --- a/doc/connectivity/region_extraction.rst +++ b/doc/connectivity/region_extraction.rst @@ -6,8 +6,8 @@ Region Extraction for better brain parcellations .. 
topic:: **Page summary** - This section shows how to use Region Extractor to extract each connected - brain regions/components into a separate brain activation region and also + This section shows how to use Region Extractor to extract brain connected + regions/components into a separate brain activation region and also shows how to learn functional connectivity interactions between each separate region. @@ -43,10 +43,10 @@ datasets. Brain maps using Dictionary Learning ==================================== -Here, we use :class:`DictLearning`, a multi subject model to decompose previously -fetched multi subjects datasets into functionally defined maps. We do this by setting -the parameters in the object and calling fit on the functional filenames without -necessarily converting each filename to Nifti1Image object. +Here, we use :class:`DictLearning`, a multi subject model to decompose multi +subjects fMRI datasets into functionally defined maps. We do this by setting +the parameters in the object and calling fit on the filenames of datasets without +necessarily converting each file to Nifti1Image object. .. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py diff --git a/examples/connectivity/plot_extract_regions_dictlearn_maps.py b/examples/connectivity/plot_extract_regions_dictlearn_maps.py index 058c43c19a..a72ee28069 100644 --- a/examples/connectivity/plot_extract_regions_dictlearn_maps.py +++ b/examples/connectivity/plot_extract_regions_dictlearn_maps.py @@ -3,7 +3,7 @@ ======================================================================= This example shows how to use :class:`nilearn.regions.RegionExtractor` -to extract connected brain regions from whole brain maps decomposed +to extract spatially constrained brain regions from whole brain maps decomposed using dictionary learning and use them to build a functional connectome. We used 20 resting state ADHD functional datasets from :func:`nilearn.datasets.fetch_adhd` @@ -127,7 +127,7 @@ img = image.index_img(components_img, 4) coords = plotting.find_xyz_cut_coords(img) display = plotting.plot_stat_map(img, cut_coords=coords, - colorbar=False, title='Dictionary map: DMN mode') + colorbar=False, title='Showing one specific network') # Now, we plot (right side) same network after region extraction to show that # connected regions are nicely seperated. @@ -137,7 +137,8 @@ # network given as 4. 
regions_indices_of_map3 = np.where(np.array(regions_index) == 4) -display = plotting.plot_anat(cut_coords=coords, title='Extracted regions in DMN mode') +display = plotting.plot_anat(cut_coords=coords, + title='Extracted regions in one specific network') # Now add as an overlay by looping over all the regions of index 4 # color list is random (you can choose your own color) From 0ddfa30921b8d1627da1811c0ef72e9668684675 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Fri, 15 Jan 2016 16:47:50 +0100 Subject: [PATCH 0060/1925] Fixing conflicts and shifted megatrawls example to 01_plotting --- doc/connectivity/region_extraction.rst | 72 ++++----- .../plot_visualize_megatrawls_netmats.py | 0 .../plot_extract_regions_canica_maps.py | 149 ------------------ .../plot_extract_regions_dictlearn_maps.py | 0 4 files changed, 36 insertions(+), 185 deletions(-) rename examples/{manipulating_visualizing => 01_plotting}/plot_visualize_megatrawls_netmats.py (100%) delete mode 100644 examples/03_connectivity/plot_extract_regions_canica_maps.py rename examples/{connectivity => 03_connectivity}/plot_extract_regions_dictlearn_maps.py (100%) diff --git a/doc/connectivity/region_extraction.rst b/doc/connectivity/region_extraction.rst index 27ec169e24..daf5b1188f 100644 --- a/doc/connectivity/region_extraction.rst +++ b/doc/connectivity/region_extraction.rst @@ -34,7 +34,7 @@ which is already preprocessed and publicly available at datasets. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py :start-after: # utilities :end-before: ################################################################################ @@ -43,15 +43,15 @@ datasets. Brain maps using Dictionary Learning ==================================== -Here, we use :class:`DictLearning`, a multi subject model to decompose multi +Here, we use object :class:`DictLearning`, a multi subject model to decompose multi subjects fMRI datasets into functionally defined maps. We do this by setting -the parameters in the object and calling fit on the filenames of datasets without +the parameters and calling the object fit on the filenames of datasets without necessarily converting each file to Nifti1Image object. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py - :start-after: # decomposition module - :end-before: # Visualization +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py + :start-after: # object and fit the model to the functional datasets + :end-before: # Visualization of resting state networks .. currentmodule:: nilearn.plotting @@ -63,12 +63,12 @@ Here, we use :func:`plot_prob_atlas` for easy visualization of 4D atlas maps onto the anatomical standard template. Each map is displayed in different color and colors are random and automatically picked. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py - :start-after: # Show ICA maps by using plotting utilities +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py + :start-after: # Show networks using plotting utilities :end-before: ################################################################################ -.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_001.png - :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html +.. 
image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_001.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html :scale: 60 .. currentmodule:: nilearn.regions @@ -93,9 +93,9 @@ regions. We control the small spurious regions size by thresholding in voxel uni to adapt well to the resolution of the image. Please see the documentation of nilearn.regions.connected_regions for more details. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py - :start-after: # regions, both can be done by importing Region Extractor from regions module - :end-before: # Visualization +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py + :start-after: # maps, less the threshold means that more intense non-voxels will be survived. + :end-before: # Visualization of region extraction results .. currentmodule:: nilearn.plotting @@ -107,12 +107,12 @@ for visualizing extracted regions on a standard template. Each extracted brain region is assigned a color and as you can see that visual cortex area is extracted quite nicely into each hemisphere. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py - :start-after: # Show region extraction results +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py + :start-after: # Visualization of region extraction results :end-before: ################################################################################ -.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_002.png - :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_002.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html :scale: 60 .. currentmodule:: nilearn.connectome @@ -133,7 +133,7 @@ shape=(176, 23) where 176 is the length of time series and 23 is the number of extracted regions. Likewise, we have a total of 20 subject specific time series signals. The third step, we compute the mean correlation across all subjects. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py :start-after: # To estimate correlation matrices we import connectome utilities from nilearn :end-before: # Visualization @@ -148,16 +148,16 @@ automatically the coordinates required, for plotting connectome relations. Left image is the correlations in a matrix form and right image is the connectivity relations to brain regions plotted using :func:`plot_connectome` -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py - :start-after: # Import image utilities in utilising to operate on 4th dimension +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py + :start-after: # Plotting connectome results :end-before: ################################################################################ -.. |matrix| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_003.png - :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html +.. 
|matrix| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_003.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html :scale: 60 -.. |connectome| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_004.png - :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html +.. |connectome| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_004.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html :scale: 60 .. centered:: |matrix| |connectome| @@ -165,22 +165,22 @@ connectivity relations to brain regions plotted using :func:`plot_connectome` Validating results ================== -Showing only Default Mode Network (DMN) regions before and after region -extraction by manually identifying the index of DMN in decomposed maps. +Showing only one specific network regions before and after region extraction. -Left image displays the DMN regions without region extraction and right image -displays the DMN regions after region extraction. Here, we can validate that -the DMN regions are nicely separated displaying each extracted region in different color. +Left image displays the regions of one specific resting network without region extraction +and right image displays the regions split apart after region extraction. Here, we can +validate that regions are nicely separated identified by each extracted region in different +color. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_canica_maps.py - :start-after: # First we plot DMN without region extraction, interested in only index=[3] +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py + :start-after: # Plotting regions extracted for only one specific network -.. |dmn| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_005.png - :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html +.. |dmn| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_005.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html :scale: 50 -.. |dmn_reg| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_canica_maps_006.png - :target: ../auto_examples/03_connectivity/plot_extract_regions_canica_maps.html +.. |dmn_reg| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_006.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html :scale: 50 .. centered:: |dmn| |dmn_reg| @@ -188,4 +188,4 @@ the DMN regions are nicely separated displaying each extracted region in differe .. 
seealso:: The full code can be found as an example: - :ref:`sphx_glr_auto_examples_03_connectivity_plot_extract_regions_canica_maps.py` + :ref:`sphx_glr_auto_examples_03_connectivity_plot_extract_regions_dictlearn_maps.py` diff --git a/examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py b/examples/01_plotting/plot_visualize_megatrawls_netmats.py similarity index 100% rename from examples/manipulating_visualizing/plot_visualize_megatrawls_netmats.py rename to examples/01_plotting/plot_visualize_megatrawls_netmats.py diff --git a/examples/03_connectivity/plot_extract_regions_canica_maps.py b/examples/03_connectivity/plot_extract_regions_canica_maps.py deleted file mode 100644 index 023131c453..0000000000 --- a/examples/03_connectivity/plot_extract_regions_canica_maps.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -Regions extraction using Canonical ICA maps and functional connectomes -====================================================================== - -This example shows how to use :class:`nilearn.regions.RegionExtractor` -to extract connected brain regions from whole brain ICA maps and -use them to estimate a connectome. - -We used 20 resting state ADHD functional datasets from :func:`nilearn.datasets.fetch_adhd` -and :class:`nilearn.decomposition.CanICA` for whole brain ICA maps. - -Please see the related documentation of :class:`nilearn.regions.RegionExtractor` -for more details. -""" - -################################################################################ -# Fetching ADHD resting state functional datasets by loading from datasets -# utilities -from nilearn import datasets - -adhd_dataset = datasets.fetch_adhd(n_subjects=20) -func_filenames = adhd_dataset.func -confounds = adhd_dataset.confounds - -################################################################################ -# Canonical ICA decomposition of functional datasets by importing CanICA from -# decomposition module -from nilearn.decomposition import CanICA - -# Initialize canica parameters -canica = CanICA(n_components=5, smoothing_fwhm=6., - memory="nilearn_cache", memory_level=2, - random_state=0) -# Fit to the data -canica.fit(func_filenames) -# ICA maps -components_img = canica.masker_.inverse_transform(canica.components_) - -# Visualization -# Show ICA maps by using plotting utilities -from nilearn import plotting - -plotting.plot_prob_atlas(components_img, view_type='filled_contours', - title='ICA components') - -################################################################################ -# Extracting regions from ICA maps and then timeseries signals from those -# regions, both can be done by importing Region Extractor from regions module. -# threshold=0.5 indicates that we keep nominal of amount nonzero voxels across all -# maps, less the threshold means that more intense non-voxels will be survived. -from nilearn.regions import RegionExtractor - -extractor = RegionExtractor(components_img, threshold=0.5, - thresholding_strategy='ratio_n_voxels', - extractor='local_regions', - standardize=True, min_region_size=1350) -# Just call fit() to process for regions extraction -extractor.fit() -# Extracted regions are stored in regions_img_ -regions_extracted_img = extractor.regions_img_ -# Each region index is stored in index_ -regions_index = extractor.index_ -# Total number of regions extracted -n_regions_extracted = regions_extracted_img.shape[-1] - -# Visualization -# Show region extraction results -title = ('%d regions are extracted from %d ICA components.' 
- '\nEach separate color of region indicates extracted region' - % (n_regions_extracted, 5)) -plotting.plot_prob_atlas(regions_extracted_img, view_type='filled_contours', - title=title) - -################################################################################ -# Computing correlation coefficients -# First we need to do subjects timeseries signals extraction and then estimating -# correlation matrices on those signals. -# To extract timeseries signals, we call transform() from RegionExtractor object -# onto each subject functional data stored in func_filenames. -# To estimate correlation matrices we import connectome utilities from nilearn -from nilearn.connectome import ConnectivityMeasure - -correlations = [] -# Initializing ConnectivityMeasure object with kind='correlation' -connectome_measure = ConnectivityMeasure(kind='correlation') -for filename, confound in zip(func_filenames, confounds): - # call transform from RegionExtractor object to extract timeseries signals - timeseries_each_subject = extractor.transform(filename, confounds=confound) - # call fit_transform from ConnectivityMeasure object - correlation = connectome_measure.fit_transform([timeseries_each_subject]) - # saving each subject correlation to correlations - correlations.append(correlation) - -# Mean of all correlations -import numpy as np - -mean_correlations = np.mean(correlations, axis=0).reshape(n_regions_extracted, - n_regions_extracted) - -# Visualization -# Showing mean correlation results -# Import image utilities in utilising to operate on 4th dimension -import matplotlib.pyplot as plt -from nilearn import image - -regions_imgs = image.iter_img(regions_extracted_img) -coords_connectome = [plotting.find_xyz_cut_coords(img) for img in regions_imgs] -title = 'Correlation interactions between %d regions' % n_regions_extracted -plt.figure() -plt.imshow(mean_correlations, interpolation="nearest", - vmax=1, vmin=-1, cmap=plt.cm.bwr) -plt.colorbar() -plt.title(title) -plotting.plot_connectome(mean_correlations, coords_connectome, - edge_threshold='90%', title=title) - -################################################################################ -# Showing Default Mode Network (DMN) regions before and after region extraction -# by manually identifying the index of DMN in ICA decomposed components -from nilearn._utils.compat import izip - -# First we plot DMN without region extraction, interested in only index=[3] -img = image.index_img(components_img, 3) -coords = plotting.find_xyz_cut_coords(img) -display = plotting.plot_stat_map(img, cut_coords=((0, -52, 29)), - colorbar=False, title='ICA map: DMN mode') - -# Now, we plot DMN after region extraction to show that connected regions are -# nicely separated. Each brain extracted region is indicated with separate color - -# For this, we take the indices of the all regions extracted related to original -# ICA map 3. 
-regions_indices_of_map3 = np.where(np.array(regions_index) == 3) - -display = plotting.plot_anat(cut_coords=((0, -52, 29)), title='Extracted regions in DMN mode') - -# Now add as an overlay by looping over all the regions for right -# temporoparietal function, posterior cingulate cortex, medial prefrontal -# cortex, left temporoparietal junction -color_list = [[0., 1., 0.29, 1.], [0., 1., 0.54, 1.], - [0., 1., 0.78, 1.], [0., 0.96, 1., 1.], - [0., 0.73, 1., 1.], [0., 0.47, 1., 1.], - [0., 0.22, 1., 1.], [0.01, 0., 1., 1.], - [0.26, 0., 1., 1.]] -for each_index_of_map3, color in izip(regions_indices_of_map3[0], color_list): - display.add_overlay(image.index_img(regions_extracted_img, each_index_of_map3), - cmap=plotting.cm.alpha_cmap(color)) - -plotting.show() diff --git a/examples/connectivity/plot_extract_regions_dictlearn_maps.py b/examples/03_connectivity/plot_extract_regions_dictlearn_maps.py similarity index 100% rename from examples/connectivity/plot_extract_regions_dictlearn_maps.py rename to examples/03_connectivity/plot_extract_regions_dictlearn_maps.py From 0e1c7b98730a88f63b2b4e00b64b12fa4a6eb6e0 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Sat, 16 Jan 2016 13:42:33 +0100 Subject: [PATCH 0061/1925] dictlearn to dictlearning in filename --- doc/connectivity/region_extraction.rst | 42 +++++++++---------- ...plot_extract_regions_dictlearning_maps.py} | 0 2 files changed, 21 insertions(+), 21 deletions(-) rename examples/03_connectivity/{plot_extract_regions_dictlearn_maps.py => plot_extract_regions_dictlearning_maps.py} (100%) diff --git a/doc/connectivity/region_extraction.rst b/doc/connectivity/region_extraction.rst index daf5b1188f..bc5981cb5e 100644 --- a/doc/connectivity/region_extraction.rst +++ b/doc/connectivity/region_extraction.rst @@ -34,7 +34,7 @@ which is already preprocessed and publicly available at datasets. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py :start-after: # utilities :end-before: ################################################################################ @@ -49,7 +49,7 @@ the parameters and calling the object fit on the filenames of datasets without necessarily converting each file to Nifti1Image object. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py :start-after: # object and fit the model to the functional datasets :end-before: # Visualization of resting state networks @@ -63,12 +63,12 @@ Here, we use :func:`plot_prob_atlas` for easy visualization of 4D atlas maps onto the anatomical standard template. Each map is displayed in different color and colors are random and automatically picked. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py :start-after: # Show networks using plotting utilities :end-before: ################################################################################ -.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_001.png - :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html +.. 
image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_001.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html :scale: 60 .. currentmodule:: nilearn.regions @@ -93,7 +93,7 @@ regions. We control the small spurious regions size by thresholding in voxel uni to adapt well to the resolution of the image. Please see the documentation of nilearn.regions.connected_regions for more details. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py :start-after: # maps, less the threshold means that more intense non-voxels will be survived. :end-before: # Visualization of region extraction results @@ -107,12 +107,12 @@ for visualizing extracted regions on a standard template. Each extracted brain region is assigned a color and as you can see that visual cortex area is extracted quite nicely into each hemisphere. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py :start-after: # Visualization of region extraction results :end-before: ################################################################################ -.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_002.png - :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_002.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html :scale: 60 .. currentmodule:: nilearn.connectome @@ -133,7 +133,7 @@ shape=(176, 23) where 176 is the length of time series and 23 is the number of extracted regions. Likewise, we have a total of 20 subject specific time series signals. The third step, we compute the mean correlation across all subjects. -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py :start-after: # To estimate correlation matrices we import connectome utilities from nilearn :end-before: # Visualization @@ -148,16 +148,16 @@ automatically the coordinates required, for plotting connectome relations. Left image is the correlations in a matrix form and right image is the connectivity relations to brain regions plotted using :func:`plot_connectome` -.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py +.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py :start-after: # Plotting connectome results :end-before: ################################################################################ -.. |matrix| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_003.png - :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html +.. |matrix| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_003.png + :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html :scale: 60 -.. |connectome| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_004.png - :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html +.. 
|connectome| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_004.png
+   :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
    :scale: 60
 
 .. centered:: |matrix| |connectome|
@@ -172,15 +172,15 @@ and right image displays the regions split apart after region extraction. Here,
 validate that regions are nicely separated identified by each extracted region in different
 color.
 
-.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearn_maps.py
+.. literalinclude:: ../../examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
    :start-after: # Plotting regions extracted for only one specific network
 
-.. |dmn| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_005.png
-   :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html
+.. |dmn| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_005.png
+   :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
    :scale: 50
 
-.. |dmn_reg| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearn_maps_006.png
-   :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearn_maps.html
+.. |dmn_reg| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_extract_regions_dictlearning_maps_006.png
+   :target: ../auto_examples/03_connectivity/plot_extract_regions_dictlearning_maps.html
    :scale: 50
 
 .. centered:: |dmn| |dmn_reg|
 
 .. seealso::
    The full code can be found as an example:
-   :ref:`sphx_glr_auto_examples_03_connectivity_plot_extract_regions_dictlearn_maps.py`
+   :ref:`sphx_glr_auto_examples_03_connectivity_plot_extract_regions_dictlearning_maps.py`
diff --git a/examples/03_connectivity/plot_extract_regions_dictlearn_maps.py b/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
similarity index 100%
rename from examples/03_connectivity/plot_extract_regions_dictlearn_maps.py
rename to examples/03_connectivity/plot_extract_regions_dictlearning_maps.py
From d578c33dd2d370612eec4765370bd49feb411ba0 Mon Sep 17 00:00:00 2001
From: KamalakerDadi
Date: Sun, 17 Jan 2016 19:29:54 +0100
Subject: [PATCH 0062/1925] Bug fixes in region extractor

- NaN values in the input images

- A bug, reported when labels are reordered, where only a list with a
  single integer was returned
---
 nilearn/_utils/segmentation.py             |  2 +-
 nilearn/image/image.py                     |  8 ++++++-
 nilearn/image/tests/test_image.py          | 23 +++++++++++++------
 nilearn/regions/region_extractor.py        |  9 ++++++--
 .../regions/tests/test_region_extractor.py |  9 ++++++++
 nilearn/tests/test_segmentation.py         | 17 ++++++++++++++
 6 files changed, 57 insertions(+), 11 deletions(-)

diff --git a/nilearn/_utils/segmentation.py b/nilearn/_utils/segmentation.py
index b298d86004..93882d1a7d 100644
--- a/nilearn/_utils/segmentation.py
+++ b/nilearn/_utils/segmentation.py
@@ -268,7 +268,7 @@ def _random_walker(data, labels, beta=130, tol=1.e-3, copy=True, spacing=None):
     if np.any(np.diff(label_values) != 1):
         mask = labels >= 0
         labels[mask] = np.searchsorted(np.unique(labels[mask]),
-                                       labels[mask])[0].astype(labels.dtype)
+                                       labels[mask]).astype(labels.dtype)
     labels = labels.astype(np.int32)
 
     # If the array has pruned zones, be sure that no isolated pixels
diff --git a/nilearn/image/image.py b/nilearn/image/image.py
index 756f853a1b..32567f520e 100644
--- a/nilearn/image/image.py
+++ b/nilearn/image/image.py
@@ -603,7 +603,10 @@ def
new_img_like(ref_niimg, data, affine=None, copy_header=False): def threshold_img(img, threshold, mask_img=None): - """ Thresholds the given input image based on specific strategy. + """ Threshold the given input image, mostly statistical or atlas images. + + Thresholding can be done based on direct image intensities or selection + threshold with given percentile. .. versionadded:: 0.2 @@ -638,6 +641,9 @@ def threshold_img(img, threshold, mask_img=None): img_data = _safe_get_data(img).copy() affine = img.get_affine() + if np.isnan(np.sum(img_data)): + img_data = np.nan_to_num(img_data) + if mask_img is not None: if not _check_same_fov(img, mask_img): mask_img = resampling.resample_img(mask_img, target_affine=affine, diff --git a/nilearn/image/tests/test_image.py b/nilearn/image/tests/test_image.py index 6055e54c55..37c3ee3d6e 100644 --- a/nilearn/image/tests/test_image.py +++ b/nilearn/image/tests/test_image.py @@ -406,13 +406,12 @@ def test_new_img_like(): def test_validity_threshold_value_in_threshold_img(): shape = (6, 8, 10) - maps = testing.generate_maps(shape, n_regions=2) - map_0 = maps[0] + maps, _ = testing.generate_maps(shape, n_regions=2) # testing to raise same error when threshold=None case testing.assert_raises_regex(ValueError, "The input parameter 'threshold' is empty. ", - threshold_img, map_0, threshold=None) + threshold_img, maps, threshold=None) invalid_threshold_values = ['90t%', 's%', 't', '0.1'] name = 'threshold' @@ -420,21 +419,31 @@ def test_validity_threshold_value_in_threshold_img(): testing.assert_raises_regex(ValueError, '{0}.+should be a number followed by ' 'the percent sign'.format(name), - threshold_img, map_0, threshold=thr) + threshold_img, maps, threshold=thr) def test_threshold_img(): # to check whether passes with valid threshold inputs shape = (10, 20, 30) - maps = testing.generate_maps(shape, n_regions=4) - map_0 = maps[0] + maps, _ = testing.generate_maps(shape, n_regions=4) affine = np.eye(4) mask_img = nibabel.Nifti1Image(np.ones((shape), dtype=np.int8), affine) - for img in iter_img(map_0): + for img in iter_img(maps): # when threshold is a float value thr_maps_img = threshold_img(img, threshold=0.8) # when we provide mask image thr_maps_percent = threshold_img(img, threshold=1, mask_img=mask_img) # when threshold is a percentile thr_maps_percent2 = threshold_img(img, threshold='2%') + + +def test_isnan_threshold_img_data(): + shape = (10, 10, 10) + maps, _ = testing.generate_maps(shape, n_regions=2) + data = maps.get_data() + data[:, :, 0] = np.nan + + maps_img = nibabel.Nifti1Image(data, np.eye(4)) + # test threshold_img to converge properly when input image has nans. 
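# (np.isnan(np.sum(x)), used in the threshold_img fix above, is a cheap
#  whole-array NaN check, since the sum is NaN exactly when some element is
#  NaN; np.nan_to_num then replaces NaNs with zeros, e.g.
#  np.nan_to_num(np.array([np.nan, 0.5])) gives array([0. , 0.5]).)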
+ threshold_maps = threshold_img(maps_img, threshold=0.8) diff --git a/nilearn/regions/region_extractor.py b/nilearn/regions/region_extractor.py index bc6ea73e09..2bebae496f 100644 --- a/nilearn/regions/region_extractor.py +++ b/nilearn/regions/region_extractor.py @@ -16,6 +16,7 @@ from ..image import new_img_like, resample_img from ..image.image import _smooth_array, threshold_img from .._utils.niimg_conversions import concat_niimgs, _check_same_fov +from .._utils.niimg import _safe_get_data from .._utils.compat import _basestring from .._utils.ndimage import _peak_local_max from .._utils.segmentation import _random_walker @@ -53,7 +54,11 @@ def _threshold_maps_ratio(maps_img, threshold): else: ratio = threshold - maps_data = maps.get_data() + maps_data = _safe_get_data(maps).copy() + + if np.isnan(np.sum(maps_data)): + maps_data = np.nan_to_num(maps_data) + abs_maps = np.abs(maps_data) # thresholding cutoff_threshold = scoreatpercentile( @@ -116,7 +121,7 @@ def connected_regions(maps_img, min_region_size=1350, all_regions_imgs = [] index_of_each_map = [] maps_img = check_niimg(maps_img, atleast_4d=True) - maps = maps_img.get_data() + maps = _safe_get_data(maps_img).copy() affine = maps_img.get_affine() min_region_size = min_region_size / np.prod(np.diag(abs(affine[:3]))) diff --git a/nilearn/regions/tests/test_region_extractor.py b/nilearn/regions/tests/test_region_extractor.py index 3add53e5e3..0f60e1d20f 100644 --- a/nilearn/regions/tests/test_region_extractor.py +++ b/nilearn/regions/tests/test_region_extractor.py @@ -33,6 +33,15 @@ def test_invalid_thresholds_in_threshold_maps_ratio(): maps, threshold=invalid_threshold) +def test_nans_threshold_maps_ratio(): + maps, _ = generate_maps((10, 10, 10), n_regions=2) + data = maps.get_data() + data[:, :, 0] = np.nan + + maps_img = nibabel.Nifti1Image(data, np.eye(4)) + thr_maps = _threshold_maps_ratio(maps_img, threshold=0.8) + + def test_threshold_maps_ratio(): # smoke test for function _threshold_maps_ratio with randomly # generated maps diff --git a/nilearn/tests/test_segmentation.py b/nilearn/tests/test_segmentation.py index 80d227847c..40a7e4988a 100644 --- a/nilearn/tests/test_segmentation.py +++ b/nilearn/tests/test_segmentation.py @@ -54,3 +54,20 @@ def test_bad_inputs(): labels[6, 8] = 5 np.testing.assert_raises(ValueError, _random_walker, img, labels, spacing=(1,)) + + +def test_reorder_labels(): + # When labels have non-consecutive integers, we make them consecutive + # by reordering them to make no gaps/differences between integers. We expect + # labels to be of same shape even if they are reordered. + # Issue #938, comment #14. 
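# A standalone sketch (with made-up labels) of the indexing bug fixed above in
# _random_walker: np.searchsorted maps non-consecutive labels onto consecutive
# ones, and the old trailing [0] kept only the first element of that mapping
# instead of the whole relabeled array.
import numpy as np

labels = np.array([0, 1, 4, 4, 1])
relabeled = np.searchsorted(np.unique(labels), labels)
print(relabeled)     # [0 1 2 2 1], consecutive labels and shape preserved
print(relabeled[0])  # 0, the single value the buggy [0] indexing kept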
+ data = np.zeros((5, 5)) + 0.1 * np.random.randn(5, 5) + data[1:5, 1:5] = 1 + + labels = np.zeros_like(data) + labels[3, 3] = 1 + labels[1, 4] = 4 # giving integer which is non-consecutive + + labels = _random_walker(data, labels) + assert data.shape == labels.shape + From 3096a69c3f1d2c0daf95bc89fef65348ed21913e Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Mon, 18 Jan 2016 12:52:26 +0100 Subject: [PATCH 0063/1925] Address comments and add tests --- nilearn/input_data/nifti_spheres_masker.py | 6 +-- .../tests/test_nifti_spheres_masker.py | 41 +++++++++++++++++-- 2 files changed, 40 insertions(+), 7 deletions(-) diff --git a/nilearn/input_data/nifti_spheres_masker.py b/nilearn/input_data/nifti_spheres_masker.py index 0a3aeaf565..b1d4f4638a 100644 --- a/nilearn/input_data/nifti_spheres_masker.py +++ b/nilearn/input_data/nifti_spheres_masker.py @@ -62,13 +62,13 @@ def _apply_mask_and_get_affinity(seeds, niimg, radius, allow_overlap, clf = neighbors.NearestNeighbors(radius=radius) A = clf.fit(mask_coords).radius_neighbors_graph(seeds) + A = A.tolil() for i, nearest in enumerate(nearests): if nearest is None: continue A[i, nearest] = True - A = A.tolil() - # Include selfs - + + # Include the voxel containing the seed itself if not masked mask_coords = mask_coords.astype(int).tolist() for i, seed in enumerate(seeds): try: diff --git a/nilearn/input_data/tests/test_nifti_spheres_masker.py b/nilearn/input_data/tests/test_nifti_spheres_masker.py index b2568a4205..98241431d7 100644 --- a/nilearn/input_data/tests/test_nifti_spheres_masker.py +++ b/nilearn/input_data/tests/test_nifti_spheres_masker.py @@ -82,13 +82,46 @@ def test_nifti_spheres_masker_overlap(): seeds = [(0, 0, 0), (2, 2, 2)] - overlapping_masker = NiftiSpheresMasker(seeds, radius=1, allow_overlap=True) + overlapping_masker = NiftiSpheresMasker(seeds, radius=1, + allow_overlap=True) overlapping_masker.fit_transform(fmri_img) - overlapping_masker = NiftiSpheresMasker(seeds, radius=2, allow_overlap=True) + overlapping_masker = NiftiSpheresMasker(seeds, radius=2, + allow_overlap=True) overlapping_masker.fit_transform(fmri_img) - noverlapping_masker = NiftiSpheresMasker(seeds, radius=1, allow_overlap=False) + noverlapping_masker = NiftiSpheresMasker(seeds, radius=1, + allow_overlap=False) noverlapping_masker.fit_transform(fmri_img) - noverlapping_masker = NiftiSpheresMasker(seeds, radius=2, allow_overlap=False) + noverlapping_masker = NiftiSpheresMasker(seeds, radius=2, + allow_overlap=False) assert_raises_regex(ValueError, 'Overlap detected', noverlapping_masker.fit_transform, fmri_img) + + +def test_small_radius(): + affine = np.eye(4) + shape = (3, 3, 3) + + data = np.random.random(shape) + mask = np.zeros(shape) + mask[1, 1, 1] = 1 + affine = np.eye(4) * 1.2 + seed = (1.4, 1.4, 1.4) + + masker = NiftiSpheresMasker([seed], radius=0.1, + mask_img=nibabel.Nifti1Image(mask, affine)) + masker.fit_transform(nibabel.Nifti1Image(data, affine)) + + # Test if masking is taken into account + mask[1, 1, 1] = 0 + mask[1, 1, 0] = 1 + + masker = NiftiSpheresMasker([seed], radius=0.1, + mask_img=nibabel.Nifti1Image(mask, affine)) + assert_raises_regex(ValueError, 'Sphere around seed #0 is empty', + masker.fit_transform, + nibabel.Nifti1Image(data, affine)) + + masker = NiftiSpheresMasker([seed], radius=1.6, + mask_img=nibabel.Nifti1Image(mask, affine)) + masker.fit_transform(nibabel.Nifti1Image(data, affine)) From 4e71ba96c0ae714dde7b0cded6a210ffc17fa123 Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Mon, 18 Jan 2016 16:21:09 +0100 
Subject: [PATCH 0064/1925] MISC: better repr for the MNI152Template

To help having clean function signatures, and help with problems such as
#952
---
 nilearn/plotting/img_plotting.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/nilearn/plotting/img_plotting.py b/nilearn/plotting/img_plotting.py
index 4abd23bcab..e89fe24767 100644
--- a/nilearn/plotting/img_plotting.py
+++ b/nilearn/plotting/img_plotting.py
@@ -329,6 +329,8 @@ def get_shape(self):
     def __str__(self):
         return "<MNI152Template>"
 
+    def __repr__(self):
+        return "<MNI152Template>"
 
 # The constant that we use as a default in functions
 MNI152TEMPLATE = _MNI152Template()
From f368617095a7eec9b4b730580484dde9f77120d6 Mon Sep 17 00:00:00 2001
From: Gael Varoquaux
Date: Mon, 18 Jan 2016 16:27:27 +0100
Subject: [PATCH 0065/1925] DOC: improve docstring on bg_img

Partly addresses #952
---
 nilearn/plotting/img_plotting.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/nilearn/plotting/img_plotting.py b/nilearn/plotting/img_plotting.py
index e89fe24767..c4a6bca2dc 100644
--- a/nilearn/plotting/img_plotting.py
+++ b/nilearn/plotting/img_plotting.py
@@ -568,7 +568,8 @@ def plot_roi(roi_img, bg_img=MNI152TEMPLATE, cut_coords=None,
     bg_img : Niimg-like object
         See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
         The background image that the ROI/mask will be plotted on top of.
-        If not specified MNI152 template will be used.
+        If nothing is specified, the MNI152 template will be used.
+        To turn off background image, just pass "bg_img=False".
     cut_coords: None, or a tuple of floats
         The MNI coordinates of the point where the cut is performed, in
         MNI coordinates and order.
@@ -654,8 +655,9 @@ def plot_prob_atlas(maps_img, anat_img=MNI152TEMPLATE, view_type='auto',
         4D image of the probabilistic atlas maps
     anat_img : Niimg-like object
         See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
-        The anatomical image to be used as a background. If None is
-        given, nilearn tries to find a T1 template.
+        The anatomical image to be used as a background.
+        If nothing is specified, the MNI152 template will be used.
+        To turn off background image, just pass "anat_img=False".
     view_type: {'auto', 'contours', 'filled_contours', 'continuous'}, optional
         By default view_type == 'auto', which means maps are overlayed as
         contours if number of maps to display are more or
@@ -821,7 +823,8 @@ def plot_stat_map(stat_map_img, bg_img=MNI152TEMPLATE, cut_coords=None,
     bg_img : Niimg-like object
         See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
         The background image that the ROI/mask will be plotted on top of.
-        If not specified MNI152 template will be used.
+        If nothing is specified, the MNI152 template will be used.
+        To turn off background image, just pass "bg_img=False".
cut_coords : None, a tuple of floats, or an integer The MNI coordinates of the point where the cut is performed If display_mode is 'ortho', this should be a 3-tuple: (x, y, z) From a362dc8bc5703f9465cf9eb021444a2baeb5d27b Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Tue, 19 Jan 2016 09:16:13 +0100 Subject: [PATCH 0066/1925] Removed doubling of copying --- nilearn/image/image.py | 5 ++--- nilearn/regions/region_extractor.py | 5 +---- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 32567f520e..94c0cb439b 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -638,11 +638,10 @@ def threshold_img(img, threshold, mask_img=None): from .. import masking img = check_niimg(img) - img_data = _safe_get_data(img).copy() + img_data = img.get_data() affine = img.get_affine() - if np.isnan(np.sum(img_data)): - img_data = np.nan_to_num(img_data) + img_data = np.nan_to_num(img_data) if mask_img is not None: if not _check_same_fov(img, mask_img): diff --git a/nilearn/regions/region_extractor.py b/nilearn/regions/region_extractor.py index 2bebae496f..f7f3591848 100644 --- a/nilearn/regions/region_extractor.py +++ b/nilearn/regions/region_extractor.py @@ -54,10 +54,7 @@ def _threshold_maps_ratio(maps_img, threshold): else: ratio = threshold - maps_data = _safe_get_data(maps).copy() - - if np.isnan(np.sum(maps_data)): - maps_data = np.nan_to_num(maps_data) + maps_data = np.nan_to_num(maps.get_data()) abs_maps = np.abs(maps_data) # thresholding From 03518f81d1448b4bc72a4127e6df6b948acea485 Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Tue, 26 Jan 2016 08:54:13 +0100 Subject: [PATCH 0067/1925] BUG: don't ignore constant part of confounds When the signal is not detrended or normalized, we need to account for constant confounds --- nilearn/signal.py | 11 ++++++++++- nilearn/tests/test_signal.py | 9 +++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/nilearn/signal.py b/nilearn/signal.py index be1522035d..b07f8100db 100644 --- a/nilearn/signal.py +++ b/nilearn/signal.py @@ -473,7 +473,16 @@ def clean(signals, sessions=None, detrend=True, standardize=True, # Remove confounds if confounds is not None: confounds = _ensure_float(confounds) - confounds = _standardize(confounds, normalize=True, detrend=detrend) + confounds = _standardize(confounds, normalize=standardize, + detrend=detrend) + if not standardize: + # Improve numerical stability by controlling the range of + # confounds. We don't rely on _standardize as it removes any + # constant contribution to confounds + confound_max = np.maximum(confounds.max(axis=0), + -confounds.min(axis=0)) + confound_max[confound_max == 0] = 1 + confounds /= confound_max if (LooseVersion(scipy.__version__) > LooseVersion('0.9.0')): # Pivoting in qr decomposition was added in scipy 0.10 diff --git a/nilearn/tests/test_signal.py b/nilearn/tests/test_signal.py index 044e18b525..9be6f7f5d7 100644 --- a/nilearn/tests/test_signal.py +++ b/nilearn/tests/test_signal.py @@ -356,6 +356,15 @@ def test_clean_confounds(): assert_raises(TypeError, nisignal.clean, signals, confounds=[None]) + # Test without standardizing that constant parts of confounds are + # accounted for + np.testing.assert_almost_equal(nisignal.clean(np.ones((20, 2)), + standardize=False, + confounds=np.ones(20), + detrend=False, + ).mean(), + 0) + def test_high_variance_confounds(): # C and F order might take different paths in the function. 
Check that the
From c1c83122fa05adfb7cd35c6f53a6b92524d0fe42 Mon Sep 17 00:00:00 2001
From: Gael Varoquaux
Date: Tue, 26 Jan 2016 10:22:47 +0100
Subject: [PATCH 0068/1925] DOC: make explicit the use of LedoitWolf

To answer questions like: https://neurostars.org/p/3640/
i.e., "why do I get different results when I compute correlations by
hand?"
---
 nilearn/connectome/connectivity_matrices.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/nilearn/connectome/connectivity_matrices.py b/nilearn/connectome/connectivity_matrices.py
index dd16269cb5..597de3dc89 100644
--- a/nilearn/connectome/connectivity_matrices.py
+++ b/nilearn/connectome/connectivity_matrices.py
@@ -261,7 +261,9 @@ class ConnectivityMeasure(BaseEstimator, TransformerMixin):
     Parameters
     ----------
     cov_estimator : estimator object, optional.
-        The covariance estimator.
+        The covariance estimator. By default the LedoitWolf estimator
+        is used. This implies that correlations are slightly shrunk
+        towards zero compared to a maximum-likelihood estimate.
 
     kind : {"correlation", "partial correlation", "tangent",\
             "covariance", "precision"}, optional
From dcd0500745161bd6877e3e529cd9fbe8de6e373f Mon Sep 17 00:00:00 2001
From: Gael Varoquaux
Date: Tue, 26 Jan 2016 11:26:18 +0100
Subject: [PATCH 0069/1925] MISC: address nitpicks by @AlexandreAbraham

---
 nilearn/signal.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/nilearn/signal.py b/nilearn/signal.py
index b07f8100db..f277efc2e6 100644
--- a/nilearn/signal.py
+++ b/nilearn/signal.py
@@ -478,9 +478,8 @@ def clean(signals, sessions=None, detrend=True, standardize=True,
         if not standardize:
             # Improve numerical stability by controlling the range of
             # confounds. We don't rely on _standardize as it removes any
-            # constant contribution to confounds
-            confound_max = np.maximum(confounds.max(axis=0),
-                                      -confounds.min(axis=0))
+            # constant contribution to confounds.
+            confound_max = np.max(np.abs(confounds), axis=0)
             confound_max[confound_max == 0] = 1
             confounds /= confound_max
From 3f8b84c4db8e870482ca99bad715f91c2d4f3605 Mon Sep 17 00:00:00 2001
From: Elvis DOHMATOB
Date: Tue, 26 Jan 2016 15:38:39 +0100
Subject: [PATCH 0070/1925] REWRITE: dataset.func[0] ==> func_filenames[0]

---
 examples/03_connectivity/plot_canica_resting_state.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/03_connectivity/plot_canica_resting_state.py b/examples/03_connectivity/plot_canica_resting_state.py
index 3f151ae908..22ddff547b 100644
--- a/examples/03_connectivity/plot_canica_resting_state.py
+++ b/examples/03_connectivity/plot_canica_resting_state.py
@@ -30,7 +30,7 @@
 
 # print basic information on the dataset
 print('First functional nifti image (4D) is at: %s' %
-      adhd_dataset.func[0])  # 4D data
+      func_filenames[0])  # 4D data
 
 
 ####################################################################
From 674c5a501d917765faec4450172f1de1efb749d4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Thu, 28 Jan 2016 10:39:50 +0100
Subject: [PATCH 0071/1925] Fix typo

---
 continuous_integration/install.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh
index 3fb30f6d99..c276df7cae 100755
--- a/continuous_integration/install.sh
+++ b/continuous_integration/install.sh
@@ -27,7 +27,7 @@ create_new_venv() {
 
 print_conda_requirements() {
     # Echo a conda requirement string for example
-    # "pip nose python='.7.3 scikit-learn=*".
It has a hardcoded + # "pip nose python='2.7.3 scikit-learn=*". It has a hardcoded # list of possible packages to install and looks at _VERSION # environment variables to know whether to install a given package and # if yes which version to install. For example: From ace4d6b476cdd9fe6d1ac928066396c3c3ba702d Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Thu, 28 Jan 2016 10:49:00 +0100 Subject: [PATCH 0072/1925] MISC: tighter test Addresses @lesteve's comment --- nilearn/tests/test_signal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/tests/test_signal.py b/nilearn/tests/test_signal.py index 9be6f7f5d7..8c4f26440a 100644 --- a/nilearn/tests/test_signal.py +++ b/nilearn/tests/test_signal.py @@ -363,7 +363,7 @@ def test_clean_confounds(): confounds=np.ones(20), detrend=False, ).mean(), - 0) + np.zeros((20, 2))) def test_high_variance_confounds(): From d25f3eaf3d3038eedab24f03c077a80a177c1f09 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Thu, 28 Jan 2016 16:13:27 +0100 Subject: [PATCH 0073/1925] Fix Python 2.7 failure --- nilearn/input_data/tests/test_nifti_spheres_masker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nilearn/input_data/tests/test_nifti_spheres_masker.py b/nilearn/input_data/tests/test_nifti_spheres_masker.py index 98241431d7..b385ebc2c9 100644 --- a/nilearn/input_data/tests/test_nifti_spheres_masker.py +++ b/nilearn/input_data/tests/test_nifti_spheres_masker.py @@ -105,6 +105,7 @@ def test_small_radius(): data = np.random.random(shape) mask = np.zeros(shape) mask[1, 1, 1] = 1 + mask[2, 2, 2] = 1 affine = np.eye(4) * 1.2 seed = (1.4, 1.4, 1.4) From 398ab45ba1a611ace5c603f04ee4fa8ef1ff115c Mon Sep 17 00:00:00 2001 From: Salma Date: Fri, 29 Jan 2016 18:49:22 +0100 Subject: [PATCH 0074/1925] test connectivity measure dimension error --- nilearn/connectome/connectivity_matrices.py | 50 +++++--- .../tests/test_connectivity_matrices.py | 121 +++++++++--------- 2 files changed, 92 insertions(+), 79 deletions(-) diff --git a/nilearn/connectome/connectivity_matrices.py b/nilearn/connectome/connectivity_matrices.py index 597de3dc89..a647849305 100644 --- a/nilearn/connectome/connectivity_matrices.py +++ b/nilearn/connectome/connectivity_matrices.py @@ -6,6 +6,7 @@ from sklearn.base import BaseEstimator, TransformerMixin, clone from sklearn.covariance import LedoitWolf +from .. import signal from .._utils.extmath import is_spd @@ -287,7 +288,7 @@ class ConnectivityMeasure(BaseEstimator, TransformerMixin): in post-stroke patients using group-level covariance modeling, MICCAI 2010. """ - def __init__(self, cov_estimator=LedoitWolf(), + def __init__(self, cov_estimator=LedoitWolf(store_precision=False), kind='covariance'): self.cov_estimator = cov_estimator self.kind = kind @@ -298,7 +299,7 @@ def fit(self, X, y=None): Parameters ---------- - X : list of numpy.ndarray, shapes (n_samples, n_features) + X : array-like, shape (n_subjects, n_samples, n_features) The input subjects time series. Returns @@ -307,6 +308,9 @@ def fit(self, X, y=None): The object itself. Useful for chaining operations. """ self.cov_estimator_ = clone(self.cov_estimator) + if np.ndim(X) != 3: + raise ValueError("Expects a 3D array-like data, got array with" + " {} dimensions".format(np.ndim(X))) if self.kind == 'tangent': covariances = [self.cov_estimator_.fit(x).covariance_ for x in X] @@ -330,25 +334,29 @@ def transform(self, X): output : numpy.ndarray, shape (n_samples, n_features, n_features) The transformed connectivity matrices. 
""" - covariances = [self.cov_estimator_.fit(x).covariance_ for x in X] - covariances = np.array(covariances) - if self.kind == 'covariance': - connectivities = covariances - elif self.kind == 'tangent': - connectivities = [_map_eigenvalues(np.log, self.whitening_.dot( - cov).dot(self.whitening_)) - for cov in covariances] - elif self.kind == 'precision': - connectivities = [linalg.inv(cov) for cov in covariances] - elif self.kind == 'partial correlation': - connectivities = [_prec_to_partial(linalg.inv(cov)) - for cov in covariances] - elif self.kind == 'correlation': - connectivities = [_cov_to_corr(cov) for cov in covariances] + if self.kind == 'correlation': + covariances_std = [self.cov_estimator_.fit( + signal._standardize(x, detrend=False, normalize=True) + ).covariance_ for x in X] + connectivities = [_cov_to_corr(cov) for cov in covariances_std] else: - raise ValueError('Allowed connectivity kinds are "correlation", ' - '"partial correlation", "tangent", ' - '"covariance" and "precision", got kind ' - '"{}"'.format(self.kind)) + covariances = [self.cov_estimator_.fit(x).covariance_ for x in X] + if self.kind == 'covariance': + connectivities = covariances + elif self.kind == 'tangent': + connectivities = [_map_eigenvalues(np.log, self.whitening_.dot( + cov).dot(self.whitening_)) + for cov in covariances] + elif self.kind == 'precision': + connectivities = [linalg.inv(cov) for cov in covariances] + elif self.kind == 'partial correlation': + connectivities = [_prec_to_partial(linalg.inv(cov)) + for cov in covariances] + else: + raise ValueError('Allowed connectivity kinds are ' + '"correlation", ' + '"partial correlation", "tangent", ' + '"covariance" and "precision", got kind ' + '"{}"'.format(self.kind)) return np.array(connectivities) diff --git a/nilearn/connectome/tests/test_connectivity_matrices.py b/nilearn/connectome/tests/test_connectivity_matrices.py index 68df0c1f32..1e6bf676f8 100644 --- a/nilearn/connectome/tests/test_connectivity_matrices.py +++ b/nilearn/connectome/tests/test_connectivity_matrices.py @@ -7,7 +7,7 @@ from numpy.testing import assert_array_almost_equal, assert_array_equal from nose.tools import assert_raises, assert_equal, assert_true from sklearn.utils import check_random_state -from sklearn.covariance import EmpiricalCovariance +from sklearn.covariance import EmpiricalCovariance, LedoitWolf from nilearn._utils.extmath import is_spd from nilearn.connectome.connectivity_matrices import ( @@ -70,13 +70,11 @@ def grad_geometric_mean(mats, init=None, max_iter=10, tol=1e-7): def test_check_square(): - """Test _check_square function""" non_square = np.ones((2, 3)) assert_raises(ValueError, _check_square, non_square) def test_check_spd(): - """Test _check_spd function""" non_sym = np.array([[0, 1], [0, 0]]) assert_raises(ValueError, _check_spd, non_sym) @@ -85,7 +83,6 @@ def test_check_spd(): def test_map_eigenvalues(): - """Test _map_eigenvalues function""" # Test on exp map sym = np.ones((2, 2)) sym_exp = exp(1.) 
* np.array([[cosh(1.), sinh(1.)], [sinh(1.), cosh(1.)]]) @@ -103,7 +100,6 @@ def test_map_eigenvalues(): def test_geometric_mean_couple(): - """Test _geometric_mean function for two matrices""" n_features = 7 spd1 = np.ones((n_features, n_features)) spd1 = spd1.dot(spd1) + n_features * np.eye(n_features) @@ -118,7 +114,6 @@ def test_geometric_mean_couple(): def test_geometric_mean_diagonal(): - """Test _geometric_mean function for diagonal matrices""" n_matrices = 20 n_features = 5 diags = [] @@ -133,7 +128,6 @@ def test_geometric_mean_diagonal(): def test_geometric_mean_geodesic(): - """Test geometric_mean function for single geodesic matrices""" n_matrices = 10 n_features = 6 sym = np.arange(n_features) / np.linalg.norm(np.arange(n_features)) @@ -246,8 +240,6 @@ def random_non_singular(p, sing_min=1., sing_max=2., random_state=0): def test_geometric_mean_properties(): - """Test _geometric_mean function for random spd matrices - """ n_matrices = 40 n_features = 15 spds = [] @@ -314,9 +306,7 @@ def test_geometric_mean_properties(): gmean = _geometric_mean(spds, max_iter=max_iter, tol=1e-5) -def test_geometric_mean_checks(): - """Errors check for _geometric_mean function - """ +def test_geometric_mean_errors(): n_features = 5 # Non square input matrix @@ -333,74 +323,89 @@ def test_geometric_mean_checks(): def test_sym_to_vec(): - """Test sym_to_vec function""" sym = np.ones((3, 3)) vec = np.array([1., sqrt(2), 1., sqrt(2), sqrt(2), 1.]) assert_array_almost_equal(sym_to_vec(sym), vec) def test_prec_to_partial(): - """Test prec_to_partial function""" prec = np.array([[2., -1., 1.], [-1., 2., -1.], [1., -1., 1.]]) partial = np.array([[1., .5, -sqrt(2.) / 2.], [.5, 1., sqrt(2.) / 2.], [-sqrt(2.) / 2., sqrt(2.) / 2., 1.]]) assert_array_almost_equal(_prec_to_partial(prec), partial) -def test_fit_transform(): - """Test fit_transform method for class ConnectivityMeasure""" +def test_connectivity_measure_errors(): + # Raising error for input with bad dimension + conn_measure = ConnectivityMeasure() + assert_raises(ValueError, conn_measure.fit, np.ones((4, 4))) + assert_raises(ValueError, conn_measure.fit, + np.ones((3, 4, 7, 5))) + + +def test_connectivity_measure_outputs(): n_subjects = 10 n_features = 49 n_samples = 200 - # Generate signals and compute empirical covariances - covs = [] + # Generate signals and compute covariances + emp_covs = [] + ledoit_covs = [] signals = [] random_state = check_random_state(0) + ledoit_estimator = LedoitWolf() for k in range(n_subjects): signal = random_state.randn(n_samples, n_features) signals.append(signal) signal -= signal.mean(axis=0) - covs.append((signal.T).dot(signal) / n_samples) + emp_covs.append((signal.T).dot(signal) / n_samples) + ledoit_covs.append(ledoit_estimator.fit(signal).covariance_) - input_covs = copy.copy(covs) kinds = ["correlation", "tangent", "precision", "partial correlation"] - for kind in kinds: - conn_measure = ConnectivityMeasure(kind=kind, - cov_estimator=EmpiricalCovariance()) - connectivities = conn_measure.fit_transform(signals) - - # Generic - assert_true(isinstance(connectivities, np.ndarray)) - assert_equal(len(connectivities), len(covs)) - - for k, cov_new in enumerate(connectivities): - assert_array_equal(input_covs[k], covs[k]) - assert(is_spd(covs[k], decimal=7)) - - # Positive definiteness if expected and output value checks - if kind == "tangent": - assert_array_almost_equal(cov_new, cov_new.T) - gmean_sqrt = _map_eigenvalues(np.sqrt, - conn_measure.mean_) - assert(is_spd(gmean_sqrt, decimal=7)) - 
assert(is_spd(conn_measure.whitening_, decimal=7)) - assert_array_almost_equal(conn_measure.whitening_.dot( - gmean_sqrt), np.eye(n_features)) - assert_array_almost_equal(gmean_sqrt.dot( - _map_eigenvalues(np.exp, cov_new)).dot(gmean_sqrt), - covs[k]) - elif kind == "precision": - assert(is_spd(cov_new, decimal=7)) - assert_array_almost_equal(cov_new.dot(covs[k]), - np.eye(n_features)) - elif kind == "correlation": - assert(is_spd(cov_new, decimal=7)) - d = np.sqrt(np.diag(np.diag(covs[k]))) - assert_array_almost_equal(d.dot(cov_new).dot(d), covs[k]) - elif kind == "partial correlation": - prec = linalg.inv(covs[k]) - d = np.sqrt(np.diag(np.diag(prec))) - assert_array_almost_equal(d.dot(cov_new).dot(d), -prec + - 2 * np.diag(np.diag(prec))) + + # Check outputs properties + for cov_estimator, covs in zip([EmpiricalCovariance(), LedoitWolf()], + [emp_covs, ledoit_covs]): + input_covs = copy.copy(covs) + for kind in kinds: + conn_measure = ConnectivityMeasure(kind=kind, + cov_estimator=cov_estimator) + connectivities = conn_measure.fit_transform(signals) + + # Generic + assert_true(isinstance(connectivities, np.ndarray)) + assert_equal(len(connectivities), len(covs)) + + for k, cov_new in enumerate(connectivities): + assert_array_equal(input_covs[k], covs[k]) + assert(is_spd(covs[k], decimal=7)) + + # Positive definiteness if expected and output value checks + if kind == "tangent": + assert_array_almost_equal(cov_new, cov_new.T) + gmean_sqrt = _map_eigenvalues(np.sqrt, + conn_measure.mean_) + assert(is_spd(gmean_sqrt, decimal=7)) + assert(is_spd(conn_measure.whitening_, decimal=7)) + assert_array_almost_equal(conn_measure.whitening_.dot( + gmean_sqrt), np.eye(n_features)) + assert_array_almost_equal(gmean_sqrt.dot( + _map_eigenvalues(np.exp, cov_new)).dot(gmean_sqrt), + covs[k]) + elif kind == "precision": + assert(is_spd(cov_new, decimal=7)) + assert_array_almost_equal(cov_new.dot(covs[k]), + np.eye(n_features)) + elif kind == "correlation": + assert(is_spd(cov_new, decimal=7)) + d = np.sqrt(np.diag(np.diag(covs[k]))) + if cov_estimator == EmpiricalCovariance(): + assert_array_almost_equal(d.dot(cov_new).dot(d), + covs[k]) + elif kind == "partial correlation": + prec = linalg.inv(covs[k]) + d = np.sqrt(np.diag(np.diag(prec))) + assert_array_almost_equal(d.dot(cov_new).dot(d), -prec + + 2 * np.diag(np.diag(prec))) + From 07937f2da3692cc12b889ed30684c3727e980ed4 Mon Sep 17 00:00:00 2001 From: Salma Date: Sat, 30 Jan 2016 09:38:55 +0100 Subject: [PATCH 0075/1925] fix connectivity measure errors raise --- nilearn/connectome/connectivity_matrices.py | 25 ++++++++++++++++--- .../tests/test_connectivity_matrices.py | 12 ++++++--- 2 files changed, 30 insertions(+), 7 deletions(-) diff --git a/nilearn/connectome/connectivity_matrices.py b/nilearn/connectome/connectivity_matrices.py index a647849305..8901411ffd 100644 --- a/nilearn/connectome/connectivity_matrices.py +++ b/nilearn/connectome/connectivity_matrices.py @@ -299,7 +299,7 @@ def fit(self, X, y=None): Parameters ---------- - X : array-like, shape (n_subjects, n_samples, n_features) + X : list of numpy.ndarray, shape for each (n_samples, n_features) The input subjects time series. Returns @@ -308,9 +308,26 @@ def fit(self, X, y=None): The object itself. Useful for chaining operations. 
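In other words, the expected calling convention is a list of per-subject
2D arrays that all share the same number of features. Roughly, assuming
the public import path for this class:

    import numpy as np
    from nilearn.connectome import ConnectivityMeasure

    subjects = [np.random.randn(100, 39) for _ in range(5)]  # 5 subjects
    conn = ConnectivityMeasure(kind='partial correlation')
    matrices = conn.fit_transform(subjects)   # shape (5, 39, 39)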
""" self.cov_estimator_ = clone(self.cov_estimator) - if np.ndim(X) != 3: - raise ValueError("Expects a 3D array-like data, got array with" - " {} dimensions".format(np.ndim(X))) + if not hasattr(X, "__iter__"): + raise ValueError("'subjects' input argument must be an iterable. " + "You provided {0}".format(X.__class__)) + + subjects_types = [type(s)for s in X] + if set(subjects_types) != {np.ndarray}: + raise ValueError("Each subject must be 2D numpy.ndarray.\n You " + "provided {0}".format(str(subjects_types))) + + subjects_dims = [s.ndim for s in X] + if set(subjects_dims) != {2}: + raise ValueError("Each subject must be 2D numpy.ndarray.\n You" + "provided arrays of dimensions " + "{0}".format(str(subjects_dims))) + + n_subjects = [s.shape[1] for s in X] + if len(set(n_subjects)) > 1: + raise ValueError("All subjects must have the same number of " + "features.\nYou provided: " + "{0}".format(str(n_subjects))) if self.kind == 'tangent': covariances = [self.cov_estimator_.fit(x).covariance_ for x in X] diff --git a/nilearn/connectome/tests/test_connectivity_matrices.py b/nilearn/connectome/tests/test_connectivity_matrices.py index 1e6bf676f8..4a4d198cd5 100644 --- a/nilearn/connectome/tests/test_connectivity_matrices.py +++ b/nilearn/connectome/tests/test_connectivity_matrices.py @@ -336,11 +336,17 @@ def test_prec_to_partial(): def test_connectivity_measure_errors(): - # Raising error for input with bad dimension + # Raising error for input subjects not iterable conn_measure = ConnectivityMeasure() - assert_raises(ValueError, conn_measure.fit, np.ones((4, 4))) + assert_raises(ValueError, conn_measure.fit, 1.) + + # Raising error for input subjects not 2D numpy.ndarrays + assert_raises(ValueError, conn_measure.fit, [np.ones((100, 40)), + np.ones((10,))]) + + # Raising error for input subjects with different number of features assert_raises(ValueError, conn_measure.fit, - np.ones((3, 4, 7, 5))) + [np.ones((100, 40)), np.ones((100, 41))]) def test_connectivity_measure_outputs(): From bce519757f8e6be4c1088854dea29565d0ce1c06 Mon Sep 17 00:00:00 2001 From: Salma Date: Sat, 30 Jan 2016 14:07:43 +0100 Subject: [PATCH 0076/1925] flake8 --- nilearn/connectome/connectivity_matrices.py | 7 +++---- nilearn/connectome/tests/test_connectivity_matrices.py | 3 +-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/nilearn/connectome/connectivity_matrices.py b/nilearn/connectome/connectivity_matrices.py index 8901411ffd..cf61760e95 100644 --- a/nilearn/connectome/connectivity_matrices.py +++ b/nilearn/connectome/connectivity_matrices.py @@ -312,7 +312,7 @@ def fit(self, X, y=None): raise ValueError("'subjects' input argument must be an iterable. " "You provided {0}".format(X.__class__)) - subjects_types = [type(s)for s in X] + subjects_types = [type(s) for s in X] if set(subjects_types) != {np.ndarray}: raise ValueError("Each subject must be 2D numpy.ndarray.\n You " "provided {0}".format(str(subjects_types))) @@ -352,9 +352,8 @@ def transform(self, X): The transformed connectivity matrices. 
""" if self.kind == 'correlation': - covariances_std = [self.cov_estimator_.fit( - signal._standardize(x, detrend=False, normalize=True) - ).covariance_ for x in X] + covariances_std = [self.cov_estimator_.fit(signal._standardize( + x, detrend=False, normalize=True)).covariance_ for x in X] connectivities = [_cov_to_corr(cov) for cov in covariances_std] else: covariances = [self.cov_estimator_.fit(x).covariance_ for x in X] diff --git a/nilearn/connectome/tests/test_connectivity_matrices.py b/nilearn/connectome/tests/test_connectivity_matrices.py index 4a4d198cd5..81d56c3096 100644 --- a/nilearn/connectome/tests/test_connectivity_matrices.py +++ b/nilearn/connectome/tests/test_connectivity_matrices.py @@ -324,7 +324,7 @@ def test_geometric_mean_errors(): def test_sym_to_vec(): sym = np.ones((3, 3)) - vec = np.array([1., sqrt(2), 1., sqrt(2), sqrt(2), 1.]) + vec = np.array([1., sqrt(2), 1., sqrt(2), sqrt(2), 1.]) assert_array_almost_equal(sym_to_vec(sym), vec) @@ -414,4 +414,3 @@ def test_connectivity_measure_outputs(): d = np.sqrt(np.diag(np.diag(prec))) assert_array_almost_equal(d.dot(cov_new).dot(d), -prec + 2 * np.diag(np.diag(prec))) - From 14a4ae4062b69a797be35d6df8a259617a175610 Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Sat, 30 Jan 2016 16:33:02 +0100 Subject: [PATCH 0077/1925] ENH: more flexible derivatives argument --- nilearn/datasets/func.py | 4 ++++ nilearn/datasets/tests/test_func.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index ea1d9e1f82..d963e2da54 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1078,6 +1078,10 @@ def fetch_abide_pcp(data_dir=None, n_subjects=None, pipeline='cpac', classification of autism: ABIDE results." Frontiers in human neuroscience 7 (2013). """ + # People keep getting it wrong and submiting a string instead of a + # list of strings. 
We'll make their life easy + if isinstance(derivatives, _basestring): + derivatives = [derivatives, ] # Parameter check for derivative in derivatives: diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index 19127a7490..bfdbbeb716 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -334,6 +334,10 @@ def test_fetch_abide_pcp(): assert_equal(len(dataset.func_preproc), 400) assert_not_equal(dataset.description, '') + # Smoke test using only a string, rather than a list of strings + dataset = func.fetch_abide_pcp(data_dir=tst.tmpdir, url=local_url, + quality_checked=False, verbose=0, + derivatives='func_preproc') def test__load_mixed_gambles(): rng = check_random_state(42) From ad5b42959522cf691e083980f29d3404b7181b3f Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Mon, 1 Feb 2016 09:52:32 +0100 Subject: [PATCH 0078/1925] Change number of subjects in adhd from 40 to 30 --- nilearn/datasets/func.py | 82 ++++++++++++++++++++-------------------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index d963e2da54..066adedb43 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -59,12 +59,12 @@ def fetch_haxby_simple(data_dir=None, url=None, resume=True, verbose=1): opts = {'uncompress': True} files = [ - (os.path.join('pymvpa-exampledata', 'attributes.txt'), url, opts), - (os.path.join('pymvpa-exampledata', 'bold.nii.gz'), url, opts), - (os.path.join('pymvpa-exampledata', 'mask.nii.gz'), url, opts), - (os.path.join('pymvpa-exampledata', 'attributes_literal.txt'), - url, opts), - ] + (os.path.join('pymvpa-exampledata', 'attributes.txt'), url, opts), + (os.path.join('pymvpa-exampledata', 'bold.nii.gz'), url, opts), + (os.path.join('pymvpa-exampledata', 'mask.nii.gz'), url, opts), + (os.path.join('pymvpa-exampledata', 'attributes_literal.txt'), + url, opts), + ] dataset_name = 'haxby2001_simple' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, @@ -162,14 +162,14 @@ def fetch_haxby(data_dir=None, n_subjects=1, fetch_stimuli=False, n_files = len(sub_files) files = [ - (os.path.join('subj%d' % i, sub_file), - url + 'subj%d-2010.01.14.tar.gz' % i, - {'uncompress': True, - 'md5sum': md5sums.get('subj%d-2010.01.14.tar.gz' % i, None)}) - for i in range(1, n_subjects + 1) - for sub_file in sub_files - if not (sub_file == 'anat.nii.gz' and i == 6) # no anat for sub. 6 - ] + (os.path.join('subj%d' % i, sub_file), + url + 'subj%d-2010.01.14.tar.gz' % i, + {'uncompress': True, + 'md5sum': md5sums.get('subj%d-2010.01.14.tar.gz' % i, None)}) + for i in range(1, n_subjects + 1) + for sub_file in sub_files + if not (sub_file == 'anat.nii.gz' and i == 6) # no anat for sub. 
6 + ] files = _fetch_files(data_dir, files, resume=resume, verbose=verbose) @@ -190,17 +190,17 @@ def fetch_haxby(data_dir=None, n_subjects=1, fetch_stimuli=False, # return the data return Bunch( - anat=files[7::n_files], - func=files[0::n_files], - session_target=files[1::n_files], - mask_vt=files[2::n_files], - mask_face=files[3::n_files], - mask_house=files[4::n_files], - mask_face_little=files[5::n_files], - mask_house_little=files[6::n_files], - mask=mask, - description=fdescr, - **kwargs) + anat=files[7::n_files], + func=files[0::n_files], + session_target=files[1::n_files], + mask_vt=files[2::n_files], + mask_face=files[3::n_files], + mask_house=files[4::n_files], + mask_face_little=files[5::n_files], + mask_house_little=files[6::n_files], + mask=mask, + description=fdescr, + **kwargs) def fetch_nyu_rest(n_subjects=None, sessions=[1], data_dir=None, resume=True, @@ -398,8 +398,8 @@ def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True, Parameters ---------- n_subjects: int, optional - The number of subjects to load. If None is given, all the - 40 subjects are used. + The number of subjects to load from maximum of 40 subjects. + If default None, only 30 subjects will be loaded. data_dir: string, optional Path of the data directory. Used to force data storage in a specified @@ -407,7 +407,7 @@ def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True, url: string, optional Override download URL. Used for test only (or if you setup a mirror of - the data). + the data). Default: None Returns ------- @@ -441,7 +441,7 @@ def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True, nitrc_ids = range(7782, 7822) max_subjects = len(ids) if n_subjects is None: - n_subjects = max_subjects + n_subjects = 30 if n_subjects > max_subjects: warnings.warn('Warning: there are only %d subjects' % max_subjects) n_subjects = max_subjects @@ -455,7 +455,7 @@ def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True, # First, get the metadata phenotypic = ('ADHD200_40subs_motion_parameters_and_phenotypics.csv', - url + '7781/adhd40_metadata.tgz', opts) + url + '7781/adhd40_metadata.tgz', opts) phenotypic = _fetch_files(data_dir, [phenotypic], resume=resume, verbose=verbose)[0] @@ -598,8 +598,8 @@ def fetch_miyawaki2008(data_dir=None, url=None, resume=True, verbose=1): file_mask = [(os.path.join('mask', m), url, opts) for m in file_mask] file_names = func_figure + func_random + \ - label_figure + label_random + \ - file_mask + label_figure + label_random + \ + file_mask dataset_name = 'miyawaki2008' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, @@ -819,7 +819,7 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, "right button press (visual cue)": "right visual click", "right button press": "right auditory & visual click", "right vs left button press": "right auditory & visual click " - + "vs left auditory&visual click", + + "vs left auditory&visual click", "button press (auditory cue) vs sentence listening": "auditory click vs auditory sentences", "button press (visual cue) vs sentence reading": @@ -866,8 +866,8 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, urls = ["%sbrainomics_data_%d.zip?rql=%s&vid=data-zip" % (root_url, i, _urllib.parse.quote(base_query % {"types": rql_types, - "label": c}, - safe=',()')) + "label": c}, + safe=',()')) for c, i in zip(contrasts_wrapped, contrasts_indices)] filenames = [] for subject_id in subject_ids: @@ -884,8 +884,8 @@ def 
fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, urls.append("%sbrainomics_data_masks.zip?rql=%s&vid=data-zip" % (root_url, _urllib.parse.quote(base_query % {"types": '"boolean mask"', - "label": "mask"}, - safe=',()'))) + "label": "mask"}, + safe=',()'))) for subject_id in subject_ids: file_path = os.path.join( "brainomics_data", subject_id, "boolean_mask_mask.nii.gz") @@ -896,8 +896,8 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, urls.append("%sbrainomics_data_anats.zip?rql=%s&vid=data-zip" % (root_url, _urllib.parse.quote(base_query % {"types": '"normalized T1"', - "label": "anatomy"}, - safe=',()'))) + "label": "anatomy"}, + safe=',()'))) for subject_id in subject_ids: file_path = os.path.join( "brainomics_data", subject_id, @@ -911,8 +911,8 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, url_csv2 = ("%sdataset/cubicwebexport2.csv?rql=%s&vid=csvexport" % (root_url, _urllib.parse.quote("Any X,XI,XD WHERE X is QuestionnaireRun, " - "X identifier XI, X datetime " - "XD", safe=',') + "X identifier XI, X datetime " + "XD", safe=',') )) else: url_csv = "%s/cubicwebexport.csv" % url From fe3ff1fe75e33ff03f0d07aea09419a2f910cb3a Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Mon, 1 Feb 2016 10:41:41 +0100 Subject: [PATCH 0079/1925] Robustify csv_to_array --- nilearn/_utils/numpy_conversions.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nilearn/_utils/numpy_conversions.py b/nilearn/_utils/numpy_conversions.py index 434a9edd9d..a41c761f26 100644 --- a/nilearn/_utils/numpy_conversions.py +++ b/nilearn/_utils/numpy_conversions.py @@ -152,12 +152,14 @@ def csv_to_array(csv_path, delimiters=' \t,;', **kwargs): if not isinstance(csv_path, _basestring): raise TypeError('CSV must be a file path. Got a CSV of type: %s' % type(csv_path)) - # First, we try genfromtxt which works in most cases. - array = np.genfromtxt(csv_path, **kwargs) - if array.ndim <= 1 and np.all(np.isnan(array)): - # If the delimiter is not known genfromtxt generates an array full of - # nan. In that case, we try to guess the delimiter + try: + # First, we try genfromtxt which works in most cases. + array = np.genfromtxt(csv_path, loose=False, **kwargs) + except ValueError: + # There was an error during the conversion to numpy array, probably + # because the delimiter is wrong. + # In that case, we try to guess the delimiter. 
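The fallback below relies on `csv.Sniffer` to guess the delimiter from the
first line of the file. A toy round-trip showing the idea (file name and
contents are made up for illustration):

    import csv
    import numpy as np

    with open('/tmp/toy.csv', 'w') as f:   # hypothetical example file
        f.write('1.0;2.0;3.0\n4.0;5.0;6.0\n')

    with open('/tmp/toy.csv', 'r') as f:
        dialect = csv.Sniffer().sniff(f.readline(), ' \t,;')

    print(dialect.delimiter)   # ';'
    print(np.genfromtxt('/tmp/toy.csv', delimiter=dialect.delimiter))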
try: with open(csv_path, 'r') as csv_file: dialect = csv.Sniffer().sniff(csv_file.readline(), delimiters) From 3cd6c9a7fb398c7ad0ee618d3c121824ff6942f2 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Mon, 1 Feb 2016 10:59:10 +0100 Subject: [PATCH 0080/1925] Reverted pep8 changes --- nilearn/datasets/func.py | 74 ++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 066adedb43..0eea2e96e9 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -59,12 +59,12 @@ def fetch_haxby_simple(data_dir=None, url=None, resume=True, verbose=1): opts = {'uncompress': True} files = [ - (os.path.join('pymvpa-exampledata', 'attributes.txt'), url, opts), - (os.path.join('pymvpa-exampledata', 'bold.nii.gz'), url, opts), - (os.path.join('pymvpa-exampledata', 'mask.nii.gz'), url, opts), - (os.path.join('pymvpa-exampledata', 'attributes_literal.txt'), - url, opts), - ] + (os.path.join('pymvpa-exampledata', 'attributes.txt'), url, opts), + (os.path.join('pymvpa-exampledata', 'bold.nii.gz'), url, opts), + (os.path.join('pymvpa-exampledata', 'mask.nii.gz'), url, opts), + (os.path.join('pymvpa-exampledata', 'attributes_literal.txt'), + url, opts), + ] dataset_name = 'haxby2001_simple' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, @@ -162,14 +162,14 @@ def fetch_haxby(data_dir=None, n_subjects=1, fetch_stimuli=False, n_files = len(sub_files) files = [ - (os.path.join('subj%d' % i, sub_file), - url + 'subj%d-2010.01.14.tar.gz' % i, - {'uncompress': True, - 'md5sum': md5sums.get('subj%d-2010.01.14.tar.gz' % i, None)}) - for i in range(1, n_subjects + 1) - for sub_file in sub_files - if not (sub_file == 'anat.nii.gz' and i == 6) # no anat for sub. 6 - ] + (os.path.join('subj%d' % i, sub_file), + url + 'subj%d-2010.01.14.tar.gz' % i, + {'uncompress': True, + 'md5sum': md5sums.get('subj%d-2010.01.14.tar.gz' % i, None)}) + for i in range(1, n_subjects + 1) + for sub_file in sub_files + if not (sub_file == 'anat.nii.gz' and i == 6) # no anat for sub. 
6 + ] files = _fetch_files(data_dir, files, resume=resume, verbose=verbose) @@ -190,17 +190,17 @@ def fetch_haxby(data_dir=None, n_subjects=1, fetch_stimuli=False, # return the data return Bunch( - anat=files[7::n_files], - func=files[0::n_files], - session_target=files[1::n_files], - mask_vt=files[2::n_files], - mask_face=files[3::n_files], - mask_house=files[4::n_files], - mask_face_little=files[5::n_files], - mask_house_little=files[6::n_files], - mask=mask, - description=fdescr, - **kwargs) + anat=files[7::n_files], + func=files[0::n_files], + session_target=files[1::n_files], + mask_vt=files[2::n_files], + mask_face=files[3::n_files], + mask_house=files[4::n_files], + mask_face_little=files[5::n_files], + mask_house_little=files[6::n_files], + mask=mask, + description=fdescr, + **kwargs) def fetch_nyu_rest(n_subjects=None, sessions=[1], data_dir=None, resume=True, @@ -455,7 +455,7 @@ def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True, # First, get the metadata phenotypic = ('ADHD200_40subs_motion_parameters_and_phenotypics.csv', - url + '7781/adhd40_metadata.tgz', opts) + url + '7781/adhd40_metadata.tgz', opts) phenotypic = _fetch_files(data_dir, [phenotypic], resume=resume, verbose=verbose)[0] @@ -598,8 +598,8 @@ def fetch_miyawaki2008(data_dir=None, url=None, resume=True, verbose=1): file_mask = [(os.path.join('mask', m), url, opts) for m in file_mask] file_names = func_figure + func_random + \ - label_figure + label_random + \ - file_mask + label_figure + label_random + \ + file_mask dataset_name = 'miyawaki2008' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, @@ -819,7 +819,7 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, "right button press (visual cue)": "right visual click", "right button press": "right auditory & visual click", "right vs left button press": "right auditory & visual click " - + "vs left auditory&visual click", + + "vs left auditory&visual click", "button press (auditory cue) vs sentence listening": "auditory click vs auditory sentences", "button press (visual cue) vs sentence reading": @@ -866,8 +866,8 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, urls = ["%sbrainomics_data_%d.zip?rql=%s&vid=data-zip" % (root_url, i, _urllib.parse.quote(base_query % {"types": rql_types, - "label": c}, - safe=',()')) + "label": c}, + safe=',()')) for c, i in zip(contrasts_wrapped, contrasts_indices)] filenames = [] for subject_id in subject_ids: @@ -884,8 +884,8 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, urls.append("%sbrainomics_data_masks.zip?rql=%s&vid=data-zip" % (root_url, _urllib.parse.quote(base_query % {"types": '"boolean mask"', - "label": "mask"}, - safe=',()'))) + "label": "mask"}, + safe=',()'))) for subject_id in subject_ids: file_path = os.path.join( "brainomics_data", subject_id, "boolean_mask_mask.nii.gz") @@ -896,8 +896,8 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, urls.append("%sbrainomics_data_anats.zip?rql=%s&vid=data-zip" % (root_url, _urllib.parse.quote(base_query % {"types": '"normalized T1"', - "label": "anatomy"}, - safe=',()'))) + "label": "anatomy"}, + safe=',()'))) for subject_id in subject_ids: file_path = os.path.join( "brainomics_data", subject_id, @@ -911,8 +911,8 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, url_csv2 = ("%sdataset/cubicwebexport2.csv?rql=%s&vid=csvexport" % (root_url, _urllib.parse.quote("Any X,XI,XD WHERE X is QuestionnaireRun, " - "X 
identifier XI, X datetime " - "XD", safe=',') + "X identifier XI, X datetime " + "XD", safe=',') )) else: url_csv = "%s/cubicwebexport.csv" % url From 4569e392f7a24662afcf1613c4ae13e6dbedc2b7 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Mon, 1 Feb 2016 15:45:50 +0100 Subject: [PATCH 0081/1925] Change default n_subjects=None to n_subjects=30 in adhd fetcher --- doc/whats_new.rst | 9 +++++++++ nilearn/datasets/func.py | 7 ++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 3578b53837..96f3d88377 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,3 +1,12 @@ +0.2.2 +====== + +Changelog +--------- + +The default n_subjects=None in :func:`nilearn.datasets.fetch_adhd` is now +changed to n_subjects=30. + 0.2.1 ====== diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 0eea2e96e9..caf1a96e3b 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -391,7 +391,7 @@ def fetch_nyu_rest(n_subjects=None, sessions=[1], data_dir=None, resume=True, session=session, description=fdescr) -def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True, +def fetch_adhd(n_subjects=30, data_dir=None, url=None, resume=True, verbose=1): """Download and load the ADHD resting-state dataset. @@ -399,7 +399,8 @@ def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True, ---------- n_subjects: int, optional The number of subjects to load from maximum of 40 subjects. - If default None, only 30 subjects will be loaded. + By default, 30 subjects will be loaded. If None is given, + all 40 subjects will be loaded. data_dir: string, optional Path of the data directory. Used to force data storage in a specified @@ -441,7 +442,7 @@ def fetch_adhd(n_subjects=None, data_dir=None, url=None, resume=True, nitrc_ids = range(7782, 7822) max_subjects = len(ids) if n_subjects is None: - n_subjects = 30 + n_subjects = max_subjects if n_subjects > max_subjects: warnings.warn('Warning: there are only %d subjects' % max_subjects) n_subjects = max_subjects From c02649ebd0967be58ca4043eedcbc9c076dbbd8c Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Mon, 1 Feb 2016 17:05:06 +0100 Subject: [PATCH 0082/1925] Makes splitting more robust --- nilearn/datasets/atlas.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 0f8c80ac88..1874fa1a82 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -261,8 +261,11 @@ def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, new_names = [names[0]] for label, name in zip(labels[1:], names[1:]): new_label += 1 - if ((left_atlas == label).sum() == 0 or - (right_atlas == label).sum() == 0): + left_elements = (left_atlas == label).sum() + right_elements = (right_atlas == label).sum() + n_elements = float(left_elements + right_elements) + if (left_elements / n_elements < 0.05 or + right_elements / n_elements < 0.05): new_atlas[atlas == label] = new_label new_names.append(name) continue From 9ffef88b03f894712ccaaadba28ce22198bd5526 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Mon, 1 Feb 2016 17:38:27 +0100 Subject: [PATCH 0083/1925] Add test for symmetric split --- nilearn/datasets/tests/test_atlas.py | 38 +++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index 4cfd5c169e..a255b48c14 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ 
b/nilearn/datasets/tests/test_atlas.py
@@ -109,20 +109,44 @@ def test_fail_fetch_atlas_harvard_oxford():
     os.makedirs(nifti_dir)
 
     target_atlas_nii = os.path.join(nifti_dir, target_atlas_fname)
-    struct.load_mni152_template().to_filename(target_atlas_nii)
+
+    # Create false atlas
+    atlas_data = np.zeros((10, 10, 10), dtype=int)
+
+    # Create an interhemispheric map
+    atlas_data[:, :2, :] = 1
+
+    # Create a left map
+    atlas_data[:5, 3:5, :] = 2
+
+    # Create a right map, with one voxel on the left side
+    atlas_data[5:, 7:9, :] = 3
+    atlas_data[4, 7, 0] = 3
+
+    nibabel.Nifti1Image(atlas_data, np.eye(4) * 3).to_filename(
+        target_atlas_nii)
 
     dummy = open(os.path.join(ho_dir, 'HarvardOxford-Cortical.xml'), 'w')
-    dummy.write("<?xml version='1.0' encoding='us-ascii'?> "
-                "<data>"
-                "</data>")
+    dummy.write("<?xml version='1.0' encoding='us-ascii'?>\n"
+                "<data>\n"
+                '<label index="1">R1</label>\n'
+                '<label index="2">R2</label>\n'
+                '<label index="3">R3</label>\n'
+                "</data>")
     dummy.close()
 
     ho = atlas.fetch_atlas_harvard_oxford(target_atlas,
-                                          data_dir=tst.tmpdir)
+                                          data_dir=tst.tmpdir,
+                                          symmetric_split=True)
 
-    assert_true(isinstance(nibabel.load(ho.maps), nibabel.Nifti1Image))
+    assert_true(isinstance(ho.maps, nibabel.Nifti1Image))
     assert_true(isinstance(ho.labels, list))
-    assert_true(len(ho.labels) > 0)
+    assert_equal(len(ho.labels), 5)
+    assert_equal(ho.labels[0], "Background")
+    assert_equal(ho.labels[1], "R1, left part")
+    assert_equal(ho.labels[2], "R1, right part")
+    assert_equal(ho.labels[3], "R2")
+    assert_equal(ho.labels[4], "R3")
 
 
 @with_setup(setup_mock, teardown_mock)

From 0a882012ed545adc849f8e8c023f7e8cf0864615 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Tue, 2 Feb 2016 11:23:53 +0100
Subject: [PATCH 0084/1925] Temporary work-around for neurodebian build
 failure on Travis.

See #975.
---
 continuous_integration/install.sh | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh
index c276df7cae..6c668e5840 100755
--- a/continuous_integration/install.sh
+++ b/continuous_integration/install.sh
@@ -83,6 +83,10 @@ create_new_conda_env() {
 if [[ "$DISTRIB" == "neurodebian" ]]; then
     create_new_venv
     pip install nose-timer
+    # Temporary work-around for
+    # https://github.com/nilearn/nilearn/issues/975. Work-around is from:
+    # https://github.com/travis-ci/travis-ci/issues/5285#issuecomment-164464865
+    sudo sed -i 's/us-central1.gce/us-central1.gce.clouds/' /etc/apt/sources.list && sudo apt-get update
     bash <(wget -q -O- http://neuro.debian.net/_files/neurodebian-travis.sh)
     sudo apt-get install -qq python-scipy python-nose python-nibabel python-sklearn

From bcd02596aa762c3e3083a5c2bb7fd9b32383db6d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Tue, 2 Feb 2016 13:02:32 +0100
Subject: [PATCH 0085/1925] Revert "Temporary work-around for neurodebian
 build failure on Travis."

Looks like the underlying issue was fixed.
---
 continuous_integration/install.sh | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh
index 6c668e5840..c276df7cae 100755
--- a/continuous_integration/install.sh
+++ b/continuous_integration/install.sh
@@ -83,10 +83,6 @@ create_new_conda_env() {
 if [[ "$DISTRIB" == "neurodebian" ]]; then
     create_new_venv
     pip install nose-timer
-    # Temporary work-around for
-    # https://github.com/nilearn/nilearn/issues/975. Work-around is from:
-    # https://github.com/travis-ci/travis-ci/issues/5285#issuecomment-164464865
-    sudo sed -i 's/us-central1.gce/us-central1.gce.clouds/' /etc/apt/sources.list && sudo apt-get update
     bash <(wget -q -O- http://neuro.debian.net/_files/neurodebian-travis.sh)
     sudo apt-get install -qq python-scipy python-nose python-nibabel python-sklearn

From 8fb317fe67db2cf6f3e45d1391ffce6817ab97b3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=93scar=20N=C3=A1jera?=
Date: Tue, 2 Feb 2016 13:45:37 +0100
Subject: [PATCH 0086/1925] [MAINT] Update to sphinx-gallery v0.1.1

---
 doc/sphinxext/sphinx_gallery/__init__.py      |   2 +-
 .../sphinx_gallery/_static/broken_example.png | Bin 0 -> 21404 bytes
 .../sphinx_gallery/_static/gallery.css        | 195 ++++++++++--------
 .../sphinx_gallery/backreferences.py          |   2 +-
 doc/sphinxext/sphinx_gallery/gen_gallery.py   |   8 +-
 doc/sphinxext/sphinx_gallery/gen_rst.py       | 181 ++++++++++------
 doc/sphinxext/sphinx_gallery/notebook.py      | 123 +++++++++++
 7 files changed, 360 insertions(+), 151 deletions(-)
 create mode 100644 doc/sphinxext/sphinx_gallery/_static/broken_example.png
 create mode 100644 doc/sphinxext/sphinx_gallery/notebook.py

diff --git a/doc/sphinxext/sphinx_gallery/__init__.py b/doc/sphinxext/sphinx_gallery/__init__.py
index 247d21aebe..5accf7c4e3 100644
--- a/doc/sphinxext/sphinx_gallery/__init__.py
+++ b/doc/sphinxext/sphinx_gallery/__init__.py
@@ -5,7 +5,7 @@
 """
 import os
 
-__version__ = '0.0.11'
+__version__ = '0.1.1'
 
 
 def glr_path_static():
diff --git a/doc/sphinxext/sphinx_gallery/_static/broken_example.png b/doc/sphinxext/sphinx_gallery/_static/broken_example.png
new file mode 100644
index 0000000000000000000000000000000000000000..4fea24e7df4781c2c32c8d7995511ac89e953145
GIT binary patch
literal 21404
[base85-encoded PNG data omitted: broken_example.png, 21404 bytes]
.. only:: html diff --git a/doc/sphinxext/sphinx_gallery/gen_gallery.py b/doc/sphinxext/sphinx_gallery/gen_gallery.py index 8e58b62362..01b13f542c 100644 --- a/doc/sphinxext/sphinx_gallery/gen_gallery.py +++ b/doc/sphinxext/sphinx_gallery/gen_gallery.py @@ -12,6 +12,7 @@ from __future__ import division, print_function, absolute_import +import re import os from . import glr_path_static from .gen_rst import generate_dir_rst @@ -53,14 +54,13 @@ def generate_gallery_rst(app): plot_gallery = bool(app.builder.config.plot_gallery) gallery_conf.update(app.config.sphinx_gallery_conf) + gallery_conf.update(plot_gallery=plot_gallery) + gallery_conf.update(abort_on_example_error=app.builder.config.abort_on_example_error) # this assures I can call the config in other places app.config.sphinx_gallery_conf = gallery_conf app.config.html_static_path.append(glr_path_static()) - if not plot_gallery: - return - clean_gallery_out(app.builder.outdir) examples_dirs = gallery_conf['examples_dirs'] @@ -102,6 +102,7 @@ def generate_gallery_rst(app): gallery_conf = { + 'filename_pattern': re.escape(os.sep) + 'plot', 'examples_dirs': '../examples', 'gallery_dirs': 'auto_examples', 'mod_example_dir': 'modules/generated', @@ -113,6 +114,7 @@ def generate_gallery_rst(app): def setup(app): """Setup sphinx-gallery sphinx extension""" app.add_config_value('plot_gallery', True, 'html') + app.add_config_value('abort_on_example_error', False, 'html') app.add_config_value('sphinx_gallery_conf', gallery_conf, 'html') app.add_stylesheet('gallery.css') diff --git a/doc/sphinxext/sphinx_gallery/gen_rst.py b/doc/sphinxext/sphinx_gallery/gen_rst.py index 48a04d1e82..bad01aab94 100644 --- a/doc/sphinxext/sphinx_gallery/gen_rst.py +++ b/doc/sphinxext/sphinx_gallery/gen_rst.py @@ -15,41 +15,18 @@ from __future__ import division, print_function, absolute_import from time import time import ast +import hashlib import os import re import shutil -import traceback -import sys import subprocess +import sys +import traceback import warnings -from textwrap import dedent -from . import glr_path_static -from .backreferences import write_backreferences, _thumbnail_div # Try Python 2 first, otherwise load from Python 3 -try: - from StringIO import StringIO -except ImportError: - from io import StringIO - -try: - basestring -except NameError: - basestring = str - -try: - # make sure that the Agg backend is set before importing any - # matplotlib - import matplotlib - matplotlib.use('Agg') - import matplotlib.pyplot as plt -except ImportError: - # this script can be imported by nosetest to find tests to run: we should - # not impose the matplotlib requirement in that case. - pass - - +from textwrap import dedent try: # textwrap indent only exists in python 3 from textwrap import indent @@ -71,6 +48,32 @@ def prefixed_lines(): yield (prefix + line if predicate(line) else line) return ''.join(prefixed_lines()) +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + +try: + # make sure that the Agg backend is set before importing any + # matplotlib + import matplotlib + matplotlib.use('Agg') + import matplotlib.pyplot as plt +except ImportError: + # this script can be imported by nosetest to find tests to run: we should + # not impose the matplotlib requirement in that case. + pass + +from . 
import glr_path_static +from .backreferences import write_backreferences, _thumbnail_div +from .notebook import Notebook + +try: + basestring +except NameError: + basestring = str + + ############################################################################### @@ -95,7 +98,10 @@ def flush(self): ({0:.0f} minutes {1:.3f} seconds)\n\n \n.. container:: sphx-glr-download - **Download Python source code:** :download:`{2} <{2}>`\n""" + **Download Python source code:** :download:`{2} <{2}>`\n +\n.. container:: sphx-glr-download + + **Download IPython notebook:** :download:`{3} <{3}>`\n""" # The following strings are used when we have several pictures: we use # an html div tag that our CSS uses to turn the lists into horizontal @@ -117,13 +123,12 @@ def flush(self): :align: center """ -CODE_OUTPUT = """.. rst-class:: sphx-glr-script-out - **Output**:\n +CODE_OUTPUT = """.. rst-class:: sphx-glr-script-out - :: + Out:: -{0}\n""" + {0}\n""" def get_docstring_and_rest(filename): @@ -196,9 +201,9 @@ def split_code_and_text_blocks(source_file): return blocks -def codestr2rst(codestr): +def codestr2rst(codestr, lang='python'): """Return reStructuredText code block from code string""" - code_directive = "\n.. code-block:: python\n\n" + code_directive = "\n.. code-block:: {0}\n\n".format(lang) indented_block = indent(codestr, ' ' * 4) return code_directive + indented_block @@ -231,14 +236,49 @@ def extract_intro(filename): return first_paragraph +def get_md5sum(src_file): + """Returns md5sum of file""" + + with open(src_file, 'r') as src_data: + src_content = src_data.read() + + # data needs to be encoded in python3 before hashing + if sys.version_info[0] == 3: + src_content = src_content.encode('utf-8') + + src_md5 = hashlib.md5(src_content).hexdigest() + return src_md5 + + +def check_md5sum_change(src_file): + """Returns True if src_file has a different md5sum""" + + src_md5 = get_md5sum(src_file) + + src_md5_file = src_file + '.md5' + src_file_changed = True + if os.path.exists(src_md5_file): + with open(src_md5_file, 'r') as file_checksum: + ref_md5 = file_checksum.read() + if src_md5 == ref_md5: + src_file_changed = False + + if src_file_changed: + with open(src_md5_file, 'w') as file_checksum: + file_checksum.write(src_md5) + + return src_file_changed + + def _plots_are_current(src_file, image_file): - """Test existence of image file and later touch time to source script""" + """Test existence of image file and no change in md5sum of + example""" first_image_file = image_file.format(1) - needs_replot = ( - not os.path.exists(first_image_file) or - os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime) - return not needs_replot + has_image = os.path.exists(first_image_file) + src_file_changed = check_md5sum_change(src_file) + + return has_image and not src_file_changed def save_figures(image_path, fig_count, gallery_conf): @@ -380,7 +420,7 @@ def generate_dir_rst(src_dir, target_dir, gallery_conf, seen_backrefs): intro = extract_intro(new_fname) write_backreferences(seen_backrefs, gallery_conf, target_dir, fname, intro) - this_entry = _thumbnail_div(target_dir, fname, intro) + """ + this_entry = _thumbnail_div(target_dir, fname, intro) + """ .. toctree:: :hidden: @@ -437,21 +477,36 @@ def execute_script(code_block, example_globals, image_path, fig_count, # Depending on whether we have one or more figures, we're using a # horizontal list or a single rst call to 'image'. 
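    The md5sum helpers above replace the old mtime comparison in _plots_are_current: an example is re-run only when the hash of its source no longer matches the cached .md5 file sitting next to it. A minimal standalone sketch of that caching idea (the file name and content are illustrative, and the hash is taken over raw bytes rather than the text-mode read used in the diff):

        import hashlib
        import os

        def md5_changed(src_file):
            """Return True when src_file differs from its cached digest."""
            with open(src_file, 'rb') as f:
                src_md5 = hashlib.md5(f.read()).hexdigest()
            md5_file = src_file + '.md5'
            changed = True
            if os.path.exists(md5_file):
                with open(md5_file, 'r') as f:
                    changed = f.read() != src_md5
            if changed:
                # Refresh the cache so the next run sees the file as current.
                with open(md5_file, 'w') as f:
                    f.write(src_md5)
            return changed

        with open('plot_demo.py', 'w') as f:
            f.write("print('hello gallery')\n")
        print(md5_changed('plot_demo.py'))  # True: first run, cache created
        print(md5_changed('plot_demo.py'))  # False: content unchanged

    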
+ image_list = "" if len(figure_list) == 1: figure_name = figure_list[0] image_list = SINGLE_IMAGE % figure_name.lstrip('/') - else: + elif len(figure_list) > 1: image_list = HLIST_HEADER for figure_name in figure_list: image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/') except Exception: - figure_list = [] - image_list = '%s is not compiling:' % src_file + formatted_exception = traceback.format_exc() + print(80 * '_') - print(image_list) - traceback.print_exc() + print('%s is not compiling:' % src_file) + print(formatted_exception) print(80 * '_') + + figure_list = [] + image_list = codestr2rst(formatted_exception, lang='pytb') + + # Overrides the output thumbnail in the gallery for easy identification + broken_img = os.path.join(glr_path_static(), 'broken_example.png') + shutil.copyfile(broken_img, os.path.join(cwd, image_path.format(1))) + fig_count += 1 # raise count to avoid overwriting image + + # Breaks build on first example error + + if gallery_conf['abort_on_example_error']: + raise + finally: os.chdir(cwd) sys.stdout = orig_stdout @@ -483,22 +538,21 @@ def generate_file_rst(fname, target_dir, src_dir, gallery_conf): script_blocks = split_code_and_text_blocks(example_file) - if _plots_are_current(src_file, image_path): - amount_of_code = sum([len(bcontent) - for blabel, bcontent in script_blocks - if blabel == 'code']) + amount_of_code = sum([len(bcontent) + for blabel, bcontent in script_blocks + if blabel == 'code']) + + if _plots_are_current(example_file, image_path): return amount_of_code time_elapsed = 0 ref_fname = example_file.replace(os.path.sep, '_') example_rst = """\n\n.. _sphx_glr_{0}:\n\n""".format(ref_fname) + example_nb = Notebook(fname, target_dir) - if not fname.startswith('plot'): - convert_func = dict(code=codestr2rst, text=text2string) - for blabel, bcontent in script_blocks: - example_rst += convert_func[blabel](bcontent) + '\n' - else: + filename_pattern = gallery_conf.get('filename_pattern') + if re.search(filename_pattern, src_file) and gallery_conf['plot_gallery']: # A lot of examples contains 'print(__doc__)' for example in # scikit-learn so that running the example prints some useful # information. 
Because the docstring has been separated from @@ -520,6 +574,7 @@ def generate_file_rst(fname, target_dir, src_dir, gallery_conf): gallery_conf) time_elapsed += rtime + example_nb.add_code_cell(bcontent) if is_example_notebook_like: example_rst += codestr2rst(bcontent) + '\n' @@ -530,15 +585,23 @@ def generate_file_rst(fname, target_dir, src_dir, gallery_conf): else: example_rst += text2string(bcontent) + '\n' - - amount_of_code = sum([len(bcontent) - for blabel, bcontent in script_blocks - if blabel == 'code']) + example_nb.add_markdown_cell(text2string(bcontent)) + else: + for blabel, bcontent in script_blocks: + if blabel == 'code': + example_rst += codestr2rst(bcontent) + '\n' + example_nb.add_code_cell(bcontent) + else: + example_rst += bcontent + '\n' + example_nb.add_markdown_cell(text2string(bcontent)) save_thumbnail(image_path, base_image_name, gallery_conf) time_m, time_s = divmod(time_elapsed, 60) + example_nb.save_file() with open(os.path.join(target_dir, base_image_name + '.rst'), 'w') as f: - example_rst += CODE_DOWNLOAD.format(time_m, time_s, fname) + example_rst += CODE_DOWNLOAD.format(time_m, time_s, fname, + example_nb.file_name) f.write(example_rst) + return amount_of_code diff --git a/doc/sphinxext/sphinx_gallery/notebook.py b/doc/sphinxext/sphinx_gallery/notebook.py new file mode 100644 index 0000000000..c0ee5cd80b --- /dev/null +++ b/doc/sphinxext/sphinx_gallery/notebook.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +r""" +============================ +Parser for Jupyter notebooks +============================ + +Class that holds the Ipython notebook information + +""" +# Author: Óscar Nájera +# License: 3-clause BSD + +from __future__ import division, absolute_import, print_function +import json +import os +import re +import sys + +def ipy_notebook_skeleton(): + """Returns a dictionary with the elements of a Jupyter notebook""" + py_version = sys.version_info + notebook_skeleton = { + "cells": [], + "metadata": { + "kernelspec": { + "display_name": "Python " + str(py_version[0]), + "language": "python", + "name": "python" + str(py_version[0]) + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": py_version[0] + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython" + str(py_version[0]), + "version": '{0}.{1}.{2}'.format(*sys.version_info[:3]) + } + }, + "nbformat": 4, + "nbformat_minor": 0 + } + return notebook_skeleton + + +def rst2md(text): + """Converts the RST text from the examples docstrigs and comments + into markdown text for the IPython notebooks""" + + top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M) + text = re.sub(top_heading, r'# \1', text) + + math_eq = re.compile(r'^\.\. 
math::((?:.+)?(?:\n+^ .+)*)', flags=re.M) + text = re.sub(math_eq, + lambda match: r'$${0}$$'.format(match.group(1).strip()), + text) + inline_math = re.compile(r':math:`(.+)`') + text = re.sub(inline_math, r'$\1$', text) + + return text + + +class Notebook(object): + """Ipython notebook object + + Constructs the file cell-by-cell and writes it at the end""" + + def __init__(self, file_name, target_dir): + """Declare the skeleton of the notebook + + Parameters + ---------- + file_name : str + original script file name, .py extension will be renamed + target_dir: str + directory where notebook file is to be saved + """ + + self.file_name = file_name.replace('.py', '.ipynb') + self.write_file = os.path.join(target_dir, self.file_name) + self.work_notebook = ipy_notebook_skeleton() + self.add_code_cell("%matplotlib inline") + + def add_code_cell(self, code): + """Add a code cell to the notebook + + Parameters + ---------- + code : str + Cell content + """ + + code_cell = { + "cell_type": "code", + "execution_count": None, + "metadata": {"collapsed": False}, + "outputs": [], + "source": [code.strip()] + } + self.work_notebook["cells"].append(code_cell) + + def add_markdown_cell(self, text): + """Add a markdown cell to the notebook + + Parameters + ---------- + code : str + Cell content + """ + markdown_cell = { + "cell_type": "markdown", + "metadata": {}, + "source": [rst2md(text)] + } + self.work_notebook["cells"].append(markdown_cell) + + def save_file(self): + """Saves the notebook to a file""" + with open(self.write_file, 'w') as out_nb: + json.dump(self.work_notebook, out_nb, indent=2) From 2a5a6c1fe565d24492c2cafcdaa185f2251dd883 Mon Sep 17 00:00:00 2001 From: Salma Date: Tue, 2 Feb 2016 15:53:56 +0100 Subject: [PATCH 0087/1925] added test for correlation --- nilearn/connectome/connectivity_matrices.py | 4 ++-- nilearn/connectome/tests/test_connectivity_matrices.py | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/nilearn/connectome/connectivity_matrices.py b/nilearn/connectome/connectivity_matrices.py index cf61760e95..9600a8793d 100644 --- a/nilearn/connectome/connectivity_matrices.py +++ b/nilearn/connectome/connectivity_matrices.py @@ -352,9 +352,9 @@ def transform(self, X): The transformed connectivity matrices. 
""" if self.kind == 'correlation': - covariances_std = [self.cov_estimator_.fit(signal._standardize( + covariances = [self.cov_estimator_.fit(signal._standardize( x, detrend=False, normalize=True)).covariance_ for x in X] - connectivities = [_cov_to_corr(cov) for cov in covariances_std] + connectivities = [_cov_to_corr(cov) for cov in covariances] else: covariances = [self.cov_estimator_.fit(x).covariance_ for x in X] if self.kind == 'covariance': diff --git a/nilearn/connectome/tests/test_connectivity_matrices.py b/nilearn/connectome/tests/test_connectivity_matrices.py index 81d56c3096..cb80f96a08 100644 --- a/nilearn/connectome/tests/test_connectivity_matrices.py +++ b/nilearn/connectome/tests/test_connectivity_matrices.py @@ -409,6 +409,8 @@ def test_connectivity_measure_outputs(): if cov_estimator == EmpiricalCovariance(): assert_array_almost_equal(d.dot(cov_new).dot(d), covs[k]) + assert_array_almost_equal(np.diag(cov_new), + np.ones((n_features))) elif kind == "partial correlation": prec = linalg.inv(covs[k]) d = np.sqrt(np.diag(np.diag(prec))) From b1d1d21c9fa17b1cd83d8090d43b2a2f2fa18ff1 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Fri, 29 Jan 2016 17:04:09 +0100 Subject: [PATCH 0088/1925] Fix PEP8 violations --- nilearn/input_data/nifti_spheres_masker.py | 2 +- nilearn/input_data/tests/test_nifti_spheres_masker.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nilearn/input_data/nifti_spheres_masker.py b/nilearn/input_data/nifti_spheres_masker.py index b1d4f4638a..f07dc9d625 100644 --- a/nilearn/input_data/nifti_spheres_masker.py +++ b/nilearn/input_data/nifti_spheres_masker.py @@ -67,7 +67,7 @@ def _apply_mask_and_get_affinity(seeds, niimg, radius, allow_overlap, if nearest is None: continue A[i, nearest] = True - + # Include the voxel containing the seed itself if not masked mask_coords = mask_coords.astype(int).tolist() for i, seed in enumerate(seeds): diff --git a/nilearn/input_data/tests/test_nifti_spheres_masker.py b/nilearn/input_data/tests/test_nifti_spheres_masker.py index b385ebc2c9..5ada0e8731 100644 --- a/nilearn/input_data/tests/test_nifti_spheres_masker.py +++ b/nilearn/input_data/tests/test_nifti_spheres_masker.py @@ -110,7 +110,7 @@ def test_small_radius(): seed = (1.4, 1.4, 1.4) masker = NiftiSpheresMasker([seed], radius=0.1, - mask_img=nibabel.Nifti1Image(mask, affine)) + mask_img=nibabel.Nifti1Image(mask, affine)) masker.fit_transform(nibabel.Nifti1Image(data, affine)) # Test if masking is taken into account @@ -118,11 +118,11 @@ def test_small_radius(): mask[1, 1, 0] = 1 masker = NiftiSpheresMasker([seed], radius=0.1, - mask_img=nibabel.Nifti1Image(mask, affine)) + mask_img=nibabel.Nifti1Image(mask, affine)) assert_raises_regex(ValueError, 'Sphere around seed #0 is empty', masker.fit_transform, nibabel.Nifti1Image(data, affine)) masker = NiftiSpheresMasker([seed], radius=1.6, - mask_img=nibabel.Nifti1Image(mask, affine)) + mask_img=nibabel.Nifti1Image(mask, affine)) masker.fit_transform(nibabel.Nifti1Image(data, affine)) From 7de0bee94f8486b97e762acec2845033d6d5f066 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Tue, 2 Feb 2016 09:27:35 +0100 Subject: [PATCH 0089/1925] limit download status output to 4 on circle ci --- nilearn/datasets/utils.py | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index 353da74a15..4b61ebd6b7 100644 --- a/nilearn/datasets/utils.py +++ b/nilearn/datasets/utils.py @@ -109,7 +109,8 @@ def 
_chunk_report_(cur_chunk_size, bytes_so_far, total_size, initial_size, t0): def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, - initial_size=0, total_size=None, verbose=1): + initial_size=0, total_size=None, verbose=1, + report_frequency=0): """Download a file chunk by chunk and show advancement Parameters @@ -135,6 +136,10 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, verbose: int, optional verbosity level (0 means no message). + report_frequency: int, optional + frequency of partial download reports displayed during a fetch. + default: 0: a report is displayed for each read chunk. + Returns ------- data: string @@ -154,12 +159,18 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, bytes_so_far = initial_size t0 = time.time() + _block_size = 1 + if (report_frequency > 0 and + total_size is not None and total_size // report_frequency > 0): + _block_size = total_size // report_frequency + while True: chunk = response.read(chunk_size) bytes_so_far += len(chunk) - if report_hook: - _chunk_report_(len(chunk), bytes_so_far, total_size, initial_size, t0) - + # Reporting download progress by block + if report_hook and bytes_so_far % _block_size < chunk_size: + _chunk_report_(len(chunk), bytes_so_far, + total_size, initial_size, t0) if chunk: local_file.write(chunk) else: @@ -167,6 +178,14 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, return +# To prevent flooding output messages on Circle CI when running the +# examples, we override report_frequency to 4 using a check on the CIRCLECI +# environment variable which is set to "true" automatically +# by circle ci, see https://circleci.com/docs/environment-variables. +if os.getenv('CIRCLECI'): + from functools import partial + _chunk_read_ = partial(_chunk_read_, report_frequency=4) + def _get_dataset_dir(dataset_name, data_dir=None, default_paths=None, verbose=1): @@ -531,7 +550,10 @@ def _fetch_file(url, data_dir, resume=True, overwrite=False, dt = time.time() - t0 if verbose > 0: # Complete the reporting hook - sys.stderr.write(' ...done. (%i seconds, %i min)\n' % (dt, dt // 60)) + sys.stderr.write(' ...done. 
({0:.4f} MB, {1} seconds, ' + '{2} min)\n' + .format(os.path.getsize(full_name) / float(1e6), + int(dt), int(dt // 60))) except (_urllib.error.HTTPError, _urllib.error.URLError) as e: if 'Error while fetching' not in str(e): # For some odd reason, the error message gets doubled up From 5a317b04e2292f882c783125969c6170d9f8c5ed Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Tue, 2 Feb 2016 16:50:53 +0100 Subject: [PATCH 0090/1925] update chunk report for each downloaded percent of the data --- nilearn/datasets/utils.py | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index 4b61ebd6b7..c1dc572a29 100644 --- a/nilearn/datasets/utils.py +++ b/nilearn/datasets/utils.py @@ -103,14 +103,13 @@ def _chunk_report_(cur_chunk_size, bytes_so_far, total_size, initial_size, t0): # Trailing whitespace is to erase extra char when message length # varies sys.stderr.write( - "\rDownloaded %d of %d bytes (%0.2f%%, %s remaining)" - % (bytes_so_far, total_size, total_percent * 100, + "\rDownloaded %d of %d bytes (%i%%, %s remaining)" + % (bytes_so_far, total_size, int(total_percent * 100), _format_time(time_remaining))) def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, - initial_size=0, total_size=None, verbose=1, - report_frequency=0): + initial_size=0, total_size=None, verbose=1): """Download a file chunk by chunk and show advancement Parameters @@ -136,10 +135,6 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, verbose: int, optional verbosity level (0 means no message). - report_frequency: int, optional - frequency of partial download reports displayed during a fetch. - default: 0: a report is displayed for each read chunk. - Returns ------- data: string @@ -160,14 +155,14 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, t0 = time.time() _block_size = 1 - if (report_frequency > 0 and - total_size is not None and total_size // report_frequency > 0): - _block_size = total_size // report_frequency + if (total_size is not None and total_size // 100 > 0): + _block_size = total_size // 100 while True: chunk = response.read(chunk_size) bytes_so_far += len(chunk) - # Reporting download progress by block + # Reporting download progress by block (one block is 1% of the + # total size of the full size if _block_size == 1). if report_hook and bytes_so_far % _block_size < chunk_size: _chunk_report_(len(chunk), bytes_so_far, total_size, initial_size, t0) @@ -178,14 +173,6 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, return -# To prevent flooding output messages on Circle CI when running the -# examples, we override report_frequency to 4 using a check on the CIRCLECI -# environment variable which is set to "true" automatically -# by circle ci, see https://circleci.com/docs/environment-variables. 
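    The CircleCI-specific override above works by rebinding the module-level name with functools.partial, so every later caller inherits the new keyword default without any call-site change. The pattern in isolation (the environment variable name here is illustrative):

        import os
        from functools import partial

        def report(value, frequency=0):
            print('value=%d frequency=%d' % (value, frequency))

        # Rebind the name only when running under the CI environment, so
        # local runs keep the verbose default.
        if os.getenv('MY_CI'):
            report = partial(report, frequency=4)

        report(10)

    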
-if os.getenv('CIRCLECI'): - from functools import partial - _chunk_read_ = partial(_chunk_read_, report_frequency=4) - def _get_dataset_dir(dataset_name, data_dir=None, default_paths=None, verbose=1): From 5018d6cc67cdb366ede088af0bee9322a7bc2bd5 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Wed, 3 Feb 2016 09:58:03 +0100 Subject: [PATCH 0091/1925] fixing typo --- nilearn/datasets/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index c1dc572a29..f6fd75d35f 100644 --- a/nilearn/datasets/utils.py +++ b/nilearn/datasets/utils.py @@ -162,7 +162,7 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, chunk = response.read(chunk_size) bytes_so_far += len(chunk) # Reporting download progress by block (one block is 1% of the - # total size of the full size if _block_size == 1). + # total size or the full size if _block_size == 1). if report_hook and bytes_so_far % _block_size < chunk_size: _chunk_report_(len(chunk), bytes_so_far, total_size, initial_size, t0) From d253e8cdbfefcca66f19ce49e2c379547114a6fa Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 28 Jan 2016 14:18:54 +0100 Subject: [PATCH 0092/1925] adding memory consumption function based on memory profiler + test --- nilearn/_utils/testing.py | 65 +++++++++++++++++++++++++++++++++-- nilearn/tests/test_testing.py | 30 ++++++++++++++-- 2 files changed, 90 insertions(+), 5 deletions(-) diff --git a/nilearn/_utils/testing.py b/nilearn/_utils/testing.py index 310f273f38..3bef5ad1c5 100644 --- a/nilearn/_utils/testing.py +++ b/nilearn/_utils/testing.py @@ -1,6 +1,5 @@ -"""Utilities for testing nilearn. -""" -# Author: Alexandre Abrahame, Philippe Gervais +"""Utilities for testing nilearn.""" +# Author: Alexandre Abraham, Philippe Gervais # License: simplified BSD import contextlib import functools @@ -10,6 +9,7 @@ import sys import tempfile import warnings +import gc import numpy as np import scipy.signal @@ -60,6 +60,65 @@ def assert_warns(warning_class, func, *args, **kw): return output +# we use memory_profiler library for memory consumption checks +try: + from memory_profiler import memory_usage + + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + return func + + def memory_used(func, *args, **kwargs): + """Compute memory usage when executing func.""" + gc.collect() + mem_use = memory_usage((func, args, kwargs), interval=0.001) + return max(mem_use) - min(mem_use) + +except ImportError: + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + def dummy_func(): + import nose + raise nose.SkipTest('Test requires memory_profiler.') + return dummy_func + + memory_usage = memory_used = None + + +def assert_memory_less_than(memory_limit, tolerance, + callable_obj, *args, **kwargs): + """Check memory consumption of a callable stays below a given limit. + + Parameters + ---------- + memory_limit : int + The expected memory limit in MiB. + tolerance: float + As memory_profiler results have some variability, this adds some + tolerance around memory_limit. Accepted values are in range [0.0, 1.0]. + callable_obj: callable + The function to be called to check memory consumption. + + """ + mem_used = memory_used(callable_obj, *args, **kwargs) + + if mem_used > memory_limit * (1 + tolerance): + raise ValueError("Memory consumption measured ({0:.2f} MiB) is " + "greater than required memory limit ({1} MiB) within " + "accepted tolerance ({2:.2f}%)." 
+ "".format(mem_used, memory_limit, tolerance * 100)) + + # We are confident in memory_profiler measures above 100MiB. + # We raise an error if the measure is below the limit of 50MiB to avoid + # false positive. + if mem_used < 50: + raise ValueError("Memory profiler measured an untrustable memory " + "consumption ({0:.2f} MiB). The expected memory " + "limit was {1:.2f} MiB. Try to bench with larger " + "objects (at least 100MiB in memory).". + format(mem_used, memory_limit)) + + class MockRequest(object): def __init__(self, url): self.url = url diff --git a/nilearn/tests/test_testing.py b/nilearn/tests/test_testing.py index f7fb842514..4d027d35e3 100644 --- a/nilearn/tests/test_testing.py +++ b/nilearn/tests/test_testing.py @@ -1,10 +1,36 @@ import itertools - import numpy as np from nose.tools import assert_equal, assert_raises -from nilearn._utils.testing import generate_fake_fmri +from nilearn._utils.testing import generate_fake_fmri, with_memory_profiler +from nilearn._utils.testing import assert_memory_less_than, assert_raises_regex + + +def create_object(size): + """Just create and return an object containing `size` bytes.""" + b'a' * size + + +@with_memory_profiler +def test_memory_usage(): + # Valid measures + for mem in (500, 200, 100): + assert_memory_less_than(mem, 0.1, create_object, mem * 1024 ** 2) + + # Ensure an exception is raised with too small objects as + # memory_profiler can return non trustable memory measure in this case. + assert_raises_regex(ValueError, + "Memory profiler measured an untrustable memory", + assert_memory_less_than, 50, 0.1, + create_object, 25 * 1024 ** 2) + + # Ensure ValueError is raised if memory used is above expected memory + # limit. + assert_raises_regex(ValueError, + "Memory consumption measured", + assert_memory_less_than, 50, 0.1, + create_object, 100 * 1024 ** 2) def test_generate_fake_fmri(): From 44d60ef028f4b5bd138cba567d6951e54164daca Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 28 Jan 2016 14:58:36 +0100 Subject: [PATCH 0093/1925] add memory profiling function with _iter_check_niimg function. --- nilearn/tests/test_niimg_conversions.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/nilearn/tests/test_niimg_conversions.py b/nilearn/tests/test_niimg_conversions.py index 4fe56b67d9..40ba543fce 100644 --- a/nilearn/tests/test_niimg_conversions.py +++ b/nilearn/tests/test_niimg_conversions.py @@ -24,6 +24,8 @@ from nilearn._utils.exceptions import DimensionError from nilearn._utils import testing, niimg_conversions from nilearn._utils.testing import assert_raises_regex +from nilearn._utils.testing import with_memory_profiler +from nilearn._utils.testing import assert_memory_less_than from nilearn._utils.niimg_conversions import _iter_check_niimg @@ -341,6 +343,22 @@ def test_iter_check_niimgs(): _utils.check_niimg(img_2_4d).get_data()) +def _check_memory(list_img_3d): + # We intentionally add an offset of memory usage to avoid non trustable + # measures with memory_profiler. + b'a' * 100 * 1024 ** 2 + list(_iter_check_niimg(list_img_3d)) + + +@with_memory_profiler +def test_iter_check_niimgs_memory(): + # Verify that iterating over a list of images doesn't consume extra + # memory. 
+ assert_memory_less_than(100, 0.1, _check_memory, + [Nifti1Image(np.ones((100, 100, 200)), np.eye(4)) + for i in range(10)]) + + def test_repr_niimgs(): # Test with file path assert_equal(_utils._repr_niimgs("test"), "test") From 0bc0a870a219dbf488305571520e337b4c3e9d3e Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 28 Jan 2016 14:59:53 +0100 Subject: [PATCH 0094/1925] activate testing with memory_profiler on travis --- continuous_integration/install.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh index c276df7cae..d84d7f071f 100755 --- a/continuous_integration/install.sh +++ b/continuous_integration/install.sh @@ -102,6 +102,8 @@ else exit 1 fi +pip install psutil memory_profiler + if [[ "$COVERAGE" == "true" ]]; then pip install coverage coveralls fi From 36b0290f32b5b5f66e524ff3e6a7147b8dc1a64c Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Tue, 2 Feb 2016 09:18:40 +0100 Subject: [PATCH 0095/1925] trying to fix codacy --- nilearn/tests/test_niimg_conversions.py | 3 ++- nilearn/tests/test_testing.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/nilearn/tests/test_niimg_conversions.py b/nilearn/tests/test_niimg_conversions.py index 40ba543fce..00eacb42c3 100644 --- a/nilearn/tests/test_niimg_conversions.py +++ b/nilearn/tests/test_niimg_conversions.py @@ -346,8 +346,9 @@ def test_iter_check_niimgs(): def _check_memory(list_img_3d): # We intentionally add an offset of memory usage to avoid non trustable # measures with memory_profiler. - b'a' * 100 * 1024 ** 2 + mem_offset = b'a' * 100 * 1024 ** 2 list(_iter_check_niimg(list_img_3d)) + del mem_offset @with_memory_profiler diff --git a/nilearn/tests/test_testing.py b/nilearn/tests/test_testing.py index 4d027d35e3..845f043ca6 100644 --- a/nilearn/tests/test_testing.py +++ b/nilearn/tests/test_testing.py @@ -9,7 +9,8 @@ def create_object(size): """Just create and return an object containing `size` bytes.""" - b'a' * size + mem_size = b'a' * size + return mem_size @with_memory_profiler From d0bc2754c36c4b3ec006a9ac9fa4c4f391c98947 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Tue, 2 Feb 2016 11:42:27 +0100 Subject: [PATCH 0096/1925] trying to fix travis --- nilearn/tests/test_testing.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nilearn/tests/test_testing.py b/nilearn/tests/test_testing.py index 845f043ca6..f5d548a88a 100644 --- a/nilearn/tests/test_testing.py +++ b/nilearn/tests/test_testing.py @@ -9,8 +9,7 @@ def create_object(size): """Just create and return an object containing `size` bytes.""" - mem_size = b'a' * size - return mem_size + mem_use = b'a' * size @with_memory_profiler From 8bc3083da7a34c50ddc232d08823fb957aa841d0 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Tue, 2 Feb 2016 16:41:29 +0100 Subject: [PATCH 0097/1925] applying comments --- nilearn/tests/test_niimg_conversions.py | 2 +- nilearn/tests/test_testing.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nilearn/tests/test_niimg_conversions.py b/nilearn/tests/test_niimg_conversions.py index 00eacb42c3..61b48c1bbd 100644 --- a/nilearn/tests/test_niimg_conversions.py +++ b/nilearn/tests/test_niimg_conversions.py @@ -348,7 +348,7 @@ def _check_memory(list_img_3d): # measures with memory_profiler. 
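    The throwaway b'a' * 100 * 1024 ** 2 allocation in _check_memory exists only to lift the measurement above the roughly 50 MiB floor under which assert_memory_less_than rejects the reading. A usage sketch of the helper itself, assuming nilearn at this commit plus memory_profiler are installed (the sizes are illustrative):

        from nilearn._utils.testing import assert_memory_less_than

        def build_blob(n_bytes):
            return b'a' * n_bytes

        # Passes: a 120 MiB allocation stays below the 150 MiB limit (with
        # 10% tolerance) and above the 50 MiB trust floor.
        assert_memory_less_than(150, 0.1, build_blob, 120 * 1024 ** 2)

    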
mem_offset = b'a' * 100 * 1024 ** 2 list(_iter_check_niimg(list_img_3d)) - del mem_offset + return mem_offset @with_memory_profiler diff --git a/nilearn/tests/test_testing.py b/nilearn/tests/test_testing.py index f5d548a88a..ddcafa0a55 100644 --- a/nilearn/tests/test_testing.py +++ b/nilearn/tests/test_testing.py @@ -10,6 +10,7 @@ def create_object(size): """Just create and return an object containing `size` bytes.""" mem_use = b'a' * size + return mem_use @with_memory_profiler From 1369e6da2834ed8626e144969bfee7ceab61a91c Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Wed, 3 Feb 2016 11:08:22 +0100 Subject: [PATCH 0098/1925] refresh report display every half second --- nilearn/datasets/utils.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index f6fd75d35f..a89757d26b 100644 --- a/nilearn/datasets/utils.py +++ b/nilearn/datasets/utils.py @@ -103,8 +103,8 @@ def _chunk_report_(cur_chunk_size, bytes_so_far, total_size, initial_size, t0): # Trailing whitespace is to erase extra char when message length # varies sys.stderr.write( - "\rDownloaded %d of %d bytes (%i%%, %s remaining)" - % (bytes_so_far, total_size, int(total_percent * 100), + "\rDownloaded %d of %d bytes (%.1f%%, %s remaining)" + % (bytes_so_far, total_size, total_percent * 100, _format_time(time_remaining))) @@ -154,23 +154,24 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, bytes_so_far = initial_size t0 = time.time() - _block_size = 1 - if (total_size is not None and total_size // 100 > 0): - _block_size = total_size // 100 - + t_display = t0 while True: chunk = response.read(chunk_size) bytes_so_far += len(chunk) - # Reporting download progress by block (one block is 1% of the - # total size or the full size if _block_size == 1). - if report_hook and bytes_so_far % _block_size < chunk_size: + t_read = time.time() + # Refresh report every half second. + if report_hook and t_read > t_display + 0.5: _chunk_report_(len(chunk), bytes_so_far, total_size, initial_size, t0) + t_display = time.time() if chunk: local_file.write(chunk) else: break - + # Show final report (with 100% downloaded). + if report_hook: + _chunk_report_(len(chunk), bytes_so_far, + total_size, initial_size, t0) return From 1284250ba12ede4d0db9281d64d8ea6d221fab62 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Wed, 3 Feb 2016 14:11:04 +0100 Subject: [PATCH 0099/1925] reverting download completed display change --- nilearn/datasets/utils.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index a89757d26b..732bf7fcf8 100644 --- a/nilearn/datasets/utils.py +++ b/nilearn/datasets/utils.py @@ -538,10 +538,8 @@ def _fetch_file(url, data_dir, resume=True, overwrite=False, dt = time.time() - t0 if verbose > 0: # Complete the reporting hook - sys.stderr.write(' ...done. ({0:.4f} MB, {1} seconds, ' - '{2} min)\n' - .format(os.path.getsize(full_name) / float(1e6), - int(dt), int(dt // 60))) + sys.stderr.write(' ...done. 
({0:.0f} seconds, {1:.0f} min)\n' + .format(dt, dt // 60)) except (_urllib.error.HTTPError, _urllib.error.URLError) as e: if 'Error while fetching' not in str(e): # For some odd reason, the error message gets doubled up From 3ca02bc280910da19a6b256a009a674a6bc56c8a Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Wed, 3 Feb 2016 14:14:57 +0100 Subject: [PATCH 0100/1925] re applying lost changes --- nilearn/datasets/utils.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index 732bf7fcf8..6680dd9ddc 100644 --- a/nilearn/datasets/utils.py +++ b/nilearn/datasets/utils.py @@ -153,17 +153,16 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, total_size = None bytes_so_far = initial_size - t0 = time.time() - t_display = t0 + t0 = time_last_display = time.time() while True: chunk = response.read(chunk_size) bytes_so_far += len(chunk) - t_read = time.time() + time_last_read = time.time() # Refresh report every half second. - if report_hook and t_read > t_display + 0.5: + if report_hook and time_last_read > time_last_display + 0.5: _chunk_report_(len(chunk), bytes_so_far, total_size, initial_size, t0) - t_display = time.time() + time_last_display = time_last_read if chunk: local_file.write(chunk) else: From 113b24eed8585925760acdd848e580f5c28bf2d4 Mon Sep 17 00:00:00 2001 From: Salma Date: Wed, 3 Feb 2016 15:23:02 +0100 Subject: [PATCH 0101/1925] typo --- nilearn/connectome/connectivity_matrices.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nilearn/connectome/connectivity_matrices.py b/nilearn/connectome/connectivity_matrices.py index 9600a8793d..b96a6bbb24 100644 --- a/nilearn/connectome/connectivity_matrices.py +++ b/nilearn/connectome/connectivity_matrices.py @@ -352,9 +352,10 @@ def transform(self, X): The transformed connectivity matrices. 
""" if self.kind == 'correlation': - covariances = [self.cov_estimator_.fit(signal._standardize( - x, detrend=False, normalize=True)).covariance_ for x in X] - connectivities = [_cov_to_corr(cov) for cov in covariances] + covariances_std = [self.cov_estimator_.fit( + signal._standardize(x, detrend=False, normalize=True) + ).covariance_ for x in X] + connectivities = [_cov_to_corr(cov) for cov in covariances_std] else: covariances = [self.cov_estimator_.fit(x).covariance_ for x in X] if self.kind == 'covariance': From 4439072f85faf873c147bfce64d744532765f252 Mon Sep 17 00:00:00 2001 From: Salma Date: Wed, 3 Feb 2016 15:25:43 +0100 Subject: [PATCH 0102/1925] set syntax for python 2.6 --- nilearn/connectome/connectivity_matrices.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/connectome/connectivity_matrices.py b/nilearn/connectome/connectivity_matrices.py index b96a6bbb24..4755bd1f57 100644 --- a/nilearn/connectome/connectivity_matrices.py +++ b/nilearn/connectome/connectivity_matrices.py @@ -313,7 +313,7 @@ def fit(self, X, y=None): "You provided {0}".format(X.__class__)) subjects_types = [type(s) for s in X] - if set(subjects_types) != {np.ndarray}: + if set(subjects_types) != set([np.ndarray]): raise ValueError("Each subject must be 2D numpy.ndarray.\n You " "provided {0}".format(str(subjects_types))) From 10c0921192b3ad24cb600faf0bad19641682f151 Mon Sep 17 00:00:00 2001 From: Salma Date: Wed, 3 Feb 2016 15:48:07 +0100 Subject: [PATCH 0103/1925] set syntax for python 2.6 --- nilearn/connectome/connectivity_matrices.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/connectome/connectivity_matrices.py b/nilearn/connectome/connectivity_matrices.py index 4755bd1f57..c54a0461bf 100644 --- a/nilearn/connectome/connectivity_matrices.py +++ b/nilearn/connectome/connectivity_matrices.py @@ -318,7 +318,7 @@ def fit(self, X, y=None): "provided {0}".format(str(subjects_types))) subjects_dims = [s.ndim for s in X] - if set(subjects_dims) != {2}: + if set(subjects_dims) != set([2]): raise ValueError("Each subject must be 2D numpy.ndarray.\n You" "provided arrays of dimensions " "{0}".format(str(subjects_dims))) From b5dabd5302c81136fd278ee8a406176ac20266b1 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Wed, 3 Feb 2016 16:18:58 +0100 Subject: [PATCH 0104/1925] improve refresh display of download report --- nilearn/datasets/utils.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index 6680dd9ddc..0f2f5dc4d6 100644 --- a/nilearn/datasets/utils.py +++ b/nilearn/datasets/utils.py @@ -158,8 +158,10 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, chunk = response.read(chunk_size) bytes_so_far += len(chunk) time_last_read = time.time() - # Refresh report every half second. - if report_hook and time_last_read > time_last_display + 0.5: + if (report_hook and + # Refresh report every half second or when download is + # finished. + (time_last_read > time_last_display + 0.5 or not chunk)): _chunk_report_(len(chunk), bytes_so_far, total_size, initial_size, t0) time_last_display = time_last_read @@ -167,10 +169,7 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, local_file.write(chunk) else: break - # Show final report (with 100% downloaded). 
- if report_hook: - _chunk_report_(len(chunk), bytes_so_far, - total_size, initial_size, t0) + return From d7abc449692152c5013ca59e13121aa4548a8b1f Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Thu, 4 Feb 2016 10:54:18 +0100 Subject: [PATCH 0105/1925] Update What's new. --- doc/whats_new.rst | 61 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 59 insertions(+), 2 deletions(-) diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 96f3d88377..5779910847 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -4,8 +4,65 @@ Changelog --------- -The default n_subjects=None in :func:`nilearn.datasets.fetch_adhd` is now -changed to n_subjects=30. +The 0.2.2 is a bugfix + dependency update release (for sphinx gallery). It +aims at preparing a renewal of the tutorials. + +New features +............ + - Fetcher for Megatrawl Netmats dataset. + +Enhancements +............ + - Flake8 is now run on pull requests. + - Reworking of the documentation organization. + - Sphinx-gallery updated to version 0.1.1 + - The default n_subjects=None in :func:`nilearn.datasets.fetch_adhd` is now + changed to n_subjects=30. + +Bug fixes +......... + - Fix `symmetric_split` behavior in + :func:`nilearn.datasets.fetch_atlas_harvard_oxford` + - Fix casting errors when providing integer data to + :func:`nilearn.image.high_variance_confounds` + - Fix matplotlib 1.5.0 compatibility in + :func:`nilearn.plotting.plot_prob_atlas` + - Fix matplotlib backend choice on Mac OS X. + - :func:`nilearn.plotting.find_xyz_cut_coords` raises a meaningful error + when 4D data is provided instead of 3D. + - :class:`nilearn.input_data.NiftiSpheresMasker` handles radius smaller than + the size of a voxel + - :class:`nilearn.regions.RegionExtractor` handles data containing Nans. + - Confound regression does not force systematically the normalization of + the confounds. + - Force time series normalization in + :class:`nilearn.connectome.ConnectivityMeasure` + and check dimensionality of the input. + - :func:`nilearn._utils.numpy_conversions.csv_to_array` could consider + valid CSV files as invalid. + +API changes summary +................... + - Deprecated dataset downloading function have been removed. + - Download progression message refreshing rate has been lowered to sparsify + CircleCI logs. + +Contributors +............. 
+ +Contributors (from ``git shortlog -ns 0.2.1..0.2.2``):: + + 39 Kamalakar Daddy + 22 Alexandre Abraham + 21 Loïc Estève + 19 Gael Varoquaux + 12 Alexandre Abadie + 7 Salma + 3 Danilo Bzdok + 1 Arthur Mensch + 1 Ben Cipollini + 1 Elvis Dohmatob + 1 Óscar Nájera 0.2.1 ====== From 01068783fb07b9cadc4858875882adbf76f842b7 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Thu, 4 Feb 2016 11:16:11 +0100 Subject: [PATCH 0106/1925] Update mailmap and version --- .mailmap | 37 +++++++++++++++++-------------------- nilearn/version.py | 2 +- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/.mailmap b/.mailmap index fd860f4f0d..6db314aaf2 100644 --- a/.mailmap +++ b/.mailmap @@ -1,39 +1,36 @@ -Alexandre Abraham -Alexandre Abraham -Alexandre Gramfort +Aina Frau Pascual +Alexandre Abadie +Alexandre Abraham +Alexandre Gramfort Alexandre Savio +Arthur Mensch Ben Cipollini Bertrand Thirion -Chris Filo Gorgolewski +Chris Filo Gorgolewski Danilo Bzdok +Demian Wassermann +Dimitri Papadopoulos Orfanos Elvis Dohmatob Fabian Pedregosa -Fabian Pedregosa -Fabian Pedregosa -Gael Varoquaux -GaelVaroquaux Gael Varoquaux -Jan Margeta +Jan Margeta Jaques Grobler Jason Gors Jean Kossaifi -Jean Kossaifi +juhuntenburg +Kamalakar Daddy Konstantin Shmelkov Loïc Estève +Martin Perez-Guevara Matthias Ekman -Mehdi Rahim -Mehdi Rahim +Mehdi Rahim Michael Eickenberg +Michael Hanke Michael Waskom -Philippe Gervais +Óscar Nájera +Philippe Gervais Ronald Phlypo -Salma Bougacha +Salma Vincent Michel Virgile Fritsch -Yannick Schwartz -schwarty Yannick Schwartz -Óscar Nájera -Kamalakar Daddy -Fabian Pedregosa -Fabian Pedregosa diff --git a/nilearn/version.py b/nilearn/version.py index 3a1475ed1b..e0b77a19b0 100644 --- a/nilearn/version.py +++ b/nilearn/version.py @@ -21,7 +21,7 @@ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # -__version__ = '0.2.1' +__version__ = '0.2.2' _NILEARN_INSTALL_MSG = 'See %s for installation information.' % ( 'http://nilearn.github.io/introduction.html#installation') From 56aeb237f062c376dc0d566c1a58deaf3b52c752 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Thu, 4 Feb 2016 11:22:04 +0100 Subject: [PATCH 0107/1925] Update News on website --- doc/themes/nilearn/layout.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/themes/nilearn/layout.html b/doc/themes/nilearn/layout.html index d4755755fc..ceed13c1c4 100644 --- a/doc/themes/nilearn/layout.html +++ b/doc/themes/nilearn/layout.html @@ -195,7 +195,7 @@

    Machine learning for Neuro-Imaging in Python
    News
    -        • Dec 13th 2015: Nilearn 0.2.1 released
    +        • Feb 4th 2016: Nilearn 0.2.2 released
             • July 13th 2015: Nilearn 0.1.4 released
    
  • From ee5534141221cfafe2074c0b69a971a362d09531 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= Date: Thu, 4 Feb 2016 13:06:50 +0100 Subject: [PATCH 0108/1925] MAINT remove unused cur_chunk_size parameter from nilearn.datasets.utils._chunk_report_ --- nilearn/datasets/utils.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index 0f2f5dc4d6..54ec6ddf02 100644 --- a/nilearn/datasets/utils.py +++ b/nilearn/datasets/utils.py @@ -63,14 +63,11 @@ def readlinkabs(link): return os.path.join(os.path.dirname(link), path) -def _chunk_report_(cur_chunk_size, bytes_so_far, total_size, initial_size, t0): +def _chunk_report_(bytes_so_far, total_size, initial_size, t0): """Show downloading percentage. Parameters ---------- - cur_chunk_size: int - Number of bytes downloaded on current iteration (0=>end of download) - bytes_so_far: int Number of downloaded bytes @@ -162,7 +159,7 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, # Refresh report every half second or when download is # finished. (time_last_read > time_last_display + 0.5 or not chunk)): - _chunk_report_(len(chunk), bytes_so_far, + _chunk_report_(bytes_so_far, total_size, initial_size, t0) time_last_display = time_last_read if chunk: From d00ee88f22da72d710199e496b72c0bec9b14a5d Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Thu, 4 Feb 2016 13:43:31 +0100 Subject: [PATCH 0109/1925] Remove ref to csv_to_array --- doc/whats_new.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 5779910847..dac46d46ec 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -38,7 +38,7 @@ Bug fixes - Force time series normalization in :class:`nilearn.connectome.ConnectivityMeasure` and check dimensionality of the input. - - :func:`nilearn._utils.numpy_conversions.csv_to_array` could consider + - `nilearn._utils.numpy_conversions.csv_to_array` could consider valid CSV files as invalid. API changes summary From 08364754500f62110debaf44753395af33d1282f Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Thu, 4 Feb 2016 14:12:22 +0100 Subject: [PATCH 0110/1925] Clean __pycache__ directories --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 97cebc5699..fc8f63370b 100644 --- a/Makefile +++ b/Makefile @@ -13,6 +13,7 @@ all: clean test doc-noplot clean-pyc: find . -name "*.pyc" | xargs rm -f + find . -name "__pycache__" | xargs rm -r clean-so: find . -name "*.so" | xargs rm -f From d841026d4e63a3904273028947cefa3e2f2de6c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= Date: Thu, 4 Feb 2016 15:22:04 +0100 Subject: [PATCH 0111/1925] Fix Makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fc8f63370b..9e82623705 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ all: clean test doc-noplot clean-pyc: find . -name "*.pyc" | xargs rm -f - find . -name "__pycache__" | xargs rm -r + find . -name "__pycache__" | xargs rm -rf clean-so: find . 
-name "*.so" | xargs rm -f From d066ba3d2f805685ea938d40bdd32e68cd678a7d Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 4 Feb 2016 11:36:44 +0100 Subject: [PATCH 0112/1925] adding contributing page --- contributing.rst | 39 ++++++++++++++++++++++++++++++++++ doc/index.rst | 8 +++---- doc/install_doc_component.html | 12 +++++------ doc/themes/nilearn/layout.html | 1 + 4 files changed, 50 insertions(+), 10 deletions(-) create mode 100644 contributing.rst diff --git a/contributing.rst b/contributing.rst new file mode 100644 index 0000000000..9b97fd4698 --- /dev/null +++ b/contributing.rst @@ -0,0 +1,39 @@ +.. _contributing: + +============ +Contributing +============ + +This project is a community effort, and everyone is welcome to +contribute. + +The project is hosted on https://github.com/nilearn/nilearn + +Nilearn is somewhat :ref:`selective ` when it comes to +adding new algorithms, and the best way to contribute and to help the project +is to start working on known issues. +See :ref:`easy_issues` to get started. + +Submitting a bug report +======================= + +In case you experience issues using this package, do not hesitate to submit a +ticket to the +`Bug Tracker `_. You are +also welcome to post feature requests or pull requests. + +.. _git_repo: + +Retrieving the latest code +========================== + +We use `Git `_ for version control and +`GitHub `_ for hosting our main repository. + +You can check out the latest sources with the command:: + + git clone git://github.com/nilearn/nilearn.git + +or if you have write privileges:: + + git clone git@github.com:nilearn/nilearn.git diff --git a/doc/index.rst b/doc/index.rst index d5b62a6c91..ad420410e5 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -74,13 +74,13 @@ .. raw:: html -
    [doc/index.rst raw-HTML hunk: the div markup changed here was lost in extraction]
    
@@ -96,6 +96,6 @@ user_guide.rst auto_examples/index.rst whats_new.rst + ../contributing.rst Nilearn is part of the `NiPy ecosystem `_. - diff --git a/doc/install_doc_component.html b/doc/install_doc_component.html index 82f8ec85f5..402003f810 100644 --- a/doc/install_doc_component.html +++ b/doc/install_doc_component.html @@ -46,10 +46,10 @@

We recommend that you install a complete scientific Python distribution like 64 bit Anaconda - . Since it meets all the requirements of nilearn, it will save + . Since it meets all the requirements of nilearn, it will save you time and trouble. You could also check PythonXY as an alternative.

+ href="http://code.google.com/p/pythonxy/" target="_blank">PythonXY

Nilearn requires a Python installation and the following dependencies: ipython, scikit-learn, matplotlib and nibabel

@@ -77,7 +77,7 @@

We recommend that you install a complete scientific Python distribution like 64 bit - Anaconda. Since it meets all the requirements of nilearn, + Anaconda. Since it meets all the requirements of nilearn, it will save you time and trouble.

Nilearn requires a Python installation and the following @@ -103,7 +103,7 @@

If you are using Ubuntu or Debian and you have access to - Neurodebian, then simply install the + Neurodebian, then simply install the python-nilearn package through Neurodebian.

@@ -113,14 +113,14 @@ packages using the distribution package manager: ipython , scikit-learn (sometimes called sklearn, or python-sklearn), matplotlib (sometimes - called python-matplotlib) and nibabel + called python-matplotlib) and nibabel (sometimes called python-nibabel)

If you do not have access to the package manager we recommend that you install a complete scientific Python distribution like 64 bit - Anaconda. Since it meets all the requirements of nilearn, + Anaconda. Since it meets all the requirements of nilearn, it will save you time and trouble..

Second: open a Terminal

diff --git a/doc/themes/nilearn/layout.html b/doc/themes/nilearn/layout.html index ceed13c1c4..fdaebbfdab 100644 --- a/doc/themes/nilearn/layout.html +++ b/doc/themes/nilearn/layout.html @@ -225,6 +225,7 @@

Development

href="https://github.com/nilearn/nilearn">Nilearn on GitHub

  • All material Free Software: BSD license (3 clause).

  • Authors

  • +
  • Contributing

  • {% endif %} From a1eec4db728c60e6d53fa9db0509a19f73640737 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 4 Feb 2016 12:41:29 +0100 Subject: [PATCH 0113/1925] fix contributing page generation --- doc/conf.py | 1 + doc/index.rst | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index ed8b32ed0a..e176ca7881 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -28,6 +28,7 @@ try: shutil.copy('../AUTHORS.rst', '.') + shutil.copy('../contributing.rst', '.') except IOError: # When nose scans this file, it is not in the right working # directory, and thus the line above fails diff --git a/doc/index.rst b/doc/index.rst index ad420410e5..3c0b3e2ad9 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -96,6 +96,6 @@ user_guide.rst auto_examples/index.rst whats_new.rst - ../contributing.rst + contributing.rst Nilearn is part of the `NiPy ecosystem `_. From 46987cd4778d9d69acce577a1d8bf834b7418e64 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 4 Feb 2016 13:01:50 +0100 Subject: [PATCH 0114/1925] better access strategy of rst located at root --- doc/AUTHORS.rst | 3 +++ doc/conf.py | 8 -------- doc/contributing.rst | 3 +++ 3 files changed, 6 insertions(+), 8 deletions(-) create mode 100644 doc/AUTHORS.rst create mode 100644 doc/contributing.rst diff --git a/doc/AUTHORS.rst b/doc/AUTHORS.rst new file mode 100644 index 0000000000..7e7db43b92 --- /dev/null +++ b/doc/AUTHORS.rst @@ -0,0 +1,3 @@ +.. -*- mode: rst -*- + +.. include:: ../AUTHORS.rst diff --git a/doc/conf.py b/doc/conf.py index e176ca7881..21a0229ab1 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -26,14 +26,6 @@ # We also add the directory just above to enable local imports of nilearn sys.path.insert(0, os.path.abspath('..')) -try: - shutil.copy('../AUTHORS.rst', '.') - shutil.copy('../contributing.rst', '.') -except IOError: - # When nose scans this file, it is not in the right working - # directory, and thus the line above fails - pass - # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be diff --git a/doc/contributing.rst b/doc/contributing.rst new file mode 100644 index 0000000000..33230297f8 --- /dev/null +++ b/doc/contributing.rst @@ -0,0 +1,3 @@ +.. -*- mode: rst -*- + +.. include:: ../contributing.rst From b759eeabd5c8bf09aa5a61a2b8826aff716e4cad Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 4 Feb 2016 13:24:27 +0100 Subject: [PATCH 0115/1925] adding link from README.rst + more information --- README.rst | 15 +++------------ contributing.rst | 21 ++++++++++++++++----- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/README.rst b/README.rst index 952a089154..d9df78c564 100644 --- a/README.rst +++ b/README.rst @@ -66,16 +66,7 @@ http://nilearn.github.io/introduction.html#installation. Development =========== -Code ----- +The project is hosted on https://github.com/nilearn/nilearn -GIT -~~~ - -You can check the latest sources with the command:: - - git clone git://github.com/nilearn/nilearn - -or if you have write privileges:: - - git clone git@github.com:nilearn/nilearn +More detailed instruction on how to contribute are available at +http://nilearn.github.io/contributing.html diff --git a/contributing.rst b/contributing.rst index 9b97fd4698..358aa9ff42 100644 --- a/contributing.rst +++ b/contributing.rst @@ -9,10 +9,11 @@ contribute. 
The project is hosted on https://github.com/nilearn/nilearn -Nilearn is somewhat :ref:`selective ` when it comes to -adding new algorithms, and the best way to contribute and to help the project -is to start working on known issues. -See :ref:`easy_issues` to get started. +Nilearn is somewhat selective when it comes to adding new features, and the +best way to contribute and to help the project is to start working on known +issues. +See `Easy issues `_ to get +started. Submitting a bug report ======================= @@ -28,7 +29,10 @@ Retrieving the latest code ========================== We use `Git `_ for version control and -`GitHub `_ for hosting our main repository. +`GitHub `_ for hosting our main repository. If you are +new on GitHub and don't know how to work with it, please first +have a look at `this `_ to get the basics. + You can check out the latest sources with the command:: @@ -37,3 +41,10 @@ You can check out the latest sources with the command:: or if you have write privileges:: git clone git@github.com:nilearn/nilearn.git + +Coding guidelines +================= + +Nilearn follows the coding conventions used by scikit-learn. `Please read them +`_ +before you start implementing your changes. From b061e74781f89eb1c5ecc02f2b8215b1f7e34b61 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 4 Feb 2016 14:06:03 +0100 Subject: [PATCH 0116/1925] use upper case filename for root rst files (AUTHORS, CONTRIBUTING) and lower case in other places --- contributing.rst => CONTRIBUTING.rst | 0 doc/{AUTHORS.rst => authors.rst} | 0 doc/contributing.rst | 2 +- doc/index.rst | 2 +- doc/themes/nilearn/layout.html | 6 +++--- 5 files changed, 5 insertions(+), 5 deletions(-) rename contributing.rst => CONTRIBUTING.rst (100%) rename doc/{AUTHORS.rst => authors.rst} (100%) diff --git a/contributing.rst b/CONTRIBUTING.rst similarity index 100% rename from contributing.rst rename to CONTRIBUTING.rst diff --git a/doc/AUTHORS.rst b/doc/authors.rst similarity index 100% rename from doc/AUTHORS.rst rename to doc/authors.rst diff --git a/doc/contributing.rst b/doc/contributing.rst index 33230297f8..9e8a9f2e85 100644 --- a/doc/contributing.rst +++ b/doc/contributing.rst @@ -1,3 +1,3 @@ .. -*- mode: rst -*- -.. include:: ../contributing.rst +.. include:: ../CONTRIBUTING.rst diff --git a/doc/index.rst b/doc/index.rst index 3c0b3e2ad9..7dfdfb2b4d 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -92,7 +92,7 @@ .. toctree:: :hidden: - AUTHORS.rst + authors.rst user_guide.rst auto_examples/index.rst whats_new.rst diff --git a/doc/themes/nilearn/layout.html b/doc/themes/nilearn/layout.html index fdaebbfdab..8822cd5dcd 100644 --- a/doc/themes/nilearn/layout.html +++ b/doc/themes/nilearn/layout.html @@ -22,7 +22,7 @@
  • User Guide
  • Examples
  • Reference
  • - + {% endblock %} @@ -224,7 +224,7 @@

    Development

  • Nilearn on GitHub

  • All material Free Software: BSD license (3 clause).

  • -
  • Authors

  • +
  • Authors

  • Contributing

  • {% endif %} @@ -232,7 +232,7 @@

    Development

    Giving credit

    {% endblock %} From ed66d141d051b40c48cf30acf4047bdfc89980c2 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 4 Feb 2016 15:08:39 +0100 Subject: [PATCH 0117/1925] applying comments --- CONTRIBUTING.rst | 3 +-- doc/authors.rst | 2 -- doc/contributing.rst | 2 -- 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 358aa9ff42..5d6c4220a6 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -9,8 +9,7 @@ contribute. The project is hosted on https://github.com/nilearn/nilearn -Nilearn is somewhat selective when it comes to adding new features, and the -best way to contribute and to help the project is to start working on known +The best way to contribute and to help the project is to start working on known issues. See `Easy issues `_ to get started. diff --git a/doc/authors.rst b/doc/authors.rst index 7e7db43b92..e122f914a8 100644 --- a/doc/authors.rst +++ b/doc/authors.rst @@ -1,3 +1 @@ -.. -*- mode: rst -*- - .. include:: ../AUTHORS.rst diff --git a/doc/contributing.rst b/doc/contributing.rst index 9e8a9f2e85..e582053ea0 100644 --- a/doc/contributing.rst +++ b/doc/contributing.rst @@ -1,3 +1 @@ -.. -*- mode: rst -*- - .. include:: ../CONTRIBUTING.rst From d486939920964659d6854e7fc102b3874826c4c8 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Fri, 5 Feb 2016 11:14:53 +0100 Subject: [PATCH 0118/1925] Fix PythonXY link --- doc/install_doc_component.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/install_doc_component.html b/doc/install_doc_component.html index 402003f810..ebf4127311 100644 --- a/doc/install_doc_component.html +++ b/doc/install_doc_component.html @@ -49,7 +49,7 @@ . Since it meets all the requirements of nilearn, it will save you time and trouble. You could also check - href="http://code.google.com/p/pythonxy/" target="_blank">PythonXY + href="http://python-xy.github.io/" target="_blank">PythonXY

    Nilearn requires a Python installation and the following dependencies: ipython, scikit-learn, matplotlib and nibabel

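[Editor's aside, not part of the patch series: the install note above names
ipython, scikit-learn, matplotlib and nibabel as dependencies. A minimal
sketch for checking that they are importable is shown below; the mapping from
distribution names to import names (ipython -> IPython, scikit-learn ->
sklearn) is the conventional one and is an assumption here.]

    # Check that nilearn's documented dependencies can be imported.
    for name in ("IPython", "sklearn", "matplotlib", "nibabel"):
        try:
            __import__(name)
            print("%s: OK" % name)
        except ImportError:
            print("%s: missing" % name)
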
    From 9747b5cbc2f753c4dd848fe61bf9ef3ed59e8539 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Fri, 5 Feb 2016 11:25:28 +0100 Subject: [PATCH 0119/1925] DOC fix README.rst redundant text in contributing section --- README.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.rst b/README.rst index d9df78c564..71a4df0644 100644 --- a/README.rst +++ b/README.rst @@ -66,7 +66,5 @@ http://nilearn.github.io/introduction.html#installation. Development =========== -The project is hosted on https://github.com/nilearn/nilearn - -More detailed instruction on how to contribute are available at +Detailed instructions on how to contribute are available at http://nilearn.github.io/contributing.html From e5fef38140a688b374b383db6296c12ea945786e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= Date: Fri, 5 Feb 2016 11:59:49 +0100 Subject: [PATCH 0120/1925] MAINT temporary Work-around missing libgfortran dependency for latest scipy version. --- continuous_integration/install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh index d84d7f071f..3914c51dfc 100755 --- a/continuous_integration/install.sh +++ b/continuous_integration/install.sh @@ -33,7 +33,7 @@ print_conda_requirements() { # if yes which version to install. For example: # - for numpy, NUMPY_VERSION is used # - for scikit-learn, SCIKIT_LEARN_VERSION is used - TO_INSTALL_ALWAYS="pip nose" + TO_INSTALL_ALWAYS="pip nose libgfortran" REQUIREMENTS="$TO_INSTALL_ALWAYS" TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn flake8" for PACKAGE in $TO_INSTALL_MAYBE; do From 64332cc68f34dc1b3d1e077f6859b5cce72ce590 Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Fri, 5 Feb 2016 14:07:44 +0100 Subject: [PATCH 0121/1925] ENH: plotting: better dimming on white bg --- nilearn/plotting/img_plotting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/plotting/img_plotting.py b/nilearn/plotting/img_plotting.py index c4a6bca2dc..c4bb52107d 100644 --- a/nilearn/plotting/img_plotting.py +++ b/nilearn/plotting/img_plotting.py @@ -380,7 +380,7 @@ def _load_anat(anat_img=MNI152TEMPLATE, dim=False, black_bg='auto'): else: if not isinstance(dim, numbers.Number): dim = .6 - vmin = vmean - (1 + dim) * ptp + vmin = .5 * (2 - dim) * vmean - (1 + dim) * ptp return anat_img, black_bg, vmin, vmax From 71ca155d90cad8aa13ace66804cfdf3ee2abcf85 Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Fri, 5 Feb 2016 14:33:25 +0100 Subject: [PATCH 0122/1925] MAINT: make install shouldn't update the website --- Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/Makefile b/Makefile index 97cebc5699..1700ed14d8 100644 --- a/Makefile +++ b/Makefile @@ -66,5 +66,3 @@ doc: pdf: make -C doc pdf -install: - cd doc && make install From 34fd230b3b28626a9b9571aef730ae21bf5c52a1 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Tue, 9 Feb 2016 10:12:43 +0100 Subject: [PATCH 0123/1925] ENH [Plotting] Accepting User defined levels in contour fillings --- nilearn/plotting/displays.py | 5 +++-- nilearn/plotting/tests/test_displays.py | 14 ++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/nilearn/plotting/displays.py b/nilearn/plotting/displays.py index 413a501208..039ed07f3d 100644 --- a/nilearn/plotting/displays.py +++ b/nilearn/plotting/displays.py @@ -539,8 +539,9 @@ def add_contours(self, img, filled=False, **kwargs): if filled: colors = kwargs['colors'] levels = kwargs['levels'] - # 
contour fillings levels should be given as (lower, upper).
-            levels.append(np.inf)
+            if len(levels) <= 1:
+                # contour fillings levels should be given as (lower, upper).
+                levels.append(np.inf)
             alpha = kwargs['alpha']
             self._map_show(img, type='contourf', levels=levels, alpha=alpha,
                            colors=colors[:3])

diff --git a/nilearn/plotting/tests/test_displays.py b/nilearn/plotting/tests/test_displays.py
index 2fa209490d..4a3cdb7aba 100644
--- a/nilearn/plotting/tests/test_displays.py
+++ b/nilearn/plotting/tests/test_displays.py
@@ -38,3 +38,17 @@ def test_demo_ortho_projector():
     with tempfile.TemporaryFile() as fp:
         oprojector.savefig(fp)
     oprojector.close()
+
+
+def test_contour_fillings_levels_in_add_contours():
+    oslicer = OrthoSlicer(cut_coords=(0, 0, 0))
+    img = load_mni152_template()
+    # levels should be at least 2
+    # If a single level is passed, we force the upper level to be inf
+    oslicer.add_contours(img, filled=True, colors='r',
+                         alpha=0.2, levels=[0.])
+
+    # If two levels are passed, they should be increasing from index zero
+    # In this case, we simply omit appending inf
+    oslicer.add_contours(img, filled=True, colors='b',
+                         alpha=0.1, levels=[0., 0.2])

From 729ec21fa8ffbd00f80129841d7869b585a7850f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Tue, 9 Feb 2016 13:51:44 +0100
Subject: [PATCH 0124/1925] Revert "MAINT temporary Work-around missing
 libgfortran dependency"

This reverts commit e5fef38140a688b374b383db6296c12ea945786e.

Conda package dependency problem has now been fixed.

---
 continuous_integration/install.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh
index 3914c51dfc..d84d7f071f 100755
--- a/continuous_integration/install.sh
+++ b/continuous_integration/install.sh
@@ -33,7 +33,7 @@ print_conda_requirements() {
     # if yes which version to install. For example:
     #   - for numpy, NUMPY_VERSION is used
     #   - for scikit-learn, SCIKIT_LEARN_VERSION is used
-    TO_INSTALL_ALWAYS="pip nose libgfortran"
+    TO_INSTALL_ALWAYS="pip nose"
    REQUIREMENTS="$TO_INSTALL_ALWAYS"
    TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn flake8"
    for PACKAGE in $TO_INSTALL_MAYBE; do

From fd48c3700ef351a95e16b9209a7782ce956506bc Mon Sep 17 00:00:00 2001
From: Alexandre Abadie
Date: Thu, 11 Feb 2016 09:46:23 +0100
Subject: [PATCH 0125/1925] FIX wrong link in exception

---
 nilearn/_utils/exceptions.py | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/nilearn/_utils/exceptions.py b/nilearn/_utils/exceptions.py
index f9bb6fd3af..b9de4ee0cc 100644
--- a/nilearn/_utils/exceptions.py
+++ b/nilearn/_utils/exceptions.py
@@ -49,17 +49,16 @@ def increment_stack_counter(self):

     @property
     def message(self):
-        message = (
-            "Data must be a %iD Niimg-like object but you provided a "
-            "%s%iD image%s. "
-            "See http://nilearn.github.io/manipulating_visualizing/"
-            "manipulating_images.html#niimg." % (
-                self.required_dimension + self.stack_counter,
-                "list of " * self.stack_counter,
-                self.file_dimension,
-                "s" * (self.stack_counter != 0)
-            )
-        )
+        message = ("Data must be a {0}D Niimg-like object but you provided a "
+                   "{1}{2}D image{3}. "
+                   "See http://nilearn.github.io/manipulating_images/"
+                   "manipulating_images.html#niimg-like-objects."
+ .format(self.required_dimension + self.stack_counter, + "list of " * self.stack_counter, + self.file_dimension, + "s" * (self.stack_counter != 0) + ) + ) return message def __str__(self): From ac3b131e0955658af2ca01f140fbd36b1376a749 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Fri, 5 Feb 2016 17:53:12 +0100 Subject: [PATCH 0126/1925] testing a simple way to apply a formula to a list of images --- nilearn/image/__init__.py | 6 ++++-- nilearn/image/image.py | 33 +++++++++++++++++++++++++++++++ nilearn/image/tests/test_image.py | 33 ++++++++++++++++++++++++++++++- 3 files changed, 69 insertions(+), 3 deletions(-) diff --git a/nilearn/image/__init__.py b/nilearn/image/__init__.py index 6d7c299338..fb96ac0214 100644 --- a/nilearn/image/__init__.py +++ b/nilearn/image/__init__.py @@ -4,7 +4,8 @@ """ from .resampling import resample_img, reorder_img from .image import high_variance_confounds, smooth_img, crop_img, \ - mean_img, swap_img_hemispheres, index_img, iter_img, threshold_img + mean_img, swap_img_hemispheres, index_img, iter_img, threshold_img, \ + math_img from .image import new_img_like # imported this way to avoid circular imports from .._utils.niimg_conversions import concat_niimgs as concat_imgs from .._utils.niimg import copy_img @@ -12,4 +13,5 @@ __all__ = ['resample_img', 'high_variance_confounds', 'smooth_img', 'crop_img', 'mean_img', 'reorder_img', 'swap_img_hemispheres', 'concat_imgs', 'copy_img', - 'index_img', 'iter_img', 'new_img_like', 'threshold_img'] + 'index_img', 'iter_img', 'new_img_like', 'threshold_img', + 'math_img'] diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 94c0cb439b..5c78daa435 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -665,3 +665,36 @@ def threshold_img(img, threshold, mask_img=None): threshold_img = new_img_like(img, img_data, affine) return threshold_img + + +def math_img(formula, **imgs): + """Interpret a string formula using niimg in named parameters. + + Parameters + ---------- + formula: string + The mathematical formula to apply to image internal data. + + """ + niimg = None + try: + list_imgs = [] + for k, v in imgs.items(): + list_imgs.append(v) + check_niimg(list_imgs) + data = v.get_data().view() + imgs[k] = data + niimg = v + except Exception as e: + raise ValueError("Input images cannot be compared: {0}".format(e)) + + # Add a reference to the input dictionary of eval so that numpy + # functions can be used inside. 
+ imgs['np'] = np + try: + result = eval(formula, imgs) + except Exception as e: + raise ValueError("Input formula couldn't be processed: {0}" + .format(e)) + + return new_img_like(niimg, result, niimg.get_affine()) diff --git a/nilearn/image/tests/test_image.py b/nilearn/image/tests/test_image.py index 37c3ee3d6e..aca4be057d 100644 --- a/nilearn/image/tests/test_image.py +++ b/nilearn/image/tests/test_image.py @@ -1,15 +1,17 @@ """ Test image pre-processing functions """ -from nose.tools import assert_true, assert_false +from nose.tools import assert_true, assert_false, assert_equal from distutils.version import LooseVersion from nose import SkipTest import platform import os import nibabel +from nibabel import Nifti1Image import numpy as np from numpy.testing import assert_array_equal, assert_allclose +from nilearn._utils.testing import assert_raises_regex from nilearn.image import image from nilearn.image import resampling @@ -18,6 +20,7 @@ from nilearn.image import new_img_like from nilearn.image import threshold_img from nilearn.image import iter_img +from nilearn.image import math_img X64 = (platform.architecture()[0] == '64bit') @@ -447,3 +450,31 @@ def test_isnan_threshold_img_data(): maps_img = nibabel.Nifti1Image(data, np.eye(4)) # test threshold_img to converge properly when input image has nans. threshold_maps = threshold_img(maps_img, threshold=0.8) + + +def test_math_img_exceptions(): + img1 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) + img2 = Nifti1Image(np.zeros((10, 20, 10, 10)), np.eye(4)) + img3 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) + + formula = "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)" + assert_raises_regex(ValueError, + "Input images cannot be compared", + math_img, formula, img1=img1, img2=img2) + + bad_formula = "np.toto(img1, axis=-1) - np.mean(img3, axis=-1)" + assert_raises_regex(ValueError, + "Input formula couldn't be processed", + math_img, bad_formula, img1=img1, img3=img3) + + +def test_math_img(): + img1 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) + img2 = Nifti1Image(np.zeros((10, 10, 10, 10)), np.eye(4)) + expected_result = Nifti1Image(np.ones((10, 10)), np.eye(4)) + + formula = "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)" + result = math_img(formula, img1=img1, img2=img2) + assert_array_equal(result.get_data(), expected_result.get_data()) + assert_array_equal(result.get_affine(), expected_result.get_affine()) + assert_equal(result.shape, expected_result.shape) From c8fca557aa7da1bf20139c01a696d7de1128d616 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Mon, 8 Feb 2016 10:31:37 +0100 Subject: [PATCH 0127/1925] fixing test --- nilearn/image/tests/test_image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/image/tests/test_image.py b/nilearn/image/tests/test_image.py index aca4be057d..182081cfa0 100644 --- a/nilearn/image/tests/test_image.py +++ b/nilearn/image/tests/test_image.py @@ -471,7 +471,7 @@ def test_math_img_exceptions(): def test_math_img(): img1 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) img2 = Nifti1Image(np.zeros((10, 10, 10, 10)), np.eye(4)) - expected_result = Nifti1Image(np.ones((10, 10)), np.eye(4)) + expected_result = Nifti1Image(np.ones((10, 10, 10)), np.eye(4)) formula = "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)" result = math_img(formula, img1=img1, img2=img2) From b45f06c5869a5c4ca5191235224f7205fd18f37d Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Mon, 8 Feb 2016 10:54:50 +0100 Subject: [PATCH 0128/1925] fixing flake8 --- 
nilearn/image/tests/test_image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/image/tests/test_image.py b/nilearn/image/tests/test_image.py index 182081cfa0..c83c2819af 100644 --- a/nilearn/image/tests/test_image.py +++ b/nilearn/image/tests/test_image.py @@ -449,7 +449,7 @@ def test_isnan_threshold_img_data(): maps_img = nibabel.Nifti1Image(data, np.eye(4)) # test threshold_img to converge properly when input image has nans. - threshold_maps = threshold_img(maps_img, threshold=0.8) + threshold_img(maps_img, threshold=0.8) def test_math_img_exceptions(): From 92083bf3f5e7fdf5a2be194bab90580d1e7ce993 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Mon, 8 Feb 2016 17:27:11 +0100 Subject: [PATCH 0129/1925] small refactoring of the math_img function --- nilearn/image/image.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 5c78daa435..21bf595752 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -676,23 +676,25 @@ def math_img(formula, **imgs): The mathematical formula to apply to image internal data. """ - niimg = None try: - list_imgs = [] - for k, v in imgs.items(): - list_imgs.append(v) - check_niimg(list_imgs) - data = v.get_data().view() - imgs[k] = data - niimg = v + check_niimg(imgs.values()) except Exception as e: raise ValueError("Input images cannot be compared: {0}".format(e)) + # Computing input data as a dictionary of numpy arrays. Keep a reference + # niimg for building the result as new niimg like. + niimg = None + list_data = {} + for k, v in imgs.items(): + data = v.get_data().view() + list_data[k] = data + niimg = v + # Add a reference to the input dictionary of eval so that numpy # functions can be used inside. - imgs['np'] = np + list_data['np'] = np try: - result = eval(formula, imgs) + result = eval(formula, list_data) except Exception as e: raise ValueError("Input formula couldn't be processed: {0}" .format(e)) From ff58fe41dd8bc2316f4a7750da3af9da6d0f4b9e Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Tue, 9 Feb 2016 16:21:17 +0100 Subject: [PATCH 0130/1925] addressing comments: - renaming variables - improving docstring with an example - use _safe_get_data to retrieve the numpy array a an niimg --- nilearn/image/image.py | 44 +++++++++++++++++++++++++++++++++++------- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 21bf595752..0d0c87f464 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -674,6 +674,36 @@ def math_img(formula, **imgs): ---------- formula: string The mathematical formula to apply to image internal data. + imgs: + Keyword arguments corresponding to the variables in the formula. + + Example + ------- + The same formula can be applied on different lists of nifti images. + + >>> import numpy as np + >>> from nibabel import Nifti1Image + >>> from nilearn.image import math_img + + Let's create 3 sample nifti images. + + >>> rng = np.random.RandomState(0) + >>> img1 = Nifti1Image(np.random.normal(loc=1.0, + size=(10, 10, 10)), np.eye(4)) + >>> img2 = Nifti1Image(np.random.normal(loc=2.0, + size=(10, 10, 10)), np.eye(4)) + >>> img3 = Nifti1Image(np.random.normal(loc=3.0, + size=(10, 10, 10)), np.eye(4)) + + Let's compare the mean image on the last axis between 2 images with + the following formula. 
+ + >>> formula = "np.mean(img2, axis=-1) - np.mean(img2, axis=-1)" + + We can now apply the same formula with different input data: + + >>> math_img(formula, img1=img1, img2=img2) + >>> math_img(formula, img1=img2, img2=img3) """ try: @@ -684,17 +714,17 @@ def math_img(formula, **imgs): # Computing input data as a dictionary of numpy arrays. Keep a reference # niimg for building the result as new niimg like. niimg = None - list_data = {} - for k, v in imgs.items(): - data = v.get_data().view() - list_data[k] = data - niimg = v + data_dict = {} + for key, img in imgs.items(): + data = _safe_get_data(img) + data_dict[key] = data + niimg = img # Add a reference to the input dictionary of eval so that numpy # functions can be used inside. - list_data['np'] = np + data_dict['np'] = np try: - result = eval(formula, list_data) + result = eval(formula, data_dict) except Exception as e: raise ValueError("Input formula couldn't be processed: {0}" .format(e)) From d3ba69bd2a3f08495b7518a9e2d083f36c296187 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Tue, 9 Feb 2016 17:19:07 +0100 Subject: [PATCH 0131/1925] fixing docstring --- nilearn/image/image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 0d0c87f464..9e5bac5157 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -698,7 +698,7 @@ def math_img(formula, **imgs): Let's compare the mean image on the last axis between 2 images with the following formula. - >>> formula = "np.mean(img2, axis=-1) - np.mean(img2, axis=-1)" + >>> formula = "np.mean(img2, axis=-1) - np.mean(img1, axis=-1)" We can now apply the same formula with different input data: From 8650f8faff86ec6227c74d13e1e163eaf0544269 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Tue, 9 Feb 2016 17:35:01 +0100 Subject: [PATCH 0132/1925] fixing docstring 2 --- nilearn/image/image.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 9e5bac5157..8bd535b25b 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -688,11 +688,11 @@ def math_img(formula, **imgs): Let's create 3 sample nifti images. 
>>> rng = np.random.RandomState(0) - >>> img1 = Nifti1Image(np.random.normal(loc=1.0, + >>> img1 = Nifti1Image(np.random.normal(loc=1.0,\ size=(10, 10, 10)), np.eye(4)) - >>> img2 = Nifti1Image(np.random.normal(loc=2.0, + >>> img2 = Nifti1Image(np.random.normal(loc=2.0,\ size=(10, 10, 10)), np.eye(4)) - >>> img3 = Nifti1Image(np.random.normal(loc=3.0, + >>> img3 = Nifti1Image(np.random.normal(loc=3.0,\ size=(10, 10, 10)), np.eye(4)) Let's compare the mean image on the last axis between 2 images with @@ -702,8 +702,8 @@ def math_img(formula, **imgs): We can now apply the same formula with different input data: - >>> math_img(formula, img1=img1, img2=img2) - >>> math_img(formula, img1=img2, img2=img3) + >>> result_1 = math_img(formula, img1=img1, img2=img2) + >>> result_2 = math_img(formula, img1=img2, img2=img3) """ try: From bd4109875cf98fda4060050b91dca67f12941901 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Wed, 10 Feb 2016 13:45:20 +0100 Subject: [PATCH 0133/1925] addressing comments --- nilearn/image/image.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 8bd535b25b..b375c38ecb 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -668,18 +668,27 @@ def threshold_img(img, threshold, mask_img=None): def math_img(formula, **imgs): - """Interpret a string formula using niimg in named parameters. + """Interpret a numpy based string formula using niimg in named parameters. Parameters ---------- formula: string The mathematical formula to apply to image internal data. imgs: - Keyword arguments corresponding to the variables in the formula. + Keyword arguments corresponding to the variables in the formula as + Nifti images. All input images should have the same geometry (shape, + affine). + + Returns + ------- + Nifti1Image + Result of the formula as a Nifti image. Note the dimension of the + result image can be smaller than the input image. The affine is the + same as the input the image. Example ------- - The same formula can be applied on different lists of nifti images. + The same formula can be applied on different lists of Nifti images. >>> import numpy as np >>> from nibabel import Nifti1Image From 1c08e6a70c5114d1b2a771d2ef88d6e1f03c82b2 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 11 Feb 2016 13:33:31 +0100 Subject: [PATCH 0134/1925] improving math_img and test --- nilearn/image/image.py | 33 +++++++++++++++++++------------ nilearn/image/tests/test_image.py | 22 ++++++++++++++++----- 2 files changed, 37 insertions(+), 18 deletions(-) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index b375c38ecb..db141a4fe1 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -674,7 +674,7 @@ def math_img(formula, **imgs): ---------- formula: string The mathematical formula to apply to image internal data. - imgs: + imgs: images Keyword arguments corresponding to the variables in the formula as Nifti images. All input images should have the same geometry (shape, affine). @@ -716,26 +716,33 @@ def math_img(formula, **imgs): """ try: - check_niimg(imgs.values()) - except Exception as e: - raise ValueError("Input images cannot be compared: {0}".format(e)) + # Check that input images are valid niimg and have a compatible shape + # and affine. 
+ niimgs = [] + for image in imgs.values(): + niimgs.append(check_niimg(image)) + _check_same_fov(*niimgs, raise_error=True) + except Exception as exc: + exc.args = (("Input images cannot be compared, you provided '{0}'," + .format(imgs.values()),) + exc.args) + raise # Computing input data as a dictionary of numpy arrays. Keep a reference - # niimg for building the result as new niimg like. + # niimg for building the result as a new niimg. niimg = None data_dict = {} for key, img in imgs.items(): - data = _safe_get_data(img) - data_dict[key] = data - niimg = img + niimg = check_niimg(img) + data_dict[key] = _safe_get_data(niimg) - # Add a reference to the input dictionary of eval so that numpy - # functions can be used inside. + # Add a reference to numpy in the kwargs of eval so that numpy functions + # can be called from there. data_dict['np'] = np try: result = eval(formula, data_dict) - except Exception as e: - raise ValueError("Input formula couldn't be processed: {0}" - .format(e)) + except Exception as exc: + exc.args = (("Input formula couldn't be processed, you provided '{0}'," + .format(formula),) + exc.args) + raise return new_img_like(niimg, result, niimg.get_affine()) diff --git a/nilearn/image/tests/test_image.py b/nilearn/image/tests/test_image.py index c83c2819af..e101396871 100644 --- a/nilearn/image/tests/test_image.py +++ b/nilearn/image/tests/test_image.py @@ -456,14 +456,21 @@ def test_math_img_exceptions(): img1 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) img2 = Nifti1Image(np.zeros((10, 20, 10, 10)), np.eye(4)) img3 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) + img4 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4) * 2) formula = "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)" + # Images with different shapes should raise a ValueError exception. assert_raises_regex(ValueError, "Input images cannot be compared", math_img, formula, img1=img1, img2=img2) - bad_formula = "np.toto(img1, axis=-1) - np.mean(img3, axis=-1)" + # Images with different affines should raise a ValueError exception. 
assert_raises_regex(ValueError, + "Input images cannot be compared", + math_img, formula, img1=img1, img2=img4) + + bad_formula = "np.toto(img1, axis=-1) - np.mean(img3, axis=-1)" + assert_raises_regex(AttributeError, "Input formula couldn't be processed", math_img, bad_formula, img1=img1, img3=img3) @@ -474,7 +481,12 @@ def test_math_img(): expected_result = Nifti1Image(np.ones((10, 10, 10)), np.eye(4)) formula = "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)" - result = math_img(formula, img1=img1, img2=img2) - assert_array_equal(result.get_data(), expected_result.get_data()) - assert_array_equal(result.get_affine(), expected_result.get_affine()) - assert_equal(result.shape, expected_result.shape) + for create_files in (True, False): + with testing.write_tmp_imgs(img1, img2, + create_files=create_files) as imgs: + result = math_img(formula, img1=imgs[0], img2=imgs[1]) + assert_array_equal(result.get_data(), + expected_result.get_data()) + assert_array_equal(result.get_affine(), + expected_result.get_affine()) + assert_equal(result.shape, expected_result.shape) From c6f574ad7ed50218df6b9e341a9befa9492af3a9 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 11 Feb 2016 13:33:51 +0100 Subject: [PATCH 0135/1925] adding 2 examples using math_img function --- .../plot_compare_mean_image.py | 24 +++++++++++++++++++ .../plot_multiply_image.py | 21 ++++++++++++++++ nilearn/image/image.py | 2 +- 3 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 examples/04_manipulating_images/plot_compare_mean_image.py create mode 100644 examples/04_manipulating_images/plot_multiply_image.py diff --git a/examples/04_manipulating_images/plot_compare_mean_image.py b/examples/04_manipulating_images/plot_compare_mean_image.py new file mode 100644 index 0000000000..be7f8f9740 --- /dev/null +++ b/examples/04_manipulating_images/plot_compare_mean_image.py @@ -0,0 +1,24 @@ +""" +Comparing the mean of 2 images +============================== + +Here we compare the means of 2 images. +""" + +from nilearn import datasets, plotting, image + +dataset = datasets.fetch_adhd(n_subjects=2) + +# Print basic information on the adhd subjects resting state datasets. +print('Subject 1 resting state dataset at: %s' % dataset.func[0]) +print('Subject 2 resting state dataset at: %s' % dataset.func[1]) + +# Comparing the means of the 2 resting state datasets. +formula = "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)" + +result_img = image.math_img(formula, + img1=dataset.func[0], + img2=dataset.func[1]) + +plotting.plot_epi(result_img, title="Comparing means of 2 resting 4D images.") +plotting.show() diff --git a/examples/04_manipulating_images/plot_multiply_image.py b/examples/04_manipulating_images/plot_multiply_image.py new file mode 100644 index 0000000000..1a52674c6c --- /dev/null +++ b/examples/04_manipulating_images/plot_multiply_image.py @@ -0,0 +1,21 @@ +""" +Multiply image +============== + +Here we multiply the voxel values of an image by -1. +""" + +from nilearn import datasets, plotting, image + +data = datasets.fetch_atlas_aal() + +# Print basic information on the AAL regions. +print('AAL regions nifti image (3D) is located at: %s' % data.regions) + +# Multiply voxel values by -1. 
+formula = "np.dot(img, -1)" + +result_img = image.math_img(formula, img=data.regions) + +plotting.plot_epi(result_img, title="AAL regions multiplied by -1.") +plotting.show() diff --git a/nilearn/image/image.py b/nilearn/image/image.py index db141a4fe1..4de31c0cb9 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -682,7 +682,7 @@ def math_img(formula, **imgs): Returns ------- Nifti1Image - Result of the formula as a Nifti image. Note the dimension of the + Result of the formula as a Nifti image. Note that the dimension of the result image can be smaller than the input image. The affine is the same as the input the image. From a3a46d44d99b72869b0ff1c56895fd6f94b6eac2 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 11 Feb 2016 13:49:43 +0100 Subject: [PATCH 0136/1925] COSMIT: use short version of #niimg in url --- nilearn/_utils/exceptions.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/nilearn/_utils/exceptions.py b/nilearn/_utils/exceptions.py index b9de4ee0cc..1aaa62ffe2 100644 --- a/nilearn/_utils/exceptions.py +++ b/nilearn/_utils/exceptions.py @@ -49,17 +49,16 @@ def increment_stack_counter(self): @property def message(self): - message = ("Data must be a {0}D Niimg-like object but you provided a " - "{1}{2}D image{3}. " - "See http://nilearn.github.io/manipulating_images/" - "manipulating_images.html#niimg-like-objects." - .format(self.required_dimension + self.stack_counter, - "list of " * self.stack_counter, - self.file_dimension, - "s" * (self.stack_counter != 0) - ) - ) - return message + return ("Data must be a {0}D Niimg-like object but you provided a " + "{1}{2}D image{3}. " + "See http://nilearn.github.io/manipulating_images/" + "manipulating_images.html#niimg." + .format(self.required_dimension + self.stack_counter, + "list of " * self.stack_counter, + self.file_dimension, + "s" * (self.stack_counter != 0) + ) + ) def __str__(self): return self.message From bef9cdcb7f8f6f9b68267ad91146c86683515a6f Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 11 Feb 2016 17:22:07 +0100 Subject: [PATCH 0137/1925] addressing comments --- .../plot_compare_mean_image.py | 17 +++++----- .../plot_multiply_image.py | 21 ------------- .../plot_negate_image.py | 24 ++++++++++++++ nilearn/image/image.py | 31 +++++-------------- 4 files changed, 42 insertions(+), 51 deletions(-) delete mode 100644 examples/04_manipulating_images/plot_multiply_image.py create mode 100644 examples/04_manipulating_images/plot_negate_image.py diff --git a/examples/04_manipulating_images/plot_compare_mean_image.py b/examples/04_manipulating_images/plot_compare_mean_image.py index be7f8f9740..94947fb483 100644 --- a/examples/04_manipulating_images/plot_compare_mean_image.py +++ b/examples/04_manipulating_images/plot_compare_mean_image.py @@ -1,24 +1,27 @@ """ -Comparing the mean of 2 images -============================== +Comparing the means of 2 images +=============================== -Here we compare the means of 2 images. +Here we compare the means of 2 resting state 4D images. """ from nilearn import datasets, plotting, image +################################################################################ +# Fetching 2 subject resting state functionnal MRI from datasets. dataset = datasets.fetch_adhd(n_subjects=2) +################################################################################ # Print basic information on the adhd subjects resting state datasets. 
print('Subject 1 resting state dataset at: %s' % dataset.func[0]) print('Subject 2 resting state dataset at: %s' % dataset.func[1]) +################################################################################ # Comparing the means of the 2 resting state datasets. -formula = "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)" - -result_img = image.math_img(formula, +result_img = image.math_img("np.mean(img1, axis=-1) - np.mean(img2, axis=-1)", img1=dataset.func[0], img2=dataset.func[1]) -plotting.plot_epi(result_img, title="Comparing means of 2 resting 4D images.") +plotting.plot_epi(result_img, + title="Comparing means of 2 resting state 4D images.") plotting.show() diff --git a/examples/04_manipulating_images/plot_multiply_image.py b/examples/04_manipulating_images/plot_multiply_image.py deleted file mode 100644 index 1a52674c6c..0000000000 --- a/examples/04_manipulating_images/plot_multiply_image.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -Multiply image -============== - -Here we multiply the voxel values of an image by -1. -""" - -from nilearn import datasets, plotting, image - -data = datasets.fetch_atlas_aal() - -# Print basic information on the AAL regions. -print('AAL regions nifti image (3D) is located at: %s' % data.regions) - -# Multiply voxel values by -1. -formula = "np.dot(img, -1)" - -result_img = image.math_img(formula, img=data.regions) - -plotting.plot_epi(result_img, title="AAL regions multiplied by -1.") -plotting.show() diff --git a/examples/04_manipulating_images/plot_negate_image.py b/examples/04_manipulating_images/plot_negate_image.py new file mode 100644 index 0000000000..994c804c43 --- /dev/null +++ b/examples/04_manipulating_images/plot_negate_image.py @@ -0,0 +1,24 @@ +""" +Negate image +============ + +Here we compute a negative image by multiplying it's voxel values with -1. +""" + +from nilearn import datasets, plotting, image + +################################################################################ +# Fetching AAL atlas regions by loading from datasets. +data = datasets.fetch_atlas_aal() + +################################################################################ +# Print basic information on the AAL regions. +print('AAL regions nifti image (3D) is located at: %s' % data.regions) + +################################################################################ +# Multiply voxel values by -1. +result_img = image.math_img("-img", img=data.regions) + +plotting.plot_roi(data.regions, cmap='Blues', title="AAL regions") +plotting.plot_roi(result_img, cmap='Blues', title="Negative of AAL regions") +plotting.show() diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 4de31c0cb9..ff7b4c221d 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -673,7 +673,8 @@ def math_img(formula, **imgs): Parameters ---------- formula: string - The mathematical formula to apply to image internal data. + The mathematical formula to apply to image internal data. It can use + numpy imported as 'np'. imgs: images Keyword arguments corresponding to the variables in the formula as Nifti images. All input images should have the same geometry (shape, @@ -688,31 +689,15 @@ def math_img(formula, **imgs): Example ------- - The same formula can be applied on different lists of Nifti images. + Let's load an image using nilearn datasets module: - >>> import numpy as np - >>> from nibabel import Nifti1Image - >>> from nilearn.image import math_img - - Let's create 3 sample nifti images. 
- - >>> rng = np.random.RandomState(0) - >>> img1 = Nifti1Image(np.random.normal(loc=1.0,\ - size=(10, 10, 10)), np.eye(4)) - >>> img2 = Nifti1Image(np.random.normal(loc=2.0,\ - size=(10, 10, 10)), np.eye(4)) - >>> img3 = Nifti1Image(np.random.normal(loc=3.0,\ - size=(10, 10, 10)), np.eye(4)) + >>> from nilearn import datasets + >>> anatomical_image = datasets.load_mni152_template() - Let's compare the mean image on the last axis between 2 images with - the following formula. + Now we can use any numpy function on this image: - >>> formula = "np.mean(img2, axis=-1) - np.mean(img1, axis=-1)" - - We can now apply the same formula with different input data: - - >>> result_1 = math_img(formula, img1=img1, img2=img2) - >>> result_2 = math_img(formula, img1=img2, img2=img3) + >>> from nilearn.image import math_img + >>> log_img = math_img("np.log(img)", img=anatomical_image) """ try: From ae090db1c14fef89a954a0b6671f216ac2b42db8 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 11 Feb 2016 17:49:06 +0100 Subject: [PATCH 0138/1925] improving doctring and referencing math_img from documentation --- doc/manipulating_images/manipulating_images.rst | 2 ++ doc/modules/reference.rst | 1 + nilearn/image/image.py | 4 ++++ 3 files changed, 7 insertions(+) diff --git a/doc/manipulating_images/manipulating_images.rst b/doc/manipulating_images/manipulating_images.rst index 8cb62f0027..95386db5a2 100644 --- a/doc/manipulating_images/manipulating_images.rst +++ b/doc/manipulating_images/manipulating_images.rst @@ -275,6 +275,8 @@ set up your own data preparation procedure: see the effect of affine transforms on data and bounding boxes. * Computing the mean of images (along the time/4th dimension): :func:`nilearn.image.mean_img` +* Applying numpy functions on an image or a list of images: + :func:`nilearn.image.math_img` * Swapping voxels of both hemisphere (e.g., useful to homogenize masks inter-hemispherically): :func:`nilearn.image.swap_img_hemispheres` diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index 1ab02544c4..8b3fae27d6 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -146,6 +146,7 @@ uses. iter_img high_variance_confounds mean_img + math_img new_img_like resample_img reorder_img diff --git a/nilearn/image/image.py b/nilearn/image/image.py index ff7b4c221d..c0d6323f93 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -699,6 +699,10 @@ def math_img(formula, **imgs): >>> from nilearn.image import math_img >>> log_img = math_img("np.log(img)", img=anatomical_image) + We can also apply mathematical operations on a list of images: + + >> result_img = math_img("img1 + img2", img1=img, img2=log_img) + """ try: # Check that input images are valid niimg and have a compatible shape From bbc47fc85e9bc8166110670c50c3791ade8698c6 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 11 Feb 2016 17:51:58 +0100 Subject: [PATCH 0139/1925] improving negate example --- .../plot_negate_image.py | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/examples/04_manipulating_images/plot_negate_image.py b/examples/04_manipulating_images/plot_negate_image.py index 994c804c43..8ac974eaf9 100644 --- a/examples/04_manipulating_images/plot_negate_image.py +++ b/examples/04_manipulating_images/plot_negate_image.py @@ -8,17 +8,27 @@ from nilearn import datasets, plotting, image ################################################################################ -# Fetching AAL atlas regions by loading from datasets. 
-data = datasets.fetch_atlas_aal()
-
-################################################################################
-# Print basic information on the AAL regions.
-print('AAL regions nifti image (3D) is located at: %s' % data.regions)
+# # Retrieve the data: the localizer dataset with contrast maps.
+localizer_dataset = datasets.fetch_localizer_contrasts(
+    ["left vs right button press"],
+    n_subjects=2,
+    get_anats=True,
+    get_tmaps=True)
+localizer_anat_filename = localizer_dataset.anats[1]
+localizer_tmap_filename = localizer_dataset.tmaps[1]

 ################################################################################
 # Multiply voxel values by -1.
-result_img = image.math_img("-img", img=data.regions)
+negative_stat_img = image.math_img("-img", img=localizer_tmap_filename)

-plotting.plot_roi(data.regions, cmap='Blues', title="AAL regions")
-plotting.plot_roi(result_img, cmap='Blues', title="Negative of AAL regions")
+plotting.plot_stat_map(localizer_tmap_filename,
+                       bg_img=localizer_anat_filename,
+                       cut_coords=(36, -27, 66),
+                       threshold=3, title="dim=-.5",
+                       dim=-.5)
+plotting.plot_stat_map(negative_stat_img,
+                       bg_img=localizer_anat_filename,
+                       cut_coords=(36, -27, 66),
+                       threshold=3, title="dim=-.5",
+                       dim=-.5)
 plotting.show()

From 63f2e5f288d87a221a28dac39d715be513086d54 Mon Sep 17 00:00:00 2001
From: Gael Varoquaux
Date: Thu, 11 Feb 2016 17:52:25 +0100
Subject: [PATCH 0140/1925] DOC: better docstrings for dim argument

---
 nilearn/plotting/img_plotting.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/nilearn/plotting/img_plotting.py b/nilearn/plotting/img_plotting.py
index c4bb52107d..361061e7d8 100644
--- a/nilearn/plotting/img_plotting.py
+++ b/nilearn/plotting/img_plotting.py
@@ -446,7 +446,10 @@ def plot_anat(anat_img=MNI152TEMPLATE, cut_coords=None,
         to matplotlib.pyplot.savefig.
     dim: boolean or float, optional
         Dimming factor applied to background image. If True, automatic
-        heuristics are applied. Accepted float values are between -1 and 1.
+        heuristics are applied. Accepted float values, where a
+        typical span is -1 to 1 (-1 = increase contrast; 1 = decrease
+        contrast), but larger values can be used for a more
+        pronounced effect.
     cmap: matplotlib colormap, optional
         The colormap for the anat
     vmin: float
@@ -613,7 +616,9 @@ def plot_roi(roi_img, bg_img=MNI152TEMPLATE, cut_coords=None,
         magically by analysis of the image.
     dim: boolean or float, optional
         Dimming factor applied to background image. If True, automatic
-        heuristics are applied. Accepted float values are between -1 and 1.
+        heuristics are applied. Accepted float values, where a
+        typical span is -1 to 1 (-1 = increase contrast; 1 = decrease
+        contrast), but larger values can be used for a more
+        pronounced effect.
     vmin: float
         Lower bound for plotting, passed to matplotlib.pyplot.imshow
     vmax: float
@@ -720,7 +725,9 @@ def plot_prob_atlas(maps_img, anat_img=MNI152TEMPLATE, view_type='auto',
         savefig.
     dim: boolean or float, optional
        Dimming factor applied to background image. If True, automatic
-        heuristics are applied. Accepted float values are between -1 and 1.
+        heuristics are applied.
Accepted float values, where a
+        typical span is -1 to 1 (-1 = increase contrast; 1 = decrease
+        contrast), but larger values can be used for a more
+        pronounced effect.
     cmap: matplotlib colormap, optional
         The colormap for the atlas maps
     vmin: float

From 0619787a09fb4c6f164a6b18a8579deefc44cbe7 Mon Sep 17 00:00:00 2001
From: Alexandre Abadie
Date: Thu, 11 Feb 2016 17:56:38 +0100
Subject: [PATCH 0141/1925] updating whats_new.rst

---
 doc/whats_new.rst | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/doc/whats_new.rst b/doc/whats_new.rst
index dac46d46ec..8c6097517a 100644
--- a/doc/whats_new.rst
+++ b/doc/whats_new.rst
@@ -1,3 +1,14 @@
+0.2.3
+=====
+
+Changelog
+---------
+
+New features
+............
+   - Mathematical formulas based on numpy functions can be applied on an
+     image or a list of images using :func:`nilearn.image.math_img`.
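[Editor's aside, not part of the patch series: the changelog entry above
summarizes the new math_img API that the preceding patches converge on. As a
minimal sketch of its mechanism, with the niimg validation omitted: the
formula is evaluated with eval() against a namespace mapping the keyword
names to arrays, with numpy injected as 'np'. The helper name apply_formula
below is hypothetical, chosen only for illustration.]

    import numpy as np

    def apply_formula(formula, **arrays):
        # Namespace handed to eval(): keyword names -> arrays, plus numpy
        # as 'np' so that formulas can call numpy functions.
        namespace = dict(arrays)
        namespace['np'] = np
        return eval(formula, namespace)

    # Mirrors the docstring example: mean over the last axis of two arrays.
    result = apply_formula("np.mean(img1, axis=-1) - np.mean(img2, axis=-1)",
                           img1=np.ones((2, 2, 3)), img2=np.zeros((2, 2, 3)))
    print(result)  # a 2x2 array of ones
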
0.2.2 ====== diff --git a/nilearn/image/image.py b/nilearn/image/image.py index c0d6323f93..bed934e145 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -701,7 +701,7 @@ def math_img(formula, **imgs): We can also apply mathematical operations on a list of images: - >> result_img = math_img("img1 + img2", img1=img, img2=log_img) + >>> result_img = math_img("img1 + img2", img1=img, img2=log_img) """ try: From 7bc848f809ca6afe2cd3e0237b970fef3d875f1c Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Thu, 11 Feb 2016 18:22:30 +0100 Subject: [PATCH 0144/1925] small improvements in examples --- .../plot_compare_mean_image.py | 5 ++++- examples/04_manipulating_images/plot_negate_image.py | 12 +++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/examples/04_manipulating_images/plot_compare_mean_image.py b/examples/04_manipulating_images/plot_compare_mean_image.py index 94947fb483..96bd4e12d1 100644 --- a/examples/04_manipulating_images/plot_compare_mean_image.py +++ b/examples/04_manipulating_images/plot_compare_mean_image.py @@ -2,7 +2,10 @@ Comparing the means of 2 images =============================== -Here we compare the means of 2 resting state 4D images. +The goal of this example is to illustrate the use of the function +:func:`math_img` with a list of images as input. +We compare the means of 2 resting state 4D images. The mean of the images +could have been computed with nilearn :func:`mean_img` function. """ from nilearn import datasets, plotting, image diff --git a/examples/04_manipulating_images/plot_negate_image.py b/examples/04_manipulating_images/plot_negate_image.py index 82c025b94d..5d35ec9528 100644 --- a/examples/04_manipulating_images/plot_negate_image.py +++ b/examples/04_manipulating_images/plot_negate_image.py @@ -1,14 +1,16 @@ """ -Negate image -============ +Negating an image with math_img +=============================== -Here we compute a negative image by multiplying it's voxel values with -1. +The goal of this example is to illustrate the use of the function +:func:`math_img` on T-maps. +We compute a negative image by multiplying its voxel values with -1. """ from nilearn import datasets, plotting, image ################################################################################ -# # Retrieve the data: the localizer dataset with contrast maps. +# Retrieve the data: the localizer dataset with contrast maps. 
localizer_dataset = datasets.fetch_localizer_contrasts( ["left vs right button press"], n_subjects=2, @@ -29,6 +31,6 @@ plotting.plot_stat_map(negative_stat_img, bg_img=localizer_anat_filename, cut_coords=(36, -27, 66), - threshold=3, title="Inverted t-map, dim=-.5", + threshold=3, title="Negative t-map, dim=-.5", dim=-.5) plotting.show() From fe5d5b69073ac658c2947cec349a70c6a33237d0 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Fri, 12 Feb 2016 13:12:49 +0100 Subject: [PATCH 0145/1925] fixing tests --- nilearn/image/image.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index bed934e145..1a27d75895 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -701,7 +701,8 @@ def math_img(formula, **imgs): We can also apply mathematical operations on a list of images: - >>> result_img = math_img("img1 + img2", img1=img, img2=log_img) + >>> result_img = math_img("img1 + img2", \ + img1=anatomical_image, img2=log_img) """ try: From 1d556d9e634c678f04b377205bb95150d93a4559 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Fri, 12 Feb 2016 17:56:42 +0100 Subject: [PATCH 0146/1925] fixing pep8 and documentation generation --- .../04_manipulating_images/plot_compare_mean_image.py | 10 +++++----- examples/04_manipulating_images/plot_negate_image.py | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/examples/04_manipulating_images/plot_compare_mean_image.py b/examples/04_manipulating_images/plot_compare_mean_image.py index 96bd4e12d1..1a849b39ae 100644 --- a/examples/04_manipulating_images/plot_compare_mean_image.py +++ b/examples/04_manipulating_images/plot_compare_mean_image.py @@ -3,23 +3,23 @@ =============================== The goal of this example is to illustrate the use of the function -:func:`math_img` with a list of images as input. +:func:`nilearn.image.math_img` with a list of images as input. We compare the means of 2 resting state 4D images. The mean of the images -could have been computed with nilearn :func:`mean_img` function. +could have been computed with nilearn :func:`nilearn.image.mean_img` function. """ from nilearn import datasets, plotting, image -################################################################################ +############################################################################### # Fetching 2 subject resting state functionnal MRI from datasets. dataset = datasets.fetch_adhd(n_subjects=2) -################################################################################ +############################################################################### # Print basic information on the adhd subjects resting state datasets. print('Subject 1 resting state dataset at: %s' % dataset.func[0]) print('Subject 2 resting state dataset at: %s' % dataset.func[1]) -################################################################################ +############################################################################### # Comparing the means of the 2 resting state datasets. 
result_img = image.math_img("np.mean(img1, axis=-1) - np.mean(img2, axis=-1)", img1=dataset.func[0], diff --git a/examples/04_manipulating_images/plot_negate_image.py b/examples/04_manipulating_images/plot_negate_image.py index 5d35ec9528..e2aa8dfc12 100644 --- a/examples/04_manipulating_images/plot_negate_image.py +++ b/examples/04_manipulating_images/plot_negate_image.py @@ -3,13 +3,13 @@ =============================== The goal of this example is to illustrate the use of the function -:func:`math_img` on T-maps. +:func:`nilearn.image.math_img` on T-maps. We compute a negative image by multiplying its voxel values with -1. """ from nilearn import datasets, plotting, image -################################################################################ +############################################################################### # Retrieve the data: the localizer dataset with contrast maps. localizer_dataset = datasets.fetch_localizer_contrasts( ["left vs right button press"], @@ -19,7 +19,7 @@ localizer_anat_filename = localizer_dataset.anats[1] localizer_tmap_filename = localizer_dataset.tmaps[1] -################################################################################ +############################################################################### # Multiply voxel values by -1. negative_stat_img = image.math_img("-img", img=localizer_tmap_filename) From 81f15406191e390066cd62fc5f247464a1fa9ac2 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Fri, 12 Feb 2016 20:05:57 +0100 Subject: [PATCH 0147/1925] fixing docstring --- nilearn/image/image.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 1a27d75895..265192601f 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -672,7 +672,7 @@ def math_img(formula, **imgs): Parameters ---------- - formula: string + formula: str The mathematical formula to apply to image internal data. It can use numpy imported as 'np'. imgs: images @@ -685,7 +685,7 @@ def math_img(formula, **imgs): Nifti1Image Result of the formula as a Nifti image. Note that the dimension of the result image can be smaller than the input image. The affine is the - same as the input the image. + same as the input image. Example ------- From 233d6b3aaa548b6433e11c6ba3f30440860a397b Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Fri, 12 Feb 2016 22:34:48 +0100 Subject: [PATCH 0148/1925] DOC: cosmit in example plot_stat_maps make a more pleasing plot here imports close to where they are use makes examples easier to read for beginners. --- .../04_manipulating_images/plot_compare_mean_image.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/examples/04_manipulating_images/plot_compare_mean_image.py b/examples/04_manipulating_images/plot_compare_mean_image.py index 1a849b39ae..ecdc87d824 100644 --- a/examples/04_manipulating_images/plot_compare_mean_image.py +++ b/examples/04_manipulating_images/plot_compare_mean_image.py @@ -8,23 +8,27 @@ could have been computed with nilearn :func:`nilearn.image.mean_img` function. """ -from nilearn import datasets, plotting, image ############################################################################### # Fetching 2 subject resting state functionnal MRI from datasets. +from nilearn import datasets dataset = datasets.fetch_adhd(n_subjects=2) + ############################################################################### # Print basic information on the adhd subjects resting state datasets. 
print('Subject 1 resting state dataset at: %s' % dataset.func[0]) print('Subject 2 resting state dataset at: %s' % dataset.func[1]) + ############################################################################### # Comparing the means of the 2 resting state datasets. +from nilearn import plotting, image + result_img = image.math_img("np.mean(img1, axis=-1) - np.mean(img2, axis=-1)", img1=dataset.func[0], img2=dataset.func[1]) -plotting.plot_epi(result_img, - title="Comparing means of 2 resting state 4D images.") +plotting.plot_stat_map(result_img, + title="Comparing means of 2 resting state 4D images.") plotting.show() From bb328641ed8742e5fecc1d08d8ce01525c1e38c0 Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Fri, 12 Feb 2016 22:46:23 +0100 Subject: [PATCH 0149/1925] DOC: fix the math_img docstring Fix indentation (rst error), and rename the "Example" section to "Examples" (to have the html page display the examples). --- nilearn/image/image.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 265192601f..a8792f3e68 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -675,34 +675,34 @@ def math_img(formula, **imgs): formula: str The mathematical formula to apply to image internal data. It can use numpy imported as 'np'. - imgs: images + imgs: images (Nifti1Image or file names) Keyword arguments corresponding to the variables in the formula as Nifti images. All input images should have the same geometry (shape, affine). Returns ------- - Nifti1Image + return_img: Nifti1Image Result of the formula as a Nifti image. Note that the dimension of the result image can be smaller than the input image. The affine is the same as the input image. - Example - ------- - Let's load an image using nilearn datasets module: + Examples + -------- + Let's load an image using nilearn datasets module:: - >>> from nilearn import datasets - >>> anatomical_image = datasets.load_mni152_template() + >>> from nilearn import datasets + >>> anatomical_image = datasets.load_mni152_template() - Now we can use any numpy function on this image: + Now we can use any numpy function on this image:: - >>> from nilearn.image import math_img - >>> log_img = math_img("np.log(img)", img=anatomical_image) + >>> from nilearn.image import math_img + >>> log_img = math_img("np.log(img)", img=anatomical_image) - We can also apply mathematical operations on a list of images: + We can also apply mathematical operations on a list of images:: - >>> result_img = math_img("img1 + img2", \ - img1=anatomical_image, img2=log_img) + >>> result_img = math_img("img1 + img2", + ... img1=anatomical_image, img2=log_img) """ try: From e333129c521b2260d06b9167d6d1ef936a5aa9fb Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Fri, 12 Feb 2016 23:42:20 +0100 Subject: [PATCH 0150/1925] DOC: add versionadded It's important so that users reading the docs are not surprised that they don't have this function --- nilearn/image/image.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index a8792f3e68..0e1838e751 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -670,6 +670,8 @@ def threshold_img(img, threshold, mask_img=None): def math_img(formula, **imgs): """Interpret a numpy based string formula using niimg in named parameters. + .. 
versionadded:: 0.2.3
+
     Parameters
     ----------
     formula: str

From b2431ce513791c94021397d6f126405ecdb9047 Mon Sep 17 00:00:00 2001
From: KamalakerDadi
Date: Sun, 14 Feb 2016 15:37:25 +0100
Subject: [PATCH 0151/1925] Datasets: Downloader for COBRE data

- Added function, documentation, tests, reference

---
 doc/modules/reference.rst                   |   1 +
 nilearn/datasets/__init__.py                |   4 +-
 nilearn/datasets/description/COBRE_NIAK.rst | 123 ++++++++++++++++++++
 nilearn/datasets/func.py                    |  96 +++++++++++++++
 nilearn/datasets/tests/test_func.py         |  54 +++++++++
 5 files changed, 276 insertions(+), 2 deletions(-)
 create mode 100644 nilearn/datasets/description/COBRE_NIAK.rst

diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst
index 8b3fae27d6..f44d587ea1 100644
--- a/doc/modules/reference.rst
+++ b/doc/modules/reference.rst
@@ -81,6 +81,7 @@ uses.
    fetch_nyu_rest
    fetch_oasis_vbm
    fetch_megatrawls_netmats
+   fetch_cobre_niak

 .. _decoding_ref:

diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py
index 1c4fdc2b01..fade5cf0cb 100644
--- a/nilearn/datasets/__init__.py
+++ b/nilearn/datasets/__init__.py
@@ -7,7 +7,7 @@
                    fetch_adhd, fetch_miyawaki2008, fetch_localizer_contrasts,
                    fetch_abide_pcp, fetch_localizer_calculation_task,
                    fetch_mixed_gambles,
-                   fetch_megatrawls_netmats)
+                   fetch_megatrawls_netmats, fetch_cobre_niak)
 from .atlas import (fetch_atlas_craddock_2012, fetch_atlas_destrieux_2009,
                     fetch_atlas_harvard_oxford, fetch_atlas_msdl,
                     fetch_atlas_power_2011, fetch_atlas_smith_2009,
@@ -21,4 +21,4 @@
           'fetch_atlas_harvard_oxford', 'fetch_atlas_msdl',
           'fetch_atlas_power_2011', 'fetch_atlas_smith_2009',
           'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal',
-          'fetch_megatrawls_netmats']
+          'fetch_megatrawls_netmats', 'fetch_cobre_niak']

diff --git a/nilearn/datasets/description/COBRE_NIAK.rst b/nilearn/datasets/description/COBRE_NIAK.rst
new file mode 100644
index 0000000000..f145fb9cc3
--- /dev/null
+++ b/nilearn/datasets/description/COBRE_NIAK.rst
@@ -0,0 +1,123 @@
+COBRE datasets preprocessed using NIAK 0.12.4 version pipeline
+
+
+Content
+-------
+This work is a derivative from the COBRE sample found in the [International
+Neuroimaging Data-sharing Initiative
+(INDI)](http://fcon_1000.projects.nitrc.org/indi/retro/cobre.html), originally
+released under Creative Commons -- Attribution Non-Commercial. It includes
+preprocessed resting-state functional magnetic resonance images for 72
+patients diagnosed with schizophrenia (58 males, age range = 18-65 yrs) and 74
+healthy controls (51 males, age range = 18-65 yrs). The fMRI dataset for each
+subject is a single nifti file (.nii.gz) containing 150 EPI blood-oxygenation
+level dependent (BOLD) volumes obtained in 5 min (TR = 2 s, TE = 29 ms,
+FA = 75 degrees, 32 slices, voxel size = 3x3x4 mm3, matrix size = 64x64,
+FOV = mm2).
+
+
+The COBRE preprocessed fMRI release more specifically contains the following
+files:
+ :'description': a markdown (text) description of the release.
+ :'phenotypic': numpy array
+    contains comma-separated values, with the sz (1: patient with
+    schizophrenia, 0: control), age, sex, and FD (frame displacement,
+    as defined by Power et al. 2012) variables. Each column codes for
+    one variable, starting with the label, and each line has the label of the
+    corresponding subject.
+ :'func': contains list of filenames to functional datasets + fmri_szxxxSUBJECT_session1_run1.nii.gz, a 3D+t nifti volume at 3 mm + isotropic resolution, in the MNI non-linear 2009a symmetric space + (http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009). + Note that the number of time samples may vary, as some samples have been + removed if tagged with excessive motion. See the _extra.mat for more info. + :'mat_files': contains list of filenames to .mat files + fmri_szxxxSUBJECT_session1_run1_extra.mat, a matlab/octave file for each + subject. + + Each .mat file contains the following variables: + * confounds: a TxK array. Each row corresponds to a time sample, and each + column to one confound that was regressed out from the time series + during preprocessing. + * labels_confounds: cell of strings. Each entry is the label of a + confound that was regressed out from the time series. + * mask_suppressed: a T2x1 vector. T2 is the number of time samples in + the raw time series (before preprocessing), T2=119. Each entry + corresponds to a time sample, and is 1 if the corresponding sample + was removed due to excessive motion (or to wait for magnetic + equilibrium at the beginning of the series). Samples that were kept + are tagged with 0s. + * time_frames: a Tx1 vector. Each entry is the time of acquisition + (in s) of the corresponding volume. + + +Preprocessing +------------- +The datasets were analysed using the NeuroImaging Analysis Kit (NIAK +https://github.com/SIMEXP/niak) version 0.12.14, under CentOS version 6.3 with +Octave(http://gnu.octave.org) version 3.8.1 and the Minc toolkit +(http://www.bic.mni.mcgill.ca/ServicesSoftware/ServicesSoftwareMincToolKit) +version 0.3.18. +Each fMRI dataset was corrected for inter-slice difference in acquisition time +and the parameters of a rigid-body motion were estimated for each time frame. +Rigid-body motion was estimated within as well as between runs, using the +median volume of the first run as a target. The median volume of one selected +fMRI run for each subject was coregistered with a T1 individual scan using +Minctracc (Collins and Evans, 1998), which was itself non-linearly transformed +to the Montreal Neurological Institute (MNI) template (Fonov et al., 2011) +using the CIVET pipeline (Ad-Dabbagh et al., 2006). The MNI symmetric +template was generated from the ICBM152 sample of 152 young adults, after 40 +iterations of non-linear coregistration. The rigid-body +transform, fMRI-to-T1 transform and T1-to-stereotaxic transform were all +combined, and the functional volumes were resampled in the MNI space at a 3 mm +isotropic resolution. The "scrubbing" method of (Power et al., 2012), was used +to remove the volumes with excessive motion (frame displacement greater than +0.5 mm). A minimum number of 60 unscrubbed volumes per run, corresponding to +~180 s of acquisition, was then required for further analysis. For this +reason, 16 controls and 29 schizophrenia patients were rejected from the +subsequent analyses. The following nuisance parameters were regressed out from +the time series at each voxel: slow time drifts (basis of discrete cosines +with a 0.01 Hz high-pass cut-off), average signals in conservative masks of +the white matter and the lateral ventricles as well as the first principal +components (95% energy) of the six rigid-body motion parameters and their +squares (Giove et al., 2009). The fMRI volumes were finally spatially smoothed +with a 6 mm isotropic Gaussian blurring kernel. 
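+
+As an illustration (this snippet is editorial, not part of the release), the
+variables of a subject's _extra.mat file described in the Content section
+can be inspected in Python with scipy::
+
+    from scipy.io import loadmat
+
+    # keys of the dict returned by loadmat match the variable names
+    # documented in the Content section above
+    extra = loadmat('fmri_szxxxSUBJECT_session1_run1_extra.mat')
+    confounds = extra['confounds']         # TxK array of regressed confounds
+    suppressed = extra['mask_suppressed']  # 1 where a time frame was censored
+    times = extra['time_frames']           # acquisition time (s) per volume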
+ + +References +---------- +Ad-Dab'bagh Y, Einarson D, Lyttelton O, Muehlboeck J S, Mok K, Ivanov O, +Vincent R D, Lepage C, Lerch J, Fombonne E, Evans A C, 2006. +The CIVET Image-Processing Environment: A Fully Automated Comprehensive +Pipeline for Anatomical Neuroimaging Research. In: Corbetta M. (Ed.), +Proceedings of the 12th Annual Meeting of the Human Brain Mapping +Organization. Neuroimage, Florence, Italy. + +Bellec P, Rosa-Neto P, Lyttelton O C, Benali H, Evans A C, Jul. 2010. +Multi-level bootstrap analysis of stable clusters in resting-state fMRI. +NeuroImage 51 (3), 1126–1139. +URL http://dx.doi.org/10.1016/j.neuroimage.2010.02.082 + +Collins D L, Evans A C, 1997. Animal: validation and applications of +nonlinear registration-based segmentation. International Journal of Pattern +Recognition and Artificial Intelligence 11, 1271-1294. + +Fonov V, Evans A C, Botteron K, Almli C R, McKinstry R C, Collins D L, +Jan. 2011. Unbiased average age-appropriate atlases for pediatric studies. +NeuroImage 54 (1), 313-327. +URL http://dx.doi.org/10.1016/j.neuroimage.2010.07.033 + +Giove F, Gili T, Iacovella V, Macaluso E, Maraviglia B, Oct. 2009. +Images-based suppression of unwanted global signals in resting-state +functional connectivity studies. Magnetic resonance imaging 27 (8), 1058-1064. +URL http://dx.doi.org/10.1016/j.mri.2009.06.004 + +Power J D, Barnes K A, Snyder A Z, Schlaggar B L, Petersen S E, Feb. 2012. +Spurious but systematic correlations in functional connectivity MRI +networks arise from subject motion. NeuroImage 59 (3), 2142-2154. +URL http://dx.doi.org/10.1016/j.neuroimage.2011.10.018 + + +Other derivatives +----------------- +This dataset was used in a publication, see the link below. +https://github.com/SIMEXP/glm_connectome diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index caf1a96e3b..3f12e0ee8f 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -7,6 +7,7 @@ import numpy as np import nibabel from sklearn.datasets.base import Bunch +from sklearn.utils import shuffle from .utils import (_get_dataset_dir, _fetch_files, _get_dataset_descr, _read_md5_sum_file, _tree, _filter_columns) @@ -1411,3 +1412,98 @@ def fetch_megatrawls_netmats(dimensionality=100, timeseries='eigen_regression', matrices=matrices, correlation_matrices=correlation_matrices, description=description) + + +def fetch_cobre_niak(n_subjects=10, data_dir=None, url=None, verbose=1): + """Fetch COBRE datasets preprocessed using NIAK 0.12.4 pipeline. + + Downloads and returns preprocessed resting state fMRI datasets and + phenotypic information such as demographic, clinical variables, + measure of frame displacement FD (an average FD for all the time + frames left after censoring). + + For each subject, this function also returns .mat files which contains + all the covariates that have been regressed out of the functional data. + The covariates such as motion parameters, mean CSF signal, etc. It also + contains a list of time frames that have been removed from the time series + by censoring for high motion. + + NOTE: The number of time samples vary, as some samples have been removed + if tagged with excessive motion. This means that data is already time + filtered. See output variable 'decription' for more details. + + .. versionadded 0.2.3 + + Parameters + ---------- + n_subject: int, optional + The number of subjects to load from maximum of 146 subjects. + By default, 10 subjects will be loaded. If n_subjects=None, + all subjects will be loaded. 
+ + data_dir: str, optional + Path to the data directory. Used to force data storage in a + specified location. Default: None + + url: str, optional + Override download url. Used for test only (or if you setup a + mirror of the data). Default: None + + verbose: int, optional + Verbosity level (0 means no message). + + Returns + ------- + data: Bunch + Dictionary-like object, the attributes are: + + - 'func': string list + Paths to Nifti images. + - 'mat_files': string list + Paths to .mat files of each subject. + - 'phenotypic': ndarray + Contains data of clinical variables, sex, age, FD. + - 'description': data description of the release and references. + """ + if url is None: + url = "https://ndownloader.figshare.com/articles/1160600/versions/15" + opts = {'uncompress': True} + + dataset_name = 'COBRE_NIAK' + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, + verbose=verbose) + + fdescr = _get_dataset_descr(dataset_name) + # Fetch the phenotypic file and load it + csv_name = 'cobre_model_group.csv' + + csv_file = _fetch_files(data_dir, [(csv_name, url, opts)], verbose=verbose) + # Load file in filename to numpy arrays + names = ['subject_type', 'sz', 'age', 'sex', 'fd'] + csv_array = np.recfromcsv(csv_file[0], names=names, skip_header=True) + # Get the ids of the datasets + ids = csv_array['subject_type'] + max_subjects = len(ids) + if n_subjects is not None: + if n_subjects < max_subjects: + # shuffle datasets to have almost equal balance between sch vs ctrl + ids = shuffle(ids, random_state=0, n_samples=n_subjects) + elif n_subjects > max_subjects: + warnings.warn('Warning: there are only %d subjects' % max_subjects) + n_subjects = max_subjects + else: + n_subjects = max_subjects + + func_filenames = [('fmri_' + i.strip(' "\'') + + '_session1' + '_run1.nii.gz') for i in ids] + mats_filenames = [('fmri_' + i.strip(' "\'') + + '_session1' + '_run1_extra.mat') for i in ids] + + func_files = [(path, url, opts) for path in func_filenames] + mat_files = [(path, url, opts) for path in mats_filenames] + + func_files = _fetch_files(data_dir, func_files, verbose=verbose) + mat_files = _fetch_files(data_dir, mat_files, verbose=verbose) + + return Bunch(func=func_files, mat_files=mat_files, phenotypic=csv_array, + description=fdescr) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index bfdbbeb716..a7590841e2 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -431,3 +431,57 @@ def test_fetch_megatrawls_netmats(): assert_equal(netmats_data.dimensions, 300) assert_equal(netmats_data.timeseries, 'multiple_spatial_regression') assert_equal(netmats_data.matrices, 'full_correlation') + + +@with_setup(setup_mock, teardown_mock) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_cobre_niak(): + local_url = "file://" + tst.datadir + ids_sc = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 16, 21, 22, 25, + 28, 29, 32, 34, 37, 39, 40, 41, 42, 44, 46, 47, 49, 59, 60, + 64, 71, 72, 73, 75, 77, 78, 79, 80, 81, 82, 84, 85, 88, 89, + 92, 94, 96, 97, 98, 99, 100, 101, 103, 105, 106, 108, 109, 110, + 112, 117, 122, 126, 132, 133, 137, 142, 143, 145] + ids_con = [13, 14, 17, 18, 19, 20, 23, 24, 26, 27, 30, 31, 33, 35, 36, + 38, 43, 45, 48, 50, 51, 52, 53, 54, 55, 56, 57, 58, 61, 62, + 63, 65, 66, 67, 68, 69, 74, 76, 86, 87, 90, 91, 93, 95, 102, + 104, 107, 111, 113, 114, 115, 116, 118, 119, 120, 121, 123, + 124, 125, 127, 128, 129, 130, 131, 134, 135, 136, 138, 139, + 140, 141, 144, 146, 147] + ids_sch = 
[('szxxx0040%03d' % i) for i in ids_sc] + ids_cont = ids_cont = [('contxxx0040%03d' % i) for i in ids_con] + subs = np.array(ids_sch + ids_cont, dtype='S17') + subs = subs.view(dtype=[('subject_type', 'S17')]) + tst.mock_fetch_files.add_csv('cobre_model_group.csv', subs) + # All subjects + cobre_data = func.fetch_cobre_niak(n_subjects=None, data_dir=tst.tmpdir, + url=local_url) + + phenotypic_names = ['phenotypic', 'mat_files', 'description', 'func'] + # test length of functional filenames to max 146 + assert_equal(len(cobre_data.func), 146) + # test length of corresponding matlab files of same length to max 146 + assert_equal(len(cobre_data.mat_files), 146) + # test return type variables + assert_equal(cobre_data.keys(), phenotypic_names) + # test functional filenames in a list + assert_true(isinstance(cobre_data.func, list)) + # test matlab files in a list + assert_true(isinstance(cobre_data.mat_files, list)) + + assert_true(isinstance(cobre_data.func[0], _basestring)) + # returned phenotypic data will be an array + assert_true(isinstance(cobre_data.phenotypic, np.recarray)) + # data description should not be empty + assert_not_equal(cobre_data.description, '') + + # Fetch only 30 subjects + data_30_subjects = func.fetch_cobre_niak(n_subjects=30, url=local_url, + data_dir=tst.tmpdir) + assert_equal(len(data_30_subjects.func), 30) + assert_equal(len(data_30_subjects.mat_files), 30) + + # Test more than maximum subjects + test_150_subjects = func.fetch_cobre_niak(n_subjects=150, url=local_url, + data_dir=tst.datadir) + assert_equal(len(test_150_subjects.func), 146) From 6c2075dfa84acc29aaa0b696d7ab25e8fdb0f1d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?= Date: Mon, 15 Feb 2016 14:56:53 +0100 Subject: [PATCH 0152/1925] COSMIT fixed docstrings --- nilearn/plotting/img_plotting.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/nilearn/plotting/img_plotting.py b/nilearn/plotting/img_plotting.py index 361061e7d8..a01948fea6 100644 --- a/nilearn/plotting/img_plotting.py +++ b/nilearn/plotting/img_plotting.py @@ -446,7 +446,7 @@ def plot_anat(anat_img=MNI152TEMPLATE, cut_coords=None, to matplotlib.pyplot.savefig. dim: boolean or float, optional Dimming factor applied to background image. If True, automatic - heuristics are applied. Accepted float values, where at + heuristics are applied. Accepted float values, where a typical span is -1 to 1 (-1 = increase contrast; 1 = decrease contrast), but larger values can be used for a more pronounced effect. @@ -616,9 +616,10 @@ def plot_roi(roi_img, bg_img=MNI152TEMPLATE, cut_coords=None, magically by analysis of the image. dim: boolean or float, optional Dimming factor applied to background image. If True, automatic - heuristics are applied. Accepted float values, where at + heuristics are applied. Accepted float values, where a typical span is -1 to 1 (-1 = increase contrast; 1 = decrease contrast), but larger values can be used for a more + pronounced effect. vmin: float Lower bound for plotting, passed to matplotlib.pyplot.imshow vmax: float @@ -725,9 +726,10 @@ def plot_prob_atlas(maps_img, anat_img=MNI152TEMPLATE, view_type='auto', savefig. dim: boolean or float, optional Dimming factor applied to background image. If True, automatic - heuristics are applied. Accepted float values, where at + heuristics are applied. Accepted float values, where a typical span is -1 to 1 (-1 = increase contrast; 1 = decrease contrast), but larger values can be used for a more + pronounced effect. 
cmap: matplotlib colormap, optional The colormap for the atlas maps vmin: float From 57839e26fd4cd1d9245963e45fdc477fac6c6ab1 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Tue, 16 Feb 2016 10:36:15 +0100 Subject: [PATCH 0153/1925] Fixing travis py35 failures and name chnaged to fetch_cobre --- doc/modules/reference.rst | 2 +- doc/whats_new.rst | 2 ++ nilearn/datasets/__init__.py | 4 ++-- .../description/{COBRE_NIAK.rst => cobre.rst} | 0 nilearn/datasets/func.py | 12 ++++++------ nilearn/datasets/tests/test_func.py | 18 +++++++++--------- 6 files changed, 20 insertions(+), 18 deletions(-) rename nilearn/datasets/description/{COBRE_NIAK.rst => cobre.rst} (100%) diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index f44d587ea1..d09739dbe5 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -81,7 +81,7 @@ uses. fetch_nyu_rest fetch_oasis_vbm fetch_megatrawls_netmats - fetch_cobre_niak + fetch_cobre .. _decoding_ref: diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 87dd824cb3..5fd94ee5f7 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -8,6 +8,8 @@ New features ............ - Mathematical formulas based on numpy functions can be applied on an image or a list of images using :func:`nilearn.image.math_img`. + - Downloader for COBRE datasets of 146 rest fMRI subjects with + :func:`nilearn.datasets.fetch_cobre` 0.2.2 ====== diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py index fade5cf0cb..d107d0e639 100644 --- a/nilearn/datasets/__init__.py +++ b/nilearn/datasets/__init__.py @@ -7,7 +7,7 @@ fetch_adhd, fetch_miyawaki2008, fetch_localizer_contrasts, fetch_abide_pcp, fetch_localizer_calculation_task, fetch_mixed_gambles, - fetch_megatrawls_netmats, fetch_cobre_niak) + fetch_megatrawls_netmats, fetch_cobre) from .atlas import (fetch_atlas_craddock_2012, fetch_atlas_destrieux_2009, fetch_atlas_harvard_oxford, fetch_atlas_msdl, fetch_atlas_power_2011, fetch_atlas_smith_2009, @@ -21,4 +21,4 @@ 'fetch_atlas_harvard_oxford', 'fetch_atlas_msdl', 'fetch_atlas_power_2011', 'fetch_atlas_smith_2009', 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal', - 'fetch_megatrawls_netmats', 'fetch_cobre_niak'] + 'fetch_megatrawls_netmats', 'fetch_cobre'] diff --git a/nilearn/datasets/description/COBRE_NIAK.rst b/nilearn/datasets/description/cobre.rst similarity index 100% rename from nilearn/datasets/description/COBRE_NIAK.rst rename to nilearn/datasets/description/cobre.rst diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 3f12e0ee8f..9dfe8af7f0 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1414,7 +1414,7 @@ def fetch_megatrawls_netmats(dimensionality=100, timeseries='eigen_regression', description=description) -def fetch_cobre_niak(n_subjects=10, data_dir=None, url=None, verbose=1): +def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1): """Fetch COBRE datasets preprocessed using NIAK 0.12.4 pipeline. Downloads and returns preprocessed resting state fMRI datasets and @@ -1430,13 +1430,13 @@ def fetch_cobre_niak(n_subjects=10, data_dir=None, url=None, verbose=1): NOTE: The number of time samples vary, as some samples have been removed if tagged with excessive motion. This means that data is already time - filtered. See output variable 'decription' for more details. + filtered. See output variable 'description' for more details. .. 
versionadded 0.2.3 Parameters ---------- - n_subject: int, optional + n_subjects: int, optional The number of subjects to load from maximum of 146 subjects. By default, 10 subjects will be loaded. If n_subjects=None, all subjects will be loaded. @@ -1469,7 +1469,7 @@ def fetch_cobre_niak(n_subjects=10, data_dir=None, url=None, verbose=1): url = "https://ndownloader.figshare.com/articles/1160600/versions/15" opts = {'uncompress': True} - dataset_name = 'COBRE_NIAK' + dataset_name = 'cobre' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) @@ -1494,9 +1494,9 @@ def fetch_cobre_niak(n_subjects=10, data_dir=None, url=None, verbose=1): else: n_subjects = max_subjects - func_filenames = [('fmri_' + i.strip(' "\'') + + func_filenames = [('fmri_' + i.decode().strip(' "\'') + '_session1' + '_run1.nii.gz') for i in ids] - mats_filenames = [('fmri_' + i.strip(' "\'') + + mats_filenames = [('fmri_' + i.decode().strip(' "\'') + '_session1' + '_run1_extra.mat') for i in ids] func_files = [(path, url, opts) for path in func_filenames] diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index a7590841e2..e711b2572e 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -435,7 +435,7 @@ def test_fetch_megatrawls_netmats(): @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) -def test_fetch_cobre_niak(): +def test_fetch_cobre(): local_url = "file://" + tst.datadir ids_sc = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 16, 21, 22, 25, 28, 29, 32, 34, 37, 39, 40, 41, 42, 44, 46, 47, 49, 59, 60, @@ -448,14 +448,14 @@ def test_fetch_cobre_niak(): 104, 107, 111, 113, 114, 115, 116, 118, 119, 120, 121, 123, 124, 125, 127, 128, 129, 130, 131, 134, 135, 136, 138, 139, 140, 141, 144, 146, 147] - ids_sch = [('szxxx0040%03d' % i) for i in ids_sc] - ids_cont = ids_cont = [('contxxx0040%03d' % i) for i in ids_con] + ids_sch = [('szxxx0040%03d' % i).encode() for i in ids_sc] + ids_cont = ids_cont = [('contxxx0040%03d' % i).encode() for i in ids_con] subs = np.array(ids_sch + ids_cont, dtype='S17') subs = subs.view(dtype=[('subject_type', 'S17')]) tst.mock_fetch_files.add_csv('cobre_model_group.csv', subs) # All subjects - cobre_data = func.fetch_cobre_niak(n_subjects=None, data_dir=tst.tmpdir, - url=local_url) + cobre_data = func.fetch_cobre(n_subjects=None, data_dir=tst.tmpdir, + url=local_url) phenotypic_names = ['phenotypic', 'mat_files', 'description', 'func'] # test length of functional filenames to max 146 @@ -476,12 +476,12 @@ def test_fetch_cobre_niak(): assert_not_equal(cobre_data.description, '') # Fetch only 30 subjects - data_30_subjects = func.fetch_cobre_niak(n_subjects=30, url=local_url, - data_dir=tst.tmpdir) + data_30_subjects = func.fetch_cobre(n_subjects=30, url=local_url, + data_dir=tst.tmpdir) assert_equal(len(data_30_subjects.func), 30) assert_equal(len(data_30_subjects.mat_files), 30) # Test more than maximum subjects - test_150_subjects = func.fetch_cobre_niak(n_subjects=150, url=local_url, - data_dir=tst.datadir) + test_150_subjects = func.fetch_cobre(n_subjects=150, url=local_url, + data_dir=tst.datadir) assert_equal(len(test_150_subjects.func), 146) From 4bede95e7881470c59a0abfc76a7c885bdbcdfb7 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Tue, 16 Feb 2016 10:58:32 +0100 Subject: [PATCH 0154/1925] Fixing python 35 compatibility failures --- nilearn/datasets/tests/test_func.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index e711b2572e..5615777eb9 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -457,13 +457,13 @@ def test_fetch_cobre(): cobre_data = func.fetch_cobre(n_subjects=None, data_dir=tst.tmpdir, url=local_url) - phenotypic_names = ['phenotypic', 'mat_files', 'description', 'func'] + phenotypic_names = ['description', 'func', 'mat_files', 'phenotypic'] # test length of functional filenames to max 146 assert_equal(len(cobre_data.func), 146) # test length of corresponding matlab files of same length to max 146 assert_equal(len(cobre_data.mat_files), 146) # test return type variables - assert_equal(cobre_data.keys(), phenotypic_names) + assert_equal(sorted(cobre_data), phenotypic_names) # test functional filenames in a list assert_true(isinstance(cobre_data.func, list)) # test matlab files in a list From a9f9c5e31b8e55098e980f2e3fbfd80a43fabfc1 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Tue, 16 Feb 2016 22:57:37 +0100 Subject: [PATCH 0155/1925] Datasets structure link and comments --- nilearn/datasets/func.py | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 9dfe8af7f0..4984b584d4 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -7,7 +7,6 @@ import numpy as np import nibabel from sklearn.datasets.base import Bunch -from sklearn.utils import shuffle from .utils import (_get_dataset_dir, _fetch_files, _get_dataset_descr, _read_md5_sum_file, _tree, _filter_columns) @@ -1464,6 +1463,11 @@ def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1): - 'phenotypic': ndarray Contains data of clinical variables, sex, age, FD. - 'description': data description of the release and references. 
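+
+    For illustration, a minimal usage sketch (the attribute names are the
+    ones listed under Returns; downloading requires network access)::
+
+        from nilearn.datasets import fetch_cobre
+
+        # fetch the default 10 subjects and inspect the first one
+        cobre = fetch_cobre(n_subjects=10)
+        print(cobre.func[0])        # path to a preprocessed 4D image
+        print(cobre.phenotypic[0])  # sz, age, sex, FD of that subject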
+ + Notes + ----- + More information about datasets structure, See: + https://figshare.com/articles/COBRE_preprocessed_with_NIAK_0_12_4/1160600 """ if url is None: url = "https://ndownloader.figshare.com/articles/1160600/versions/15" @@ -1484,14 +1488,11 @@ def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1): # Get the ids of the datasets ids = csv_array['subject_type'] max_subjects = len(ids) - if n_subjects is not None: - if n_subjects < max_subjects: - # shuffle datasets to have almost equal balance between sch vs ctrl - ids = shuffle(ids, random_state=0, n_samples=n_subjects) - elif n_subjects > max_subjects: - warnings.warn('Warning: there are only %d subjects' % max_subjects) - n_subjects = max_subjects - else: + if n_subjects is None: + n_subjects = max_subjects + + if n_subjects > max_subjects: + warnings.warn('Warning: there are only %d subjects' % max_subjects) n_subjects = max_subjects func_filenames = [('fmri_' + i.decode().strip(' "\'') + @@ -1505,5 +1506,15 @@ def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1): func_files = _fetch_files(data_dir, func_files, verbose=verbose) mat_files = _fetch_files(data_dir, mat_files, verbose=verbose) + if n_subjects < max_subjects: + first_split = n_subjects/2 + second_split = n_subjects - first_split + func = func_files[0:71][:first_split] + func.extend(func_files[72:146][:second_split]) + func_files = func + mats = mat_files[0:71][:first_split] + mats.extend(mat_files[72:146][:second_split]) + mat_files = mats + return Bunch(func=func_files, mat_files=mat_files, phenotypic=csv_array, description=fdescr) From cc4dd733a9a7ea24f7004f2f1228abcdf338f5c2 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Tue, 16 Feb 2016 23:15:04 +0100 Subject: [PATCH 0156/1925] Fixing python 35 failure --- nilearn/datasets/func.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 4984b584d4..d414667d58 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1507,7 +1507,7 @@ def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1): mat_files = _fetch_files(data_dir, mat_files, verbose=verbose) if n_subjects < max_subjects: - first_split = n_subjects/2 + first_split = int(n_subjects/2) second_split = n_subjects - first_split func = func_files[0:71][:first_split] func.extend(func_files[72:146][:second_split]) From 0f789a1a6e9347a25b6933a19603bb42dc2928c1 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Mon, 8 Feb 2016 09:47:10 +0100 Subject: [PATCH 0157/1925] [Datasets] Fetcher for Multiscale Brain Parcellations --- doc/modules/reference.rst | 1 + nilearn/datasets/__init__.py | 3 +- nilearn/datasets/atlas.py | 98 +++++++++++++ .../description/urchs_multiscale_2015.rst | 133 ++++++++++++++++++ nilearn/datasets/tests/test_atlas.py | 39 +++++ 5 files changed, 273 insertions(+), 1 deletion(-) create mode 100644 nilearn/datasets/description/urchs_multiscale_2015.rst diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index d09739dbe5..18b1af6e9a 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -70,6 +70,7 @@ uses. 
fetch_atlas_smith_2009 fetch_atlas_yeo_2011 fetch_atlas_aal + fetch_atlas_basc_multiscale fetch_abide_pcp fetch_adhd fetch_haxby diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py index d107d0e639..1d5b0c59a9 100644 --- a/nilearn/datasets/__init__.py +++ b/nilearn/datasets/__init__.py @@ -11,7 +11,8 @@ from .atlas import (fetch_atlas_craddock_2012, fetch_atlas_destrieux_2009, fetch_atlas_harvard_oxford, fetch_atlas_msdl, fetch_atlas_power_2011, fetch_atlas_smith_2009, - fetch_atlas_yeo_2011, fetch_atlas_aal) + fetch_atlas_yeo_2011, fetch_atlas_aal, + fetch_atlas_basc_multiscale) __all__ = ['fetch_icbm152_2009', 'load_mni152_template', 'fetch_oasis_vbm', 'fetch_haxby_simple', 'fetch_haxby', 'fetch_nyu_rest', diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 1874fa1a82..ccf6e5732a 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -618,3 +618,101 @@ def fetch_atlas_aal(version='SPM12', data_dir=None, url=None, resume=True, 'labels': labels_dict} return Bunch(**params) + + +def fetch_atlas_basc_multiscale(version='sym', data_dir=None, + resume=True, verbose=1): + """Downloads and load Multiscale functional Brain parcellations + + This atlas includes group brain parcellations generated from + resting-state functional magnetic resonance images for about + 200 young healthy subjects. + + Multiple scales (number of networks) are available, and includes + 7, 12, 20, 36, 64, 122, 197, 325, 444. The brain parcellations + have been generated using a method called bootstrap analysis of + stable clusters called as BASC, (Bellec et al., 2010) and the + scales have been selected using a data-driven method called MSTEPS + (Bellec, 2013). + + Note that two versions of the template are available, 'sym' or 'asym'. + The 'asym' type contains brain images that have been registered in the + asymmetric version of the MNI brain template (reflecting that the brain + is asymmetric), while the 'sym' type contains images registered in the + symmetric version of the MNI template. The symmetric template has been + forced to be symmetric anatomically, and is therefore ideally suited to + study homotopic functional connections in fMRI: finding homotopic regions + simply consists of flipping the x-axis of the template. + + Parameters + ---------- + version: str, optional + Available versions are 'sym' or 'asym'. By default all scales of + brain parcellations of version 'sym' will be returned. + + data_dir: str, optional + directory where data should be downloaded and unpacked. + + url: str, optional + url of file to download. + + resume: bool + whether to resumed download of a partly-downloaded file. + + verbose: int + verbosity level (0 means no message). + + Returns + ------- + data: sklearn.datasets.base.Bunch + dictionary-like object, Keys are: + + - "scale007", "scale012", "scale020", "scale036", "scale064", + "scale122", "scale197", "scale325", "scale444": str, path + to Nifti file of various scales of brain parcellations. + + - "description": details about the data release. + + References + ---------- + Bellec P, Rosa-Neto P, Lyttelton OC, Benali H, Evans AC, Jul. 2010. + Multi-level bootstrap analysis of stable clusters in resting-state fMRI. + NeuroImage 51 (3), 1126-1139. + URL http://dx.doi.org/10.1016/j.neuroimage.2010.02.082 + + Bellec P, Jun. 2013. Mining the Hierarchy of Resting-State Brain Networks: + Selection of Representative Clusters in a Multiscale Structure. + Pattern Recognition in Neuroimaging (PRNI), 2013 pp. 54-57. 
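+
+    For illustration, a minimal sketch of fetching the parcellations and
+    reading one of the scales listed above::
+
+        from nilearn import datasets
+
+        basc = datasets.fetch_atlas_basc_multiscale(version='sym')
+        # each scale key holds the path to one 3D parcellation image
+        print(basc.scale064)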
+ """ + versions = ['sym', 'asym'] + if version not in versions: + raise ValueError('The version of Brain parcellations requested "%s" ' + 'does not exist. Please choose one among them %s.' % + (version, str(versions))) + + keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064', + 'scale122', 'scale197', 'scale325', 'scale444'] + + if version == 'sym': + url = "https://ndownloader.figshare.com/files/1861819" + elif version == 'asym': + url = "https://ndownloader.figshare.com/files/1861820" + opts = {'uncompress': True} + + dataset_name = "urchs_multiscale_2015" + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) + + folder_name = 'template_cambridge_basc_multiscale_nii_' + version + basenames = ['template_cambridge_basc_multiscale_' + version + '_' + key + '.nii.gz' + for key in keys] + + filenames = [(os.path.join(folder_name, basename), url, opts) + for basename in basenames] + data = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose) + + descr = _get_dataset_descr(dataset_name) + + params = dict(zip(keys, data)) + params['description'] = descr + + return Bunch(**params) diff --git a/nilearn/datasets/description/urchs_multiscale_2015.rst b/nilearn/datasets/description/urchs_multiscale_2015.rst new file mode 100644 index 0000000000..84f0b089e7 --- /dev/null +++ b/nilearn/datasets/description/urchs_multiscale_2015.rst @@ -0,0 +1,133 @@ +An atlas of Multiscale Brain Parcellations + + +Content +------- +This work is a derivative from the Cambridge sample found in the [1000 +functional connectome project] +(http://fcon_1000.projects.nitrc.org/fcpClassic/FcpTable.html) (Liu et +al., 2009), originally released under Creative Commons -- Attribution +Non-Commercial. It includes group brain parcellations generated from +resting-state functional magnetic resonance images for about 200 young +healthy subjects. Multiple scales (number of networks) are available, +and includes 7, 12, 20, 36, 64, 122, 197, 325, 444. The brain parcellations +have been generated using a method called bootstrap analysis of stable clusters +(BASC, Bellec et al., 2010) and the scales have been selected using a data-driven +method called MSTEPS (Bellec, 2013). + + +This release more specifically contains the following files: + :'description': a markdown (text) description of the release. + :'scale007', 'scale012', 'scale020', 'scale036', 'scale064', + 'scale122', 'scale197', 'scale325', 'scale444' +brain_parcellation_cambridge_basc_multiscale_(sym,asym)_scale(NNN).nii.gz: +a 3D volume .nii format at 3 mm isotropic resolution, in the MNI non-linear +2009a space (http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009). +Region number I is filled with Is (background is filled with 0s). + + +Note that two versions of the template are available, ending with either +nii_sym or nii_asym. The asym flavor contains brain images that have been +registered in the asymmetric version of the MNI brain template (reflecting +that the brain is asymmetric), while with the sym flavor they have been +registered in the symmetric version of the MNI template. The symmetric +template has been forced to be symmetric anatomically, and is therefore +ideally suited to study homotopic functional connections in fMRI: finding +homotopic regions simply consists of flipping the x-axis of the template. 
+ + +Preprocessing +------------- +The datasets were analysed using the NeuroImaging Analysis Kit (NIAK +https://github.com/SIMEXP/niak) version 0.12.14, under CentOS version 6.3 with +Octave (http://gnu.octave.org) version 3.8.1 and the Minc toolkit +(http://www.bic.mni.mcgill.ca/ServicesSoftware/ServicesSoftwareMincToolKit) +version 0.3.18. +Each fMRI dataset was corrected for inter-slice difference in acquisition time +and the parameters of a rigid-body motion were estimated for each time frame. +Rigid-body motion was estimated within as well as between runs, using the +median volume of the first run as a target. The median volume of one selected +fMRI run for each subject was coregistered with a T1 individual scan using +Minctracc (Collins and Evans, 1998), which was itself non-linearly transformed +to the Montreal Neurological Institute (MNI) template (Fonov et al., 2011) +using the CIVET pipeline (Ad-Dabbagh et al., 2006). The MNI symmetric template +was generated from the ICBM152 sample of 152 young adults, after 40 iterations +of non-linear coregistration. The rigid-body transform, fMRI-to-T1 transform +and T1-to-stereotaxic transform were all combined, and the functional volumes +were resampled in the MNI space at a 3 mm isotropic resolution. The +"scrubbing" method of (Power et al., 2012), was used to remove the volumes +with excessive motion (frame displacement greater than 0.5 mm). A minimum +number of 60 unscrubbed volumes per run, corresponding to ~180 s of +acquisition, was then required for further analysis. The following nuisance +parameters were regressed out from the time series at each voxel: slow time +drifts (basis of discrete cosines with a 0.01 Hz high-pass cut-off), average +signals in conservative masks of the white matter and the lateral ventricles +as well as the first principal components (95% energy) of the +six rigid-body motion parameters and their squares (Giove et al., 2009). The +fMRI volumes were finally spatially smoothed with a 6 mm isotropic Gaussian +blurring kernel. + + +Bootstrap Analysis of Stable Clusters +------------------------------------- +Brain parcellations were derived using BASC (Bellec et al. 2010). A region +growing algorithm was first applied to reduce the brain into regions of +roughly equal size, set to 1000 mm3. The BASC used 100 replications of a +hierarchical clustering with Ward's criterion on resampled individual time +series, using circular block bootstrap. A consensus clustering (hierarchical +with Ward's criterion) was generated across all the individual clustering +replications pooled together, hence generating group clusters. The generation +of group clusters was itself replicated by bootstraping subjects 500 times, +and a (final) consensus clustering (hierarchical Ward's criterion) was +generated on the replicated group clusters. The MSTEPS procedure (Bellec et +al., 2013) was implemented to select a data-driven subset of scales in the +range 5-500, approximating the group stability matrices up to 5% residual +energy, through linear interpolation over selected scales. Note that the +number of scales itself was selected by the MSTEPS procedure in a data-driven +fashion, and that the number of individual, group and final (consensus) number +of clusters were not necessarily identical. + + +References +---------- +Ad-Dabbagh Y, Einarson D, Lyttelton O, Muehlboeck J S, Mok K, +Ivanov O, Vincent R D, Lepage C, Lerch J, Fombonne E, Evans A C, +2006. 
The CIVET Image-Processing Environment: A Fully Automated
+Comprehensive Pipeline for Anatomical Neuroimaging Research.
+In: Corbetta, M. (Ed.), Proceedings of the 12th Annual Meeting
+of the Human Brain Mapping Organization. Neuroimage, Florence, Italy.
+
+Bellec P, Rosa-Neto P, Lyttelton O C, Benali H, Evans A C, Jul. 2010.
+Multi-level bootstrap analysis of stable clusters in resting-state fMRI.
+NeuroImage 51 (3), 1126-1139.
+URL http://dx.doi.org/10.1016/j.neuroimage.2010.02.082
+
+Bellec P, Jun. 2013. Mining the Hierarchy of Resting-State Brain Networks:
+Selection of Representative Clusters in a Multiscale Structure. In: Pattern
+Recognition in Neuroimaging (PRNI), 2013 International Workshop on. pp.
+54-57.
+
+Collins D L, Evans A C, 1997. Animal: validation and applications of
+nonlinear registration-based segmentation. International Journal of
+Pattern Recognition and Artificial Intelligence 11, 1271-1294.
+
+Fonov V, Evans A C, Botteron K, Almli C R, McKinstry R C, Collins D L,
+Jan. 2011. Unbiased average age-appropriate atlases for pediatric
+studies. NeuroImage 54 (1), 313-327.
+URL http://dx.doi.org/10.1016/j.neuroimage.2010.07.033
+
+Giove F, Gili T, Iacovella V, Macaluso E, Maraviglia B, Oct. 2009.
+Images-based suppression of unwanted global signals in resting-state
+functional connectivity studies. Magnetic resonance imaging 27 (8), 1058-1064.
+URL http://dx.doi.org/10.1016/j.mri.2009.06.004
+
+Liu H, Stufflebeam S M, Sepulcre J, Hedden T, Buckner R L, Dec. 2009.
+Evidence from intrinsic activity that asymmetry of the human brain
+is controlled by multiple factors. Proceedings of the National Academy
+of Sciences 106 (48), 20499-20503.
+URL http://dx.doi.org/10.1073/pnas.0908073106
+
+Power J D, Barnes K A, Snyder A Z, Schlaggar B L, Petersen S E, Feb. 2012.
+Spurious but systematic correlations in functional connectivity
+MRI networks arise from subject motion. NeuroImage 59 (3), 2142-2154.
+URL http://dx.doi.org/10.1016/j.neuroimage.2011.10.018 diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index d971294a2f..da48703b50 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -270,3 +270,42 @@ def test_fetch_atlas_aal(): data_dir=tst.tmpdir, verbose=0) assert_not_equal(dataset.description, '') + + +@with_setup(setup_mock, teardown_mock) +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_atlas_basc_multiscale(): + # default version='sym' + data_sym = atlas.fetch_atlas_basc_multiscale(data_dir=tst.tmpdir, verbose=0) + # version='asym' + data_asym = atlas.fetch_atlas_basc_multiscale(version='asym', verbose=0, + data_dir=tst.tmpdir) + + keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064', + 'scale122', 'scale197', 'scale325', 'scale444'] + + dataset_name = 'urchs_multiscale_2015' + name_sym = 'template_cambridge_basc_multiscale_nii_sym' + basenames_sym = ['template_cambridge_basc_multiscale_sym_' + key + '.nii.gz' + for key in keys] + for key, basename_sym in zip(keys, basenames_sym): + assert_equal(data_sym[key], os.path.join(tst.tmpdir, dataset_name, + name_sym, basename_sym)) + + name_asym = 'template_cambridge_basc_multiscale_nii_asym' + basenames_asym = ['template_cambridge_basc_multiscale_asym_' + key + '.nii.gz' + for key in keys] + for key, basename_asym in zip(keys, basenames_asym): + assert_equal(data_asym[key], os.path.join(tst.tmpdir, dataset_name, + name_asym, basename_asym)) + + assert_equal(len(data_sym), 10) + assert_raises_regex(ValueError, + 'The version of Brain parcellations requested "aym"', + atlas.fetch_atlas_basc_multiscale, version="aym", + data_dir=tst.tmpdir, verbose=0) + + assert_equal(len(tst.mock_url_request.urls), 2) + assert_not_equal(data_sym.description, '') + assert_not_equal(data_asym.description, '') + From 547856b69d2c71a47ff2d39c5b5e52daca1f3355 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Mon, 8 Feb 2016 22:40:55 +0100 Subject: [PATCH 0158/1925] Fixed changing description filename to basc_multiscale_2015 and flake8 --- nilearn/datasets/atlas.py | 9 +++++---- ...ultiscale_2015.rst => basc_multiscale_2015.rst} | 0 nilearn/datasets/tests/test_atlas.py | 14 +++++++------- 3 files changed, 12 insertions(+), 11 deletions(-) rename nilearn/datasets/description/{urchs_multiscale_2015.rst => basc_multiscale_2015.rst} (100%) diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index ccf6e5732a..2fe8618c3d 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -699,12 +699,13 @@ def fetch_atlas_basc_multiscale(version='sym', data_dir=None, url = "https://ndownloader.figshare.com/files/1861820" opts = {'uncompress': True} - dataset_name = "urchs_multiscale_2015" - data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) + dataset_name = "basc_multiscale_2015" + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, + verbose=verbose) folder_name = 'template_cambridge_basc_multiscale_nii_' + version - basenames = ['template_cambridge_basc_multiscale_' + version + '_' + key + '.nii.gz' - for key in keys] + basenames = ['template_cambridge_basc_multiscale_' + version + + '_' + key + '.nii.gz' for key in keys] filenames = [(os.path.join(folder_name, basename), url, opts) for basename in basenames] diff --git a/nilearn/datasets/description/urchs_multiscale_2015.rst b/nilearn/datasets/description/basc_multiscale_2015.rst similarity index 100% rename from 
nilearn/datasets/description/urchs_multiscale_2015.rst rename to nilearn/datasets/description/basc_multiscale_2015.rst diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index da48703b50..6f750930b8 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -276,7 +276,8 @@ def test_fetch_atlas_aal(): @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_atlas_basc_multiscale(): # default version='sym' - data_sym = atlas.fetch_atlas_basc_multiscale(data_dir=tst.tmpdir, verbose=0) + data_sym = atlas.fetch_atlas_basc_multiscale(data_dir=tst.tmpdir, + verbose=0) # version='asym' data_asym = atlas.fetch_atlas_basc_multiscale(version='asym', verbose=0, data_dir=tst.tmpdir) @@ -284,17 +285,17 @@ def test_fetch_atlas_basc_multiscale(): keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064', 'scale122', 'scale197', 'scale325', 'scale444'] - dataset_name = 'urchs_multiscale_2015' + dataset_name = 'basc_multiscale_2015' name_sym = 'template_cambridge_basc_multiscale_nii_sym' - basenames_sym = ['template_cambridge_basc_multiscale_sym_' + key + '.nii.gz' - for key in keys] + basenames_sym = ['template_cambridge_basc_multiscale_sym_' + + key + '.nii.gz' for key in keys] for key, basename_sym in zip(keys, basenames_sym): assert_equal(data_sym[key], os.path.join(tst.tmpdir, dataset_name, name_sym, basename_sym)) name_asym = 'template_cambridge_basc_multiscale_nii_asym' - basenames_asym = ['template_cambridge_basc_multiscale_asym_' + key + '.nii.gz' - for key in keys] + basenames_asym = ['template_cambridge_basc_multiscale_asym_' + + key + '.nii.gz' for key in keys] for key, basename_asym in zip(keys, basenames_asym): assert_equal(data_asym[key], os.path.join(tst.tmpdir, dataset_name, name_asym, basename_asym)) @@ -308,4 +309,3 @@ def test_fetch_atlas_basc_multiscale(): assert_equal(len(tst.mock_url_request.urls), 2) assert_not_equal(data_sym.description, '') assert_not_equal(data_asym.description, '') - From 8bb34a4e882ed2a7f01b54ac696b62f4bf5a7447 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Tue, 9 Feb 2016 16:51:50 +0100 Subject: [PATCH 0159/1925] Documentation correction --- nilearn/datasets/atlas.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 2fe8618c3d..65e27783fb 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -622,13 +622,13 @@ def fetch_atlas_aal(version='SPM12', data_dir=None, url=None, resume=True, def fetch_atlas_basc_multiscale(version='sym', data_dir=None, resume=True, verbose=1): - """Downloads and load Multiscale functional Brain parcellations + """Downloads and loads multiscale functional brain parcellations This atlas includes group brain parcellations generated from - resting-state functional magnetic resonance images for about + resting-state functional magnetic resonance images from about 200 young healthy subjects. - Multiple scales (number of networks) are available, and includes + Multiple scales (number of networks) are available, among 7, 12, 20, 36, 64, 122, 197, 325, 444. 
The brain parcellations have been generated using a method called bootstrap analysis of stable clusters called as BASC, (Bellec et al., 2010) and the From 594f9394e560c58d362adee38107b4f86a0dcc92 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Sun, 14 Feb 2016 21:37:45 +0100 Subject: [PATCH 0160/1925] Example focussing on Visualizing three scales of networks --- .../plot_multiscale_parcellations.py | 43 +++++++++++++++++++ .../description/basc_multiscale_2015.rst | 2 +- 2 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 examples/01_plotting/plot_multiscale_parcellations.py diff --git a/examples/01_plotting/plot_multiscale_parcellations.py b/examples/01_plotting/plot_multiscale_parcellations.py new file mode 100644 index 0000000000..d109195f04 --- /dev/null +++ b/examples/01_plotting/plot_multiscale_parcellations.py @@ -0,0 +1,43 @@ +""" +Visualizing multiscale functional brain parcellations +===================================================== + +This example shows how to download and fetch brain parcellations of +multiple networks using :func:`nilearn.datasets.fetch_atlas_basc_multiscale` +and visualize them using plotting function :func:`nilearn.plotting.plot_roi`. + +We show here only three different networks of symmetric version. For more +details about different versions and different networks, please refer to its +documentation. +""" + +################################################################################ +# Fetching multiscale group brain parcellations +# Import datasets module and use fetch_atlas_basc_multiscale function +from nilearn import datasets + +parcellations = datasets.fetch_atlas_basc_multiscale(version='sym') + +# We show here networks of 64, 197, 444 +networks_64 = parcellations['scale064'] +networks_197 = parcellations['scale197'] +networks_444 = parcellations['scale444'] + +################################################################################ +# Visualizing brain parcellations +# Import plotting module and use plot_roi function, since the maps are in 3D +import matplotlib.pyplot as plt +from nilearn import plotting + +# The coordinates of all plots are selected automatically by itself +# We manually change the colormap of our choice +plotting.plot_roi(networks_64, cmap=plotting.cm.bwr, + title='64 regions of brain clusters') + +plotting.plot_roi(networks_197, cmap=plotting.cm.bwr, + title='197 regions of brain clusters') + +plotting.plot_roi(networks_444, cmap=plotting.cm.bwr_r, + title='444 regions of brain clusters') + +plotting.show() diff --git a/nilearn/datasets/description/basc_multiscale_2015.rst b/nilearn/datasets/description/basc_multiscale_2015.rst index 84f0b089e7..3de652cc9a 100644 --- a/nilearn/datasets/description/basc_multiscale_2015.rst +++ b/nilearn/datasets/description/basc_multiscale_2015.rst @@ -1,4 +1,4 @@ -An atlas of Multiscale Brain Parcellations +An atlas of multiscale brain parcellations Content From 4d13192d28fb645a5d8f42c05ffcc695486a859e Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Sun, 14 Feb 2016 21:46:09 +0100 Subject: [PATCH 0161/1925] Documentation version added --- nilearn/datasets/atlas.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 65e27783fb..18034431eb 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -644,6 +644,8 @@ def fetch_atlas_basc_multiscale(version='sym', data_dir=None, study homotopic functional connections in fMRI: finding homotopic regions simply consists of flipping the x-axis 
of the template. + .. versionadded:: 0.2.3 + Parameters ---------- version: str, optional From 92d193a01108004fe7f4190786728d426761b65f Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Sun, 14 Feb 2016 21:55:30 +0100 Subject: [PATCH 0162/1925] pep8 corrections --- examples/01_plotting/plot_multiscale_parcellations.py | 5 ++--- nilearn/datasets/tests/test_atlas.py | 8 ++++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/examples/01_plotting/plot_multiscale_parcellations.py b/examples/01_plotting/plot_multiscale_parcellations.py index d109195f04..07e3541b37 100644 --- a/examples/01_plotting/plot_multiscale_parcellations.py +++ b/examples/01_plotting/plot_multiscale_parcellations.py @@ -11,7 +11,7 @@ documentation. """ -################################################################################ +############################################################################### # Fetching multiscale group brain parcellations # Import datasets module and use fetch_atlas_basc_multiscale function from nilearn import datasets @@ -23,10 +23,9 @@ networks_197 = parcellations['scale197'] networks_444 = parcellations['scale444'] -################################################################################ +############################################################################### # Visualizing brain parcellations # Import plotting module and use plot_roi function, since the maps are in 3D -import matplotlib.pyplot as plt from nilearn import plotting # The coordinates of all plots are selected automatically by itself diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index 6f750930b8..d36397a695 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -287,15 +287,15 @@ def test_fetch_atlas_basc_multiscale(): dataset_name = 'basc_multiscale_2015' name_sym = 'template_cambridge_basc_multiscale_nii_sym' - basenames_sym = ['template_cambridge_basc_multiscale_sym_' - + key + '.nii.gz' for key in keys] + basenames_sym = ['template_cambridge_basc_multiscale_sym_' + + key + '.nii.gz' for key in keys] for key, basename_sym in zip(keys, basenames_sym): assert_equal(data_sym[key], os.path.join(tst.tmpdir, dataset_name, name_sym, basename_sym)) name_asym = 'template_cambridge_basc_multiscale_nii_asym' - basenames_asym = ['template_cambridge_basc_multiscale_asym_' - + key + '.nii.gz' for key in keys] + basenames_asym = ['template_cambridge_basc_multiscale_asym_' + + key + '.nii.gz' for key in keys] for key, basename_asym in zip(keys, basenames_asym): assert_equal(data_asym[key], os.path.join(tst.tmpdir, dataset_name, name_asym, basename_asym)) From c8ea1c7636f5628cfe66802d6d77c99d7491513e Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Mon, 15 Feb 2016 09:57:09 +0100 Subject: [PATCH 0163/1925] Cosmit changes in example --- examples/01_plotting/plot_multiscale_parcellations.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/examples/01_plotting/plot_multiscale_parcellations.py b/examples/01_plotting/plot_multiscale_parcellations.py index 07e3541b37..555e54c883 100644 --- a/examples/01_plotting/plot_multiscale_parcellations.py +++ b/examples/01_plotting/plot_multiscale_parcellations.py @@ -6,14 +6,15 @@ multiple networks using :func:`nilearn.datasets.fetch_atlas_basc_multiscale` and visualize them using plotting function :func:`nilearn.plotting.plot_roi`. -We show here only three different networks of symmetric version. 
For more +We show here only three different networks of 'symmetric' version. For more details about different versions and different networks, please refer to its documentation. """ ############################################################################### # Fetching multiscale group brain parcellations -# Import datasets module and use fetch_atlas_basc_multiscale function + +# import datasets module and use `fetch_atlas_basc_multiscale` function from nilearn import datasets parcellations = datasets.fetch_atlas_basc_multiscale(version='sym') @@ -25,7 +26,8 @@ ############################################################################### # Visualizing brain parcellations -# Import plotting module and use plot_roi function, since the maps are in 3D + +# import plotting module and use `plot_roi` function, since the maps are in 3D from nilearn import plotting # The coordinates of all plots are selected automatically by itself From 55cd8bcbaedec22d952e61f71197b394302289fd Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Tue, 16 Feb 2016 14:51:44 +0100 Subject: [PATCH 0164/1925] Function name change to basc_multiscale_2015 --- doc/modules/reference.rst | 2 +- doc/whats_new.rst | 2 ++ .../01_plotting/plot_multiscale_parcellations.py | 6 +++--- nilearn/datasets/__init__.py | 5 +++-- nilearn/datasets/atlas.py | 4 ++-- nilearn/datasets/tests/test_atlas.py | 13 +++++++------ 6 files changed, 18 insertions(+), 14 deletions(-) diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index 18b1af6e9a..de6cdae867 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -70,7 +70,7 @@ uses. fetch_atlas_smith_2009 fetch_atlas_yeo_2011 fetch_atlas_aal - fetch_atlas_basc_multiscale + fetch_atlas_basc_multiscale_2015 fetch_abide_pcp fetch_adhd fetch_haxby diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 5fd94ee5f7..75b1712c38 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -10,6 +10,8 @@ New features image or a list of images using :func:`nilearn.image.math_img`. - Downloader for COBRE datasets of 146 rest fMRI subjects with :func:`nilearn.datasets.fetch_cobre` + - Fetcher for multiscale functional brain parcellations (BASC) + :func:`nilearn.datasets.fetch_atlas_basc_multiscale_2015` 0.2.2 ====== diff --git a/examples/01_plotting/plot_multiscale_parcellations.py b/examples/01_plotting/plot_multiscale_parcellations.py index 555e54c883..c1b4cf51b0 100644 --- a/examples/01_plotting/plot_multiscale_parcellations.py +++ b/examples/01_plotting/plot_multiscale_parcellations.py @@ -3,7 +3,7 @@ ===================================================== This example shows how to download and fetch brain parcellations of -multiple networks using :func:`nilearn.datasets.fetch_atlas_basc_multiscale` +multiple networks using :func:`nilearn.datasets.fetch_atlas_basc_multiscale_2015` and visualize them using plotting function :func:`nilearn.plotting.plot_roi`. We show here only three different networks of 'symmetric' version. 
For more @@ -14,10 +14,10 @@ ############################################################################### # Fetching multiscale group brain parcellations -# import datasets module and use `fetch_atlas_basc_multiscale` function +# import datasets module and use `fetch_atlas_basc_multiscale_2015` function from nilearn import datasets -parcellations = datasets.fetch_atlas_basc_multiscale(version='sym') +parcellations = datasets.fetch_atlas_basc_multiscale_2015(version='sym') # We show here networks of 64, 197, 444 networks_64 = parcellations['scale064'] diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py index 1d5b0c59a9..902618bb6e 100644 --- a/nilearn/datasets/__init__.py +++ b/nilearn/datasets/__init__.py @@ -12,7 +12,7 @@ fetch_atlas_harvard_oxford, fetch_atlas_msdl, fetch_atlas_power_2011, fetch_atlas_smith_2009, fetch_atlas_yeo_2011, fetch_atlas_aal, - fetch_atlas_basc_multiscale) + fetch_atlas_basc_multiscale_2015) __all__ = ['fetch_icbm152_2009', 'load_mni152_template', 'fetch_oasis_vbm', 'fetch_haxby_simple', 'fetch_haxby', 'fetch_nyu_rest', @@ -22,4 +22,5 @@ 'fetch_atlas_harvard_oxford', 'fetch_atlas_msdl', 'fetch_atlas_power_2011', 'fetch_atlas_smith_2009', 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal', - 'fetch_megatrawls_netmats', 'fetch_cobre'] + 'fetch_megatrawls_netmats', 'fetch_cobre', + 'fetch_atlas_basc_multiscale_2015'] diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 18034431eb..e949f1634f 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -620,8 +620,8 @@ def fetch_atlas_aal(version='SPM12', data_dir=None, url=None, resume=True, return Bunch(**params) -def fetch_atlas_basc_multiscale(version='sym', data_dir=None, - resume=True, verbose=1): +def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, + resume=True, verbose=1): """Downloads and loads multiscale functional brain parcellations This atlas includes group brain parcellations generated from diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index d36397a695..3fd2838c53 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -274,13 +274,14 @@ def test_fetch_atlas_aal(): @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) -def test_fetch_atlas_basc_multiscale(): +def test_fetch_atlas_basc_multiscale_2015(): # default version='sym' - data_sym = atlas.fetch_atlas_basc_multiscale(data_dir=tst.tmpdir, - verbose=0) + data_sym = atlas.fetch_atlas_basc_multiscale_2015(data_dir=tst.tmpdir, + verbose=0) # version='asym' - data_asym = atlas.fetch_atlas_basc_multiscale(version='asym', verbose=0, - data_dir=tst.tmpdir) + data_asym = atlas.fetch_atlas_basc_multiscale_2015(version='asym', + verbose=0, + data_dir=tst.tmpdir) keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064', 'scale122', 'scale197', 'scale325', 'scale444'] @@ -303,7 +304,7 @@ def test_fetch_atlas_basc_multiscale(): assert_equal(len(data_sym), 10) assert_raises_regex(ValueError, 'The version of Brain parcellations requested "aym"', - atlas.fetch_atlas_basc_multiscale, version="aym", + atlas.fetch_atlas_basc_multiscale_2015, version="aym", data_dir=tst.tmpdir, verbose=0) assert_equal(len(tst.mock_url_request.urls), 2) From da638fc7a29e9b692023ef000341728a78ca08b6 Mon Sep 17 00:00:00 2001 From: KamalakerDadi Date: Wed, 17 Feb 2016 10:19:53 +0100 Subject: [PATCH 0165/1925] Added datasets structure link --- nilearn/datasets/atlas.py | 11 
+++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index e949f1634f..2815bdc325 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -649,8 +649,8 @@ def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, Parameters ---------- version: str, optional - Available versions are 'sym' or 'asym'. By default all scales of - brain parcellations of version 'sym' will be returned. + Available versions are 'sym' or 'asym'. By default all scales of + brain parcellations of version 'sym' will be returned. data_dir: str, optional directory where data should be downloaded and unpacked. @@ -685,6 +685,13 @@ def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, Bellec P, Jun. 2013. Mining the Hierarchy of Resting-State Brain Networks: Selection of Representative Clusters in a Multiscale Structure. Pattern Recognition in Neuroimaging (PRNI), 2013 pp. 54-57. + + Notes + ----- + More information about dataset's structure, See: + https://figshare.com/articles/ + Group_multiscale_functional_template_generated_with_BASC_on_the_Cambridge_sample + /1285615 """ versions = ['sym', 'asym'] if version not in versions: From 50d2eef83336ce967bebd69cba582b2bc892cd3e Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Wed, 17 Feb 2016 10:54:29 +0100 Subject: [PATCH 0166/1925] performing 3 memory consumption measure and keep the max to make memory profiling more robust --- nilearn/_utils/testing.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/nilearn/_utils/testing.py b/nilearn/_utils/testing.py index 3bef5ad1c5..be690b03e7 100644 --- a/nilearn/_utils/testing.py +++ b/nilearn/_utils/testing.py @@ -70,9 +70,14 @@ def with_memory_profiler(func): def memory_used(func, *args, **kwargs): """Compute memory usage when executing func.""" - gc.collect() - mem_use = memory_usage((func, args, kwargs), interval=0.001) - return max(mem_use) - min(mem_use) + result = [] + # memory profiler results are not stable enough, we compute the + # memory consumption 3 times and keep the maximum measure. + for _ in range(3): + gc.collect() + mem_use = memory_usage((func, args, kwargs), interval=0.001) + result.append(max(mem_use) - min(mem_use)) + return max(result) except ImportError: def with_memory_profiler(func): From f0668bceb079fb1786d1b43e4fb48d7174786692 Mon Sep 17 00:00:00 2001 From: Alexandre Abadie Date: Wed, 17 Feb 2016 11:58:13 +0100 Subject: [PATCH 0167/1925] compute memory usage of 3 calls of func --- nilearn/_utils/testing.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/nilearn/_utils/testing.py b/nilearn/_utils/testing.py index be690b03e7..51c1ff55f1 100644 --- a/nilearn/_utils/testing.py +++ b/nilearn/_utils/testing.py @@ -70,14 +70,13 @@ def with_memory_profiler(func): def memory_used(func, *args, **kwargs): """Compute memory usage when executing func.""" - result = [] - # memory profiler results are not stable enough, we compute the - # memory consumption 3 times and keep the maximum measure. 
- for _ in range(3): - gc.collect() - mem_use = memory_usage((func, args, kwargs), interval=0.001) - result.append(max(mem_use) - min(mem_use)) - return max(result) + def func_3_times(*args, **kwargs): + for _ in range(3): + func(*args, **kwargs) + + gc.collect() + mem_use = memory_usage((func_3_times, args, kwargs), interval=0.001) + return max(mem_use) - min(mem_use) except ImportError: def with_memory_profiler(func): From 718a4fa1dcad396681f0d641abea244d32f2ec41 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Wed, 17 Feb 2016 15:36:08 +0100 Subject: [PATCH 0168/1925] Shorten BASC Multiscale URL. --- nilearn/datasets/atlas.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 2815bdc325..23085e1b60 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -688,10 +688,8 @@ def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, Notes ----- - More information about dataset's structure, See: - https://figshare.com/articles/ - Group_multiscale_functional_template_generated_with_BASC_on_the_Cambridge_sample - /1285615 + For more information on this dataset's structure, see + https://figshare.com/articles/basc/1285615 """ versions = ['sym', 'asym'] if version not in versions: From e6bf5eacc9db6c5ef10bac6aed6c13b56019984d Mon Sep 17 00:00:00 2001 From: Salma Date: Sat, 16 Jan 2016 22:25:05 +0100 Subject: [PATCH 0169/1925] dosenbach2010 rois as in paper --- doc/modules/reference.rst | 1 + nilearn/datasets/__init__.py | 5 +- nilearn/datasets/atlas.py | 29 ++++ nilearn/datasets/data/dosenbach_2010.csv | 161 ++++++++++++++++++ .../datasets/description/dosenbach_2010.rst | 19 +++ nilearn/datasets/tests/test_atlas.py | 7 + 6 files changed, 220 insertions(+), 2 deletions(-) create mode 100644 nilearn/datasets/data/dosenbach_2010.csv create mode 100644 nilearn/datasets/description/dosenbach_2010.rst diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index de6cdae867..bc70465f5f 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -71,6 +71,7 @@ uses. fetch_atlas_yeo_2011 fetch_atlas_aal fetch_atlas_basc_multiscale_2015 + fetch_atlas_dosenbach_2010 fetch_abide_pcp fetch_adhd fetch_haxby diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py index 902618bb6e..0f3f9c4ea8 100644 --- a/nilearn/datasets/__init__.py +++ b/nilearn/datasets/__init__.py @@ -12,7 +12,8 @@ fetch_atlas_harvard_oxford, fetch_atlas_msdl, fetch_atlas_power_2011, fetch_atlas_smith_2009, fetch_atlas_yeo_2011, fetch_atlas_aal, - fetch_atlas_basc_multiscale_2015) + fetch_atlas_basc_multiscale_2015, + fetch_atlas_dosenbach_2010) __all__ = ['fetch_icbm152_2009', 'load_mni152_template', 'fetch_oasis_vbm', 'fetch_haxby_simple', 'fetch_haxby', 'fetch_nyu_rest', @@ -23,4 +24,4 @@ 'fetch_atlas_power_2011', 'fetch_atlas_smith_2009', 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal', 'fetch_megatrawls_netmats', 'fetch_cobre', - 'fetch_atlas_basc_multiscale_2015'] + 'fetch_atlas_basc_multiscale_2015', 'fetch_atlas_dosenbach_2010'] diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 23085e1b60..7d8116d4b4 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -724,3 +724,32 @@ def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, params['description'] = descr return Bunch(**params) + + +def fetch_atlas_dosenbach_2010(): + """Download and load the Dosenbach et al. brain atlas composed of 160 ROIs. 
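Returning to the memory-profiling change above, here is a self-contained sketch of the measurement pattern; it assumes only that the third-party memory_profiler package is installed, and the toy workload at the end is illustrative:

    # Run the target function three times inside a single memory_usage()
    # call and report the spread between peak and baseline, mirroring the
    # func_3_times approach above.
    import gc

    from memory_profiler import memory_usage


    def memory_used(func, *args, **kwargs):
        """Compute memory usage when executing func."""
        def func_3_times(*args, **kwargs):
            for _ in range(3):
                func(*args, **kwargs)

        gc.collect()
        mem_use = memory_usage((func_3_times, args, kwargs), interval=0.001)
        return max(mem_use) - min(mem_use)


    if __name__ == '__main__':
        # Toy workload (illustrative): allocate a few million floats.
        print(memory_used(lambda: [0.0] * (5 * 10 ** 6)))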
+ + Returns + ------- + data: sklearn.datasets.base.Bunch + dictionary-like object, contains: + - "rois": coordinates of 160 ROIs in MNI space + - "labels": ROIs labels + - "networks": networks names + + + References + ---------- + Dosenbach N.U., Nardos B., et al. "Prediction of individual brain maturity + using fMRI.", 2010, Science 329, 1358-1361. + """ + dataset_name = 'dosenbach_2010' + fdescr = _get_dataset_descr(dataset_name) + package_directory = os.path.dirname(os.path.abspath(__file__)) + csv = os.path.join(package_directory, "data", "dosenbach_2010.csv") + out_csv = np.recfromcsv(csv) + params = dict(rois=out_csv[['x', 'y', 'z']], + labels=out_csv[['name', 'number']], + networks=out_csv['network'], description=fdescr) + + return Bunch(**params) diff --git a/nilearn/datasets/data/dosenbach_2010.csv b/nilearn/datasets/data/dosenbach_2010.csv new file mode 100644 index 0000000000..e6de285980 --- /dev/null +++ b/nilearn/datasets/data/dosenbach_2010.csv @@ -0,0 +1,161 @@ +number,x,y,z,name,network +1,6,64,3,vmPFC,default +2,29,57,18,aPFC,fronto-parietal +3,-29,57,10,aPFC,fronto-parietal +4,0,51,32,mPFC,default +5,-25,51,27,aPFC,default +6,9,51,16,vmPFC,default +7,-6,50,-1,vmPFC,default +8,27,49,26,aPFC,cingulo-opercular +9,42,48,-3,vent aPFC,fronto-parietal +10,-43,47,2,vent aPFC,fronto-parietal +11,-11,45,17,vmPFC,default +12,39,42,16,vlPFC,fronto-parietal +13,8,42,-5,vmPFC,default +14,9,39,20,ACC,default +15,46,39,-15,vlPFC,default +16,40,36,29,dlPFC,fronto-parietal +17,23,33,47,sup frontal,default +18,34,32,7,vPFC,cingulo-opercular +19,-2,30,27,ACC,cingulo-opercular +20,-16,29,54,sup frontal,default +21,-1,28,40,ACC,fronto-parietal +22,46,28,31,dlPFC,fronto-parietal +23,-52,28,17,vPFC,fronto-parietal +24,-44,27,33,dlPFC,fronto-parietal +25,51,23,8,vFC,cingulo-opercular +26,38,21,-1,ant insula,cingulo-opercular +27,9,20,34,dACC,cingulo-opercular +28,-36,18,2,ant insula,cingulo-opercular +29,40,17,40,dFC,fronto-parietal +30,-6,17,34,basal ganglia,cingulo-opercular +31,0,15,45,mFC,cingulo-opercular +32,58,11,14,frontal,sensorimotor +33,-46,10,14,vFC,cingulo-opercular +34,44,8,34,dFC,fronto-parietal +35,60,8,34,dFC,sensorimotor +36,-42,7,36,dFC,fronto-parietal +37,-55,7,23,vFC,sensorimotor +38,-20,6,7,basal ganglia,cingulo-opercular +39,14,6,7,basal ganglia,cingulo-opercular +40,-48,6,1,vFC,cingulo-opercular +41,10,5,51,pre-SMA,sensorimotor +42,43,1,12,vFC,sensorimotor +43,0,-1,52,SMA,sensorimotor +44,37,-2,-3,mid insula,cingulo-opercular +45,53,-3,32,frontal,sensorimotor +46,58,-3,17,precentral gyrus,sensorimotor +47,-12,-3,13,thalamus,cingulo-opercular +48,-42,-3,11,mid insula,sensorimotor +49,-44,-6,49,precentral gyrus,sensorimotor +50,-26,-8,54,parietal,sensorimotor +51,46,-8,24,precentral gyrus,sensorimotor +52,-54,-9,23,precentral gyrus,sensorimotor +53,44,-11,38,precentral gyrus,sensorimotor +54,-47,-12,36,parietal,sensorimotor +55,33,-12,16,mid insula,sensorimotor +56,-36,-12,15,mid insula,sensorimotor +57,-12,-12,6,thalamus,cingulo-opercular +58,11,-12,6,thalamus,cingulo-opercular +59,32,-12,2,mid insula,cingulo-opercular +60,59,-13,8,temporal,sensorimotor +61,-30,-14,1,mid insula,cingulo-opercular +62,-38,-15,59,parietal,sensorimotor +63,52,-15,-13,inf temporal,default +64,-47,-18,50,parietal,sensorimotor +65,46,-20,45,parietal,sensorimotor +66,-55,-22,38,parietal,sensorimotor +67,-54,-22,22,precentral gyrus,sensorimotor +68,-54,-22,9,temporal,sensorimotor +69,41,-23,55,parietal,sensorimotor +70,42,-24,17,post insula,sensorimotor +71,11,-24,2,basal ganglia,cingulo-opercular 
+72,-59,-25,-15,inf temporal,default +73,1,-26,31,post cingulate,default +74,18,-27,62,parietal,sensorimotor +75,-38,-27,60,parietal,sensorimotor +76,-30,-28,9,post insula,cingulo-opercular +77,-24,-30,64,parietal,sensorimotor +78,51,-30,5,temporal,cingulo-opercular +79,-41,-31,48,post parietal,sensorimotor +80,-4,-31,-4,post cingulate,cingulo-opercular +81,54,-31,-18,fusiform,cingulo-opercular +82,-41,-37,16,temporal,sensorimotor +83,-53,-37,13,temporal,sensorimotor +84,28,-37,-15,fusiform,default +85,-3,-38,45,precuneus,default +86,34,-39,65,sup parietal,sensorimotor +87,8,-40,50,precuneus,cingulo-opercular +88,-41,-40,42,IPL,fronto-parietal +89,58,-41,20,parietal,cingulo-opercular +90,-8,-41,3,post cingulate,default +91,-61,-41,-2,inf temporal,default +92,-28,-42,-11,occipital,default +93,-5,-43,25,post cingulate,default +94,9,-43,25,precuneus,default +95,43,-43,8,temporal,cingulo-opercular +96,54,-44,43,IPL,fronto-parietal +97,-55,-44,30,parietal,cingulo-opercular +98,-28,-44,-25,lat cerebellum,cerebellum +99,-35,-46,48,post parietal,fronto-parietal +100,42,-46,21,sup temporal,cingulo-opercular +101,-48,-47,49,IPL,fronto-parietal +102,-41,-47,29,angular gyrus,cingulo-opercular +103,-59,-47,11,temporal,cingulo-opercular +104,-53,-50,39,IPL,fronto-parietal +105,5,-50,33,precuneus,default +106,-18,-50,1,occipital,occipital +107,44,-52,47,IPL,fronto-parietal +108,-5,-52,17,post cingulate,default +109,-24,-54,-21,lat cerebellum,cerebellum +110,-37,-54,-37,inf cerebellum,cerebellum +111,10,-55,17,post cingulate,default +112,-6,-56,29,precuneus,default +113,-34,-57,-24,lat cerebellum,cerebellum +114,-32,-58,46,IPS,fronto-parietal +115,-11,-58,17,post cingulate,default +116,32,-59,41,IPS,fronto-parietal +117,51,-59,34,angular gyrus,default +118,-34,-60,-5,occipital,occipital +119,36,-60,-8,occipital,occipital +120,-6,-60,-15,med cerebellum,cerebellum +121,-25,-60,-34,inf cerebellum,cerebellum +122,32,-61,-31,inf cerebellum,cerebellum +123,46,-62,5,temporal,occipital +124,-48,-63,35,angular gyrus,default +125,-52,-63,15,TPJ,cingulo-opercular +126,-44,-63,-7,occipital,occipital +127,-16,-64,-21,med cerebellum,cerebellum +128,21,-64,-22,lat cerebellum,cerebellum +129,19,-66,-1,occipital,occipital +130,1,-66,-24,med cerebellum,cerebellum +131,-34,-67,-29,inf cerebellum,cerebellum +132,11,-68,42,precuneus,default +133,17,-68,20,occipital,occipital +134,-36,-69,40,IPS,default +135,39,-71,13,occipital,occipital +136,-9,-72,41,occipital,default +137,45,-72,29,occipital,default +138,-11,-72,-14,med cerebellum,cerebellum +139,29,-73,29,occipital,occipital +140,33,-73,-30,inf cerebellum,cerebellum +141,-2,-75,32,occipital,default +142,-29,-75,28,occipital,occipital +143,5,-75,-11,med cerebellum,cerebellum +144,14,-75,-21,med cerebellum,cerebellum +145,-16,-76,33,occipital,occipital +146,-42,-76,26,occipital,default +147,9,-76,14,occipital,occipital +148,15,-77,32,occipital,occipital +149,20,-78,-2,occipital,occipital +150,-21,-79,-33,inf cerebellum,cerebellum +151,-6,-79,-33,inf cerebellum,cerebellum +152,-5,-80,9,post occipital,occipital +153,29,-81,14,post occipital,occipital +154,33,-81,-2,post occipital,occipital +155,18,-81,-33,inf cerebellum,cerebellum +156,-37,-83,-2,post occipital,occipital +157,-29,-88,8,post occipital,occipital +158,13,-91,2,post occipital,occipital +159,27,-91,2,post occipital,occipital +160,-4,-94,12,post occipital,occipital diff --git a/nilearn/datasets/description/dosenbach_2010.rst b/nilearn/datasets/description/dosenbach_2010.rst new file mode 100644 index 
0000000000..0a9b5b74eb --- /dev/null +++ b/nilearn/datasets/description/dosenbach_2010.rst @@ -0,0 +1,19 @@ +Dosenbach 2010 atlas + + +Notes +----- +160 ROIs obtained by meta-analysis. + + +Content +------- + :'rois': Coordinates of ROIs in MNI space. + :'labels': ROIs labels. + :'networks': Networks names. + + +References +---------- +Dosenbach N.U., Nardos B., et al. "Prediction of individual brain maturity +using fMRI.", 2010, Science 329, 1358-1361. diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index 3fd2838c53..52f232d9df 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -310,3 +310,10 @@ def test_fetch_atlas_basc_multiscale_2015(): assert_equal(len(tst.mock_url_request.urls), 2) assert_not_equal(data_sym.description, '') assert_not_equal(data_asym.description, '') + + +def test_fetch_atlas_dosenbach_2010(): + bunch = atlas.fetch_atlas_dosenbach_2010() + assert_equal(len(bunch.rois), 160) + assert_equal(len(bunch.networks), 6) + assert_not_equal(bunch.description, '') From 6c40f49b2b23315f526f59354cd35dab4ae2cb4c Mon Sep 17 00:00:00 2001 From: Salma Date: Sun, 17 Jan 2016 16:46:00 +0100 Subject: [PATCH 0170/1925] str labels --- nilearn/datasets/atlas.py | 12 ++++++++++-- nilearn/datasets/tests/test_atlas.py | 3 ++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 7d8116d4b4..edbdabd5cc 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -727,7 +727,8 @@ def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, def fetch_atlas_dosenbach_2010(): - """Download and load the Dosenbach et al. brain atlas composed of 160 ROIs. + """Download and load the Dosenbach et al. brain atlas composed of 160 ROIs + grouped in 6 networks. 
Returns ------- @@ -748,8 +749,15 @@ def fetch_atlas_dosenbach_2010(): package_directory = os.path.dirname(os.path.abspath(__file__)) csv = os.path.join(package_directory, "data", "dosenbach_2010.csv") out_csv = np.recfromcsv(csv) + + # We add the ROI number to its name, since names are not unique + names = out_csv['name'] + numbers = out_csv['number'] + labels = np.array([name + ' ' + str(number) for (name, number) in + zip(names, numbers)]) + params = dict(rois=out_csv[['x', 'y', 'z']], - labels=out_csv[['name', 'number']], + labels=labels, networks=out_csv['network'], description=fdescr) return Bunch(**params) diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index 52f232d9df..14e00ef1ab 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -315,5 +315,6 @@ def test_fetch_atlas_basc_multiscale_2015(): def test_fetch_atlas_dosenbach_2010(): bunch = atlas.fetch_atlas_dosenbach_2010() assert_equal(len(bunch.rois), 160) - assert_equal(len(bunch.networks), 6) + assert_equal(len(np.unique(bunch.labels)), 160) + assert_equal(len(np.unique(bunch.networks)), 6) assert_not_equal(bunch.description, '') From 2f3c1d50e675cd654a26b772bbcbb0c5aeb958f6 Mon Sep 17 00:00:00 2001 From: Salma Date: Mon, 18 Jan 2016 11:46:14 +0100 Subject: [PATCH 0171/1925] use coords in function naming --- nilearn/datasets/atlas.py | 4 +--- nilearn/datasets/description/dosenbach_2010.rst | 5 ++++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index edbdabd5cc..573615ef11 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -738,7 +738,6 @@ def fetch_atlas_dosenbach_2010(): - "labels": ROIs labels - "networks": networks names - References ---------- Dosenbach N.U., Nardos B., et al. "Prediction of individual brain maturity @@ -753,9 +752,8 @@ def fetch_atlas_dosenbach_2010(): # We add the ROI number to its name, since names are not unique names = out_csv['name'] numbers = out_csv['number'] - labels = np.array([name + ' ' + str(number) for (name, number) in + labels = np.array(['{0} {1}'.format(name, number) for (name, number) in zip(names, numbers)]) - params = dict(rois=out_csv[['x', 'y', 'z']], labels=labels, networks=out_csv['network'], description=fdescr) diff --git a/nilearn/datasets/description/dosenbach_2010.rst b/nilearn/datasets/description/dosenbach_2010.rst index 0a9b5b74eb..bd9353e76c 100644 --- a/nilearn/datasets/description/dosenbach_2010.rst +++ b/nilearn/datasets/description/dosenbach_2010.rst @@ -3,7 +3,10 @@ Dosenbach 2010 atlas Notes ----- -160 ROIs obtained by meta-analysis. +160 ROIs covering much of the cerebral cortex and cerebellum. +They were obtained from meta-analyses of fMRI activation studies +and assigned into 6 networks according to a modularity analysis of +resting-state data. 
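Returning to the labelling step above, a sketch on a two-row toy table (rows copied from dosenbach_2010.csv; assumes a numpy version where np.recfromcsv is available, as used by the fetcher):

    # Append the ROI number to each (non-unique) name to build unique
    # labels, as fetch_atlas_dosenbach_2010 does above.
    from io import BytesIO

    import numpy as np

    toy_csv = BytesIO(b"number,x,y,z,name,network\n"
                      b"1,6,64,3,vmPFC,default\n"
                      b"2,29,57,18,aPFC,fronto-parietal\n")
    out_csv = np.recfromcsv(toy_csv)

    labels = np.array(['{0} {1}'.format(name, number)
                       for (name, number) in zip(out_csv['name'],
                                                 out_csv['number'])])
    # Roughly ['vmPFC 1', 'aPFC 2']; note that string columns can load
    # as bytes under Python 3, so real code may need to decode them.
    print(labels)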
Content From ae4ac66b339970ff6ce23c22f92ccfef7e2aa2a6 Mon Sep 17 00:00:00 2001 From: Salma Date: Thu, 4 Feb 2016 17:04:50 +0100 Subject: [PATCH 0172/1925] address Danilo's comments --- nilearn/datasets/atlas.py | 5 +++-- nilearn/datasets/description/dosenbach_2010.rst | 2 +- nilearn/datasets/tests/test_atlas.py | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 573615ef11..a620910a26 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -727,8 +727,9 @@ def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, def fetch_atlas_dosenbach_2010(): - """Download and load the Dosenbach et al. brain atlas composed of 160 ROIs - grouped in 6 networks. + """Load the Dosenbach et al. 160 ROIs. These ROIs cover + much of the cerebral cortex and cerebellum and are assigned to 6 + networks. Returns ------- diff --git a/nilearn/datasets/description/dosenbach_2010.rst b/nilearn/datasets/description/dosenbach_2010.rst index bd9353e76c..6e0dd86565 100644 --- a/nilearn/datasets/description/dosenbach_2010.rst +++ b/nilearn/datasets/description/dosenbach_2010.rst @@ -3,7 +3,7 @@ Dosenbach 2010 atlas Notes ----- -160 ROIs covering much of the cerebral cortex and cerebellum. +160 regions of interest covering much of the cerebral cortex and cerebellum. They were obtained from meta-analyses of fMRI activation studies and assigned into 6 networks according to a modularity analysis of resting-state data. diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index 14e00ef1ab..ae7477b274 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -315,6 +315,6 @@ def test_fetch_atlas_basc_multiscale_2015(): def test_fetch_atlas_dosenbach_2010(): bunch = atlas.fetch_atlas_dosenbach_2010() assert_equal(len(bunch.rois), 160) - assert_equal(len(np.unique(bunch.labels)), 160) + assert_equal(len(bunch.labels), 160) assert_equal(len(np.unique(bunch.networks)), 6) assert_not_equal(bunch.description, '') From fbb3f459bfff4beec57354695ee9cdfad2dbfce9 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Thu, 18 Feb 2016 10:18:04 +0100 Subject: [PATCH 0173/1925] Fix ABIDE fetcher for P3 --- nilearn/datasets/func.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index d414667d58..06ead5e25b 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1112,11 +1112,11 @@ def fetch_abide_pcp(data_dir=None, n_subjects=None, pipeline='cpac', 'ABIDE_Initiative') if quality_checked: - kwargs['qc_rater_1'] = 'OK' - kwargs['qc_anat_rater_2'] = ['OK', 'maybe'] - kwargs['qc_func_rater_2'] = ['OK', 'maybe'] - kwargs['qc_anat_rater_3'] = 'OK' - kwargs['qc_func_rater_3'] = 'OK' + kwargs['qc_rater_1'] = b'OK' + kwargs['qc_anat_rater_2'] = [b'OK', b'maybe'] + kwargs['qc_func_rater_2'] = [b'OK', b'maybe'] + kwargs['qc_anat_rater_3'] = b'OK' + kwargs['qc_func_rater_3'] = b'OK' # Fetch the phenotypic file and load it csv = 'Phenotypic_V1_0b_preprocessed1.csv' From 2d806ec1507b4f7b5cd742aedb71d8467b58c572 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Thu, 18 Feb 2016 10:18:17 +0100 Subject: [PATCH 0174/1925] Prioritize data_dir in dataset search --- nilearn/datasets/utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index 54ec6ddf02..f0df7f4159 100644 --- a/nilearn/datasets/utils.py 
+++ b/nilearn/datasets/utils.py @@ -210,15 +210,17 @@ def _get_dataset_dir(dataset_name, data_dir=None, default_paths=None, # dataset name to the path. paths = [] + # Check data_dir which force storage in a specific location + if data_dir is not None: + paths.extend([(d, False) for d in data_dir.split(os.pathsep)]) + # Search given environment variables if default_paths is not None: for default_path in default_paths: paths.extend([(d, True) for d in default_path.split(os.pathsep)]) - # Check data_dir which force storage in a specific location - if data_dir is not None: - paths.extend([(d, False) for d in data_dir.split(os.pathsep)]) - else: + # If data_dir has not been specified, then we crawl default locations + if data_dir is None: global_data = os.getenv('NILEARN_SHARED_DATA') if global_data is not None: paths.extend([(d, False) for d in global_data.split(os.pathsep)]) From 5bb54a3a7af3531a183b964a4e47cac95ff7a439 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Wed, 17 Feb 2016 11:05:17 +0100 Subject: [PATCH 0175/1925] Load COBRE files per subject --- nilearn/datasets/func.py | 82 ++++++++++++++++++++++++++------------- nilearn/datasets/utils.py | 2 +- 2 files changed, 56 insertions(+), 28 deletions(-) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 06ead5e25b..95ab5e4846 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -4,6 +4,7 @@ import warnings import os import re +import json import numpy as np import nibabel from sklearn.datasets.base import Bunch @@ -1470,24 +1471,48 @@ def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1): https://figshare.com/articles/COBRE_preprocessed_with_NIAK_0_12_4/1160600 """ if url is None: - url = "https://ndownloader.figshare.com/articles/1160600/versions/15" - opts = {'uncompress': True} + # Here we use the file that provides URL for all others + url = "https://figshare.com/api/articles/1160600/15/files" dataset_name = 'cobre' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) - fdescr = _get_dataset_descr(dataset_name) + + # First, fetch the file that references all individual URLs + files = _fetch_files(data_dir, + [("files", url + "?offset=0&limit=300", {})], + verbose=verbose)[0] + files = json.load(open(files, 'r')) + + # Index files by name + files_ = {} + for f in files: + files_[f['name']] = f + files = files_ + # Fetch the phenotypic file and load it csv_name = 'cobre_model_group.csv' + csv_file = _fetch_files(data_dir, [(csv_name, + files[csv_name]['downloadUrl'], + {'md5': files[csv_name]['md5'], + 'move': csv_name})], + verbose=verbose)[0] - csv_file = _fetch_files(data_dir, [(csv_name, url, opts)], verbose=verbose) # Load file in filename to numpy arrays - names = ['subject_type', 'sz', 'age', 'sex', 'fd'] - csv_array = np.recfromcsv(csv_file[0], names=names, skip_header=True) - # Get the ids of the datasets - ids = csv_array['subject_type'] - max_subjects = len(ids) + names = ['id', 'sz', 'age', 'sex', 'fd'] + csv_array = np.recfromcsv(csv_file, names=names, skip_header=True) + # Change dtype of id and condition column + csv_array = csv_array.astype( + [('id', '|U17'), + ('sz', 'bool'), + ('age', ' Date: Wed, 17 Feb 2016 14:32:58 +0100 Subject: [PATCH 0176/1925] Make tests pass --- nilearn/_utils/testing.py | 2 +- nilearn/datasets/func.py | 46 +++++++++++++++-------------- nilearn/datasets/tests/test_func.py | 41 ++++++++++++++++++++----- 3 files changed, 59 insertions(+), 30 deletions(-) diff --git a/nilearn/_utils/testing.py 
b/nilearn/_utils/testing.py index 51c1ff55f1..a90751257a 100644 --- a/nilearn/_utils/testing.py +++ b/nilearn/_utils/testing.py @@ -260,7 +260,7 @@ def __init__(self): def add_csv(self, filename, content): self.csv_files[filename] = content - + def __call__(self, *args, **kwargs): """Load requested dataset, downloading it if needed or requested. diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 95ab5e4846..db50cd378c 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1484,7 +1484,6 @@ def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1): [("files", url + "?offset=0&limit=300", {})], verbose=verbose)[0] files = json.load(open(files, 'r')) - # Index files by name files_ = {} for f in files: @@ -1493,11 +1492,11 @@ def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1): # Fetch the phenotypic file and load it csv_name = 'cobre_model_group.csv' - csv_file = _fetch_files(data_dir, [(csv_name, - files[csv_name]['downloadUrl'], - {'md5': files[csv_name]['md5'], - 'move': csv_name})], - verbose=verbose)[0] + csv_file = _fetch_files( + data_dir, [(csv_name, files[csv_name]['downloadUrl'], + {'md5': files[csv_name].get('md5', None), + 'move': csv_name})], + verbose=verbose)[0] # Load file in filename to numpy arrays names = ['id', 'sz', 'age', 'sex', 'fd'] @@ -1507,7 +1506,7 @@ def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1): [('id', '|U17'), ('sz', 'bool'), ('age', ' Date: Thu, 18 Feb 2016 23:43:38 +0100 Subject: [PATCH 0177/1925] Fix comments --- nilearn/datasets/func.py | 2 +- nilearn/datasets/tests/test_func.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index db50cd378c..45085aceb7 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1504,7 +1504,7 @@ def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1): # Change dtype of id and condition column csv_array = csv_array.astype( [('id', '|U17'), - ('sz', 'bool'), + ('sz', ' Date: Fri, 19 Feb 2016 00:11:09 +0100 Subject: [PATCH 0178/1925] Replace fetch_atlas_{power,dosenbach} by fetch_coords --- doc/modules/reference.rst | 4 ++-- nilearn/datasets/__init__.py | 10 ++++++---- nilearn/datasets/atlas.py | 16 +++++++++++----- nilearn/datasets/tests/test_atlas.py | 8 ++++---- 4 files changed, 23 insertions(+), 15 deletions(-) diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index bc70465f5f..aef801682f 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -66,12 +66,12 @@ uses. 
fetch_atlas_destrieux_2009 fetch_atlas_harvard_oxford fetch_atlas_msdl - fetch_atlas_power_2011 + fetch_coords_power_2011 fetch_atlas_smith_2009 fetch_atlas_yeo_2011 fetch_atlas_aal fetch_atlas_basc_multiscale_2015 - fetch_atlas_dosenbach_2010 + fetch_coords_dosenbach_2010 fetch_abide_pcp fetch_adhd fetch_haxby diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py index 0f3f9c4ea8..8df7de6173 100644 --- a/nilearn/datasets/__init__.py +++ b/nilearn/datasets/__init__.py @@ -10,10 +10,11 @@ fetch_megatrawls_netmats, fetch_cobre) from .atlas import (fetch_atlas_craddock_2012, fetch_atlas_destrieux_2009, fetch_atlas_harvard_oxford, fetch_atlas_msdl, - fetch_atlas_power_2011, fetch_atlas_smith_2009, + fetch_atlas_power_2011, fetch_coords_power_2011, + fetch_atlas_smith_2009, fetch_atlas_yeo_2011, fetch_atlas_aal, fetch_atlas_basc_multiscale_2015, - fetch_atlas_dosenbach_2010) + fetch_coords_dosenbach_2010) __all__ = ['fetch_icbm152_2009', 'load_mni152_template', 'fetch_oasis_vbm', 'fetch_haxby_simple', 'fetch_haxby', 'fetch_nyu_rest', @@ -21,7 +22,8 @@ 'fetch_abide_pcp', 'fetch_localizer_calculation_task', 'fetch_atlas_craddock_2012', 'fetch_atlas_destrieux_2009', 'fetch_atlas_harvard_oxford', 'fetch_atlas_msdl', - 'fetch_atlas_power_2011', 'fetch_atlas_smith_2009', + 'fetch_atlas_power_2011', 'fetch_coords_power_2011', + 'fetch_atlas_smith_2009', 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal', 'fetch_megatrawls_netmats', 'fetch_cobre', - 'fetch_atlas_basc_multiscale_2015', 'fetch_atlas_dosenbach_2010'] + 'fetch_atlas_basc_multiscale_2015', 'fetch_coords_dosenbach_2010'] diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index a620910a26..35b00a618b 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -4,9 +4,9 @@ import os import xml.etree.ElementTree import numpy as np -from scipy import ndimage from sklearn.datasets.base import Bunch +from sklearn.utils import deprecated #from . import utils from .utils import _get_dataset_dir, _fetch_files, _get_dataset_descr @@ -332,7 +332,13 @@ def fetch_atlas_msdl(data_dir=None, url=None, resume=True, verbose=1): return Bunch(labels=files[0], maps=files[1], description=fdescr) +@deprecated('This function has been replace by fetch_coords_power_2011 and ' + 'will be removed in nilearn 0.2.5') def fetch_atlas_power_2011(): + return fetch_coords_power_2011() + + +def fetch_coords_power_2011(): """Download and load the Power et al. brain atlas composed of 264 ROIs. Returns @@ -366,9 +372,9 @@ def fetch_atlas_smith_2009(data_dir=None, mirror='origin', url=None, Path of the data directory. Used to force data storage in a non- standard location. Default: None (meaning: default) mirror: string, optional - By default, the dataset is downloaded from the original website of the atlas. - Specifying "nitrc" will force download from a mirror, with potentially - higher bandwith. + By default, the dataset is downloaded from the original website of the + atlas. Specifying "nitrc" will force download from a mirror, with + potentially higher bandwith. url: string, optional Download URL of the dataset. Overwrite the default URL. @@ -726,7 +732,7 @@ def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, return Bunch(**params) -def fetch_atlas_dosenbach_2010(): +def fetch_coords_dosenbach_2010(): """Load the Dosenbach et al. 160 ROIs. These ROIs cover much of the cerebral cortex and cerebellum and are assigned to 6 networks. 
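The rename-with-deprecation pattern above can be sketched in isolation; the function names below are hypothetical, and only sklearn.utils.deprecated, as imported in the patch, is assumed:

    # Keep the old public name working for one more release while
    # steering callers to the new one. Names below are made up.
    from sklearn.utils import deprecated


    def fetch_coords_example():
        """New, preferred entry point."""
        return [(6, 64, 3), (29, 57, 18)]


    @deprecated('This function has been replaced by fetch_coords_example '
                'and will be removed in a future release')
    def fetch_atlas_example():
        return fetch_coords_example()


    # The old name still returns the same data, but now emits a
    # DeprecationWarning pointing users at the replacement.
    print(fetch_atlas_example())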
diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index ae7477b274..f9d2ea38dc 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -192,8 +192,8 @@ def test_fetch_atlas_smith_2009(): assert_not_equal(bunch.description, '') -def test_fetch_atlas_power_2011(): - bunch = atlas.fetch_atlas_power_2011() +def test_fetch_coords_power_2011(): + bunch = atlas.fetch_coords_power_2011() assert_equal(len(bunch.rois), 264) assert_not_equal(bunch.description, '') @@ -312,8 +312,8 @@ def test_fetch_atlas_basc_multiscale_2015(): assert_not_equal(data_asym.description, '') -def test_fetch_atlas_dosenbach_2010(): - bunch = atlas.fetch_atlas_dosenbach_2010() +def test_fetch_coords_dosenbach_2010(): + bunch = atlas.fetch_coords_dosenbach_2010() assert_equal(len(bunch.rois), 160) assert_equal(len(bunch.labels), 160) assert_equal(len(np.unique(bunch.networks)), 6) From 0ba52aeefc81a398a23ef4fb1bfaa802243414f6 Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Fri, 19 Feb 2016 13:09:27 +0100 Subject: [PATCH 0179/1925] DOC: point to FSL/SPM equivalents --- nilearn/image/image.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 0e1838e751..1c517d10b4 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -706,6 +706,12 @@ def math_img(formula, **imgs): >>> result_img = math_img("img1 + img2", ... img1=anatomical_image, img2=log_img) + Notes + ----- + + This function is the Python equivalent of ImCal in SPM or fslmaths + in FSL. + """ try: # Check that input images are valid niimg and have a compatible shape From adc0ecf7aeeca7bf6f8cb6654c4593e69448f383 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Fri, 19 Feb 2016 17:55:39 +0100 Subject: [PATCH 0180/1925] Update what's new for release --- doc/whats_new.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 75b1712c38..eef4c1efa4 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -4,15 +4,23 @@ Changelog --------- +The 0.2.3 is a small feature release for BrainHack 2016. + New features ............ - Mathematical formulas based on numpy functions can be applied on an image or a list of images using :func:`nilearn.image.math_img`. - Downloader for COBRE datasets of 146 rest fMRI subjects with :func:`nilearn.datasets.fetch_cobre` + - Downloader for Dosenbach atlas + :func:`nilearn.datasets.fetch_coords_dosenbach_2010` - Fetcher for multiscale functional brain parcellations (BASC) :func:`nilearn.datasets.fetch_atlas_basc_multiscale_2015` +Bug fixes +......... + - Better dimming on white background for plotting + 0.2.2 ====== From 96a3f0f72b4f25af771116251324cbec4c0d2055 Mon Sep 17 00:00:00 2001 From: Alexandre Abraham Date: Fri, 19 Feb 2016 18:04:46 +0100 Subject: [PATCH 0181/1925] Bump version --- nilearn/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/version.py b/nilearn/version.py index e0b77a19b0..551451b96d 100644 --- a/nilearn/version.py +++ b/nilearn/version.py @@ -21,7 +21,7 @@ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # -__version__ = '0.2.2' +__version__ = '0.2.3' _NILEARN_INSTALL_MSG = 'See %s for installation information.' 
% ( 'http://nilearn.github.io/introduction.html#installation')

From 96a3f0f72b4f25af771116251324cbec4c0d2055 Mon Sep 17 00:00:00 2001
From: Alexandre Abraham
Date: Sat, 20 Feb 2016 10:29:40 +0100
Subject: [PATCH 0182/1925] Update version on website news

---
 doc/themes/nilearn/layout.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/themes/nilearn/layout.html b/doc/themes/nilearn/layout.html
index 8822cd5dcd..c3bc653e24 100644
--- a/doc/themes/nilearn/layout.html
+++ b/doc/themes/nilearn/layout.html
@@ -195,7 +195,7 @@

    Machine learning for Neuro-Imaging in Python

    News
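Finally, stepping back to the dataset-directory reordering earlier in the series: the new lookup priority can be sketched as below. This is a simplified stand-in for _get_dataset_dir, not the real implementation; NILEARN_DATA is assumed as the per-user environment variable alongside the NILEARN_SHARED_DATA variable shown in the diff:

    # Simplified sketch of the reordered path lookup: an explicit
    # data_dir now takes priority, and the environment variables are
    # only crawled when no data_dir was passed.
    import os


    def candidate_dirs(dataset_name, data_dir=None):
        paths = []
        if data_dir is not None:
            # data_dir forces storage in a specific location.
            paths.extend(data_dir.split(os.pathsep))
        else:
            # Fall back to default locations from the environment.
            for env_var in ('NILEARN_SHARED_DATA', 'NILEARN_DATA'):
                value = os.getenv(env_var)
                if value is not None:
                    paths.extend(value.split(os.pathsep))
            # Last resort: a per-user directory (assumed default).
            paths.append(os.path.join(os.path.expanduser('~'),
                                      'nilearn_data'))
        return [os.path.join(path, dataset_name) for path in paths]


    print(candidate_dirs('cobre', data_dir='/tmp/nilearn'))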