diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000000..6984730035 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,63 @@ +version: 2 + +jobs: + build: + docker: + - image: circleci/python:3.6 + environment: + DISTRIB: "conda" + PYTHON_VERSION: "3.6" + NUMPY_VERSION: "*" + SCIPY_VERSION: "*" + SCIKIT_LEARN_VERSION: "*" + MATPLOTLIB_VERSION: "*" + + steps: + - checkout + # Get rid of existing virtualenvs on circle ci as they conflict with conda. + # Trick found here: + # https://discuss.circleci.com/t/disable-autodetection-of-project-or-application-of-python-venv/235/10 + - run: cd && rm -rf ~/.pyenv && rm -rf ~/virtualenvs + # We need to remove conflicting texlive packages. + - run: sudo -E apt-get -yq remove texlive-binaries --purge + # Installing required packages for `make -C doc check command` to work. + - run: sudo -E apt-get -yq update + - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra + - restore_cache: + key: v1-packages+datasets-{{ .Branch }} + - run: wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh + - run: chmod +x ~/miniconda.sh && ~/miniconda.sh -b + - run: + name: Setup conda path in env variables + command: | + echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV + - run: + name: Create conda env + command: | + conda create -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ + lxml mkl sphinx pillow pandas -yq + conda install -n testenv nibabel -c conda-forge -yq + - run: + name: Running CircleCI test (make html) + command: | + source activate testenv + pip install -e . + set -o pipefail && cd doc && make html-strict 2>&1 | tee ~/log.txt + no_output_timeout: 5h + - save_cache: + key: v1-packages+datasets-{{ .Branch }} + paths: + - $HOME/nilearn_data + - $HOME/miniconda3 + + - store_artifacts: + path: doc/_build/html + - store_artifacts: + path: coverage + - store_artifacts: + path: $HOME/log.txt + destination: log.txt + + + + diff --git a/.gitignore b/.gitignore index aa06e3863d..4beefc864a 100644 --- a/.gitignore +++ b/.gitignore @@ -33,3 +33,5 @@ tags *.tgz .idea/ + +doc/themes/nilearn/static/jquery.js \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 9efc43c123..7572544ac5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,27 +20,33 @@ matrix: include: # Oldest supported versions (with neurodebian) - env: DISTRIB="conda" PYTHON_VERSION="2.7" - NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.14" - SCIKIT_LEARN_VERSION="0.15.1" MATPLOTLIB_VERSION="1.3.1" - PANDAS_VERSION="0.13.0" NIBABEL_VERSION="2.0.2" COVERAGE="true" + NUMPY_VERSION="1.11.2" SCIPY_VERSION="0.17" + SCIKIT_LEARN_VERSION="0.18" MATPLOTLIB_VERSION="1.5.1" + PANDAS_VERSION="0.18.0" NIBABEL_VERSION="2.0.2" COVERAGE="true" # Oldest supported versions without matplotlib - env: DISTRIB="conda" PYTHON_VERSION="2.7" - NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.14" - SCIKIT_LEARN_VERSION="0.15" + NUMPY_VERSION="1.11.2" SCIPY_VERSION="0.17" + SCIKIT_LEARN_VERSION="0.18" # Fake Ubuntu Xenial (Travis doesn't support Xenial yet) - env: DISTRIB="conda" PYTHON_VERSION="2.7" - NUMPY_VERSION="1.11" SCIPY_VERSION="0.17" - SCIKIT_LEARN_VERSION="0.17" + NUMPY_VERSION="1.13" SCIPY_VERSION="0.19" + SCIKIT_LEARN_VERSION="0.18.1" NIBABEL_VERSION="2.0.2" # Python 3.4 with intermediary versions - env: DISTRIB="conda" PYTHON_VERSION="3.4" - NUMPY_VERSION="1.8" SCIPY_VERSION="0.14" - SCIKIT_LEARN_VERSION="0.15" 
MATPLOTLIB_VERSION="1.4" + NUMPY_VERSION="1.11.2" SCIPY_VERSION="0.17" + SCIKIT_LEARN_VERSION="0.18" MATPLOTLIB_VERSION="1.5.1" # Most recent versions - env: DISTRIB="conda" PYTHON_VERSION="3.5" NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*" SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true" - # FLAKE8 linting on diff wrt common ancestor with upstream/master + LXML_VERSION="*" + - env: DISTRIB="conda" PYTHON_VERSION="3.7" + NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*" + SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true" + LXML_VERSION="*" + + # FLAKE8 linting on diff wrt common ancestor with upstream/master # Note: the python value is only there to trigger allow_failures - python: 2.7 env: DISTRIB="conda" PYTHON_VERSION="2.7" FLAKE8_VERSION="*" SKIP_TESTS="true" diff --git a/AUTHORS.rst b/AUTHORS.rst index 516858ed0c..c67751492c 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -24,6 +24,8 @@ particular: * Andres Hoyos Idrobo * Salma Bougacha * Mehdi Rahim +* Sylvain Lanuzel +* `Kshitij Chawla `_ Many of also contributed outside of Parietal, notably: @@ -43,6 +45,8 @@ Mehdi Rahim, Philippe Gervais where payed by the `NiConnect project, funded by the French `Investissement d'Avenir `_. +NiLearn is also supported by `DigiCosme `_ |digicomse logo| + .. _citing: Citing nilearn @@ -69,3 +73,7 @@ guarantee the future of the toolkit, if you use it, please cite it. See the scikit-learn documentation on `how to cite `_. + +.. |digicomse logo| image:: logos/digi-saclay-logo-small.png + :height: 25 + :alt: DigiComse Logo \ No newline at end of file diff --git a/README.rst b/README.rst index 382a062817..7885ed878b 100644 --- a/README.rst +++ b/README.rst @@ -40,13 +40,13 @@ The required dependencies to use the software are: * Python >= 2.7, * setuptools -* Numpy >= 1.6.1 -* SciPy >= 0.14 -* Scikit-learn >= 0.15 +* Numpy >= 1.11 +* SciPy >= 0.17 +* Scikit-learn >= 0.18 * Nibabel >= 2.0.2 If you are using nilearn plotting functionalities or running the -examples, matplotlib >= 1.1.1 is required. +examples, matplotlib >= 1.5.1 is required. If you want to run the tests, you need nose >= 1.2.1 and coverage >= 3.6. diff --git a/circle.yml b/circle.yml deleted file mode 100644 index e9ab38b13f..0000000000 --- a/circle.yml +++ /dev/null @@ -1,47 +0,0 @@ -machine: - environment: - PATH: /home/ubuntu/miniconda2/bin:$PATH - -dependencies: - cache_directories: - - "~/nilearn_data" - - pre: - # Get rid of existing virtualenvs on circle ci as they conflict with conda. - # Trick found here: - # https://discuss.circleci.com/t/disable-autodetection-of-project-or-application-of-python-venv/235/10 - - cd && rm -rf ~/.pyenv && rm -rf ~/virtualenvs - # We need to remove conflicting texlive packages. - - sudo -E apt-get -yq remove texlive-binaries --purge - # Installing required packages for `make -C doc check command` to work. - - sudo -E apt-get -yq update - - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra - - override: - # Moving to nilearn directory before performing the installation. 
- - cd ~/nilearn - - source continuous_integration/install.sh: - environment: - DISTRIB: "conda" - PYTHON_VERSION: "3.5" - NUMPY_VERSION: "*" - SCIPY_VERSION: "*" - SCIKIT_LEARN_VERSION: "*" - MATPLOTLIB_VERSION: "*" - - conda install sphinx coverage pillow pandas -y -n testenv - - # Generating html documentation (with warnings as errors) - # we need to do this here so the datasets will be cached - - source continuous_integration/circle_ci_test_doc.sh: - timeout: 2500 # seconds - -test: - override: - # override is needed otherwise nosetests is run by default - - echo "Documentation has been built in the 'dependencies' step. No additional test to run" - -general: - artifacts: - - "doc/_build/html" - - "coverage" - - "~/log.txt" diff --git a/continuous_integration/circle_ci_test_doc.sh b/continuous_integration/circle_ci_test_doc.sh deleted file mode 100644 index 3d74fc78c3..0000000000 --- a/continuous_integration/circle_ci_test_doc.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!bin/bash - -# on circle ci, each command run with it's own execution context so we have to -# activate the conda testenv on a per command basis. That's why we put calls to -# python (conda) in a dedicated bash script and we activate the conda testenv -# here. -source activate testenv - -# pipefail is necessary to propagate exit codes -set -o pipefail && cd doc && make html-strict 2>&1 | tee ~/log.txt diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh index 7615f7f87c..512cbdf2f3 100755 --- a/continuous_integration/install.sh +++ b/continuous_integration/install.sh @@ -35,7 +35,8 @@ print_conda_requirements() { # - for scikit-learn, SCIKIT_LEARN_VERSION is used TO_INSTALL_ALWAYS="pip nose" REQUIREMENTS="$TO_INSTALL_ALWAYS" - TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn pandas flake8" + TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn pandas \ +flake8 lxml" for PACKAGE in $TO_INSTALL_MAYBE; do # Capitalize package name and add _VERSION PACKAGE_VERSION_VARNAME="${PACKAGE^^}_VERSION" @@ -61,10 +62,10 @@ create_new_conda_env() { # Use the miniconda installer for faster download / install of conda # itself - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh \ + wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ -O ~/miniconda.sh chmod +x ~/miniconda.sh && ~/miniconda.sh -b - export PATH=$HOME/miniconda2/bin:$PATH + export PATH=$HOME/miniconda3/bin:$PATH echo $PATH conda update --quiet --yes conda diff --git a/doc/conf.py b/doc/conf.py index 475928cf07..baa278440e 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -14,9 +14,30 @@ import sys import os +import shutil import sphinx from distutils.version import LooseVersion +# jquery is included in plotting package data because it is needed for +# interactive plots. It is also needed by the documentation, so we copy +# it to the themes/nilearn/static folder. +shutil.copy( + os.path.join(os.path.dirname(os.path.dirname(__file__)), + 'nilearn', 'plotting', 'data', 'js', 'jquery.min.js'), + os.path.join(os.path.dirname(__file__), 'themes', 'nilearn', 'static', + 'jquery.js')) + + +# -- Parallel computing ------------------------------------------------------ +try: + from sklearn.utils import parallel_backend, cpu_count + parallel_backend(max(cpu_count, 4)) +except: + pass + +# ---------------------------------------------------------------------------- + + # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. 
If the directory # is relative to the documentation root, use os.path.abspath to make it @@ -294,10 +315,10 @@ 'reference_url' : { 'nilearn': None, 'matplotlib': 'http://matplotlib.org', - 'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0', - 'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference', + 'numpy': 'http://docs.scipy.org/doc/numpy-1.11.0', + 'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference', 'nibabel': 'http://nipy.org/nibabel', - 'sklearn': 'http://scikit-learn.org/0.17/', + 'sklearn': 'http://scikit-learn.org/0.18/', 'pandas': 'http://pandas.pydata.org'} } @@ -319,6 +340,8 @@ def touch_example_backreferences(app, what, name, obj, options, lines): # Add the 'copybutton' javascript, to hide/show the prompt in code # examples + + def setup(app): app.add_javascript('copybutton.js') app.connect('autodoc-process-docstring', touch_example_backreferences) diff --git a/doc/connectivity/functional_connectomes.rst b/doc/connectivity/functional_connectomes.rst index 4e96db8e0e..e816d1a081 100644 --- a/doc/connectivity/functional_connectomes.rst +++ b/doc/connectivity/functional_connectomes.rst @@ -205,29 +205,37 @@ In the case of the MSDL atlas with MNI coordinates for each region (see for instance example: :ref:`sphx_glr_auto_examples_03_connectivity_plot_probabilistic_atlas_extraction.py`). +.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_002.png + :target: ../auto_examples/03_connectivity/plot_probabilistic_atlas_extraction.html + .. For doctesting >>> from nilearn import datasets >>> atlas_filename = datasets.fetch_atlas_msdl().maps # doctest: +SKIP -For another atlas this information can be computed for each region with -the :func:`nilearn.plotting.find_xyz_cut_coords` function -(see example: -:ref:`sphx_glr_auto_examples_03_connectivity_plot_multi_subject_connectome.py`):: +As you can see, the correlation matrix gives a very "full" graph: every +node is connected to every other one. This is because it also captures +indirect connections. In the next section we will see how to focus on +direct connections only. - >>> from nilearn import image, plotting - >>> atlas_region_coords = [plotting.find_xyz_cut_coords(img) for img in image.iter_img(atlas_filename)] # doctest: +SKIP +A functional connectome: extracting coordinates of regions +========================================================== +For atlases without readily available label coordinates, center coordinates +can be computed for each region on hard parcellation or probabilistic atlases. + * For hard parcellation atlases (eg. :func:`nilearn.datasets.fetch_atlas_destrieux_2009`), + use the :func:`nilearn.plotting.find_parcellation_cut_coords` + function. See example: + :ref:`sphx_glr_auto_examples_03_connectivity_plot_atlas_comparison.py` + * For probabilistic atlases (eg. :func:`nilearn.datasets.fetch_atlas_msdl`), use the + :func:`nilearn.plotting.find_probabilistic_atlas_cut_coords` function. + See example: :ref:`sphx_glr_auto_examples_03_connectivity_plot_multi_subject_connectome.py`:: -.. image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_probabilistic_atlas_extraction_002.png - :target: ../auto_examples/03_connectivity/plot_probabilistic_atlas_extraction.html + >>> from nilearn import plotting + >>> atlas_region_coords = plotting.find_probabilistic_atlas_cut_coords(atlas_filename) # doctest: +SKIP -As you can see, the correlation matrix gives a very "full" graph: every -node is connected to every other one. 
This is because it also captures -indirect connections. In the next section we will see how to focus on -only direct connections. | diff --git a/doc/decoding/decoding_intro.rst b/doc/decoding/decoding_intro.rst index bf7a6b4b36..4b92f5ff76 100644 --- a/doc/decoding/decoding_intro.rst +++ b/doc/decoding/decoding_intro.rst @@ -224,10 +224,10 @@ in a `K-Fold strategy >>> cv = 2 There is a specific function, -:func:`sklearn.cross_validation.cross_val_score` that computes for you +:func:`sklearn.model_selection.cross_val_score` that computes for you the score for the different folds of cross-validation:: - >>> from sklearn.cross_validation import cross_val_score # doctest: +SKIP + >>> from sklearn.model_selection import cross_val_score # doctest: +SKIP >>> cv_scores = cross_val_score(svc, fmri_masked, target, cv=5) # doctest: +SKIP `cv=5` stipulates a 5-fold cross-validation. Note that this function is located @@ -267,7 +267,7 @@ caveats, and guidelines*, Neuroimage 2017). Here, in the Haxby example, we are going to leave a session out, in order to have a test set independent from the train set. For this, we are going to use the session label, present in the behavioral data file, and -:class:`sklearn.cross_validation.LeaveOneLabelOut`. +:class:`sklearn.model_selection.LeaveOneGroupOut`. .. note:: @@ -320,9 +320,9 @@ at chance, is to use a *"dummy"* classifier, **Permutation testing**: A more controlled way, but slower, is to do permutation testing on the labels, with -:func:`sklearn.cross_validation.permutation_test_score`:: +:func:`sklearn.model_selection.permutation_test_score`:: - >>> from sklearn.cross_validation import permutation_test_score + >>> from sklearn.model_selection import permutation_test_score >>> null_cv_scores = permutation_test_score(svc, fmri_masked, target, cv=cv) # doctest: +SKIP | diff --git a/doc/decoding/estimator_choice.rst b/doc/decoding/estimator_choice.rst index 0b4a33388c..78490a0f6b 100644 --- a/doc/decoding/estimator_choice.rst +++ b/doc/decoding/estimator_choice.rst @@ -115,7 +115,7 @@ not perform as well on new data. :scale: 60 With scikit-learn nested cross-validation is done via -:class:`sklearn.grid_search.GridSearchCV`. It is unfortunately time +:class:`sklearn.model_selection.GridSearchCV`. It is unfortunately time consuming, but the ``n_jobs`` argument can spread the load on multiple CPUs. diff --git a/doc/decoding/searchlight.rst b/doc/decoding/searchlight.rst index dc7e419fb5..a5a8ddba97 100644 --- a/doc/decoding/searchlight.rst +++ b/doc/decoding/searchlight.rst @@ -113,7 +113,7 @@ Kriegskorte et al. use a 5.6mm radius because it yielded the best detection performance in their simulation. .. literalinclude:: ../../examples/02_decoding/plot_haxby_searchlight.py - :start-after: import nilearn.decoding + :start-after: cv = KFold(n_splits=4) :end-before: # F-scores computation Visualization diff --git a/doc/developers/group_sparse_covariance.rst b/doc/developers/group_sparse_covariance.rst index da3cf7767e..56e15e1c45 100644 --- a/doc/developers/group_sparse_covariance.rst +++ b/doc/developers/group_sparse_covariance.rst @@ -135,7 +135,7 @@ Synthetic dataset ================= For testing purposes, a function for synthesis of signals based on sparse precision matrices has been written: -`nilearn._utils.testing.generate_group_sparse_gaussian_graphs`. +`nilearn._utils.data_gen.generate_group_sparse_gaussian_graphs`. Synthesizing such signals is a hard problem that wasn't solved in the present implementation. It is hopefully good enough. 
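As a rough usage sketch (not taken from this patch; the generator is private and the keyword names and return layout below are assumptions), the synthetic signals can be fed to the public :class:`nilearn.connectome.GroupSparseCovarianceCV` estimator::

    >>> from nilearn._utils.data_gen import generate_group_sparse_gaussian_graphs  # doctest: +SKIP
    >>> from nilearn.connectome import GroupSparseCovarianceCV  # doctest: +SKIP
    >>> # assumed: one signal matrix of shape (n_samples, n_features) per subject
    >>> signals, precisions, topology = generate_group_sparse_gaussian_graphs(
    ...     n_subjects=5, n_features=10, density=0.1, random_state=0)  # doctest: +SKIP
    >>> gsc = GroupSparseCovarianceCV().fit(signals)  # doctest: +SKIP
    >>> gsc.precisions_.shape   # (n_features, n_features, n_subjects)  # doctest: +SKIP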
@@ -166,7 +166,7 @@ precise location of zeros. Two different sparsity patterns with the same number of zeros can lead to two significantly different sparsity level in precision matrices. In practice, it means that for a given value of the `density` parameter in -`nilearn._utils.testing.generate_group_sparse_gaussian_graphs`, +`nilearn._utils.data_gen.generate_group_sparse_gaussian_graphs`, the actual number of zeros in the precision matrices can fluctuate widely depending on the random number generation. diff --git a/doc/images/papaya_stat_map_plot_screenshot.png b/doc/images/papaya_stat_map_plot_screenshot.png new file mode 100644 index 0000000000..7950348745 Binary files /dev/null and b/doc/images/papaya_stat_map_plot_screenshot.png differ diff --git a/doc/images/papaya_stat_map_plot_screenshot_notebook.png b/doc/images/papaya_stat_map_plot_screenshot_notebook.png new file mode 100644 index 0000000000..b703dc597b Binary files /dev/null and b/doc/images/papaya_stat_map_plot_screenshot_notebook.png differ diff --git a/doc/images/plotly_connectome_plot.png b/doc/images/plotly_connectome_plot.png new file mode 100644 index 0000000000..e56d9b47eb Binary files /dev/null and b/doc/images/plotly_connectome_plot.png differ diff --git a/doc/images/plotly_markers_plot.png b/doc/images/plotly_markers_plot.png new file mode 100644 index 0000000000..be0e34d3cb Binary files /dev/null and b/doc/images/plotly_markers_plot.png differ diff --git a/doc/images/plotly_surface_atlas_plot.png b/doc/images/plotly_surface_atlas_plot.png new file mode 100644 index 0000000000..44058d0f66 Binary files /dev/null and b/doc/images/plotly_surface_atlas_plot.png differ diff --git a/doc/images/plotly_surface_plot.png b/doc/images/plotly_surface_plot.png new file mode 100644 index 0000000000..3a9b357009 Binary files /dev/null and b/doc/images/plotly_surface_plot.png differ diff --git a/doc/images/plotly_surface_plot_notebook_screenshot.png b/doc/images/plotly_surface_plot_notebook_screenshot.png new file mode 100644 index 0000000000..38f72c8c3c Binary files /dev/null and b/doc/images/plotly_surface_plot_notebook_screenshot.png differ diff --git a/doc/install_doc_component.html b/doc/install_doc_component.html index a81b5869f5..c6584d3961 100644 --- a/doc/install_doc_component.html +++ b/doc/install_doc_component.html @@ -46,14 +46,14 @@

We recommend that you install a complete 64 bit scientific Python distribution like Anaconda + https://www.anaconda.com/download/" target="_blank">Anaconda . Since it meets all the requirements of nilearn, it will save you time and trouble. You could also check PythonXY as an alternative.

Nilearn requires a Python installation and the following - dependencies: ipython, scikit-learn, matplotlib and nibabel.

+ dependencies: ipython, scipy, scikit-learn, matplotlib and nibabel.

Second: open a Command Prompt

(Press "Win-R", type "cmd" and press "Enter". This will open @@ -72,18 +72,18 @@

  • First: download and install 64 bit Anaconda

    We recommend that you install a complete 64 bit scientific Python distribution like + href="https://www.anaconda.com/download/" target="_blank"> Anaconda. Since it meets all the requirements of nilearn, it will save you time and trouble.

    Nilearn requires a Python installation and the following - dependencies: ipython, scikit-learn, matplotlib and nibabel.

    + dependencies: ipython, scipy, scikit-learn, matplotlib and nibabel.

    Second: open a Terminal

    (Navigate to /Applications/Utilities and double-click on @@ -113,7 +113,7 @@

    Install or ask your system administrator to install the following packages using the distribution package manager: ipython - , scikit-learn (sometimes called sklearn, + , scipy, scikit-learn (sometimes called sklearn, or python-sklearn), matplotlib (sometimes called python-matplotlib) and nibabel (sometimes called python-nibabel).

    @@ -122,7 +122,7 @@ that you install a complete 64 bit scientific Python distribution like + href="https://www.anaconda.com/download/" target="_blank"> Anaconda. Since it meets all the requirements of nilearn, it will save you time and trouble.

    diff --git a/doc/logos/digi-saclay-logo-small.png b/doc/logos/digi-saclay-logo-small.png new file mode 100644 index 0000000000..2190fc5a51 Binary files /dev/null and b/doc/logos/digi-saclay-logo-small.png differ diff --git a/doc/manipulating_images/manipulating_images.rst b/doc/manipulating_images/manipulating_images.rst index e338d985b8..57993de8ad 100644 --- a/doc/manipulating_images/manipulating_images.rst +++ b/doc/manipulating_images/manipulating_images.rst @@ -146,6 +146,8 @@ Computing and applying spatial masks Relevant functions: * compute a mask from EPI images: :func:`nilearn.masking.compute_epi_mask` +* compute a grey-matter mask using the MNI template: + :func:`nilearn.masking.compute_gray_matter_mask`. * compute a mask from images with a flat background: :func:`nilearn.masking.compute_background_mask` * compute for multiple sessions/subjects: @@ -166,16 +168,18 @@ can be computed from the data: the brain stands out of a constant background. This is typically the case when working on statistic maps output after a brain extraction - :func:`nilearn.masking.compute_epi_mask` for EPI images +- :func:`nilearn.masking.compute_gray_matter_mask` to compute a + gray-matter mask using the MNI template. -.. figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_visualization_002.png - :target: ../auto_examples/01_plotting/plot_visualization.html - :align: right - :scale: 50% .. literalinclude:: ../../examples/01_plotting/plot_visualization.py - :start-after: # Extracting a brain mask + :start-after: # Simple computation of a mask from the fMRI data :end-before: # Applying the mask to extract the corresponding time series +.. figure:: ../auto_examples/01_plotting/images/sphx_glr_plot_visualization_002.png + :target: ../auto_examples/01_plotting/plot_visualization.html + :scale: 50% + .. _mask_4d_2_3d: diff --git a/doc/manipulating_images/masker_objects.rst b/doc/manipulating_images/masker_objects.rst index c1dad572aa..ac8e774678 100644 --- a/doc/manipulating_images/masker_objects.rst +++ b/doc/manipulating_images/masker_objects.rst @@ -133,56 +133,37 @@ Alternatively, the mask computation parameters can still be modified. See the :class:`NiftiMasker` documentation for a complete list of mask computation parameters. -As a first example, we will now automatically build a mask from a dataset. -We will here use the Haxby dataset because it provides the original mask that -we can compare the data-derived mask against. - -Generate a mask with default parameters and visualize it (it is in the -`mask_img_` attribute of the masker): +The mask can be retrieved and visualized from the `mask_img_` attribute +of the masker: .. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py - :start-after: # Simple mask extraction from EPI images + :start-after: # We need to specify an 'epi' mask_strategy, as this is raw EPI data :end-before: # Generate mask with strong opening -.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_002.png - :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html - :scale: 50% - -Changing mask parameters: opening, cutoff -.......................................... - -We can then fine-tune the outline of the mask by increasing the number of -opening steps (`opening=10`) using the `mask_args` argument of the -:class:`NiftiMasker`. This effectively performs erosion and dilation operations -on the outer voxel layers of the mask, which can for example remove remaining -skull parts in the image. 
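As a minimal sketch (not part of this patch), the masking parameters discussed here are passed through the public :class:`NiftiMasker` API via `mask_strategy` and `mask_args`; the input filename below is hypothetical::

    >>> from nilearn.input_data import NiftiMasker  # doctest: +SKIP
    >>> masker = NiftiMasker(mask_strategy='epi',               # EPI-specific masking heuristic
    ...                      mask_args=dict(opening=10,         # stronger morphological opening
    ...                                     lower_cutoff=0.5))  # keep only brighter voxels  # doctest: +SKIP
    >>> time_series = masker.fit_transform('epi_4d.nii.gz')     # doctest: +SKIP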
- -.. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py - :start-after: # Generate mask with strong opening - :end-before: # Generate mask with a high lower cutoff - - -.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_003.png +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_004.png :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html :scale: 50% +Different masking strategies +............................. -Looking at the :func:`nilearn.masking.compute_epi_mask` called by the -:class:`NiftiMasker` object, we see two interesting parameters: -`lower_cutoff` and `upper_cutoff`. These set the grey-value bounds in -which the masking algorithm will search for its threshold -(0 being the minimum of the image and 1 the maximum). We will here increase -the lower cutoff to enforce selection of those -voxels that appear as bright in the EPI image. +The `mask_strategy` argument controls how the mask is computed: +* `background`: detects a continuous background +* `epi`: suitable for EPI images +* `template`: uses an MNI grey-matter template -.. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py - :start-after: # Generate mask with a high lower cutoff - :end-before: ############################################################################### +Extra mask parameters: opening, cutoff... +.......................................... +The underlying function is :func:`nilearn.masking.compute_epi_mask` +called using the `mask_args` argument of the :class:`NiftiMasker`. +Controling these arguments set the fine aspects of the mask. See the +functions documentation, or :doc:`the NiftiMasker example +<../auto_examples/04_manipulating_images/plot_mask_computation>`. -.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_004.png +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_005.png :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html :scale: 50% @@ -196,17 +177,30 @@ preparation:: >>> from nilearn import input_data >>> masker = input_data.NiftiMasker() - >>> masker - NiftiMasker(detrend=False, high_pass=None, low_pass=None, mask_args=None, - mask_img=None, mask_strategy='background', - memory=Memory(cachedir=None), memory_level=1, sample_mask=None, + >>> masker # doctest: +ELLIPSIS + NiftiMasker(detrend=False, dtype=None, high_pass=None, low_pass=None, + mask_args=None, mask_img=None, mask_strategy='background', + memory=Memory(...), memory_level=1, sample_mask=None, sessions=None, smoothing_fwhm=None, standardize=False, t_r=None, target_affine=None, target_shape=None, verbose=0) +.. note:: + + From scikit-learn 0.20, the argument `cachedir` is deprecated in + favour of `location`. Hence `cachedir` might not be seen as here. + The meaning of each parameter is described in the documentation of :class:`NiftiMasker` (click on the name :class:`NiftiMasker`), here we comment on the most important. +.. topic:: **`dtype` argument** + + Forcing your data to have a `dtype` of **float32** can help + save memory and is often a good-enough numerical precision. + You can force this cast by choosing `dtype` to be 'auto'. + In the future this cast will be the default behaviour. + + .. 
seealso:: If you do not want to use the :class:`NiftiMasker` to perform these @@ -308,14 +302,14 @@ Inverse transform: unmasking data Once voxel signals have been processed, the result can be visualized as images after unmasking (masked-reduced data transformed back into -the original whole-brain space). This step is present in almost all -the :ref:`examples ` provided in nilearn. Below you will find +the original whole-brain space). This step is present in many +:ref:`examples ` provided in nilearn. Below you will find an excerpt of :ref:`the example performing Anova-SVM on the Haxby data `): .. literalinclude:: ../../examples/02_decoding/plot_haxby_anova_svm.py :start-after: # Look at the SVC's discriminating weights - :end-before: # Create the figure + :end-before: # Use the mean image as a background | diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index a03b51fb29..a3b0f3856e 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -76,6 +76,7 @@ uses. fetch_atlas_aal fetch_atlas_basc_multiscale_2015 fetch_atlas_allen_2011 + fetch_atlas_pauli_2017 fetch_coords_dosenbach_2010 fetch_abide_pcp fetch_adhd @@ -88,7 +89,7 @@ uses. fetch_miyawaki2008 fetch_nyu_rest fetch_surf_nki_enhanced - fetch_surf_fsaverage5 + fetch_surf_fsaverage fetch_atlas_surf_destrieux fetch_atlas_talairach fetch_oasis_vbm @@ -96,6 +97,8 @@ uses. fetch_cobre fetch_neurovault fetch_neurovault_ids + fetch_neurovault_auditory_computation_task + fetch_neurovault_motor_task get_data_dirs load_mni152_template load_mni152_brain_mask @@ -224,6 +227,8 @@ uses. compute_epi_mask compute_multi_epi_mask + compute_gray_matter_mask + compute_multi_gray_matter_mask compute_background_mask compute_multi_background_mask intersect_masks @@ -305,6 +310,8 @@ uses. find_cut_slices find_xyz_cut_coords + find_parcellation_cut_coords + find_probabilistic_atlas_cut_coords plot_anat plot_img plot_epi @@ -317,6 +324,11 @@ uses. plot_surf plot_surf_roi plot_surf_stat_map + view_surf + view_img_on_surf + view_connectome + view_markers + view_stat_map show **Classes**: diff --git a/doc/plotting/index.rst b/doc/plotting/index.rst index 90f24d68b1..3684fa9ad6 100644 --- a/doc/plotting/index.rst +++ b/doc/plotting/index.rst @@ -97,6 +97,11 @@ different heuristics to find cutting coordinates. |hack| Plotting a connectome + Functions for automatic extraction of coords based on + brain parcellations useful for :func:`plot_connectome` + are demonstrated in + **Example:** :ref:`sphx_glr_auto_examples_03_connectivity_plot_atlas_comparison.py` + |plot_prob_atlas| :func:`plot_prob_atlas` |hack| Plotting 4D probabilistic atlas maps @@ -226,6 +231,23 @@ Different display modes ================= ========================================================= +Available Colormaps +=================== + +Nilearn plotting library ships with a set of extra colormaps, as seen in the +image below + +.. image:: ../auto_examples/01_plotting/images/sphx_glr_plot_colormaps_001.png + :target: ../auto_examples/01_plotting/plot_colormaps.html + :scale: 50 + +These colormaps can be used as any other matplotlib colormap. + +.. image:: ../auto_examples/01_plotting/images/sphx_glr_plot_colormaps_002.png + :target: ../auto_examples/01_plotting/plot_colormaps.html + :scale: 50 + + .. _display_modules: Adding overlays, edges, contours, contour fillings and markers @@ -255,7 +277,7 @@ plot, and has methods to add overlays, contours or edge maps:: :scale: 50 .. 
|plot_overlay| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_overlay_002.png - :target: ../auto_examples/_01_plotting/plot_overlay.html + :target: ../auto_examples/01_plotting/plot_overlay.html :scale: 50 ================= ========================================================= @@ -327,6 +349,8 @@ that can be used to save the plot to an image file:: # Don't forget to close the display >>> display.close() # doctest: +SKIP +.. _surface-plotting: + Surface plotting ================ @@ -335,9 +359,6 @@ on a brain surface. .. versionadded:: 0.3 -NOTE: These functions works for only with matplotlib higher than 1.3.1. - - .. |plot_surf_roi| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_surf_atlas_001.png :target: ../auto_examples/01_plotting/plot_surf_atlas.html :scale: 50 @@ -363,3 +384,123 @@ NOTE: These functions works for only with matplotlib higher than 1.3.1. :ref:`sphx_glr_auto_examples_01_plotting_plot_surf_stat_map.py` ===================== =================================================================== + + +.. _interactive-plotting: + +Interactive plots +================= + +Nilearn also has functions for making interactive plots that can be +seen in a web browser. + +.. versionadded:: 0.5 + + Interactive plotting is new in nilearn 0.5 + +For 3D surface plots of statistical maps or surface atlases, use +:func:`view_img_on_surf` and :func:`view_surf`. Both produce a 3D plot on the +cortical surface. The difference is that :func:`view_surf` takes as input a +surface map and a cortical mesh, whereas :func:`view_img_on_surf` takes as input +a volume statistical map, and projects it on the cortical surface before making +the plot. + +For 3D plots of a connectome, use :func:`view_connectome`. To see only markers, +use :func:`view_markers`. + + +.. _interactive-surface-plotting: + +3D Plots of statistical maps or atlases on the cortical surface +--------------------------------------------------------------- + +:func:`view_img_on_surf`: Surface plot using a 3D statistical map:: + + >>> from nilearn import plotting, datasets # doctest: +SKIP + >>> img = datasets.fetch_localizer_button_task()['tmaps'][0] # doctest: +SKIP + >>> view = plotting.view_img_on_surf(img, threshold='90%', surf_mesh='fsaverage') # doctest: +SKIP + +If you are running a notebook, displaying ``view`` will embed an interactive +plot (this is the case for all interactive plots produced by nilearn's "view" +functions): + +.. image:: ../images/plotly_surface_plot_notebook_screenshot.png + +If you are not using a notebook, you can open the plot in a browser like this:: + + >>> view.open_in_browser() # doctest: +SKIP + +This will open this 3D plot in your web browser: + +.. image:: ../images/plotly_surface_plot.png + + +Or you can save it to an html file:: + + >>> view.save_as_html("surface_plot.html") # doctest: +SKIP + + +:func:`view_surf`: Surface plot using a surface map and a cortical mesh:: + + >>> from nilearn import plotting, datasets # doctest: +SKIP + >>> destrieux = datasets.fetch_atlas_surf_destrieux() # doctest: +SKIP + >>> fsaverage = datasets.fetch_surf_fsaverage() # doctest: +SKIP + >>> view = plotting.view_surf(fsaverage['infl_left'], destrieux['map_left'], # doctest: +SKIP + ... cmap='gist_ncar', symmetric_cmap=False) # doctest: +SKIP + ... + >>> view.open_in_browser() # doctest: +SKIP + + +.. image:: ../images/plotly_surface_atlas_plot.png + +.. 
_interactive-connectome-plotting: + +3D Plots of connectomes +----------------------- + +:func:`view_connectome`: 3D plot of a connectome:: + + >>> view = plotting.view_connectome(correlation_matrix, coords, threshold='90%') # doctest: +SKIP + >>> view.open_in_browser() # doctest: +SKIP + + +.. image:: ../images/plotly_connectome_plot.png + + +.. _interactive-markers-plotting: + +3D Plots of markers +------------------- + +:func:`view_markers`: showing markers (e.g. seed locations) in 3D:: + + >>> from nilearn import plotting # doctest: +SKIP + >>> dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)] # doctest: +SKIP + >>> view = plotting.view_markers( # doctest: +SKIP + >>> dmn_coords, ['red', 'cyan', 'magenta', 'orange'], marker_size=10) # doctest: +SKIP + >>> view.open_in_browser() # doctest: +SKIP + + + +.. image:: ../images/plotly_markers_plot.png + + +.. _interactive-stat-map-plotting: + +Interactive visualization of statistical map slices +--------------------------------------------------- + +:func:`view_stat_map`: open stat map in a Papaya viewer (https://github.com/rii-mango/Papaya):: + + >>> from nilearn import plotting, datasets # doctest: +SKIP + >>> img = datasets.fetch_localizer_button_task()['tmaps'][0] # doctest: +SKIP + >>> view = plotting.view_stat_map(img, threshold=2, vmax=4) # doctest: +SKIP + +in a Jupyter notebook, you can view the image like this: + +.. image:: ../images/papaya_stat_map_plot_screenshot_notebook.png + +Or you can open a viewer in your web browser if you are not in the +notebook:: + + >>> view.open_in_browser() # doctest: +SKIP diff --git a/doc/themes/nilearn/layout.html b/doc/themes/nilearn/layout.html index 9d2ce2e1e0..d4c42eb9ff 100644 --- a/doc/themes/nilearn/layout.html +++ b/doc/themes/nilearn/layout.html @@ -105,10 +105,10 @@ for(i in sections){ if(sections[i] > pos){ break; - }; - if($('a.internal[href$="' + i + '"]').is(':visible')){ + } + if($('a.internal[href$="' + i + '"]').is(':visible')){ current_section = i; - }; + } } $('a.internal[href$="' + current_section + '"]').addClass('active'); }); diff --git a/doc/themes/nilearn/static/copybutton.js b/doc/themes/nilearn/static/copybutton.js index b56d9b2f00..925d44f743 100644 --- a/doc/themes/nilearn/static/copybutton.js +++ b/doc/themes/nilearn/static/copybutton.js @@ -5,7 +5,7 @@ $(document).ready(function() { var div = $('.highlight-python .highlight,' + '.highlight-python3 .highlight,' + '.highlight-pycon .highlight,' + - '.highlight-default .highlight') + '.highlight-default .highlight'); var pre = div.find('pre'); // get the styles from the current theme @@ -21,14 +21,14 @@ $(document).ready(function() { 'border-width': border_width, 'color': border_color, 'text-size': '75%', 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em', 'border-radius': '0 3px 0 0' - } + }; // create and add the button to all the code blocks that contain >>> div.each(function(index) { var jthis = $(this); if (jthis.find('.gp').length > 0) { var button = $('>>>'); - button.css(button_styles) + button.css(button_styles); button.attr('title', hide_text); button.data('hidden', 'false'); jthis.prepend(button); diff --git a/doc/themes/nilearn/static/jquery.js b/doc/themes/nilearn/static/jquery.js deleted file mode 100644 index 16ad06c5ac..0000000000 --- a/doc/themes/nilearn/static/jquery.js +++ /dev/null @@ -1,4 +0,0 @@ -/*! 
jQuery v1.7.2 jquery.com | jquery.org/license */
    ";if(!!a.getElementsByClassName&&a.getElementsByClassName("e").length!==0){a.lastChild.className="e";if(a.getElementsByClassName("e").length===1)return;o.order.splice(1,0,"CLASS"),o.find.CLASS=function(a,b,c){if(typeof b.getElementsByClassName!="undefined"&&!c)return b.getElementsByClassName(a[1])},a=null}}(),c.documentElement.contains?m.contains=function(a,b){return a!==b&&(a.contains?a.contains(b):!0)}:c.documentElement.compareDocumentPosition?m.contains=function(a,b){return!!(a.compareDocumentPosition(b)&16)}:m.contains=function(){return!1},m.isXML=function(a){var b=(a?a.ownerDocument||a:0).documentElement;return b?b.nodeName!=="HTML":!1};var y=function(a,b,c){var d,e=[],f="",g=b.nodeType?[b]:b;while(d=o.match.PSEUDO.exec(a))f+=d[0],a=a.replace(o.match.PSEUDO,"");a=o.relative[a]?a+"*":a;for(var h=0,i=g.length;h0)for(h=g;h=0:f.filter(a,this).length>0:this.filter(a).length>0)},closest:function(a,b){var c=[],d,e,g=this[0];if(f.isArray(a)){var h=1;while(g&&g.ownerDocument&&g!==b){for(d=0;d-1:f.find.matchesSelector(g,a)){c.push(g);break}g=g.parentNode;if(!g||!g.ownerDocument||g===b||g.nodeType===11)break}}c=c.length>1?f.unique(c):c;return this.pushStack(c,"closest",a)},index:function(a){if(!a)return this[0]&&this[0].parentNode?this.prevAll().length:-1;if(typeof a=="string")return f.inArray(this[0],f(a));return f.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var c=typeof a=="string"?f(a,b):f.makeArray(a&&a.nodeType?[a]:a),d=f.merge(this.get(),c);return this.pushStack(S(c[0])||S(d[0])?d:f.unique(d))},andSelf:function(){return this.add(this.prevObject)}}),f.each({parent:function(a){var b=a.parentNode;return b&&b.nodeType!==11?b:null},parents:function(a){return f.dir(a,"parentNode")},parentsUntil:function(a,b,c){return f.dir(a,"parentNode",c)},next:function(a){return f.nth(a,2,"nextSibling")},prev:function(a){return f.nth(a,2,"previousSibling")},nextAll:function(a){return f.dir(a,"nextSibling")},prevAll:function(a){return f.dir(a,"previousSibling")},nextUntil:function(a,b,c){return f.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return f.dir(a,"previousSibling",c)},siblings:function(a){return f.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return f.sibling(a.firstChild)},contents:function(a){return f.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:f.makeArray(a.childNodes)}},function(a,b){f.fn[a]=function(c,d){var e=f.map(this,b,c);L.test(a)||(d=c),d&&typeof d=="string"&&(e=f.filter(d,e)),e=this.length>1&&!R[a]?f.unique(e):e,(this.length>1||N.test(d))&&M.test(a)&&(e=e.reverse());return this.pushStack(e,a,P.call(arguments).join(","))}}),f.extend({filter:function(a,b,c){c&&(a=":not("+a+")");return b.length===1?f.find.matchesSelector(b[0],a)?[b[0]]:[]:f.find.matches(a,b)},dir:function(a,c,d){var e=[],g=a[c];while(g&&g.nodeType!==9&&(d===b||g.nodeType!==1||!f(g).is(d)))g.nodeType===1&&e.push(g),g=g[c];return e},nth:function(a,b,c,d){b=b||1;var e=0;for(;a;a=a[c])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){var c=[];for(;a;a=a.nextSibling)a.nodeType===1&&a!==b&&c.push(a);return c}});var V="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",W=/ jQuery\d+="(?:\d+|null)"/g,X=/^\s+/,Y=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,Z=/<([\w:]+)/,$=/]","i"),bd=/checked\s*(?:[^=]|=\s*.checked.)/i,be=/\/(java|ecma)script/i,bf=/^\s*",""],legend:[1,"
    ","
    "],thead:[1,"","
    "],tr:[2,"","
    "],td:[3,"","
    "],col:[2,"","
    "],area:[1,"",""],_default:[0,"",""]},bh=U(c);bg.optgroup=bg.option,bg.tbody=bg.tfoot=bg.colgroup=bg.caption=bg.thead,bg.th=bg.td,f.support.htmlSerialize||(bg._default=[1,"div
    ","
    "]),f.fn.extend({text:function(a){return f.access(this,function(a){return a===b?f.text(this):this.empty().append((this[0]&&this[0].ownerDocument||c).createTextNode(a))},null,a,arguments.length)},wrapAll:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapAll(a.call(this,b))});if(this[0]){var b=f(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&a.firstChild.nodeType===1)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapInner(a.call(this,b))});return this.each(function(){var b=f(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=f.isFunction(a);return this.each(function(c){f(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){f.nodeName(this,"body")||f(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this)});if(arguments.length){var a=f -.clean(arguments);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this.nextSibling)});if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,f.clean(arguments));return a}},remove:function(a,b){for(var c=0,d;(d=this[c])!=null;c++)if(!a||f.filter(a,[d]).length)!b&&d.nodeType===1&&(f.cleanData(d.getElementsByTagName("*")),f.cleanData([d])),d.parentNode&&d.parentNode.removeChild(d);return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++){b.nodeType===1&&f.cleanData(b.getElementsByTagName("*"));while(b.firstChild)b.removeChild(b.firstChild)}return this},clone:function(a,b){a=a==null?!1:a,b=b==null?a:b;return this.map(function(){return f.clone(this,a,b)})},html:function(a){return f.access(this,function(a){var c=this[0]||{},d=0,e=this.length;if(a===b)return c.nodeType===1?c.innerHTML.replace(W,""):null;if(typeof a=="string"&&!ba.test(a)&&(f.support.leadingWhitespace||!X.test(a))&&!bg[(Z.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Y,"<$1>");try{for(;d1&&l0?this.clone(!0):this).get();f(e[h])[b](j),d=d.concat(j)}return this.pushStack(d,a,e.selector)}}),f.extend({clone:function(a,b,c){var d,e,g,h=f.support.html5Clone||f.isXMLDoc(a)||!bc.test("<"+a.nodeName+">")?a.cloneNode(!0):bo(a);if((!f.support.noCloneEvent||!f.support.noCloneChecked)&&(a.nodeType===1||a.nodeType===11)&&!f.isXMLDoc(a)){bk(a,h),d=bl(a),e=bl(h);for(g=0;d[g];++g)e[g]&&bk(d[g],e[g])}if(b){bj(a,h);if(c){d=bl(a),e=bl(h);for(g=0;d[g];++g)bj(d[g],e[g])}}d=e=null;return h},clean:function(a,b,d,e){var g,h,i,j=[];b=b||c,typeof b.createElement=="undefined"&&(b=b.ownerDocument||b[0]&&b[0].ownerDocument||c);for(var k=0,l;(l=a[k])!=null;k++){typeof l=="number"&&(l+="");if(!l)continue;if(typeof l=="string")if(!_.test(l))l=b.createTextNode(l);else{l=l.replace(Y,"<$1>");var m=(Z.exec(l)||["",""])[1].toLowerCase(),n=bg[m]||bg._default,o=n[0],p=b.createElement("div"),q=bh.childNodes,r;b===c?bh.appendChild(p):U(b).appendChild(p),p.innerHTML=n[1]+l+n[2];while(o--)p=p.lastChild;if(!f.support.tbody){var 
s=$.test(l),t=m==="table"&&!s?p.firstChild&&p.firstChild.childNodes:n[1]===""&&!s?p.childNodes:[];for(i=t.length-1;i>=0;--i)f.nodeName(t[i],"tbody")&&!t[i].childNodes.length&&t[i].parentNode.removeChild(t[i])}!f.support.leadingWhitespace&&X.test(l)&&p.insertBefore(b.createTextNode(X.exec(l)[0]),p.firstChild),l=p.childNodes,p&&(p.parentNode.removeChild(p),q.length>0&&(r=q[q.length-1],r&&r.parentNode&&r.parentNode.removeChild(r)))}var u;if(!f.support.appendChecked)if(l[0]&&typeof (u=l.length)=="number")for(i=0;i1)},f.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=by(a,"opacity");return c===""?"1":c}return a.style.opacity}}},cssNumber:{fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":f.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,c,d,e){if(!!a&&a.nodeType!==3&&a.nodeType!==8&&!!a.style){var g,h,i=f.camelCase(c),j=a.style,k=f.cssHooks[i];c=f.cssProps[i]||i;if(d===b){if(k&&"get"in k&&(g=k.get(a,!1,e))!==b)return g;return j[c]}h=typeof d,h==="string"&&(g=bu.exec(d))&&(d=+(g[1]+1)*+g[2]+parseFloat(f.css(a,c)),h="number");if(d==null||h==="number"&&isNaN(d))return;h==="number"&&!f.cssNumber[i]&&(d+="px");if(!k||!("set"in k)||(d=k.set(a,d))!==b)try{j[c]=d}catch(l){}}},css:function(a,c,d){var e,g;c=f.camelCase(c),g=f.cssHooks[c],c=f.cssProps[c]||c,c==="cssFloat"&&(c="float");if(g&&"get"in g&&(e=g.get(a,!0,d))!==b)return e;if(by)return by(a,c)},swap:function(a,b,c){var d={},e,f;for(f in b)d[f]=a.style[f],a.style[f]=b[f];e=c.call(a);for(f in b)a.style[f]=d[f];return e}}),f.curCSS=f.css,c.defaultView&&c.defaultView.getComputedStyle&&(bz=function(a,b){var c,d,e,g,h=a.style;b=b.replace(br,"-$1").toLowerCase(),(d=a.ownerDocument.defaultView)&&(e=d.getComputedStyle(a,null))&&(c=e.getPropertyValue(b),c===""&&!f.contains(a.ownerDocument.documentElement,a)&&(c=f.style(a,b))),!f.support.pixelMargin&&e&&bv.test(b)&&bt.test(c)&&(g=h.width,h.width=c,c=e.width,h.width=g);return c}),c.documentElement.currentStyle&&(bA=function(a,b){var c,d,e,f=a.currentStyle&&a.currentStyle[b],g=a.style;f==null&&g&&(e=g[b])&&(f=e),bt.test(f)&&(c=g.left,d=a.runtimeStyle&&a.runtimeStyle.left,d&&(a.runtimeStyle.left=a.currentStyle.left),g.left=b==="fontSize"?"1em":f,f=g.pixelLeft+"px",g.left=c,d&&(a.runtimeStyle.left=d));return f===""?"auto":f}),by=bz||bA,f.each(["height","width"],function(a,b){f.cssHooks[b]={get:function(a,c,d){if(c)return a.offsetWidth!==0?bB(a,b,d):f.swap(a,bw,function(){return bB(a,b,d)})},set:function(a,b){return bs.test(b)?b+"px":b}}}),f.support.opacity||(f.cssHooks.opacity={get:function(a,b){return bq.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=f.isNumeric(b)?"alpha(opacity="+b*100+")":"",g=d&&d.filter||c.filter||"";c.zoom=1;if(b>=1&&f.trim(g.replace(bp,""))===""){c.removeAttribute("filter");if(d&&!d.filter)return}c.filter=bp.test(g)?g.replace(bp,e):g+" "+e}}),f(function(){f.support.reliableMarginRight||(f.cssHooks.marginRight={get:function(a,b){return f.swap(a,{display:"inline-block"},function(){return b?by(a,"margin-right"):a.style.marginRight})}})}),f.expr&&f.expr.filters&&(f.expr.filters.hidden=function(a){var b=a.offsetWidth,c=a.offsetHeight;return 
b===0&&c===0||!f.support.reliableHiddenOffsets&&(a.style&&a.style.display||f.css(a,"display"))==="none"},f.expr.filters.visible=function(a){return!f.expr.filters.hidden(a)}),f.each({margin:"",padding:"",border:"Width"},function(a,b){f.cssHooks[a+b]={expand:function(c){var d,e=typeof c=="string"?c.split(" "):[c],f={};for(d=0;d<4;d++)f[a+bx[d]+b]=e[d]||e[d-2]||e[0];return f}}});var bC=/%20/g,bD=/\[\]$/,bE=/\r?\n/g,bF=/#.*$/,bG=/^(.*?):[ \t]*([^\r\n]*)\r?$/mg,bH=/^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,bI=/^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,bJ=/^(?:GET|HEAD)$/,bK=/^\/\//,bL=/\?/,bM=/)<[^<]*)*<\/script>/gi,bN=/^(?:select|textarea)/i,bO=/\s+/,bP=/([?&])_=[^&]*/,bQ=/^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+))?)?/,bR=f.fn.load,bS={},bT={},bU,bV,bW=["*/"]+["*"];try{bU=e.href}catch(bX){bU=c.createElement("a"),bU.href="",bU=bU.href}bV=bQ.exec(bU.toLowerCase())||[],f.fn.extend({load:function(a,c,d){if(typeof a!="string"&&bR)return bR.apply(this,arguments);if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var g=a.slice(e,a.length);a=a.slice(0,e)}var h="GET";c&&(f.isFunction(c)?(d=c,c=b):typeof c=="object"&&(c=f.param(c,f.ajaxSettings.traditional),h="POST"));var i=this;f.ajax({url:a,type:h,dataType:"html",data:c,complete:function(a,b,c){c=a.responseText,a.isResolved()&&(a.done(function(a){c=a}),i.html(g?f("
    ").append(c.replace(bM,"")).find(g):c)),d&&i.each(d,[c,b,a])}});return this},serialize:function(){return f.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?f.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||bN.test(this.nodeName)||bH.test(this.type))}).map(function(a,b){var c=f(this).val();return c==null?null:f.isArray(c)?f.map(c,function(a,c){return{name:b.name,value:a.replace(bE,"\r\n")}}):{name:b.name,value:c.replace(bE,"\r\n")}}).get()}}),f.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){f.fn[b]=function(a){return this.on(b,a)}}),f.each(["get","post"],function(a,c){f[c]=function(a,d,e,g){f.isFunction(d)&&(g=g||e,e=d,d=b);return f.ajax({type:c,url:a,data:d,success:e,dataType:g})}}),f.extend({getScript:function(a,c){return f.get(a,b,c,"script")},getJSON:function(a,b,c){return f.get(a,b,c,"json")},ajaxSetup:function(a,b){b?b$(a,f.ajaxSettings):(b=a,a=f.ajaxSettings),b$(a,b);return a},ajaxSettings:{url:bU,isLocal:bI.test(bV[1]),global:!0,type:"GET",contentType:"application/x-www-form-urlencoded; charset=UTF-8",processData:!0,async:!0,accepts:{xml:"application/xml, text/xml",html:"text/html",text:"text/plain",json:"application/json, text/javascript","*":bW},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},converters:{"* text":a.String,"text html":!0,"text json":f.parseJSON,"text xml":f.parseXML},flatOptions:{context:!0,url:!0}},ajaxPrefilter:bY(bS),ajaxTransport:bY(bT),ajax:function(a,c){function w(a,c,l,m){if(s!==2){s=2,q&&clearTimeout(q),p=b,n=m||"",v.readyState=a>0?4:0;var o,r,u,w=c,x=l?ca(d,v,l):b,y,z;if(a>=200&&a<300||a===304){if(d.ifModified){if(y=v.getResponseHeader("Last-Modified"))f.lastModified[k]=y;if(z=v.getResponseHeader("Etag"))f.etag[k]=z}if(a===304)w="notmodified",o=!0;else try{r=cb(d,x),w="success",o=!0}catch(A){w="parsererror",u=A}}else{u=w;if(!w||a)w="error",a<0&&(a=0)}v.status=a,v.statusText=""+(c||w),o?h.resolveWith(e,[r,w,v]):h.rejectWith(e,[v,w,u]),v.statusCode(j),j=b,t&&g.trigger("ajax"+(o?"Success":"Error"),[v,d,o?r:u]),i.fireWith(e,[v,w]),t&&(g.trigger("ajaxComplete",[v,d]),--f.active||f.event.trigger("ajaxStop"))}}typeof a=="object"&&(c=a,a=b),c=c||{};var d=f.ajaxSetup({},c),e=d.context||d,g=e!==d&&(e.nodeType||e instanceof f)?f(e):f.event,h=f.Deferred(),i=f.Callbacks("once memory"),j=d.statusCode||{},k,l={},m={},n,o,p,q,r,s=0,t,u,v={readyState:0,setRequestHeader:function(a,b){if(!s){var c=a.toLowerCase();a=m[c]=m[c]||a,l[a]=b}return this},getAllResponseHeaders:function(){return s===2?n:null},getResponseHeader:function(a){var c;if(s===2){if(!o){o={};while(c=bG.exec(n))o[c[1].toLowerCase()]=c[2]}c=o[a.toLowerCase()]}return c===b?null:c},overrideMimeType:function(a){s||(d.mimeType=a);return this},abort:function(a){a=a||"abort",p&&p.abort(a),w(0,a);return this}};h.promise(v),v.success=v.done,v.error=v.fail,v.complete=i.add,v.statusCode=function(a){if(a){var b;if(s<2)for(b in a)j[b]=[j[b],a[b]];else b=a[v.status],v.then(b,b)}return this},d.url=((a||d.url)+"").replace(bF,"").replace(bK,bV[1]+"//"),d.dataTypes=f.trim(d.dataType||"*").toLowerCase().split(bO),d.crossDomain==null&&(r=bQ.exec(d.url.toLowerCase()),d.crossDomain=!(!r||r[1]==bV[1]&&r[2]==bV[2]&&(r[3]||(r[1]==="http:"?80:443))==(bV[3]||(bV[1]==="http:"?80:443)))),d.data&&d.processData&&typeof 
d.data!="string"&&(d.data=f.param(d.data,d.traditional)),bZ(bS,d,c,v);if(s===2)return!1;t=d.global,d.type=d.type.toUpperCase(),d.hasContent=!bJ.test(d.type),t&&f.active++===0&&f.event.trigger("ajaxStart");if(!d.hasContent){d.data&&(d.url+=(bL.test(d.url)?"&":"?")+d.data,delete d.data),k=d.url;if(d.cache===!1){var x=f.now(),y=d.url.replace(bP,"$1_="+x);d.url=y+(y===d.url?(bL.test(d.url)?"&":"?")+"_="+x:"")}}(d.data&&d.hasContent&&d.contentType!==!1||c.contentType)&&v.setRequestHeader("Content-Type",d.contentType),d.ifModified&&(k=k||d.url,f.lastModified[k]&&v.setRequestHeader("If-Modified-Since",f.lastModified[k]),f.etag[k]&&v.setRequestHeader("If-None-Match",f.etag[k])),v.setRequestHeader("Accept",d.dataTypes[0]&&d.accepts[d.dataTypes[0]]?d.accepts[d.dataTypes[0]]+(d.dataTypes[0]!=="*"?", "+bW+"; q=0.01":""):d.accepts["*"]);for(u in d.headers)v.setRequestHeader(u,d.headers[u]);if(d.beforeSend&&(d.beforeSend.call(e,v,d)===!1||s===2)){v.abort();return!1}for(u in{success:1,error:1,complete:1})v[u](d[u]);p=bZ(bT,d,c,v);if(!p)w(-1,"No Transport");else{v.readyState=1,t&&g.trigger("ajaxSend",[v,d]),d.async&&d.timeout>0&&(q=setTimeout(function(){v.abort("timeout")},d.timeout));try{s=1,p.send(l,w)}catch(z){if(s<2)w(-1,z);else throw z}}return v},param:function(a,c){var d=[],e=function(a,b){b=f.isFunction(b)?b():b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};c===b&&(c=f.ajaxSettings.traditional);if(f.isArray(a)||a.jquery&&!f.isPlainObject(a))f.each(a,function(){e(this.name,this.value)});else for(var g in a)b_(g,a[g],c,e);return d.join("&").replace(bC,"+")}}),f.extend({active:0,lastModified:{},etag:{}});var cc=f.now(),cd=/(\=)\?(&|$)|\?\?/i;f.ajaxSetup({jsonp:"callback",jsonpCallback:function(){return f.expando+"_"+cc++}}),f.ajaxPrefilter("json jsonp",function(b,c,d){var e=typeof b.data=="string"&&/^application\/x\-www\-form\-urlencoded/.test(b.contentType);if(b.dataTypes[0]==="jsonp"||b.jsonp!==!1&&(cd.test(b.url)||e&&cd.test(b.data))){var g,h=b.jsonpCallback=f.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,i=a[h],j=b.url,k=b.data,l="$1"+h+"$2";b.jsonp!==!1&&(j=j.replace(cd,l),b.url===j&&(e&&(k=k.replace(cd,l)),b.data===k&&(j+=(/\?/.test(j)?"&":"?")+b.jsonp+"="+h))),b.url=j,b.data=k,a[h]=function(a){g=[a]},d.always(function(){a[h]=i,g&&f.isFunction(i)&&a[h](g[0])}),b.converters["script json"]=function(){g||f.error(h+" was not called");return g[0]},b.dataTypes[0]="json";return"script"}}),f.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/javascript|ecmascript/},converters:{"text script":function(a){f.globalEval(a);return a}}}),f.ajaxPrefilter("script",function(a){a.cache===b&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),f.ajaxTransport("script",function(a){if(a.crossDomain){var d,e=c.head||c.getElementsByTagName("head")[0]||c.documentElement;return{send:function(f,g){d=c.createElement("script"),d.async="async",a.scriptCharset&&(d.charset=a.scriptCharset),d.src=a.url,d.onload=d.onreadystatechange=function(a,c){if(c||!d.readyState||/loaded|complete/.test(d.readyState))d.onload=d.onreadystatechange=null,e&&d.parentNode&&e.removeChild(d),d=b,c||g(200,"success")},e.insertBefore(d,e.firstChild)},abort:function(){d&&d.onload(0,1)}}}});var ce=a.ActiveXObject?function(){for(var a in cg)cg[a](0,1)}:!1,cf=0,cg;f.ajaxSettings.xhr=a.ActiveXObject?function(){return!this.isLocal&&ch()||ci()}:ch,function(a){f.extend(f.support,{ajax:!!a,cors:!!a&&"withCredentials"in 
a})}(f.ajaxSettings.xhr()),f.support.ajax&&f.ajaxTransport(function(c){if(!c.crossDomain||f.support.cors){var d;return{send:function(e,g){var h=c.xhr(),i,j;c.username?h.open(c.type,c.url,c.async,c.username,c.password):h.open(c.type,c.url,c.async);if(c.xhrFields)for(j in c.xhrFields)h[j]=c.xhrFields[j];c.mimeType&&h.overrideMimeType&&h.overrideMimeType(c.mimeType),!c.crossDomain&&!e["X-Requested-With"]&&(e["X-Requested-With"]="XMLHttpRequest");try{for(j in e)h.setRequestHeader(j,e[j])}catch(k){}h.send(c.hasContent&&c.data||null),d=function(a,e){var j,k,l,m,n;try{if(d&&(e||h.readyState===4)){d=b,i&&(h.onreadystatechange=f.noop,ce&&delete cg[i]);if(e)h.readyState!==4&&h.abort();else{j=h.status,l=h.getAllResponseHeaders(),m={},n=h.responseXML,n&&n.documentElement&&(m.xml=n);try{m.text=h.responseText}catch(a){}try{k=h.statusText}catch(o){k=""}!j&&c.isLocal&&!c.crossDomain?j=m.text?200:404:j===1223&&(j=204)}}}catch(p){e||g(-1,p)}m&&g(j,k,m,l)},!c.async||h.readyState===4?d():(i=++cf,ce&&(cg||(cg={},f(a).unload(ce)),cg[i]=d),h.onreadystatechange=d)},abort:function(){d&&d(0,1)}}}});var cj={},ck,cl,cm=/^(?:toggle|show|hide)$/,cn=/^([+\-]=)?([\d+.\-]+)([a-z%]*)$/i,co,cp=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]],cq;f.fn.extend({show:function(a,b,c){var d,e;if(a||a===0)return this.animate(ct("show",3),a,b,c);for(var g=0,h=this.length;g=i.duration+this.startTime){this.now=this.end,this.pos=this.state=1,this.update(),i.animatedProperties[this.prop]=!0;for(b in i.animatedProperties)i.animatedProperties[b]!==!0&&(g=!1);if(g){i.overflow!=null&&!f.support.shrinkWrapBlocks&&f.each(["","X","Y"],function(a,b){h.style["overflow"+b]=i.overflow[a]}),i.hide&&f(h).hide();if(i.hide||i.show)for(b in i.animatedProperties)f.style(h,b,i.orig[b]),f.removeData(h,"fxshow"+b,!0),f.removeData(h,"toggle"+b,!0);d=i.complete,d&&(i.complete=!1,d.call(h))}return!1}i.duration==Infinity?this.now=e:(c=e-this.startTime,this.state=c/i.duration,this.pos=f.easing[i.animatedProperties[this.prop]](this.state,c,0,1,i.duration),this.now=this.start+(this.end-this.start)*this.pos),this.update();return!0}},f.extend(f.fx,{tick:function(){var a,b=f.timers,c=0;for(;c-1,k={},l={},m,n;j?(l=e.position(),m=l.top,n=l.left):(m=parseFloat(h)||0,n=parseFloat(i)||0),f.isFunction(b)&&(b=b.call(a,c,g)),b.top!=null&&(k.top=b.top-g.top+m),b.left!=null&&(k.left=b.left-g.left+n),"using"in b?b.using.call(a,k):e.css(k)}},f.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),c=this.offset(),d=cx.test(b[0].nodeName)?{top:0,left:0}:b.offset();c.top-=parseFloat(f.css(a,"marginTop"))||0,c.left-=parseFloat(f.css(a,"marginLeft"))||0,d.top+=parseFloat(f.css(b[0],"borderTopWidth"))||0,d.left+=parseFloat(f.css(b[0],"borderLeftWidth"))||0;return{top:c.top-d.top,left:c.left-d.left}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||c.body;while(a&&!cx.test(a.nodeName)&&f.css(a,"position")==="static")a=a.offsetParent;return a})}}),f.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,c){var d=/Y/.test(c);f.fn[a]=function(e){return f.access(this,function(a,e,g){var h=cy(a);if(g===b)return h?c in h?h[c]:f.support.boxModel&&h.document.documentElement[e]||h.document.body[e]:a[e];h?h.scrollTo(d?f(h).scrollLeft():g,d?g:f(h).scrollTop()):a[e]=g},a,e,arguments.length,null)}}),f.each({Height:"height",Width:"width"},function(a,c){var d="client"+a,e="scroll"+a,g="offset"+a;f.fn["inner"+a]=function(){var 
diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 3a7d0fc1a5..6bc6432565 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,3 +1,75 @@ +0.5.0 alpha +=========== + +This is an alpha release: to download it, you need to explicitly ask for +the version number:: + + pip install nilearn==0.5.0a0 + +Highlights +---------- + + - **Minimum supported versions of packages have been bumped up.** + - scikit-learn -- v0.18 + - scipy -- v0.17 + - pandas -- v0.18 + - numpy -- v1.11 + - matplotlib -- v1.5.1 + + - New :ref:`interactive plotting functions `, + e.g. for use in a notebook. + +Enhancements +------------ + + - All NiftiMaskers now have a `dtype` argument. For now the default behaviour + is to keep the same data type as the input data. + + - New functions :func:`nilearn.plotting.view_surf` and + :func:`nilearn.plotting.view_img_on_surf` for interactive visualization of + maps on the cortical surface in a web browser. + + - New functions :func:`nilearn.plotting.view_connectome` and + :func:`nilearn.plotting.view_markers` to visualize connectomes and + seed locations in 3D. + + - New function :func:`nilearn.plotting.view_stat_map` for interactive + visualization of volumes with 3 orthogonal cuts. + + - Added :func:`nilearn.datasets.fetch_surf_fsaverage` to download either + fsaverage or fsaverage5 (FreeSurfer cortical meshes). + + - Added :func:`nilearn.datasets.fetch_atlas_pauli_2017` to download a + recent subcortical neuroimaging atlas. + + - Added :func:`nilearn.plotting.find_parcellation_cut_coords` for + extraction of coordinates on brain parcellations denoted as labels. + + - Added :func:`nilearn.plotting.find_probabilistic_atlas_cut_coords` for + extraction of coordinates on brain probabilistic maps. + + - Added :func:`nilearn.datasets.fetch_neurovault_auditory_computation_task` + and :func:`nilearn.datasets.fetch_neurovault_motor_task` for simple example data. + + +Changes +------- + + - `nilearn.datasets.fetch_surf_fsaverage5` is deprecated and will be + removed in a future release. Use :func:`nilearn.datasets.fetch_surf_fsaverage`, + with the parameter mesh="fsaverage5" (the default) instead. + + - fsaverage5 surface data files are now shipped directly with Nilearn. + See issue #1705 for discussion. + + - `sklearn.cross_validation` and `sklearn.grid_search` have been + replaced by `sklearn.model_selection` in all the examples. + + - Colorbars in plotting functions now have a middle gray background + suitable for use with custom colormaps with a non-unity alpha channel. + + 0.4.2 ===== Few important bugs fix release for OHBM conference. @@ -358,7 +430,7 @@ Enhancements - A function :func:`nilearn.plotting.plot_surf_roi` can be used for plotting statistical maps rois onto brain surface.
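The interactive viewers listed in the 0.5.0 notes above can be tried in a few lines. This is a minimal sketch rather than part of the changeset; it only uses calls that also appear in the example diffs below (the NeuroVault motor-task image, ``view_stat_map``, ``view_img_on_surf`` and ``open_in_browser``)::

    from nilearn import datasets, plotting

    # One group-level motor contrast map downloaded from NeuroVault
    stat_img = datasets.fetch_neurovault_motor_task().images[0]

    # Interactive volume viewer with three orthogonal cuts
    view = plotting.view_stat_map(stat_img, threshold=3)
    view.open_in_browser()  # or leave `view` as the last expression of a notebook cell

    # Same map projected onto the cortical surface, no explicit vol_to_surf needed
    surf_view = plotting.view_img_on_surf(stat_img, threshold='90%')
    surf_view.open_in_browser()

As the examples below note, leaving ``view`` as the output of a Jupyter cell displays the same interactive figure inline instead of opening a browser tab.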
- - A function :func:`nilearn.datasets.fetch_surf_fsaverage5` can be used + - A function `nilearn.datasets.fetch_surf_fsaverage5` can be used for surface data object to be as background map for the above plotting functions. @@ -897,3 +969,4 @@ Contributors (from ``git shortlog -ns 0.1``):: 1 Matthias Ekman 1 Michael Waskom 1 Vincent Michel + diff --git a/examples/01_plotting/plot_3d_map_to_surface_projection.py b/examples/01_plotting/plot_3d_map_to_surface_projection.py index 7048ceafd2..c8d08a0e6e 100644 --- a/examples/01_plotting/plot_3d_map_to_surface_projection.py +++ b/examples/01_plotting/plot_3d_map_to_surface_projection.py @@ -6,8 +6,6 @@ :func:`nilearn.surface.vol_to_surf`. Display a surface plot of the projected map using :func:`nilearn.plotting.plot_surf_stat_map`. -NOTE: Example needs matplotlib version higher than 1.3.1. - """ ############################################################################## @@ -16,14 +14,15 @@ from nilearn import datasets -localizer_dataset = datasets.fetch_localizer_button_task() -localizer_tmap = localizer_dataset.tmaps[0] +motor_images = datasets.fetch_neurovault_motor_task() +stat_img = motor_images.images[0] + ############################################################################## # Get a cortical mesh # ------------------- -fsaverage = datasets.fetch_surf_fsaverage5() +fsaverage = datasets.fetch_surf_fsaverage() ############################################################################## # Sample the 3D data around each node of the mesh @@ -31,7 +30,7 @@ from nilearn import surface -texture = surface.vol_to_surf(localizer_tmap, fsaverage.pial_right) +texture = surface.vol_to_surf(stat_img, fsaverage.pial_right) ############################################################################## # Plot the result @@ -40,17 +39,65 @@ from nilearn import plotting plotting.plot_surf_stat_map(fsaverage.infl_right, texture, hemi='right', - title='Surface right hemisphere', + title='Surface right hemisphere', colorbar=True, threshold=1., bg_map=fsaverage.sulc_right) ############################################################################## # Plot 3D image for comparison # ---------------------------- -plotting.plot_glass_brain(localizer_tmap, display_mode='r', plot_abs=False, +plotting.plot_glass_brain(stat_img, display_mode='r', plot_abs=False, title='Glass brain', threshold=2.) -plotting.plot_stat_map(localizer_tmap, display_mode='x', threshold=1., +plotting.plot_stat_map(stat_img, display_mode='x', threshold=1., cut_coords=range(0, 51, 10), title='Slices') + +############################################################################## +# Plot with higher-resolution mesh +# -------------------------------- +# +# `fetch_surf_fsaverage` takes a "mesh" argument which specifies +# wether to fetch the low-resolution fsaverage5 mesh, or the high-resolution +# fsaverage mesh. using mesh="fsaverage" will result in more memory usage and +# computation time, but finer visualizations. 
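As a side note on the comment above, the deprecated and new fetcher calls map as follows. This sketch is not part of the diff; the variable names are illustrative, and the vertex counts (roughly 10k per hemisphere for fsaverage5, roughly 160k for full fsaverage) come from FreeSurfer conventions rather than from this changeset::

    from nilearn import datasets

    # Deprecated: datasets.fetch_surf_fsaverage5()
    fsaverage5 = datasets.fetch_surf_fsaverage()           # mesh='fsaverage5' is the default (~10k vertices per hemisphere)
    fsaverage_hires = datasets.fetch_surf_fsaverage(mesh='fsaverage')  # full-resolution mesh (~160k vertices per hemisphere)

    # Both return the attribute names used throughout these examples
    print(fsaverage5.pial_right, fsaverage_hires.infl_right)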
+ +big_fsaverage = datasets.fetch_surf_fsaverage('fsaverage') +big_texture = surface.vol_to_surf(stat_img, big_fsaverage.pial_right) + +plotting.plot_surf_stat_map(big_fsaverage.infl_right, + big_texture, hemi='right', colorbar=True, + title='Surface right hemisphere: fine mesh', + threshold=1., bg_map=big_fsaverage.sulc_right) + + plotting.show() + + +############################################################################## +# 3D visualization in a web browser +# --------------------------------- +# An alternative to :func:`nilearn.plotting.plot_surf_stat_map` is to use +# :func:`nilearn.plotting.view_surf` or +# :func:`nilearn.plotting.view_img_on_surf` that give more interactive +# visualizations in a web browser. See :ref:`interactive-surface-plotting` for +# more details. + +view = plotting.view_surf(fsaverage.infl_right, texture, threshold='90%', + bg_map=fsaverage.sulc_right) +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + +############################################################################## +# In a Jupyter notebook, if ``view`` is the output of a cell, it will +# be displayed below the cell + +view + +############################################################################## +# We don't need to do the projection ourselves, we can use view_img_on_surf: + +view = plotting.view_img_on_surf(stat_img, threshold='90%') +# view.open_in_browser() + +view diff --git a/examples/01_plotting/plot_colormaps.py b/examples/01_plotting/plot_colormaps.py new file mode 100644 index 0000000000..f72f1fb175 --- /dev/null +++ b/examples/01_plotting/plot_colormaps.py @@ -0,0 +1,51 @@ +""" +Matplotlib colormaps in Nilearn +================================ + +Visualize HCP connectome workbench color maps shipped with Nilearn +which can be used for plotting brain images on surface. + +See :ref:`surface-plotting` for surface plotting details. 
+""" +import numpy as np +import matplotlib.pyplot as plt + +from nilearn.plotting.cm import _cmap_d as nilearn_cmaps +from nilearn.plotting import show + +########################################################################### +# Plot color maps +# ---------------- + +nmaps = len(nilearn_cmaps) +a = np.outer(np.arange(0, 1, 0.01), np.ones(10)) + +# Initialize the figure +plt.figure(figsize=(10, 4.2)) +plt.subplots_adjust(top=0.4, bottom=0.05, left=0.01, right=0.99) + +for index, cmap in enumerate(nilearn_cmaps): + plt.subplot(1, nmaps + 1, index + 1) + plt.imshow(a, cmap=nilearn_cmaps[cmap]) + plt.axis('off') + plt.title(cmap, fontsize=10, va='bottom', rotation=90) + +########################################################################### +# Plot matplotlib color maps +# -------------------------- +plt.figure(figsize=(10, 5)) +plt.subplots_adjust(top=0.8, bottom=0.05, left=0.01, right=0.99) +deprecated_cmaps = ['Vega10', 'Vega20', 'Vega20b', 'Vega20c', 'spectral'] +m_cmaps = [] +for m in plt.cm.datad: + if not m.endswith("_r") and m not in deprecated_cmaps: + m_cmaps.append(m) +m_cmaps.sort() + +for index, cmap in enumerate(m_cmaps): + plt.subplot(1, len(m_cmaps) + 1, index + 1) + plt.imshow(a, cmap=plt.get_cmap(cmap), aspect='auto') + plt.axis('off') + plt.title(cmap, fontsize=10, va='bottom', rotation=90) + +show() diff --git a/examples/01_plotting/plot_demo_glass_brain.py b/examples/01_plotting/plot_demo_glass_brain.py index 4460cd0322..3fb2d86aca 100644 --- a/examples/01_plotting/plot_demo_glass_brain.py +++ b/examples/01_plotting/plot_demo_glass_brain.py @@ -12,8 +12,8 @@ from nilearn import datasets -localizer_dataset = datasets.fetch_localizer_button_task() -localizer_tmap_filename = localizer_dataset.tmaps[0] +motor_images = datasets.fetch_neurovault_motor_task() +stat_img = motor_images.images[0] ############################################################################### # Glass brain plotting: whole brain sagittal cuts @@ -21,21 +21,21 @@ from nilearn import plotting -plotting.plot_glass_brain(localizer_tmap_filename, threshold=3) +plotting.plot_glass_brain(stat_img, threshold=3) ############################################################################### -# Glass brain plotting: black backgrond -# ------------------------------------- +# Glass brain plotting: black background +# -------------------------------------- # On a black background (option "black_bg"), and with only the x and # the z view (option "display_mode"). plotting.plot_glass_brain( - localizer_tmap_filename, title='plot_glass_brain', + stat_img, title='plot_glass_brain', black_bg=True, display_mode='xz', threshold=3) ############################################################################### # Glass brain plotting: Hemispheric sagittal cuts # ----------------------------------------------- -plotting.plot_glass_brain(localizer_tmap_filename, +plotting.plot_glass_brain(stat_img, title='plot_glass_brain with display_mode="lyrz"', display_mode='lyrz', threshold=3) diff --git a/examples/01_plotting/plot_demo_glass_brain_extensive.py b/examples/01_plotting/plot_demo_glass_brain_extensive.py index b3a0a5ead4..8d752a6aa9 100644 --- a/examples/01_plotting/plot_demo_glass_brain_extensive.py +++ b/examples/01_plotting/plot_demo_glass_brain_extensive.py @@ -13,8 +13,8 @@ :ref:`Section 4.3 ` for more details about display objects in Nilearn. -Also, see :func:`nilearn.datasets.fetch_localizer_button_task` for details -about the plotting data and its experiments. 
+Also, see :func:`nilearn.datasets.fetch_neurovault_motor_task` for details +about the plotting data and associated meta-data. """ @@ -30,15 +30,13 @@ print('Datasets shipped with nilearn are stored in: %r' % datasets.get_data_dirs()) ############################################################################### -# Let us now retrieve a motor task contrast maps corresponding to second subject -# from a localizer experiment -tmap_filenames = datasets.fetch_localizer_button_task()['tmaps'] -print(tmap_filenames) - -############################################################################### -# tmap_filenames is returned as a list. We need to take first one -tmap_filename = tmap_filenames[0] +# Let us now retrieve a motor task contrast map +# corresponding to a group one-sample t-test +motor_images = datasets.fetch_neurovault_motor_task() +stat_img = motor_images.images[0] +# stat_img is just the name of the file that we downloded +print(stat_img) ############################################################################### # Demo glass brain plotting @@ -46,29 +44,29 @@ from nilearn import plotting # Whole brain sagittal cuts and map is thresholded at 3 -plotting.plot_glass_brain(tmap_filename, threshold=3) +plotting.plot_glass_brain(stat_img, threshold=3) ############################################################################### # With a colorbar -plotting.plot_glass_brain(tmap_filename, threshold=3, colorbar=True) +plotting.plot_glass_brain(stat_img, threshold=3, colorbar=True) ############################################################################### # Black background, and only the (x, z) cuts -plotting.plot_glass_brain(tmap_filename, title='plot_glass_brain', +plotting.plot_glass_brain(stat_img, title='plot_glass_brain', black_bg=True, display_mode='xz', threshold=3) ############################################################################### # Plotting the sign of the activation with plot_abs to False -plotting.plot_glass_brain(tmap_filename, threshold=0, colorbar=True, +plotting.plot_glass_brain(stat_img, threshold=0, colorbar=True, plot_abs=False) ############################################################################### # The sign of the activation and a colorbar -plotting.plot_glass_brain(tmap_filename, threshold=3, +plotting.plot_glass_brain(stat_img, threshold=3, colorbar=True, plot_abs=False) @@ -77,12 +75,12 @@ # --------------------------------------------------------- # # Hemispheric sagittal cuts -plotting.plot_glass_brain(tmap_filename, +plotting.plot_glass_brain(stat_img, title='plot_glass_brain with display_mode="lzr"', black_bg=True, display_mode='lzr', threshold=3) ############################################################################### -plotting.plot_glass_brain(tmap_filename, threshold=0, colorbar=True, +plotting.plot_glass_brain(stat_img, threshold=0, colorbar=True, title='plot_glass_brain with display_mode="lyrz"', plot_abs=False, display_mode='lyrz') @@ -97,16 +95,16 @@ # statistical maps with "add_contours" display = plotting.plot_glass_brain(None) # Here, we project statistical maps -display.add_contours(tmap_filename) +display.add_contours(stat_img) # and a title -display.title('"tmap_filename" on glass brain without threshold') +display.title('"stat_img" on glass brain without threshold') ############################################################################### # Plotting with `filled=True` implies contours with fillings. 
Here, we are not # specifying levels display = plotting.plot_glass_brain(None) # Here, we project statistical maps with filled=True -display.add_contours(tmap_filename, filled=True) +display.add_contours(stat_img, filled=True) # and a title display.title('Same map but with fillings in the contours') @@ -117,13 +115,13 @@ # Here, we set the threshold using parameter called `levels` with value given # in a list and choosing color to Red. display = plotting.plot_glass_brain(None) -display.add_contours(tmap_filename, levels=[3.], colors='r') -display.title('"tmap_filename" on glass brain with threshold') +display.add_contours(stat_img, levels=[3.], colors='r') +display.title('"stat_img" on glass brain with threshold') ############################################################################### # Plotting with same demonstration but inlcudes now filled=True display = plotting.plot_glass_brain(None) -display.add_contours(tmap_filename, filled=True, levels=[3.], colors='r') +display.add_contours(stat_img, filled=True, levels=[3.], colors='r') display.title('Same demonstration but using fillings inside contours') ############################################################################## @@ -132,13 +130,13 @@ # We can set black background using black_bg=True display = plotting.plot_glass_brain(None, black_bg=True) -display.add_contours(tmap_filename, levels=[3.], colors='g') -display.title('"tmap_filename" on glass brain with black background') +display.add_contours(stat_img, levels=[3.], colors='g') +display.title('"stat_img" on glass brain with black background') ############################################################################## # Black background plotting with filled in contours display = plotting.plot_glass_brain(None, black_bg=True) -display.add_contours(tmap_filename, filled=True, levels=[3.], colors='g') +display.add_contours(stat_img, filled=True, levels=[3.], colors='g') display.title('Glass brain with black background and filled in contours') ############################################################################## @@ -148,13 +146,13 @@ # Now, display_mode is chosen as 'lr' for both hemispheric plots display = plotting.plot_glass_brain(None, display_mode='lr') -display.add_contours(tmap_filename, levels=[3.], colors='r') -display.title('"tmap_filename" on glass brain only "l" "r" hemispheres') +display.add_contours(stat_img, levels=[3.], colors='r') +display.title('"stat_img" on glass brain only "l" "r" hemispheres') ############################################################################## # Filled contours in both hemispheric plotting, just by adding filled=True display = plotting.plot_glass_brain(None, display_mode='lr') -display.add_contours(tmap_filename, filled=True, levels=[3.], colors='r') +display.add_contours(stat_img, filled=True, levels=[3.], colors='r') display.title('Filled contours on glass brain only "l" "r" hemispheres') ############################################################################## @@ -164,14 +162,14 @@ # By default parameter `plot_abs` is True and sign of activations can be # displayed by changing `plot_abs` to False display = plotting.plot_glass_brain(None, plot_abs=False, display_mode='lzry') -display.add_contours(tmap_filename) +display.add_contours(stat_img) display.title("Contours with both sign of activations without threshold") ############################################################################## # Now, adding just filled=True to get positive and negative sign activations # with fillings in the contours display 
= plotting.plot_glass_brain(None, plot_abs=False, display_mode='lzry') -display.add_contours(tmap_filename, filled=True) +display.add_contours(stat_img, filled=True) display.title("Filled contours with both sign of activations without threshold") @@ -189,7 +187,7 @@ # color to each contour. Additionally, we also choose to plot contours with # thick line widths, For linewidths one value would be enough so that same # value is used for both contours. -display.add_contours(tmap_filename, levels=[-2.8, 3.], colors=['b', 'r'], +display.add_contours(stat_img, levels=[-2.8, 3.], colors=['b', 'r'], linewidths=4.) display.title('Contours with sign of activations with threshold') @@ -205,10 +203,10 @@ # Second, we plot negative sign of activation with levels given as negative # activation value in a list. Upper bound should be kept to -infinity -display.add_contours(tmap_filename, filled=True, levels=[-np.inf, -2.8], +display.add_contours(stat_img, filled=True, levels=[-np.inf, -2.8], colors='b') # Next, within same plotting object we plot positive sign of activation -display.add_contours(tmap_filename, filled=True, levels=[3.], colors='r') +display.add_contours(stat_img, filled=True, levels=[3.], colors='r') display.title('Now same plotting but with filled contours') # Finally, displaying them diff --git a/examples/01_plotting/plot_demo_more_plotting.py b/examples/01_plotting/plot_demo_more_plotting.py index 97c0ea3686..2fdc4d132b 100644 --- a/examples/01_plotting/plot_demo_more_plotting.py +++ b/examples/01_plotting/plot_demo_more_plotting.py @@ -38,9 +38,9 @@ haxby_func_filename = haxby_dataset.func[0] # localizer dataset to have contrast maps -localizer_dataset = datasets.fetch_localizer_button_task(get_anats=True) -localizer_anat_filename = localizer_dataset.anats[0] -localizer_tmap_filename = localizer_dataset.tmaps[0] +motor_images = datasets.fetch_neurovault_motor_task() +stat_img = motor_images.images[0] + ######################################## # Now, we show from here how to visualize the retrieved datasets using plotting @@ -57,7 +57,7 @@ # argument, is here a list of integers denotes coordinates of each slice # in the order [x, y, z]. By default the `colorbar` argument is set to True # in plot_stat_map. -plotting.plot_stat_map(localizer_tmap_filename, display_mode='ortho', +plotting.plot_stat_map(stat_img, display_mode='ortho', cut_coords=[36, -27, 60], title="display_mode='ortho', cut_coords=[36, -27, 60]") @@ -68,7 +68,7 @@ # string 'z' for axial direction and `cut_coords` as integer 5 without a # list implies that number of cuts in the slices should be maximum of 5. # The coordinates to cut the slices are selected automatically -plotting.plot_stat_map(localizer_tmap_filename, display_mode='z', cut_coords=5, +plotting.plot_stat_map(stat_img, display_mode='z', cut_coords=5, title="display_mode='z', cut_coords=5") ######################################## @@ -76,7 +76,7 @@ # ------------------------------------------------------------- # In this type, `display_mode` should be given as string 'x' for sagittal # view and coordinates should be given as integers in a list -plotting.plot_stat_map(localizer_tmap_filename, display_mode='x', +plotting.plot_stat_map(stat_img, display_mode='x', cut_coords=[-36, 36], title="display_mode='x', cut_coords=[-36, 36]") @@ -86,7 +86,7 @@ # For coronal view, `display_mode` is given as string 'y' and `cut_coords` # as integer 1 not as a list for single cut. 
The coordinates are selected # automatically -plotting.plot_stat_map(localizer_tmap_filename, display_mode='y', cut_coords=1, +plotting.plot_stat_map(stat_img, display_mode='y', cut_coords=1, title="display_mode='y', cut_coords=1") ######################################## @@ -94,7 +94,7 @@ # ------------------------------------------------- # The argument `colorbar` should be given as False to show plots without # a colorbar on the right side. -plotting.plot_stat_map(localizer_tmap_filename, display_mode='z', +plotting.plot_stat_map(stat_img, display_mode='z', cut_coords=1, colorbar=False, title="display_mode='z', cut_coords=1, colorbar=False") @@ -104,7 +104,7 @@ # argument display_mode='xz' where 'x' for sagittal and 'z' for axial view. # argument `cut_coords` should match with input number of views therefore two # integers should be given in a list to select the slices to be displayed -plotting.plot_stat_map(localizer_tmap_filename, display_mode='xz', +plotting.plot_stat_map(stat_img, display_mode='xz', cut_coords=[36, 60], title="display_mode='xz', cut_coords=[36, 60]") @@ -113,7 +113,7 @@ # ------------------------------------------------------------------- # display_mode='yx' for coronal and saggital view and coordinates will be # assigned in the order of direction as [x, y, z] -plotting.plot_stat_map(localizer_tmap_filename, display_mode='yx', +plotting.plot_stat_map(stat_img, display_mode='yx', cut_coords=[-27, 36], title="display_mode='yx', cut_coords=[-27, 36]") @@ -121,7 +121,7 @@ # Now, views are changed to 'coronal' and 'axial' views with coordinates # ----------------------------------------------------------------------- -plotting.plot_stat_map(localizer_tmap_filename, display_mode='yz', +plotting.plot_stat_map(stat_img, display_mode='yz', cut_coords=[-27, 60], title="display_mode='yz', cut_coords=[-27, 60]") @@ -213,13 +213,13 @@ # Contrast maps plotted with function `plot_stat_map` can be saved using an # inbuilt parameter output_file as filename + .extension as string. 
Valid # extensions are .png, .pdf, .svg -plotting.plot_stat_map(localizer_tmap_filename, +plotting.plot_stat_map(stat_img, title='Using plot_stat_map output_file', output_file='plot_stat_map.png') ######################################## # Another way of saving plots is using 'savefig' option from display object -display = plotting.plot_stat_map(localizer_tmap_filename, +display = plotting.plot_stat_map(stat_img, title='Using display savefig') display.savefig('plot_stat_map_from_display.png') # In non-interactive settings make sure you close your displays diff --git a/examples/01_plotting/plot_demo_plotting.py b/examples/01_plotting/plot_demo_plotting.py index a6bf2f29bb..d5871e550b 100644 --- a/examples/01_plotting/plot_demo_plotting.py +++ b/examples/01_plotting/plot_demo_plotting.py @@ -29,10 +29,9 @@ haxby_mask_filename = haxby_dataset.mask_vt[0] haxby_func_filename = haxby_dataset.func[0] -# localizer dataset to have contrast maps -localizer_dataset = datasets.fetch_localizer_button_task(get_anats=True) -localizer_anat_filename = localizer_dataset.anats[0] -localizer_tmap_filename = localizer_dataset.tmaps[0] +# one motor contrast map from NeuroVault +motor_images = datasets.fetch_neurovault_motor_task() +stat_img = motor_images.images[0] ############################################################################### # Plotting statistical maps with function `plot_stat_map` @@ -40,19 +39,38 @@ from nilearn import plotting -# Visualizing t-map image on subject specific anatomical image with manual +# Visualizing t-map image on EPI template with manual # positioning of coordinates using cut_coords given as a list -plotting.plot_stat_map(localizer_tmap_filename, bg_img=localizer_anat_filename, +plotting.plot_stat_map(stat_img, threshold=3, title="plot_stat_map", cut_coords=[36, -27, 66]) +############################################################################### +# Making interactive plots with function `view_stat_map` +# ------------------------------------------------------ +# An alternative to :func:`nilearn.plotting.plot_stat_map` is to use +# :func:`nilearn.plotting.view_stat_map` that gives more interactive +# visualizations in a web browser. See :ref:`interactive-stat-map-plotting` +# for more details. + +view = plotting.view_stat_map(stat_img, threshold=3) + +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + +############################################################################## +# In a Jupyter notebook, if ``view`` is the output of a cell, it will +# be displayed below the cell + +view + ############################################################################### # Plotting statistical maps in a glass brain with function `plot_glass_brain` # --------------------------------------------------------------------------- # # Now, the t-map image is mapped on glass brain representation where glass # brain is always a fixed background template -plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain', +plotting.plot_glass_brain(stat_img, title='plot_glass_brain', threshold=3) ############################################################################### diff --git a/examples/01_plotting/plot_dim_plotting.py b/examples/01_plotting/plot_dim_plotting.py index 04b7238f78..afecc379c3 100644 --- a/examples/01_plotting/plot_dim_plotting.py +++ b/examples/01_plotting/plot_dim_plotting.py @@ -10,21 +10,23 @@ This *dim* argument may also be useful for the plot_roi function used to display ROIs on top of a background image. 
+ """ ######################################################################### # Retrieve the data: the localizer dataset with contrast maps -# ----------------------------------------------------------- +# ------------------------------------------------------------ from nilearn import datasets -localizer_dataset = datasets.fetch_localizer_button_task(get_anats=True) -localizer_anat_filename = localizer_dataset.anats[0] -localizer_tmap_filename = localizer_dataset.tmaps[0] - -######################################################################## +localizer_dataset = datasets.fetch_localizer_button_task() +# Contrast map of motor task +localizer_tmap_filename = localizer_dataset.tmap +# Subject specific anatomical image +localizer_anat_filename = localizer_dataset.anat +########################################################################### # Plotting with enhancement of background image with dim=-.5 -# ---------------------------------------------------------- +# -------------------------------------------------------------------------- from nilearn import plotting plotting.plot_stat_map(localizer_tmap_filename, diff --git a/examples/01_plotting/plot_prob_atlas.py b/examples/01_plotting/plot_prob_atlas.py index 2796dc4210..085c552d27 100644 --- a/examples/01_plotting/plot_prob_atlas.py +++ b/examples/01_plotting/plot_prob_atlas.py @@ -40,6 +40,9 @@ # Allen RSN networks allen = datasets.fetch_atlas_allen_2011() +# Pauli subcortical atlas +subcortex = datasets.fetch_atlas_pauli_2017() + # Visualization from nilearn import plotting @@ -52,6 +55,7 @@ 'Smith2009 70 Brainmap': smith.bm70, 'ICBM tissues': (icbm['wm'], icbm['gm'], icbm['csf']), 'Allen2011': allen.rsn28, + 'Pauli2017 Subcortical Atlas': subcortex.maps, } for name, atlas in sorted(atlas_types.items()): @@ -61,4 +65,5 @@ plotting.plot_prob_atlas(smith.bm10, title='Smith2009 10 Brainmap (with' ' colorbar)', colorbar=True) +print('ready') plotting.show() diff --git a/examples/01_plotting/plot_surf_atlas.py b/examples/01_plotting/plot_surf_atlas.py index a5e7a6f757..df2ea43aaf 100644 --- a/examples/01_plotting/plot_surf_atlas.py +++ b/examples/01_plotting/plot_surf_atlas.py @@ -10,8 +10,6 @@ See :ref:`plotting` for more details. -NOTE: Example needs matplotlib version higher than 1.3.1. - References ---------- @@ -35,7 +33,7 @@ # Retrieve fsaverage5 surface dataset for the plotting background. It contains # the surface template as pial and inflated version and a sulcal depth maps # which is used for shading -fsaverage = datasets.fetch_surf_fsaverage5() +fsaverage = datasets.fetch_surf_fsaverage() # The fsaverage dataset contains file names pointing to the file locations print('Fsaverage5 pial surface of left hemisphere is at: %s' % @@ -78,3 +76,23 @@ bg_map=fsaverage['sulc_left'], bg_on_data=True, darkness=.5) plotting.show() + + +############################################################################## +# 3D visualization in a web browser +# --------------------------------- +# An alternative to :func:`nilearn.plotting.plot_surf_roi` is to use +# :func:`nilearn.plotting.view_surf` for more interactive +# visualizations in a web browser. See :ref:`interactive-surface-plotting` for +# more details. 
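The Pauli subcortical atlas added above can also be combined with the coordinate-extraction and 3D-marker helpers introduced in this release. A hedged sketch, not part of the diff; the behaviour of ``find_probabilistic_atlas_cut_coords`` (one cut coordinate per probabilistic map) is assumed from the changelog description::

    from nilearn import datasets, plotting

    subcortex = datasets.fetch_atlas_pauli_2017()

    # Assumed: one (x, y, z) cut coordinate per probabilistic map in the atlas
    coords = plotting.find_probabilistic_atlas_cut_coords(subcortex.maps)

    # Inspect the extracted seed locations in the interactive 3D marker viewer
    view = plotting.view_markers(coords)
    # uncomment this to open the plot in a web browser:
    # view.open_in_browser()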
+ +view = plotting.view_surf(fsaverage.infl_left, parcellation, + cmap='gist_ncar', symmetric_cmap=False) +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + +############################################################################## +# In a Jupyter notebook, if ``view`` is the output of a cell, it will +# be displayed below the cell + +view diff --git a/examples/01_plotting/plot_surf_stat_map.py b/examples/01_plotting/plot_surf_stat_map.py index 7913b77e1d..edd5b809dd 100644 --- a/examples/01_plotting/plot_surf_stat_map.py +++ b/examples/01_plotting/plot_surf_stat_map.py @@ -26,8 +26,6 @@ See :ref:`plotting` for more details on plotting tools. -NOTE: This example needs matplotlib version higher than 1.3.1. - References ---------- @@ -69,7 +67,7 @@ labels = destrieux_atlas['labels'] # Fsaverage5 surface template -fsaverage = datasets.fetch_surf_fsaverage5() +fsaverage = datasets.fetch_surf_fsaverage() # The fsaverage dataset contains file names pointing to # the file locations @@ -126,7 +124,7 @@ ############################################################################### # Display unthresholded stat map with dimmed background plotting.plot_surf_stat_map(fsaverage['pial_left'], stat_map=stat_map, - hemi='left', view='medial', + hemi='left', view='medial', colorbar=True, bg_map=fsaverage['sulc_left'], bg_on_data=True, darkness=.5, title='Correlation map') @@ -134,14 +132,14 @@ # Display unthresholded stat map without background map, transparency is # automatically set to .5, but can also be controlled with the alpha parameter plotting.plot_surf_stat_map(fsaverage['pial_left'], stat_map=stat_map, - hemi='left', view='medial', + hemi='left', view='medial', colorbar=True, title='Plotting without background') ############################################################################### # Many different options are available for plotting, for example thresholding, # or using custom colormaps plotting.plot_surf_stat_map(fsaverage['pial_left'], stat_map=stat_map, - hemi='left', view='medial', + hemi='left', view='medial', colorbar=True, bg_map=fsaverage['sulc_left'], bg_on_data=True, cmap='Spectral', threshold=.5, title='Threshold and colormap') @@ -151,7 +149,7 @@ # creating the figure plotting.plot_surf_stat_map(fsaverage['infl_left'], stat_map=stat_map, hemi='left', bg_map=fsaverage['sulc_left'], - bg_on_data=True, threshold=.6, + bg_on_data=True, threshold=.6, colorbar=True, output_file='plot_surf_stat_map.png') plotting.show() diff --git a/examples/01_plotting/plot_surface_projection_strategies.py b/examples/01_plotting/plot_surface_projection_strategies.py index af740a0a90..06d87f7596 100644 --- a/examples/01_plotting/plot_surface_projection_strategies.py +++ b/examples/01_plotting/plot_surface_projection_strategies.py @@ -25,6 +25,7 @@ from mpl_toolkits.mplot3d import Axes3D from nilearn.surface import surface +from nilearn.plotting import show ###################################################################### @@ -66,4 +67,4 @@ ax.scatter(*sample_points.T, color='r') -plt.show() +show() diff --git a/examples/02_decoding/plot_haxby_anova_svm.py b/examples/02_decoding/plot_haxby_anova_svm.py index 991d135d67..737d8719e3 100644 --- a/examples/02_decoding/plot_haxby_anova_svm.py +++ b/examples/02_decoding/plot_haxby_anova_svm.py @@ -88,15 +88,15 @@ ############################################################################# # Obtain prediction scores via cross validation # ----------------------------------------------- -from sklearn.cross_validation 
import LeaveOneLabelOut, cross_val_score +from sklearn.model_selection import LeaveOneGroupOut, cross_val_score # Define the cross-validation scheme used for validation. -# Here we use a LeaveOneLabelOut cross-validation on the session label +# Here we use a LeaveOneGroupOut cross-validation on the session group # which corresponds to a leave-one-session-out -cv = LeaveOneLabelOut(session) +cv = LeaveOneGroupOut() # Compute the prediction accuracy for the different folds (i.e. session) -cv_scores = cross_val_score(anova_svc, X, conditions, cv=cv) +cv_scores = cross_val_score(anova_svc, X, conditions, cv=cv, groups=session) # Return the corresponding mean prediction accuracy classification_accuracy = cv_scores.mean() diff --git a/examples/02_decoding/plot_haxby_different_estimators.py b/examples/02_decoding/plot_haxby_different_estimators.py index def5b7bf5e..1d228c7284 100644 --- a/examples/02_decoding/plot_haxby_different_estimators.py +++ b/examples/02_decoding/plot_haxby_different_estimators.py @@ -54,28 +54,35 @@ svm = SVC(C=1., kernel="linear") # The logistic regression -from sklearn.linear_model import LogisticRegression, RidgeClassifier, \ - RidgeClassifierCV -logistic = LogisticRegression(C=1., penalty="l1") -logistic_50 = LogisticRegression(C=50., penalty="l1") -logistic_l2 = LogisticRegression(C=1., penalty="l2") +from sklearn.linear_model import (LogisticRegression, + RidgeClassifier, + RidgeClassifierCV, + ) +logistic = LogisticRegression(C=1., penalty="l1", solver='liblinear') +logistic_50 = LogisticRegression(C=50., penalty="l1", solver='liblinear') +logistic_l2 = LogisticRegression(C=1., penalty="l2", solver='liblinear') # Cross-validated versions of these classifiers -from sklearn.grid_search import GridSearchCV +from sklearn.model_selection import GridSearchCV # GridSearchCV is slow, but note that it takes an 'n_jobs' parameter that # can significantly speed up the fitting process on computers with # multiple cores svm_cv = GridSearchCV(SVC(C=1., kernel="linear"), - param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]}, - scoring='f1', n_jobs=1) - -logistic_cv = GridSearchCV(LogisticRegression(C=1., penalty="l1"), - param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]}, - scoring='f1') -logistic_l2_cv = GridSearchCV(LogisticRegression(C=1., penalty="l2"), - param_grid={ - 'C': [.1, .5, 1., 5., 10., 50., 100.]}, - scoring='f1') + param_grid={'C': [.1, 1., 10., 100.]}, + scoring='f1', n_jobs=1, cv=3, iid=False) + +logistic_cv = GridSearchCV( + LogisticRegression(C=1., penalty="l1", solver='liblinear'), + param_grid={'C': [.1, 1., 10., 100.]}, + scoring='f1', cv=3, iid=False, + ) +logistic_l2_cv = GridSearchCV( + LogisticRegression(C=1., penalty="l2", solver='liblinear'), + param_grid={ + 'C': [.1, 1., 10., 100.] 
+ }, + scoring='f1', cv=3, iid=False, + ) # The ridge classifier has a specific 'CV' object that can set it's # parameters faster than using a GridSearchCV @@ -91,8 +98,8 @@ 'log l2': logistic_l2, 'log l2 cv': logistic_l2_cv, 'ridge': ridge, - 'ridge cv': ridge_cv} - + 'ridge cv': ridge_cv + } ############################################################################# # Here we compute prediction scores @@ -100,8 +107,8 @@ # Run time for all these classifiers # Make a data splitting object for cross validation -from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score -cv = LeaveOneLabelOut(session_labels) +from sklearn.model_selection import LeaveOneGroupOut, cross_val_score +cv = LeaveOneGroupOut() import time @@ -118,14 +125,21 @@ classifier, masked_timecourses, classification_target, - cv=cv, scoring="f1") - - print("%10s: %14s -- scores: %1.2f +- %1.2f, time %.2fs" % ( - classifier_name, category, - classifiers_scores[classifier_name][category].mean(), - classifiers_scores[classifier_name][category].std(), - time.time() - t0)) - + cv=cv, + groups=session_labels, + scoring="f1", + ) + + print( + "%10s: %14s -- scores: %1.2f +- %1.2f, time %.2fs" % + ( + classifier_name, + category, + classifiers_scores[classifier_name][category].mean(), + classifiers_scores[classifier_name][category].std(), + time.time() - t0, + ), + ) ############################################################################### # Then we make a rudimentary diagram @@ -148,10 +162,10 @@ plt.xlabel('Visual stimuli category') plt.ylim(ymin=0) plt.legend(loc='lower center', ncol=3) -plt.title('Category-specific classification accuracy for different classifiers') +plt.title( + 'Category-specific classification accuracy for different classifiers') plt.tight_layout() - ############################################################################### # Finally, w plot the face vs house map for the different classifiers diff --git a/examples/02_decoding/plot_haxby_full_analysis.py b/examples/02_decoding/plot_haxby_full_analysis.py index 82685d606a..ec08c6f99a 100644 --- a/examples/02_decoding/plot_haxby_full_analysis.py +++ b/examples/02_decoding/plot_haxby_full_analysis.py @@ -57,8 +57,8 @@ dummy_classifier = DummyClassifier() # Make a data splitting object for cross validation -from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score -cv = LeaveOneLabelOut(session_labels) +from sklearn.model_selection import LeaveOneGroupOut, cross_val_score +cv = LeaveOneGroupOut() mask_names = ['mask_vt', 'mask_face', 'mask_house'] @@ -83,13 +83,19 @@ classifier, masked_timecourses, classification_target, - cv=cv, scoring="roc_auc") + cv=cv, + groups=session_labels, + scoring="roc_auc", + ) mask_chance_scores[mask_name][category] = cross_val_score( dummy_classifier, masked_timecourses, classification_target, - cv=cv, scoring="roc_auc") + cv=cv, + groups=session_labels, + scoring="roc_auc", + ) print("Scores: %1.2f +- %1.2f" % ( mask_scores[mask_name][category].mean(), @@ -101,6 +107,8 @@ # --------------------------------------------------- import numpy as np import matplotlib.pyplot as plt +from nilearn.plotting import show + plt.figure() tick_position = np.arange(len(categories)) @@ -127,4 +135,4 @@ plt.tight_layout() -plt.show() +show() diff --git a/examples/02_decoding/plot_haxby_grid_search.py b/examples/02_decoding/plot_haxby_grid_search.py index 6630bd6996..23e0349c6c 100644 --- a/examples/02_decoding/plot_haxby_grid_search.py +++ b/examples/02_decoding/plot_haxby_grid_search.py @@ -97,8 +97,7 @@ 
anova_svc.fit(X, y) y_pred = anova_svc.predict(X) -from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score -cv = LeaveOneLabelOut(session[session < 10]) +from sklearn.model_selection import cross_val_score k_range = [10, 15, 30, 50, 150, 300, 500, 1000, 1500, 3000, 5000] cv_scores = [] @@ -107,7 +106,7 @@ for k in k_range: feature_selection.k = k cv_scores.append(np.mean( - cross_val_score(anova_svc, X[session < 10], y[session < 10]))) + cross_val_score(anova_svc, X[session < 10], y[session < 10], cv=3))) print("CV score: %.4f" % cv_scores[-1]) anova_svc.fit(X[session < 10], y[session < 10]) @@ -118,14 +117,15 @@ ########################################################################### # Nested cross-validation # ------------------------- -from sklearn.grid_search import GridSearchCV +from sklearn.model_selection import GridSearchCV # We are going to tune the parameter 'k' of the step called 'anova' in # the pipeline. Thus we need to address it as 'anova__k'. # Note that GridSearchCV takes an n_jobs argument that can make it go # much faster -grid = GridSearchCV(anova_svc, param_grid={'anova__k': k_range}, verbose=1) -nested_cv_scores = cross_val_score(grid, X, y) +grid = GridSearchCV(anova_svc, param_grid={'anova__k': k_range}, verbose=1, + cv=3) +nested_cv_scores = cross_val_score(grid, X, y, cv=3) print("Nested CV score: %.4f" % np.mean(nested_cv_scores)) @@ -133,6 +133,8 @@ # Plot the prediction scores using matplotlib # --------------------------------------------- from matplotlib import pyplot as plt +from nilearn.plotting import show + plt.figure(figsize=(6, 4)) plt.plot(cv_scores, label='Cross validation scores') plt.plot(scores_validation, label='Left-out validation data scores') @@ -145,4 +147,4 @@ color='r') plt.legend(loc='best', frameon=False) -plt.show() +show() diff --git a/examples/02_decoding/plot_haxby_multiclass.py b/examples/02_decoding/plot_haxby_multiclass.py index e31b2b0a68..6c47193e69 100644 --- a/examples/02_decoding/plot_haxby_multiclass.py +++ b/examples/02_decoding/plot_haxby_multiclass.py @@ -75,7 +75,7 @@ ############################################################################## # Now we compute cross-validation scores # ---------------------------------------- -from sklearn.cross_validation import cross_val_score +from sklearn.model_selection import cross_val_score cv_scores_ovo = cross_val_score(svc_ovo, X, y, cv=5, verbose=1) @@ -99,7 +99,7 @@ # We fit on the the first 10 sessions and plot a confusion matrix on the # last 2 sessions from sklearn.metrics import confusion_matrix -from nilearn.plotting import plot_matrix +from nilearn.plotting import plot_matrix, show svc_ovo.fit(X[session < 10], y[session < 10]) y_pred_ovo = svc_ovo.predict(X[session >= 10]) @@ -115,4 +115,4 @@ labels=unique_conditions, title='Confusion matrix: One vs All', cmap='hot_r') -plt.show() +show() diff --git a/examples/02_decoding/plot_haxby_searchlight.py b/examples/02_decoding/plot_haxby_searchlight.py index 866b931ba0..29bdd4e23e 100644 --- a/examples/02_decoding/plot_haxby_searchlight.py +++ b/examples/02_decoding/plot_haxby_searchlight.py @@ -69,8 +69,8 @@ # Here we use a KFold cross-validation on the session, which corresponds to # splitting the samples in 4 folds and make 4 runs using each fold as a test # set once and the others as learning sets -from sklearn.cross_validation import KFold -cv = KFold(y.size, n_folds=4) +from sklearn.model_selection import KFold +cv = KFold(n_splits=4) import nilearn.decoding # The radius is the one of the Searchlight 
sphere that will scan the volume diff --git a/examples/02_decoding/plot_haxby_stimuli.py b/examples/02_decoding/plot_haxby_stimuli.py index 6967b354f2..ba63648a27 100644 --- a/examples/02_decoding/plot_haxby_stimuli.py +++ b/examples/02_decoding/plot_haxby_stimuli.py @@ -11,6 +11,7 @@ import matplotlib.pyplot as plt from nilearn import datasets +from nilearn.plotting import show haxby_dataset = datasets.fetch_haxby(subjects=[], fetch_stimuli=True) stimulus_information = haxby_dataset.stimuli @@ -33,4 +34,4 @@ plt.axis("off") plt.suptitle(stim_type) -plt.show() +show() diff --git a/examples/02_decoding/plot_miyawaki_encoding.py b/examples/02_decoding/plot_miyawaki_encoding.py index 96e4471aca..8520076823 100644 --- a/examples/02_decoding/plot_miyawaki_encoding.py +++ b/examples/02_decoding/plot_miyawaki_encoding.py @@ -116,7 +116,7 @@ # activity in this voxel. from sklearn.linear_model import Ridge -from sklearn.cross_validation import KFold +from sklearn.model_selection import KFold ############################################################################## # Using 10-fold cross-validation, we partition the data into 10 'folds'. @@ -126,10 +126,10 @@ from sklearn.metrics import r2_score estimator = Ridge(alpha=100.) -cv = KFold(len(stimuli), 10) +cv = KFold(n_splits=10) scores = [] -for train, test in cv: +for train, test in cv.split(X=stimuli): # we train the Ridge estimator on the training set # and predict the fMRI activity for the test set predictions = Ridge(alpha=100.).fit( diff --git a/examples/02_decoding/plot_miyawaki_reconstruction.py b/examples/02_decoding/plot_miyawaki_reconstruction.py index 221023d5fd..5bd7c84027 100644 --- a/examples/02_decoding/plot_miyawaki_reconstruction.py +++ b/examples/02_decoding/plot_miyawaki_reconstruction.py @@ -252,7 +252,7 @@ def split_multi_scale(y, y_shape): # ground truth from matplotlib import pyplot as plt - +from nilearn.plotting import show for i in range(6): j = 10 * i @@ -274,4 +274,4 @@ def split_multi_scale(y, y_shape): interpolation='nearest') plt.savefig('miyawaki2008_reconstruction_%d' % i) -plt.show() +show() diff --git a/examples/02_decoding/plot_oasis_vbm.py b/examples/02_decoding/plot_oasis_vbm.py index d00793adcf..b8810422d8 100644 --- a/examples/02_decoding/plot_oasis_vbm.py +++ b/examples/02_decoding/plot_oasis_vbm.py @@ -129,7 +129,7 @@ display.title('SVM weights', y=1.2) # Measure accuracy with cross validation -from sklearn.cross_validation import cross_val_score +from sklearn.model_selection import cross_val_score cv_scores = cross_val_score(anova_svr, gm_maps_masked, age) # Return the corresponding mean prediction accuracy diff --git a/examples/02_decoding/plot_oasis_vbm_space_net.py b/examples/02_decoding/plot_oasis_vbm_space_net.py index 4fa4253d2e..2253dfa597 100644 --- a/examples/02_decoding/plot_oasis_vbm_space_net.py +++ b/examples/02_decoding/plot_oasis_vbm_space_net.py @@ -26,7 +26,7 @@ # Split data into training set and test set from sklearn.utils import check_random_state -from sklearn.cross_validation import train_test_split +from sklearn.model_selection import train_test_split rng = check_random_state(42) gm_imgs_train, gm_imgs_test, age_train, age_test = train_test_split( gm_imgs, age, train_size=.6, random_state=rng) @@ -61,7 +61,7 @@ ########################################################################### # Visualize the resulting maps -from nilearn.plotting import plot_stat_map +from nilearn.plotting import plot_stat_map, show # weights map background_img = gm_imgs[0] plot_stat_map(coef_img, 
background_img, title="graph-net weights", @@ -86,4 +86,4 @@ ax2.set_xlabel("subject") plt.legend(loc="best") -plt.show() +show() diff --git a/examples/02_decoding/plot_simulated_data.py b/examples/02_decoding/plot_simulated_data.py index 7e179d1f98..4e954c8418 100644 --- a/examples/02_decoding/plot_simulated_data.py +++ b/examples/02_decoding/plot_simulated_data.py @@ -40,13 +40,14 @@ from sklearn import linear_model, svm from sklearn.utils import check_random_state -from sklearn.cross_validation import KFold +from sklearn.model_selection import KFold from sklearn.feature_selection import f_regression import nibabel from nilearn import decoding import nilearn.masking +from nilearn.plotting import show ############################################################################## @@ -63,8 +64,8 @@ def create_simulation_data(snr=0, n_samples=2 * 100, size=12, random_state=1): w[0:roi_size, -roi_size:, -roi_size:] = -0.6 w[-roi_size:, 0:roi_size:, -roi_size:] = 0.5 w[(size - roi_size) // 2:(size + roi_size) // 2, - (size - roi_size) // 2:(size + roi_size) // 2, - (size - roi_size) // 2:(size + roi_size) // 2] = 0.5 + (size - roi_size) // 2:(size + roi_size) // 2, + (size - roi_size) // 2:(size + roi_size) // 2] = 0.5 w = w.ravel() # Generate smooth background noise XX = generator.randn(n_samples, size, size, size) @@ -159,11 +160,16 @@ def plot_slices(data, title=None): l1_ratio=0.05)), ('ridge_cv', linear_model.RidgeCV(alphas=[100, 10, 1, 0.1], cv=5)), ('svr', svm.SVR(kernel='linear', C=0.001)), - ('searchlight', decoding.SearchLight( - mask_img, process_mask_img=process_mask_img, - radius=2.7, scoring='r2', estimator=svm.SVR(kernel="linear"), - cv=KFold(y_train.size, n_folds=4), - verbose=1, n_jobs=1)) + ('searchlight', decoding.SearchLight(mask_img, + process_mask_img=process_mask_img, + radius=2.7, + scoring='r2', + estimator=svm.SVR(kernel="linear"), + cv=KFold(n_splits=4), + verbose=1, + n_jobs=1, + ) + ) ] ############################################################################### @@ -189,14 +195,14 @@ def plot_slices(data, title=None): coefs = np.reshape(coefs, [size, size, size]) score = estimator.score(X_test, y_test) title = '%s: prediction score %.3f, training time: %.2fs' % ( - estimator.__class__.__name__, score, - elapsed_time) + estimator.__class__.__name__, score, + elapsed_time) else: # Searchlight coefs = estimator.scores_ title = '%s: training time: %.2fs' % ( - estimator.__class__.__name__, - elapsed_time) + estimator.__class__.__name__, + elapsed_time) # We use the plot_slices function provided in the example to # plot the results @@ -211,7 +217,7 @@ def plot_slices(data, title=None): p_values[p_values > 10] = 10 plot_slices(p_values, title="f_regress") -plt.show() +show() ############################################################################### # An exercice to go further @@ -226,5 +232,3 @@ def plot_slices(data, title=None): # slow. 
from sklearn.feature_selection import RFE - - diff --git a/examples/03_connectivity/plot_adhd_spheres.py b/examples/03_connectivity/plot_adhd_spheres.py index 5aac6f4401..29882c5fc3 100644 --- a/examples/03_connectivity/plot_adhd_spheres.py +++ b/examples/03_connectivity/plot_adhd_spheres.py @@ -90,3 +90,23 @@ display_mode='lyrz') plotting.show() + +############################################################################## +# 3D visualization in a web browser +# --------------------------------- +# An alternative to :func:`nilearn.plotting.plot_connectome` is to use +# :func:`nilearn.plotting.view_connectome` that gives more interactive +# visualizations in a web browser. See :ref:`interactive-connectome-plotting` +# for more details. + + +view = plotting.view_connectome(partial_correlation_matrix, dmn_coords) + +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + +############################################################################## +# In a Jupyter notebook, if ``view`` is the output of a cell, it will +# be displayed below the cell + +view diff --git a/examples/03_connectivity/plot_atlas_comparison.py b/examples/03_connectivity/plot_atlas_comparison.py new file mode 100644 index 0000000000..462c28c48d --- /dev/null +++ b/examples/03_connectivity/plot_atlas_comparison.py @@ -0,0 +1,115 @@ +""" +Comparing connectomes on different reference atlases +==================================================== + +This example shows how to turn a parcellation into a connectome for +visualization. This requires choosing centers for each parcel +or network, via :func:`nilearn.plotting.find_parcellation_cut_coords` for +parcellation based on labels and +:func:`nilearn.plotting.find_probabilistic_atlas_cut_coords` for +parcellation based on probabilistic values. + +In the intermediary steps, we make use of +:class:`nilearn.input_data.NiftiLabelsMasker` and +:class:`nilearn.input_data.NiftiMapsMasker` to extract time series from nifti +objects using different parcellation atlases. +The time series of all subjects of the ADHD Dataset are concatenated and +given directly to :class:`nilearn.connectome.ConnectivityMeasure` for +computing parcel-wise correlation matrices for each atlas across all subjects. + +The mean correlation matrix is displayed on a glass brain using the extracted coordinates.
+ +# author: Amadeus Kanaan + +""" + +#################################################################### +# Load atlases +# ------------- +from nilearn import datasets + +yeo = datasets.fetch_atlas_yeo_2011() +print('Yeo atlas nifti image (3D) with 17 parcels and liberal mask is located ' + 'at: %s' % yeo['thick_17']) + +######################################################################### +# Load functional data +# -------------------- +data = datasets.fetch_adhd(n_subjects=10) + +print('Functional nifti images (4D, e.g., one subject) are located at : %r' + % data['func'][0]) +print('Confound csv files (of same subject) are located at : %r' + % data['confounds'][0]) + +########################################################################## +# Extract coordinates on Yeo atlas - parcellations +# ------------------------------------------------ +from nilearn.input_data import NiftiLabelsMasker +from nilearn.connectome import ConnectivityMeasure + +# ConnectivityMeasure from Nilearn uses simple 'correlation' to compute +# connectivity matrices for all subjects in a list +connectome_measure = ConnectivityMeasure(kind='correlation') + +# useful for plotting connectivity interactions on glass brain +from nilearn import plotting + +# create masker to extract functional data within atlas parcels +masker = NiftiLabelsMasker(labels_img=yeo['thick_17'], standardize=True, + memory='nilearn_cache') + +# extract time series from all subjects and concatenate them +time_series = [] +for func, confounds in zip(data.func, data.confounds): + time_series.append(masker.fit_transform(func, confounds=confounds)) + +# calculate correlation matrices across subjects and display +correlation_matrices = connectome_measure.fit_transform(time_series) + +# Mean correlation matrix across 10 subjects can be grabbed like this, +# using connectome measure object +mean_correlation_matrix = connectome_measure.mean_ + +# grab center coordinates for atlas labels +coordinates = plotting.find_parcellation_cut_coords(labels_img=yeo['thick_17']) + +# plot connectome with 80% edge strength in the connectivity +plotting.plot_connectome(mean_correlation_matrix, coordinates, + edge_threshold="80%", + title='Yeo Atlas 17 thick (func)') + +########################################################################## +# Load probabilistic atlases - extracting coordinates on brain maps +# ----------------------------------------------------------------- + +msdl = datasets.fetch_atlas_msdl() + +########################################################################## +# Iterate over fetched atlases to extract coordinates - probabilistic +# ------------------------------------------------------------------- +from nilearn.input_data import NiftiMapsMasker + +# create masker to extract functional data within atlas parcels +masker = NiftiMapsMasker(maps_img=msdl['maps'], standardize=True, + memory='nilearn_cache') + +# extract time series from all subjects and concatenate them +time_series = [] +for func, confounds in zip(data.func, data.confounds): + time_series.append(masker.fit_transform(func, confounds=confounds)) + +# calculate correlation matrices across subjects and display +correlation_matrices = connectome_measure.fit_transform(time_series) + +# Mean correlation matrix across 10 subjects can be grabbed like this, +# using connectome measure object +mean_correlation_matrix = connectome_measure.mean_ + +# grab center coordinates for probabilistic atlas +coordinates =
plotting.find_probabilistic_atlas_cut_coords(maps_img=msdl['maps']) + +# plot connectome with 80% edge strength in the connectivity +plotting.plot_connectome(mean_correlation_matrix, coordinates, + edge_threshold="80%", title='MSDL (probabilistic)') +plotting.show() diff --git a/examples/03_connectivity/plot_compare_resting_state_decomposition.py b/examples/03_connectivity/plot_compare_resting_state_decomposition.py index 67c91d9615..d3f8c93bfe 100644 --- a/examples/03_connectivity/plot_compare_resting_state_decomposition.py +++ b/examples/03_connectivity/plot_compare_resting_state_decomposition.py @@ -43,11 +43,16 @@ ############################################################################### # Dictionary learning # -------------------- +# +# We use as "template" as a strategy to compute the mask, as this leads +# to slightly faster and more reproducible results. However, the images +# need to be in MNI template space dict_learning = DictLearning(n_components=n_components, memory="nilearn_cache", memory_level=2, verbose=1, random_state=0, - n_epochs=1) + n_epochs=1, + mask_strategy='template') ############################################################################### # CanICA # ------ @@ -55,7 +60,8 @@ memory="nilearn_cache", memory_level=2, threshold=3., n_init=1, - verbose=1) + verbose=1, + mask_strategy='template') ############################################################################### # Fit both estimators @@ -84,7 +90,7 @@ from nilearn.image import index_img # Selecting specific maps to display: maps were manually chosen to be similar -indices = {dict_learning: 1, canica: 31} +indices = {dict_learning: 25, canica: 33} # We select relevant cut coordinates for displaying cut_component = index_img(components_imgs[0], indices[dict_learning]) cut_coords = find_xyz_cut_coords(cut_component) diff --git a/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py b/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py index 70eb4126c5..fc77374edd 100644 --- a/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py +++ b/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py @@ -129,9 +129,8 @@ colorbar=True, title=title) # Then find the center of the regions and plot a connectome -from nilearn import image -regions_imgs = image.iter_img(regions_extracted_img) -coords_connectome = [plotting.find_xyz_cut_coords(img) for img in regions_imgs] +regions_img = regions_extracted_img +coords_connectome = plotting.find_probabilistic_atlas_cut_coords(regions_img) plotting.plot_connectome(mean_correlations, coords_connectome, edge_threshold='90%', title=title) @@ -141,6 +140,8 @@ # ---------------------------------------------------- # First, we plot a network of index=4 without region extraction (left plot) +from nilearn import image + img = image.index_img(components_img, 4) coords = plotting.find_xyz_cut_coords(img) display = plotting.plot_stat_map(img, cut_coords=coords, colorbar=False, diff --git a/examples/03_connectivity/plot_group_level_connectivity.py b/examples/03_connectivity/plot_group_level_connectivity.py index 299d52fc10..3f29e7ca28 100644 --- a/examples/03_connectivity/plot_group_level_connectivity.py +++ b/examples/03_connectivity/plot_group_level_connectivity.py @@ -182,26 +182,38 @@ def plot_matrices(matrices, matrix_kind): # We stratify the dataset into homogeneous classes according to phenotypic # and scan site. 
We then split the subjects into 3 folds with the same # proportion of each class as in the whole cohort -from sklearn.cross_validation import StratifiedKFold +from sklearn.model_selection import StratifiedKFold classes = ['{0}{1}'.format(site_name, adhd_label) for site_name, adhd_label in zip(site_names, adhd_labels)] -cv = StratifiedKFold(classes, n_folds=3) - +cv = StratifiedKFold(n_splits=3) ############################################################################### # and use the connectivity coefficients to classify ADHD patients vs controls. + +# Note that in cv.split(X, y), +# providing y is sufficient to generate the splits and +# hence np.zeros(n_samples) may be used as a placeholder for X +# instead of actual training data. from sklearn.svm import LinearSVC -from sklearn.cross_validation import cross_val_score +from sklearn.model_selection import cross_val_score mean_scores = [] for kind in kinds: svc = LinearSVC(random_state=0) - cv_scores = cross_val_score(svc, connectivity_biomarkers[kind], - y=adhd_labels, cv=cv, scoring='accuracy') + cv_scores = cross_val_score(svc, + connectivity_biomarkers[kind], + y=adhd_labels, + cv=cv, + groups=adhd_labels, + scoring='accuracy', + ) mean_scores.append(cv_scores.mean()) ############################################################################### # Finally, we can display the classification scores. + +from nilearn.plotting import show + plt.figure(figsize=(6, 4)) positions = np.arange(len(kinds)) * .1 + .1 plt.barh(positions, mean_scores, align='center', height=.05) @@ -211,4 +223,4 @@ def plot_matrices(matrices, matrix_kind): plt.grid(True) plt.tight_layout() -plt.show() +show() diff --git a/examples/03_connectivity/plot_inverse_covariance_connectome.py b/examples/03_connectivity/plot_inverse_covariance_connectome.py index f6532887a8..b20eef1f01 100644 --- a/examples/03_connectivity/plot_inverse_covariance_connectome.py +++ b/examples/03_connectivity/plot_inverse_covariance_connectome.py @@ -91,3 +91,23 @@ title='Sparse inverse covariance') plotting.show() + +############################################################################## +# 3D visualization in a web browser +# --------------------------------- +# An alternative to :func:`nilearn.plotting.plot_connectome` is to use +# :func:`nilearn.plotting.view_connectome` that gives more interactive +# visualizations in a web browser. See :ref:`interactive-connectome-plotting` +# for more details. 
+ + +view = plotting.view_connectome(-estimator.precision_, coords) + +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + +############################################################################## +# In a Jupyter notebook, if ``view`` is the output of a cell, it will +# be displayed below the cell + +view diff --git a/examples/03_connectivity/plot_multi_subject_connectome.py b/examples/03_connectivity/plot_multi_subject_connectome.py index 282708956b..3da6875919 100644 --- a/examples/03_connectivity/plot_multi_subject_connectome.py +++ b/examples/03_connectivity/plot_multi_subject_connectome.py @@ -92,8 +92,8 @@ def plot_matrices(cov, prec, title, labels): ############################################################################## # Displaying results # ------------------- -atlas_imgs = image.iter_img(msdl_atlas_dataset.maps) -atlas_region_coords = [plotting.find_xyz_cut_coords(img) for img in atlas_imgs] +atlas_img = msdl_atlas_dataset.maps +atlas_region_coords = plotting.find_probabilistic_atlas_cut_coords(atlas_img) labels = msdl_atlas_dataset.labels plotting.plot_connectome(gl.covariance_, diff --git a/examples/03_connectivity/plot_probabilistic_atlas_extraction.py b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py index e170ac0c98..c455a266f9 100644 --- a/examples/03_connectivity/plot_probabilistic_atlas_extraction.py +++ b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py @@ -76,3 +76,23 @@ edge_threshold="80%", colorbar=True) plotting.show() + +############################################################################## +# 3D visualization in a web browser +# --------------------------------- +# An alternative to :func:`nilearn.plotting.plot_connectome` is to use +# :func:`nilearn.plotting.view_connectome` that gives more interactive +# visualizations in a web browser. See :ref:`interactive-connectome-plotting` +# for more details. 
+ + +view = plotting.view_connectome(correlation_matrix, coords, threshold='80%') + +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + +############################################################################## +# In a Jupyter notebook, if ``view`` is the output of a cell, it will +# be displayed below the cell + +view diff --git a/examples/03_connectivity/plot_simulated_connectome.py b/examples/03_connectivity/plot_simulated_connectome.py index 1f192c3aab..385676c08d 100644 --- a/examples/03_connectivity/plot_simulated_connectome.py +++ b/examples/03_connectivity/plot_simulated_connectome.py @@ -11,7 +11,8 @@ # Generate synthetic data -from nilearn._utils.testing import generate_group_sparse_gaussian_graphs +from nilearn._utils.data_gen import generate_group_sparse_gaussian_graphs +from nilearn.plotting import show n_subjects = 20 # number of subjects n_displayed = 3 # number of subjects displayed @@ -73,4 +74,4 @@ vmax=max_precision, colorbar=False) plt.title("graph lasso, all subjects\n$\\alpha=%.2f$" % gl.alpha_) -plt.show() +show() diff --git a/examples/04_manipulating_images/plot_affine_transformation.py b/examples/04_manipulating_images/plot_affine_transformation.py index 7f46273506..f0b10a4dbb 100644 --- a/examples/04_manipulating_images/plot_affine_transformation.py +++ b/examples/04_manipulating_images/plot_affine_transformation.py @@ -105,6 +105,7 @@ ############################################################################# # Finally, visualize import matplotlib.pyplot as plt +from nilearn.plotting import show plt.figure() plt.imshow(image, interpolation="nearest", vmin=0, vmax=vmax) plt.title("The original data in voxel space") @@ -125,4 +126,4 @@ plt.title("Transformed using a 4x4 affine -\n Uses affine anchor " "and estimates bounding box size") -plt.show() +show() diff --git a/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py b/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py index c040f76210..72773ff068 100644 --- a/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py +++ b/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py @@ -5,7 +5,8 @@ This example shows how to extract regions or separate the regions from a statistical map. -We use localizer t-statistic maps from :func:`nilearn.datasets.fetch_localizer_contrasts` +We use localizer t-statistic maps from +:func:`nilearn.datasets.fetch_neurovault_auditory_computation_task` as an input image. The idea is to threshold an image to get foreground objects using a @@ -18,10 +19,8 @@ # utilities from nilearn import datasets -n_subjects = 3 -localizer_path = datasets.fetch_localizer_contrasts( - ['calculation (auditory cue)'], n_subjects=n_subjects, get_tmaps=True) -tmap_filename = localizer_path.tmaps[0] +localizer = datasets.fetch_neurovault_auditory_computation_task() +tmap_filename = localizer.images[0] ################################################################################ # Threshold the t-statistic image by importing threshold function @@ -34,7 +33,7 @@ # Type 2: threshold strategy used will be based on image intensity # Here, threshold value should be within the limits i.e. less than max value. -threshold_value_img = threshold_img(tmap_filename, threshold=4.) 
+threshold_value_img = threshold_img(tmap_filename, threshold=3.0) ################################################################################ # Visualization diff --git a/examples/04_manipulating_images/plot_mask_computation.py b/examples/04_manipulating_images/plot_mask_computation.py index be9ff156ce..b7ec34b609 100644 --- a/examples/04_manipulating_images/plot_mask_computation.py +++ b/examples/04_manipulating_images/plot_mask_computation.py @@ -4,24 +4,33 @@ In this example, the Nifti masker is used to automatically compute a mask. -For data that has already been masked, the default strategy works out of -the box. +* The default strategy is based on the background. -However, for raw EPI, as in resting-state time series, we need to use the -'epi' strategy of the NiftiMasker. +* Another option is to use a template. + +* For raw EPI, as in resting-state time series, we need to use the + 'epi' strategy of the NiftiMasker. In addition, we show here how to tweak the different parameters of the -underlying mask extraction routine +underlying routine that extract masks from EPI :func:`nilearn.masking.compute_epi_mask`. """ -############################################################################### -# From already masked data from nilearn.input_data import NiftiMasker import nilearn.image as image -from nilearn.plotting import plot_roi, show +from nilearn.plotting import plot_roi, plot_epi, show + +############################################################################### +# Computing a mask from the background +############################################################################### +# +# The default strategy to compute a mask, eg in NiftiMasker is to try to +# detect the background. +# +# With data that has already been masked, this will work well, as it lies +# on a homogeneous background # Load Miyawaki dataset from nilearn import datasets @@ -33,20 +42,23 @@ miyawaki_filename = miyawaki_dataset.func[0] miyawaki_mean_img = image.mean_img(miyawaki_filename) - -# This time, we can use the NiftiMasker without changing the default mask -# strategy, as the data has already been masked, and thus lies on a -# homogeneous background - +plot_epi(miyawaki_mean_img, title='Mean EPI image') +############################################################################### +# A NiftiMasker with the default strategy masker = NiftiMasker() masker.fit(miyawaki_filename) +# Plot the generated mask plot_roi(masker.mask_img_, miyawaki_mean_img, title="Mask from already masked data") ############################################################################### -# From raw EPI data +# Computing a mask from raw EPI data +############################################################################### +# +# From raw EPI data, there is no uniform background, and a different +# strategy is necessary # Load ADHD resting-state dataset dataset = datasets.fetch_adhd(n_subjects=1) @@ -58,20 +70,39 @@ # To display the background mean_img = image.mean_img(epi_img) +plot_epi(mean_img, title='Mean EPI image') - +############################################################################### # Simple mask extraction from EPI images # We need to specify an 'epi' mask_strategy, as this is raw EPI data masker = NiftiMasker(mask_strategy='epi') masker.fit(epi_img) plot_roi(masker.mask_img_, mean_img, title='EPI automatic mask') +############################################################################### # Generate mask with strong opening +# +# We can fine-tune the outline of the mask by increasing the 
number of +# opening steps (`opening=10`) using the `mask_args` argument of the +# NiftiMasker. This effectively performs erosion and dilation +# operations on the outer voxel layers of the mask, which can for example +# remove remaining +# skull parts in the image. masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=10)) masker.fit(epi_img) plot_roi(masker.mask_img_, mean_img, title='EPI Mask with strong opening') +############################################################################### # Generate mask with a high lower cutoff +# +# The NiftiMasker calls the nilearn.masking.compute_epi_mask function to +# compute the mask from the EPI. It has two important parameters: +# lower_cutoff and upper_cutoff. These set the grey-value bounds in which +# the masking algorithm will search for its threshold (0 being the +# minimum of the image and 1 the maximum). We will here increase the +# lower cutoff to enforce selection of those voxels that appear as bright +# in the EPI image. + masker = NiftiMasker(mask_strategy='epi', mask_args=dict(upper_cutoff=.9, lower_cutoff=.8, opening=False)) @@ -80,6 +111,22 @@ title='EPI Mask: high lower_cutoff') ############################################################################### +# Computing the mask from the MNI template +############################################################################### +# +# A mask can also be computed from the MNI gray matter template. In this +# case, it is resampled to the target image + +masker = NiftiMasker(mask_strategy='template') +masker.fit(epi_img) +plot_roi(masker.mask_img_, mean_img, + title='Mask from template') + + +############################################################################### +# After mask computation: extracting time series +############################################################################### +# # Extract time series # trended vs detrended diff --git a/examples/04_manipulating_images/plot_negate_image.py b/examples/04_manipulating_images/plot_negate_image.py index e2aa8dfc12..b7166c8652 100644 --- a/examples/04_manipulating_images/plot_negate_image.py +++ b/examples/04_manipulating_images/plot_negate_image.py @@ -11,26 +11,19 @@ ############################################################################### # Retrieve the data: the localizer dataset with contrast maps. -localizer_dataset = datasets.fetch_localizer_contrasts( - ["left vs right button press"], - n_subjects=2, - get_anats=True, - get_tmaps=True) -localizer_anat_filename = localizer_dataset.anats[1] -localizer_tmap_filename = localizer_dataset.tmaps[1] +motor_images = datasets.fetch_neurovault_motor_task() +stat_img = motor_images.images[0] ############################################################################### # Multiply voxel values by -1. 
-negative_stat_img = image.math_img("-img", img=localizer_tmap_filename) +negative_stat_img = image.math_img("-img", img=stat_img) -plotting.plot_stat_map(localizer_tmap_filename, - bg_img=localizer_anat_filename, +plotting.plot_stat_map(stat_img, cut_coords=(36, -27, 66), - threshold=3, title="t-map, dim=-.5", - dim=-.5) + threshold=3, title="t-map", vmax=9 +) plotting.plot_stat_map(negative_stat_img, - bg_img=localizer_anat_filename, cut_coords=(36, -27, 66), - threshold=3, title="Negative t-map, dim=-.5", - dim=-.5) + threshold=3, title="Negative t-map", vmax=9 +) plotting.show() diff --git a/examples/04_manipulating_images/plot_resample_to_template.py b/examples/04_manipulating_images/plot_resample_to_template.py index d044c7548f..0f4866a1f2 100644 --- a/examples/04_manipulating_images/plot_resample_to_template.py +++ b/examples/04_manipulating_images/plot_resample_to_template.py @@ -10,34 +10,32 @@ ############################################################################### # First we load the required datasets using the nilearn datasets module. -from nilearn.datasets import fetch_localizer_button_task +from nilearn.datasets import fetch_neurovault_motor_task from nilearn.datasets import load_mni152_template template = load_mni152_template() -localizer_dataset = fetch_localizer_button_task(get_anats=True) - -localizer_tmap_filename = localizer_dataset.tmaps[0] -localizer_anat_filename = localizer_dataset.anats[0] +motor_images = fetch_neurovault_motor_task() +stat_img = motor_images.images[0] ############################################################################### # Now, the localizer t-map image can be resampled to the MNI template image. from nilearn.image import resample_to_img -resampled_localizer_tmap = resample_to_img(localizer_tmap_filename, template) +resampled_stat_img = resample_to_img(stat_img, template) ############################################################################### # Let's check the shape and affine have been correctly updated. # First load the original t-map in memory: from nilearn.image import load_img -tmap_img = load_img(localizer_dataset.tmaps[0]) +tmap_img = load_img(stat_img) original_shape = tmap_img.shape original_affine = tmap_img.affine -resampled_shape = resampled_localizer_tmap.shape -resampled_affine = resampled_localizer_tmap.affine +resampled_shape = resampled_stat_img.shape +resampled_affine = resampled_stat_img.affine template_img = load_img(template) template_shape = template_img.shape @@ -58,14 +56,14 @@ # Finally, result images are displayed using nilearn plotting module. 
from nilearn import plotting -plotting.plot_stat_map(localizer_tmap_filename, - bg_img=localizer_anat_filename, +plotting.plot_stat_map(stat_img, + bg_img=template, cut_coords=(36, -27, 66), threshold=3, - title="t-map on original anat") -plotting.plot_stat_map(resampled_localizer_tmap, + title="t-map in original resolution") +plotting.plot_stat_map(resampled_stat_img, bg_img=template, cut_coords=(36, -27, 66), threshold=3, - title="Resampled t-map on MNI template anat") + title="Resampled t-map") plotting.show() diff --git a/examples/plot_3d_and_4d_niimg.py b/examples/plot_3d_and_4d_niimg.py index aad1365285..1ff56fac28 100644 --- a/examples/plot_3d_and_4d_niimg.py +++ b/examples/plot_3d_and_4d_niimg.py @@ -16,13 +16,13 @@ print('Datasets are stored in: %r' % datasets.get_data_dirs()) ############################################################################### -# Let's now retrieve a motor contrast from a localizer experiment -tmap_filenames = datasets.fetch_localizer_button_task()['tmaps'] -print(tmap_filenames) +# Let's now retrieve a motor contrast from a Neurovault repository +motor_images = datasets.fetch_neurovault_motor_task() +print(motor_images.images) ############################################################################### -# tmap_filenames is a list of filenames. We need to take the first one -tmap_filename = tmap_filenames[0] +# motor_images is a list of filenames. We need to take the first one +tmap_filename = motor_images.images[0] ############################################################################### @@ -105,4 +105,3 @@ # to break down 4D images into 3D images, and on the other hand # :func:`nilearn.image.concat_imgs` to group a list of 3D images into a 4D # image. - diff --git a/examples/plot_decoding_tutorial.py b/examples/plot_decoding_tutorial.py index da8d51da7d..997b37d7d2 100644 --- a/examples/plot_decoding_tutorial.py +++ b/examples/plot_decoding_tutorial.py @@ -179,11 +179,13 @@ # # We can split the data in train and test set repetitively in a `KFold` # strategy: -from sklearn.cross_validation import KFold +from sklearn.model_selection import KFold -cv = KFold(n=len(fmri_masked), n_folds=5) +cv = KFold(n_splits=5) -for train, test in cv: +# The "cv" object's split method can now accept data and create a +# generator which can yield the splits. +for train, test in cv.split(X=fmri_masked): conditions_masked = conditions.values[train] svc.fit(fmri_masked[train], conditions_masked) prediction = svc.predict(fmri_masked[test]) @@ -195,18 +197,13 @@ # ................................... # # Scikit-learn has tools to perform cross-validation easier: -from sklearn.cross_validation import cross_val_score +from sklearn.model_selection import cross_val_score cv_score = cross_val_score(svc, fmri_masked, conditions) print(cv_score) ########################################################################### # Note that we can speed things up to use all the CPUs of our computer # with the n_jobs parameter. -# -# By default, cross_val_score uses a 3-fold KFold. We can control this by -# passing the "cv" object, here a 5-fold: -cv_score = cross_val_score(svc, fmri_masked, conditions, cv=cv) -print(cv_score) ########################################################################### # The best way to do cross-validation is to respect the structure of @@ -215,15 +212,25 @@ # # The number of the session is stored in the CSV file giving the # behavioral data. We have to apply our session mask, to select only cats -# and faces. 
To leave a session out, we pass it to a -# LeaveOneLabelOut object: +# and faces. session_label = behavioral['chunks'][condition_mask] -from sklearn.cross_validation import LeaveOneLabelOut -cv = LeaveOneLabelOut(session_label) +# By default, cross_val_score uses a 3-fold KFold. We can control this by +# passing the "cv" object, here a 5-fold: cv_score = cross_val_score(svc, fmri_masked, conditions, cv=cv) print(cv_score) +# To leave a session out, pass it to the groups parameter of cross_val_score. +from sklearn.model_selection import LeaveOneGroupOut +cv = LeaveOneGroupOut() +cv_score = cross_val_score(svc, + fmri_masked, + conditions, + cv=cv, + groups=session_label, + ) +print(cv_score) + ########################################################################### # Inspecting the model weights diff --git a/examples/plot_nilearn_101.py b/examples/plot_nilearn_101.py index bef4b551cc..5d39263a29 100644 --- a/examples/plot_nilearn_101.py +++ b/examples/plot_nilearn_101.py @@ -77,4 +77,3 @@ # To recap, all the nilearn tools can take data as filenames or in-memory # objects, and return brain volumes as in-memory objects. These can be # passed on to other nilearn tools, or saved to disk. - diff --git a/nilearn/__init__.py b/nilearn/__init__.py index a830bf17e7..0c59990a94 100644 --- a/nilearn/__init__.py +++ b/nilearn/__init__.py @@ -33,10 +33,24 @@ """ import gzip +import sys +import warnings + from distutils.version import LooseVersion from .version import _check_module_dependencies, __version__ + +def _py2_deprecation_warning(): + warnings.simplefilter('once') + py2_warning = ('Python2 support is deprecated and will be removed in ' + 'a future release. Consider switching to Python3.') + if sys.version_info.major == 2: + warnings.warn(message=py2_warning, + category=DeprecationWarning, + stacklevel=3, + ) + _check_module_dependencies() # Temporary work around to address formatting issues in doc tests @@ -73,3 +87,6 @@ __all__ = ['datasets', 'decoding', 'decomposition', 'connectome', 'image', 'input_data', 'masking', 'mass_univariate', 'plotting', 'region', 'signal', 'surface', 'parcellations', '__version__'] + + +_py2_deprecation_warning() diff --git a/nilearn/_utils/cache_mixin.py b/nilearn/_utils/cache_mixin.py index de5b04e296..30bde14440 100644 --- a/nilearn/_utils/cache_mixin.py +++ b/nilearn/_utils/cache_mixin.py @@ -89,7 +89,18 @@ def _safe_cache(memory, func, **kwargs): """ A wrapper for mem.cache that flushes the cache if the version number of nibabel has changed. """ - cachedir = memory.cachedir + ''' Workaround for + https://github.com/scikit-learn-contrib/imbalanced-learn/issues/482 + joblib throws a spurious warning with newer scikit-learn. + This code uses the recommended method first and the deprecated one + if that fails, ensuring th warning is not generated in any case. + ''' + try: + cachedir = os.path.join(memory.location, 'joblib') + except AttributeError: + cachedir = memory.cachedir + except TypeError: + cachedir = None if cachedir is None or cachedir in __CACHE_CHECKED: return memory.cache(func, **kwargs) diff --git a/nilearn/_utils/data_gen.py b/nilearn/_utils/data_gen.py new file mode 100644 index 0000000000..7677936208 --- /dev/null +++ b/nilearn/_utils/data_gen.py @@ -0,0 +1,403 @@ +""" +Data generation utilities +""" + +import numpy as np +import scipy.signal + +from sklearn.utils import check_random_state +import scipy.linalg +import nibabel + +from .. import masking +from . 
import logger + + +def generate_timeseries(n_instants, n_features, + rand_gen=None): + """Generate some random timeseries. """ + if rand_gen is None: + rand_gen = np.random.RandomState(0) + # TODO: add an "order" keyword + return rand_gen.randn(n_instants, n_features) + + +def generate_regions_ts(n_features, n_regions, + overlap=0, + rand_gen=None, + window="boxcar"): + """Generate some regions as timeseries. + + Parameters + ---------- + overlap: int + Number of overlapping voxels between two regions (more or less) + window: str + Name of a window in scipy.signal. e.g. "hamming". + + Returns + ------- + regions: numpy.ndarray + regions, nepresented as signals. + shape (n_features, n_regions) + """ + + if rand_gen is None: + rand_gen = np.random.RandomState(0) + if window is None: + window = "boxcar" + + assert(n_features > n_regions) + + # Compute region boundaries indices. + # Start at 1 to avoid getting an empty region + boundaries = np.zeros(n_regions + 1) + boundaries[-1] = n_features + boundaries[1:-1] = rand_gen.permutation(np.arange(1, n_features) + )[:n_regions - 1] + boundaries.sort() + + regions = np.zeros((n_regions, n_features), order="C") + overlap_end = int((overlap + 1) / 2.) + overlap_start = int(overlap / 2.) + for n in range(len(boundaries) - 1): + start = int(max(0, boundaries[n] - overlap_start)) + end = int(min(n_features, boundaries[n + 1] + overlap_end)) + win = scipy.signal.get_window(window, end - start) + win /= win.mean() # unity mean + regions[n, start:end] = win + + return regions + + +def generate_maps(shape, n_regions, overlap=0, border=1, + window="boxcar", rand_gen=None, affine=np.eye(4)): + """Generate a 4D volume containing several maps. + Parameters + ---------- + n_regions: int + number of regions to generate + + overlap: int + approximate number of voxels common to two neighboring regions + + window: str + name of a window in scipy.signal. Used to get non-uniform regions. + + border: int + number of background voxels on each side of the 3D volumes. + + Returns + ------- + maps: nibabel.Nifti1Image + 4D array, containing maps. + """ + + mask = np.zeros(shape, dtype=np.int8) + mask[border:-border, border:-border, border:-border] = 1 + ts = generate_regions_ts(mask.sum(), n_regions, overlap=overlap, + rand_gen=rand_gen, window=window) + mask_img = nibabel.Nifti1Image(mask, affine) + return masking.unmask(ts, mask_img), mask_img + + +def generate_labeled_regions(shape, n_regions, rand_gen=None, labels=None, + affine=np.eye(4), dtype=np.int): + """Generate a 3D volume with labeled regions. + + Parameters + ---------- + shape: tuple + shape of returned array + + n_regions: int + number of regions to generate. By default (if "labels" is None), + add a background with value zero. + + labels: iterable + labels to use for each zone. If provided, n_regions is unused. + + rand_gen: numpy.random.RandomState + random generator to use for generation. + + affine: numpy.ndarray + affine of returned image + + Returns + ------- + regions: nibabel.Nifti1Image + data has shape "shape", containing region labels. 
+ """ + n_voxels = shape[0] * shape[1] * shape[2] + if labels is None: + labels = range(0, n_regions + 1) + n_regions += 1 + else: + n_regions = len(labels) + + regions = generate_regions_ts(n_voxels, n_regions, rand_gen=rand_gen) + # replace weights with labels + for n, row in zip(labels, regions): + row[row > 0] = n + data = np.zeros(shape, dtype=dtype) + data[np.ones(shape, dtype=np.bool)] = regions.sum(axis=0).T + return nibabel.Nifti1Image(data, affine) + + +def generate_labeled_regions_large(shape, n_regions, rand_gen=None, + affine=np.eye(4)): + """Similar to generate_labeled_regions, but suitable for a large number of + regions. + + See generate_labeled_regions for details. + """ + if rand_gen is None: + rand_gen = np.random.RandomState(0) + data = rand_gen.randint(n_regions + 1, size=shape) + if len(np.unique(data)) != n_regions + 1: + raise ValueError("Some labels are missing. Maybe shape is too small.") + return nibabel.Nifti1Image(data, affine) + + +def generate_fake_fmri(shape=(10, 11, 12), length=17, kind="noise", + affine=np.eye(4), n_blocks=None, block_size=None, + block_type='classification', + rand_gen=np.random.RandomState(0)): + """Generate a signal which can be used for testing. + + The return value is a 4D array, representing 3D volumes along time. + Only the voxels in the center are non-zero, to mimic the presence of + brain voxels in real signals. Setting n_blocks to an integer generates + condition blocks, the remaining of the timeseries corresponding + to 'rest' or 'baseline' condition. + + Parameters + ---------- + shape: tuple, optional + Shape of 3D volume + + length: int, optional + Number of time instants + + kind: string, optional + Kind of signal used as timeseries. + "noise": uniformly sampled values in [0..255] + "step": 0.5 for the first half then 1. + + affine: numpy.ndarray + Affine of returned images + + n_blocks: int or None + Number of condition blocks. + + block_size: int or None + Number of timepoints in a block. Used only if n_blocks is not + None. Defaults to 3 if n_blocks is not None. + + block_type: str + Defines if the returned target should be used for + 'classification' or 'regression'. + + Returns + ------- + fmri: nibabel.Nifti1Image + fake fmri signal. + shape: shape + (length,) + + mask: nibabel.Nifti1Image + mask giving non-zero voxels + + target: numpy.ndarray + Classification or regression target. Shape of number of + time points (length). 
Returned only if n_blocks is not None + """ + full_shape = shape + (length, ) + fmri = np.zeros(full_shape) + # Fill central voxels timeseries with random signals + width = [s // 2 for s in shape] + shift = [s // 4 for s in shape] + + if kind == "noise": + signals = rand_gen.randint(256, size=(width + [length])) + elif kind == "step": + signals = np.ones(width + [length]) + signals[..., :length // 2] = 0.5 + else: + raise ValueError("Unhandled value for parameter 'kind'") + + fmri[shift[0]:shift[0] + width[0], + shift[1]:shift[1] + width[1], + shift[2]:shift[2] + width[2], + :] = signals + + mask = np.zeros(shape) + mask[shift[0]:shift[0] + width[0], + shift[1]:shift[1] + width[1], + shift[2]:shift[2] + width[2]] = 1 + + if n_blocks is None: + return (nibabel.Nifti1Image(fmri, affine), + nibabel.Nifti1Image(mask, affine)) + + block_size = 3 if block_size is None else block_size + flat_fmri = fmri[mask.astype(np.bool)] + flat_fmri /= np.abs(flat_fmri).max() + target = np.zeros(length, dtype=np.int) + rest_max_size = (length - (n_blocks * block_size)) // n_blocks + if rest_max_size < 0: + raise ValueError( + '%s is too small ' + 'to put %s blocks of size %s' % ( + length, n_blocks, block_size)) + t_start = 0 + if rest_max_size > 0: + t_start = rand_gen.random_integers(0, rest_max_size, 1)[0] + for block in range(n_blocks): + if block_type == 'classification': + # Select a random voxel and add some signal to the background + voxel_idx = rand_gen.randint(0, flat_fmri.shape[0], 1)[0] + trials_effect = (rand_gen.random_sample(block_size) + 1) * 3. + else: + # Select the voxel in the image center and add some signal + # that increases with each block + voxel_idx = flat_fmri.shape[0] // 2 + trials_effect = ( + rand_gen.random_sample(block_size) + 1) * block + t_rest = 0 + if rest_max_size > 0: + t_rest = rand_gen.random_integers(0, rest_max_size, 1)[0] + flat_fmri[voxel_idx, t_start:t_start + block_size] += trials_effect + target[t_start:t_start + block_size] = block + 1 + t_start += t_rest + block_size + target = target if block_type == 'classification' \ + else target.astype(np.float) + fmri = np.zeros(fmri.shape) + fmri[mask.astype(np.bool)] = flat_fmri + return (nibabel.Nifti1Image(fmri, affine), + nibabel.Nifti1Image(mask, affine), target) + + +def generate_signals_from_precisions(precisions, + min_n_samples=50, max_n_samples=100, + random_state=0): + """Generate timeseries according to some given precision matrices. + + Signals all have zero mean. + + Parameters + ---------- + precisions: list of numpy.ndarray + list of precision matrices. Every matrix must be square (with the same + size) and positive definite. The output of + generate_group_sparse_gaussian_graphs() can be used here. + + min_samples, max_samples: int + the number of samples drawn for each timeseries is taken at random + between these two numbers. + + Returns + ------- + signals: list of numpy.ndarray + output signals. signals[n] corresponds to precisions[n], and has shape + (sample number, precisions[n].shape[0]). 
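+
+    Examples
+    --------
+    A minimal illustrative sketch (the identity precision matrices below are
+    arbitrary and only serve to make the call self-contained)::
+
+        import numpy as np
+        precisions = [np.eye(4), np.eye(4)]
+        signals = generate_signals_from_precisions(precisions, random_state=0)
+        # len(signals) == 2; each signals[n] has shape (n_samples, 4), with
+        # n_samples drawn between min_n_samples and max_n_samples.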
+ """ + random_state = check_random_state(random_state) + + signals = [] + n_samples = random_state.randint(min_n_samples, high=max_n_samples, + size=len(precisions)) + + mean = np.zeros(precisions[0].shape[0]) + for n, prec in zip(n_samples, precisions): + signals.append(random_state.multivariate_normal(mean, + np.linalg.inv(prec), + (n,))) + return signals + + +def generate_group_sparse_gaussian_graphs( + n_subjects=5, n_features=30, min_n_samples=30, max_n_samples=50, + density=0.1, random_state=0, verbose=0): + """Generate signals drawn from a sparse Gaussian graphical model. + + Parameters + ---------- + n_subjects : int, optional + number of subjects + + n_features : int, optional + number of signals per subject to generate + + density : float, optional + density of edges in graph topology + + min_n_samples, max_n_samples : int, optional + Each subject have a different number of samples, between these two + numbers. All signals for a given subject have the same number of + samples. + + random_state : int or numpy.random.RandomState instance, optional + random number generator, or seed. + + verbose: int, optional + verbosity level (0 means no message). + + Returns + ------- + subjects : list of numpy.ndarray, shape for each (n_samples, n_features) + subjects[n] is the signals for subject n. They are provided as a numpy + len(subjects) = n_subjects. n_samples varies according to the subject. + + precisions : list of numpy.ndarray + precision matrices. + + topology : numpy.ndarray + binary array giving the graph topology used for generating covariances + and signals. + """ + + random_state = check_random_state(random_state) + # Generate topology (upper triangular binary matrix, with zeros on the + # diagonal) + topology = np.empty((n_features, n_features)) + topology[:, :] = np.triu(( + random_state.randint(0, high=int(1. / density), + size=n_features * n_features) + ).reshape(n_features, n_features) == 0, k=1) + + # Generate edges weights on topology + precisions = [] + mask = topology > 0 + for _ in range(n_subjects): + + # See also sklearn.datasets.samples_generator.make_sparse_spd_matrix + prec = topology.copy() + prec[mask] = random_state.uniform(low=.1, high=.8, size=(mask.sum())) + prec += np.eye(prec.shape[0]) + prec = np.dot(prec.T, prec) + + # Assert precision matrix is spd + np.testing.assert_almost_equal(prec, prec.T) + eigenvalues = np.linalg.eigvalsh(prec) + if eigenvalues.min() < 0: + raise ValueError("Failed generating a positive definite precision " + "matrix. Decreasing n_features can help solving " + "this problem.") + precisions.append(prec) + + # Returns the topology matrix of precision matrices. + topology += np.eye(*topology.shape) + topology = np.dot(topology.T, topology) + topology = topology > 0 + assert(np.all(topology == topology.T)) + logger.log("Sparsity: {0:f}".format( + 1. 
* topology.sum() / (topology.shape[0] ** 2)), + verbose=verbose) + + # Generate temporal signals + signals = generate_signals_from_precisions(precisions, + min_n_samples=min_n_samples, + max_n_samples=max_n_samples, + random_state=random_state) + return signals, precisions, topology + diff --git a/nilearn/_utils/exceptions.py b/nilearn/_utils/exceptions.py index 74b20d7ea5..ab47a7b1f0 100644 --- a/nilearn/_utils/exceptions.py +++ b/nilearn/_utils/exceptions.py @@ -1,3 +1,10 @@ +try: + from numpy import VisibleDeprecationWarning +except ImportError: + class VisibleDeprecationWarning(UserWarning): + pass + + AuthorizedException = ( BufferError, ArithmeticError, diff --git a/nilearn/_utils/extmath.py b/nilearn/_utils/extmath.py index 8543e93393..6cbc8c6daf 100644 --- a/nilearn/_utils/extmath.py +++ b/nilearn/_utils/extmath.py @@ -6,11 +6,7 @@ import numpy as np -try: - # partition is available only in numpy >= 1.8.0 - from numpy import partition -except ImportError: - partition = None +from numpy import partition def fast_abs_percentile(data, percentile=80): diff --git a/nilearn/_utils/fixes/__init__.py b/nilearn/_utils/fixes/__init__.py deleted file mode 100644 index 9db421a595..0000000000 --- a/nilearn/_utils/fixes/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -from distutils.version import LooseVersion -import sklearn - - -try: - if LooseVersion(sklearn.__version__) >= LooseVersion('0.18'): - from sklearn.model_selection import check_cv - from sklearn.model_selection import cross_val_score - # 0.18 > scikit-learn >= 0.16 - else: - from sklearn.cross_validation import check_cv - from sklearn.cross_validation import cross_val_score -except ImportError: - # scikit-learn < 0.16 - from sklearn.cross_validation import _check_cv as check_cv - - -try: - from sklearn.utils import check_X_y - from sklearn.utils import check_is_fitted -except ImportError: - # scikit-learn < 0.16 - from .sklearn_validation import check_X_y - from .sklearn_validation import check_is_fitted - -if LooseVersion(sklearn.__version__) >= LooseVersion('0.18'): - from sklearn.linear_model.base import _preprocess_data as center_data -else: - from sklearn.linear_model.base import center_data - -__all__ = ['check_X_y', 'check_is_fitted', 'check_cv', 'cross_val_score', - 'center_data'] diff --git a/nilearn/_utils/fixes/matplotlib_backports.py b/nilearn/_utils/fixes/matplotlib_backports.py deleted file mode 100644 index 6eacaa956d..0000000000 --- a/nilearn/_utils/fixes/matplotlib_backports.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Backports for matplotlib compatibility across versions""" - - -def cbar_outline_get_xy(cbar_outline): - """In the matplotlib versions >= 1.4.0, ColorbarBase.outline is a - Polygon(Patch) object instead of a Line2D(Line) object. This entails - different getters and setters. - - Change specifically after commit 48f594c2e2b05839ea394040b06196f39d9fbfba, - entitled - "changed colorbar outline from a Line2D object to a Polygon object" - from August 28th, 2013. - - This function unifies getters and setters of ColorbarBase outline xy - coordinates.""" - - if hasattr(cbar_outline, "get_xy"): - # loose version >= 1.4.x - return cbar_outline.get_xy() - else: - return cbar_outline.get_xydata() - - -def cbar_outline_set_xy(cbar_outline, xy): - """Setter for ColorbarBase.outline xy coordinates. - See cbar_outline_get_xy for more information. 
- """ - - if hasattr(cbar_outline, "set_xy"): - # loose version >= 1.4.x - return cbar_outline.set_xy(xy) - else: - cbar_outline.set_xdata(xy[:, 0]) - cbar_outline.set_ydata(xy[:, 1]) diff --git a/nilearn/_utils/fixes/sklearn_validation.py b/nilearn/_utils/fixes/sklearn_validation.py deleted file mode 100644 index a3a02789f3..0000000000 --- a/nilearn/_utils/fixes/sklearn_validation.py +++ /dev/null @@ -1,401 +0,0 @@ -# scikit-learn v0.17 -# file: sklearn.utils.validation - -import numpy as np -import warnings -import scipy.sparse as sp - - -class NotFittedError(ValueError, AttributeError): - """Exception class to raise if estimator is used before fitting - This class inherits from both ValueError and AttributeError to help with - exception handling and backward compatibility. - """ - - -def check_array(array, accept_sparse=None, dtype="numeric", order=None, - copy=False, force_all_finite=True, ensure_2d=True, - allow_nd=False, ensure_min_samples=1, ensure_min_features=1): - """Input validation on an array, list, sparse matrix or similar. - By default, the input is converted to an at least 2nd numpy array. - If the dtype of the array is object, attempt converting to float, - raising on failure. - Parameters - ---------- - array : object - Input object to check / convert. - accept_sparse : string, list of string or None (default=None) - String[s] representing allowed sparse matrix formats, such as 'csc', - 'csr', etc. None means that sparse matrix input will raise an error. - If the input is sparse but not in the allowed format, it will be - converted to the first listed format. - dtype : string, type or None (default="numeric") - Data type of result. If None, the dtype of the input is preserved. - If "numeric", dtype is preserved unless array.dtype is object. - order : 'F', 'C' or None (default=None) - Whether an array will be forced to be fortran or c-style. - copy : boolean (default=False) - Whether a forced copy will be triggered. If copy=False, a copy might - be triggered by a conversion. - force_all_finite : boolean (default=True) - Whether to raise an error on np.inf and np.nan in X. - ensure_2d : boolean (default=True) - Whether to make X at least 2d. - allow_nd : boolean (default=False) - Whether to allow X.ndim > 2. - ensure_min_samples : int (default=1) - Make sure that the array has a minimum number of samples in its first - axis (rows for a 2D array). Setting to 0 disables this check. - ensure_min_features : int (default=1) - Make sure that the 2D array has some minimum number of features - (columns). The default value of 1 rejects empty datasets. - This check is only enforced when the input data has effectively 2 - dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 - disables this check. - Returns - ------- - X_converted : object - The converted and validated X. - """ - if isinstance(accept_sparse, str): - accept_sparse = [accept_sparse] - - # store whether originally we wanted numeric dtype - dtype_numeric = dtype == "numeric" - - if sp.issparse(array): - if dtype_numeric: - dtype = None - array = _ensure_sparse_format(array, accept_sparse, dtype, order, - copy, force_all_finite) - else: - if ensure_2d: - array = np.atleast_2d(array) - if dtype_numeric: - if hasattr(array, "dtype") and getattr(array.dtype, - "kind", None) == "O": - # if input is object, convert to float. 
- dtype = np.float64 - else: - dtype = None - array = np.array(array, dtype=dtype, order=order, copy=copy) - # make sure we actually converted to numeric: - if dtype_numeric and array.dtype.kind == "O": - array = array.astype(np.float64) - if not allow_nd and array.ndim >= 3: - raise ValueError("Found array with dim %d. Expected <= 2" % - array.ndim) - if force_all_finite: - _assert_all_finite(array) - - shape_repr = _shape_repr(array.shape) - if ensure_min_samples > 0: - n_samples = _num_samples(array) - if n_samples < ensure_min_samples: - raise ValueError("Found array with %d sample(s) (shape=%s) while a" - " minimum of %d is required." - % (n_samples, shape_repr, ensure_min_samples)) - - if ensure_min_features > 0 and array.ndim == 2: - n_features = array.shape[1] - if n_features < ensure_min_features: - raise ValueError("Found array with %d feature(s) (shape=%s) while" - " a minimum of %d is required." - % (n_features, shape_repr, ensure_min_features)) - return array - - -class DataConversionWarning(UserWarning): - """Warning used to notify implicit data conversions happening in the code. - - This warning occurs when some input data needs to be converted or - interpreted in a way that may not match the user's expectations. - - For example, this warning may occur when the user - - passes an integer array to a function which expects float input and - will convert the input - - requests a non-copying operation, but a copy is required to meet the - implementation's data-type expectations; - - passes an input whose - shape can be interpreted ambiguously. - """ - - -def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, - copy=False, force_all_finite=True, ensure_2d=True, - allow_nd=False, multi_output=False, ensure_min_samples=1, - ensure_min_features=1, y_numeric=False, - warn_on_dtype=False, estimator=None): - """Input validation for standard estimators. - Checks X and y for consistent length, enforces X 2d and y 1d. - Standard input checks are only applied to y, such as checking that y - does not have np.nan or np.inf targets. For multi-label y, set - multi_output=True to allow 2d and sparse y. If the dtype of X is - object, attempt converting to float, raising on failure. - Parameters - ---------- - X : nd-array, list or sparse matrix - Input data. - y : nd-array, list or sparse matrix - Labels. - accept_sparse : string, list of string or None (default=None) - String[s] representing allowed sparse matrix formats, such as 'csc', - 'csr', etc. None means that sparse matrix input will raise an error. - If the input is sparse but not in the allowed format, it will be - converted to the first listed format. - dtype : string, type, list of types or None (default="numeric") - Data type of result. If None, the dtype of the input is preserved. - If "numeric", dtype is preserved unless array.dtype is object. - If dtype is a list of types, conversion on the first type is only - performed if the dtype of the input is not in the list. - order : 'F', 'C' or None (default=None) - Whether an array will be forced to be fortran or c-style. - copy : boolean (default=False) - Whether a forced copy will be triggered. If copy=False, a copy might - be triggered by a conversion. - force_all_finite : boolean (default=True) - Whether to raise an error on np.inf and np.nan in X. This parameter - does not influence whether y can have np.inf or np.nan values. - ensure_2d : boolean (default=True) - Whether to make X at least 2d. - allow_nd : boolean (default=False) - Whether to allow X.ndim > 2. 
- multi_output : boolean (default=False) - Whether to allow 2-d y (array or sparse matrix). If false, y will be - validated as a vector. y cannot have np.nan or np.inf values if - multi_output=True. - ensure_min_samples : int (default=1) - Make sure that X has a minimum number of samples in its first - axis (rows for a 2D array). - ensure_min_features : int (default=1) - Make sure that the 2D array has some minimum number of features - (columns). The default value of 1 rejects empty datasets. - This check is only enforced when X has effectively 2 dimensions or - is originally 1D and ``ensure_2d`` is True. Setting to 0 disables - this check. - y_numeric : boolean (default=False) - Whether to ensure that y has a numeric type. If dtype of y is object, - it is converted to float64. Should only be used for regression - algorithms. - warn_on_dtype : boolean (default=False) - Raise DataConversionWarning if the dtype of the input data structure - does not match the requested dtype, causing a memory copy. - estimator : str or estimator instance (default=None) - If passed, include the name of the estimator in warning messages. - Returns - ------- - X_converted : object - The converted and validated X. - y_converted : object - The converted and validated y. - """ - - X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite, - ensure_2d, allow_nd, ensure_min_samples, - ensure_min_features) - - if multi_output: - y = check_array(y, 'csr', dtype=None, ensure_2d=False) - else: - y = column_or_1d(y, warn=True) - _assert_all_finite(y) - if y_numeric and y.dtype.kind == 'O': - y = y.astype(np.float64) - - check_consistent_length(X, y) - - return X, y - - -def column_or_1d(y, warn=False): - """ Ravel column or 1d numpy array, else raises an error - Parameters - ---------- - y : array-like - warn : boolean, default False - To control display of warnings. - Returns - ------- - y : array - """ - shape = np.shape(y) - if len(shape) == 1: - return np.ravel(y) - if len(shape) == 2 and shape[1] == 1: - if warn: - warnings.warn("A column-vector y was passed when a 1d array was" - " expected. Please change the shape of y to " - "(n_samples, ), for example using ravel().", - DataConversionWarning, stacklevel=2) - return np.ravel(y) - - raise ValueError("bad input shape {0}".format(shape)) - - -def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy, - force_all_finite): - """Convert a sparse matrix to a given format. - Checks the sparse format of spmatrix and converts if necessary. - Parameters - ---------- - spmatrix : scipy sparse matrix - Input to validate and convert. - accept_sparse : string, list of string or None (default=None) - String[s] representing allowed sparse matrix formats ('csc', - 'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse - matrix input will raise an error. If the input is sparse but not in - the allowed format, it will be converted to the first listed format. - dtype : string, type or None (default=none) - Data type of result. If None, the dtype of the input is preserved. - order : 'F', 'C' or None (default=None) - Whether an array will be forced to be fortran or c-style. - copy : boolean (default=False) - Whether a forced copy will be triggered. If copy=False, a copy might - be triggered by a conversion. - force_all_finite : boolean (default=True) - Whether to raise an error on np.inf and np.nan in X. - Returns - ------- - spmatrix_converted : scipy sparse matrix. - Matrix that is ensured to have an allowed type. 
- """ - if accept_sparse is None: - raise TypeError('A sparse matrix was passed, but dense ' - 'data is required. Use X.toarray() to ' - 'convert to a dense numpy array.') - sparse_type = spmatrix.format - if dtype is None: - dtype = spmatrix.dtype - if sparse_type in accept_sparse: - # correct type - if dtype == spmatrix.dtype: - # correct dtype - if copy: - spmatrix = spmatrix.copy() - else: - # convert dtype - spmatrix = spmatrix.astype(dtype) - else: - # create new - spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype) - if force_all_finite: - if not hasattr(spmatrix, "data"): - warnings.warn("Can't check %s sparse matrix for nan or inf." - % spmatrix.format) - else: - _assert_all_finite(spmatrix.data) - if hasattr(spmatrix, "data"): - spmatrix.data = np.array(spmatrix.data, copy=False, order=order) - return spmatrix - - -def check_consistent_length(*arrays): - """Check that all arrays have consistent first dimensions. - Checks whether all objects in arrays have the same shape or length. - Parameters - ---------- - *arrays : list or tuple of input objects. - Objects that will be checked for consistent length. - """ - - uniques = np.unique([_num_samples(X) for X in arrays if X is not None]) - if len(uniques) > 1: - raise ValueError("Found arrays with inconsistent numbers of samples: " - "%s" % str(uniques)) - - -def _assert_all_finite(X): - """Like assert_all_finite, but only for ndarray.""" - X = np.asanyarray(X) - # First try an O(n) time, O(1) space solution for the common case that - # everything is finite; fall back to O(n) space np.isfinite to prevent - # false positives from overflow in sum method. - if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum()) - and not np.isfinite(X).all()): - raise ValueError("Input contains NaN, infinity" - " or a value too large for %r." % X.dtype) - - -def _shape_repr(shape): - """Return a platform independent reprensentation of an array shape - Under Python 2, the `long` type introduces an 'L' suffix when using the - default %r format for tuples of integers (typically used to store the shape - of an array). - Under Windows 64 bit (and Python 2), the `long` type is used by default - in numpy shapes even when the integer dimensions are well below 32 bit. - The platform specific type causes string messages or doctests to change - from one platform to another which is not desirable. - Under Python 3, there is no more `long` type so the `L` suffix is never - introduced in string representation. - >>> _shape_repr((1, 2)) - '(1, 2)' - >>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2 - >>> _shape_repr((one, 2 * one)) - '(1, 2)' - >>> _shape_repr((1,)) - '(1,)' - >>> _shape_repr(()) - '()' - """ - if len(shape) == 0: - return "()" - joined = ", ".join("%d" % e for e in shape) - if len(shape) == 1: - # special notation for singleton tuples - joined += ',' - return "(%s)" % joined - - -def _num_samples(x): - """Return number of samples in array-like x.""" - if hasattr(x, 'fit'): - # Don't get num_samples from an ensembles length! - raise TypeError('Expected sequence or array-like, got ' - 'estimator %s' % x) - if not hasattr(x, '__len__') and not hasattr(x, 'shape'): - if hasattr(x, '__array__'): - x = np.asarray(x) - else: - raise TypeError("Expected sequence or array-like, got %s" % - type(x)) - if hasattr(x, 'shape'): - if len(x.shape) == 0: - raise TypeError("Singleton array %r cannot be considered" - " a valid collection." 
% x) - return x.shape[0] - else: - return len(x) - - -def check_is_fitted(estimator, attributes, msg=None, all_or_any=all): - """Perform is_fitted validation for estimator. - Checks if the estimator is fitted by verifying the presence of - "all_or_any" of the passed attributes and raises a NotFittedError with the - given message. - Parameters - ---------- - estimator : estimator instance. - estimator instance for which the check is performed. - attributes : attribute name(s) given as string or a list/tuple of strings - Eg. : ["coef_", "estimator_", ...], "coef_" - msg : string - The default error message is, "This %(name)s instance is not fitted - yet. Call 'fit' with appropriate arguments before using this method." - For custom messages if "%(name)s" is present in the message string, - it is substituted for the estimator name. - Eg. : "Estimator, %(name)s, must be fitted before sparsifying". - all_or_any : callable, {all, any}, default all - Specify whether all or any of the given attributes must exist. - """ - if msg is None: - msg = ("This %(name)s instance is not fitted yet. Call 'fit' with " - "appropriate arguments before using this method.") - - if not hasattr(estimator, 'fit'): - raise TypeError("%s is not an estimator instance." % (estimator)) - - if not isinstance(attributes, (list, tuple)): - attributes = [attributes] - - if not all_or_any([hasattr(estimator, attr) for attr in attributes]): - raise NotFittedError(msg % {'name': type(estimator).__name__}) diff --git a/nilearn/_utils/testing.py b/nilearn/_utils/testing.py index e6a4f5e14f..b700500429 100644 --- a/nilearn/_utils/testing.py +++ b/nilearn/_utils/testing.py @@ -5,21 +5,14 @@ import functools import inspect import os -import re import sys import tempfile import warnings import gc import numpy as np -import scipy.signal -from sklearn.utils import check_random_state from sklearn.utils.testing import assert_warns -import scipy.linalg -import nibabel -from .. import masking -from . import logger from .compat import _basestring, _urllib from ..datasets.utils import _fetch_files @@ -258,394 +251,6 @@ def __call__(self, *args, **kwargs): return filenames -def generate_timeseries(n_instants, n_features, - rand_gen=None): - """Generate some random timeseries. """ - if rand_gen is None: - rand_gen = np.random.RandomState(0) - # TODO: add an "order" keyword - return rand_gen.randn(n_instants, n_features) - - -def generate_regions_ts(n_features, n_regions, - overlap=0, - rand_gen=None, - window="boxcar"): - """Generate some regions as timeseries. - - Parameters - ---------- - overlap: int - Number of overlapping voxels between two regions (more or less) - window: str - Name of a window in scipy.signal. e.g. "hamming". - - Returns - ------- - regions: numpy.ndarray - regions, nepresented as signals. - shape (n_features, n_regions) - """ - - if rand_gen is None: - rand_gen = np.random.RandomState(0) - if window is None: - window = "boxcar" - - assert(n_features > n_regions) - - # Compute region boundaries indices. - # Start at 1 to avoid getting an empty region - boundaries = np.zeros(n_regions + 1) - boundaries[-1] = n_features - boundaries[1:-1] = rand_gen.permutation(np.arange(1, n_features) - )[:n_regions - 1] - boundaries.sort() - - regions = np.zeros((n_regions, n_features), order="C") - overlap_end = int((overlap + 1) / 2.) - overlap_start = int(overlap / 2.) 
- for n in range(len(boundaries) - 1): - start = int(max(0, boundaries[n] - overlap_start)) - end = int(min(n_features, boundaries[n + 1] + overlap_end)) - win = scipy.signal.get_window(window, end - start) - win /= win.mean() # unity mean - regions[n, start:end] = win - - return regions - - -def generate_maps(shape, n_regions, overlap=0, border=1, - window="boxcar", rand_gen=None, affine=np.eye(4)): - """Generate a 4D volume containing several maps. - Parameters - ---------- - n_regions: int - number of regions to generate - - overlap: int - approximate number of voxels common to two neighboring regions - - window: str - name of a window in scipy.signal. Used to get non-uniform regions. - - border: int - number of background voxels on each side of the 3D volumes. - - Returns - ------- - maps: nibabel.Nifti1Image - 4D array, containing maps. - """ - - mask = np.zeros(shape, dtype=np.int8) - mask[border:-border, border:-border, border:-border] = 1 - ts = generate_regions_ts(mask.sum(), n_regions, overlap=overlap, - rand_gen=rand_gen, window=window) - mask_img = nibabel.Nifti1Image(mask, affine) - return masking.unmask(ts, mask_img), mask_img - - -def generate_labeled_regions(shape, n_regions, rand_gen=None, labels=None, - affine=np.eye(4), dtype=np.int): - """Generate a 3D volume with labeled regions. - - Parameters - ---------- - shape: tuple - shape of returned array - - n_regions: int - number of regions to generate. By default (if "labels" is None), - add a background with value zero. - - labels: iterable - labels to use for each zone. If provided, n_regions is unused. - - rand_gen: numpy.random.RandomState - random generator to use for generation. - - affine: numpy.ndarray - affine of returned image - - Returns - ------- - regions: nibabel.Nifti1Image - data has shape "shape", containing region labels. - """ - n_voxels = shape[0] * shape[1] * shape[2] - if labels is None: - labels = range(0, n_regions + 1) - n_regions += 1 - else: - n_regions = len(labels) - - regions = generate_regions_ts(n_voxels, n_regions, rand_gen=rand_gen) - # replace weights with labels - for n, row in zip(labels, regions): - row[row > 0] = n - data = np.zeros(shape, dtype=dtype) - data[np.ones(shape, dtype=np.bool)] = regions.sum(axis=0).T - return nibabel.Nifti1Image(data, affine) - - -def generate_labeled_regions_large(shape, n_regions, rand_gen=None, - affine=np.eye(4)): - """Similar to generate_labeled_regions, but suitable for a large number of - regions. - - See generate_labeled_regions for details. - """ - if rand_gen is None: - rand_gen = np.random.RandomState(0) - data = rand_gen.randint(n_regions + 1, size=shape) - if len(np.unique(data)) != n_regions + 1: - raise ValueError("Some labels are missing. Maybe shape is too small.") - return nibabel.Nifti1Image(data, affine) - - -def generate_fake_fmri(shape=(10, 11, 12), length=17, kind="noise", - affine=np.eye(4), n_blocks=None, block_size=None, - block_type='classification', - rand_gen=np.random.RandomState(0)): - """Generate a signal which can be used for testing. - - The return value is a 4D array, representing 3D volumes along time. - Only the voxels in the center are non-zero, to mimic the presence of - brain voxels in real signals. Setting n_blocks to an integer generates - condition blocks, the remaining of the timeseries corresponding - to 'rest' or 'baseline' condition. 
- - Parameters - ---------- - shape: tuple, optional - Shape of 3D volume - - length: int, optional - Number of time instants - - kind: string, optional - Kind of signal used as timeseries. - "noise": uniformly sampled values in [0..255] - "step": 0.5 for the first half then 1. - - affine: numpy.ndarray - Affine of returned images - - n_blocks: int or None - Number of condition blocks. - - block_size: int or None - Number of timepoints in a block. Used only if n_blocks is not - None. Defaults to 3 if n_blocks is not None. - - block_type: str - Defines if the returned target should be used for - 'classification' or 'regression'. - - Returns - ------- - fmri: nibabel.Nifti1Image - fake fmri signal. - shape: shape + (length,) - - mask: nibabel.Nifti1Image - mask giving non-zero voxels - - target: numpy.ndarray - Classification or regression target. Shape of number of - time points (length). Returned only if n_blocks is not None - """ - full_shape = shape + (length, ) - fmri = np.zeros(full_shape) - # Fill central voxels timeseries with random signals - width = [s // 2 for s in shape] - shift = [s // 4 for s in shape] - - if kind == "noise": - signals = rand_gen.randint(256, size=(width + [length])) - elif kind == "step": - signals = np.ones(width + [length]) - signals[..., :length // 2] = 0.5 - else: - raise ValueError("Unhandled value for parameter 'kind'") - - fmri[shift[0]:shift[0] + width[0], - shift[1]:shift[1] + width[1], - shift[2]:shift[2] + width[2], - :] = signals - - mask = np.zeros(shape) - mask[shift[0]:shift[0] + width[0], - shift[1]:shift[1] + width[1], - shift[2]:shift[2] + width[2]] = 1 - - if n_blocks is None: - return (nibabel.Nifti1Image(fmri, affine), - nibabel.Nifti1Image(mask, affine)) - - block_size = 3 if block_size is None else block_size - flat_fmri = fmri[mask.astype(np.bool)] - flat_fmri /= np.abs(flat_fmri).max() - target = np.zeros(length, dtype=np.int) - rest_max_size = (length - (n_blocks * block_size)) // n_blocks - if rest_max_size < 0: - raise ValueError( - '%s is too small ' - 'to put %s blocks of size %s' % ( - length, n_blocks, block_size)) - t_start = 0 - if rest_max_size > 0: - t_start = rand_gen.random_integers(0, rest_max_size, 1)[0] - for block in range(n_blocks): - if block_type == 'classification': - # Select a random voxel and add some signal to the background - voxel_idx = rand_gen.randint(0, flat_fmri.shape[0], 1)[0] - trials_effect = (rand_gen.random_sample(block_size) + 1) * 3. - else: - # Select the voxel in the image center and add some signal - # that increases with each block - voxel_idx = flat_fmri.shape[0] // 2 - trials_effect = ( - rand_gen.random_sample(block_size) + 1) * block - t_rest = 0 - if rest_max_size > 0: - t_rest = rand_gen.random_integers(0, rest_max_size, 1)[0] - flat_fmri[voxel_idx, t_start:t_start + block_size] += trials_effect - target[t_start:t_start + block_size] = block + 1 - t_start += t_rest + block_size - target = target if block_type == 'classification' \ - else target.astype(np.float) - fmri = np.zeros(fmri.shape) - fmri[mask.astype(np.bool)] = flat_fmri - return (nibabel.Nifti1Image(fmri, affine), - nibabel.Nifti1Image(mask, affine), target) - - -def generate_signals_from_precisions(precisions, - min_n_samples=50, max_n_samples=100, - random_state=0): - """Generate timeseries according to some given precision matrices. - - Signals all have zero mean. - - Parameters - ---------- - precisions: list of numpy.ndarray - list of precision matrices. 
Every matrix must be square (with the same - size) and positive definite. The output of - generate_group_sparse_gaussian_graphs() can be used here. - - min_samples, max_samples: int - the number of samples drawn for each timeseries is taken at random - between these two numbers. - - Returns - ------- - signals: list of numpy.ndarray - output signals. signals[n] corresponds to precisions[n], and has shape - (sample number, precisions[n].shape[0]). - """ - random_state = check_random_state(random_state) - - signals = [] - n_samples = random_state.randint(min_n_samples, high=max_n_samples, - size=len(precisions)) - - mean = np.zeros(precisions[0].shape[0]) - for n, prec in zip(n_samples, precisions): - signals.append(random_state.multivariate_normal(mean, - np.linalg.inv(prec), - (n,))) - return signals - - -def generate_group_sparse_gaussian_graphs( - n_subjects=5, n_features=30, min_n_samples=30, max_n_samples=50, - density=0.1, random_state=0, verbose=0): - """Generate signals drawn from a sparse Gaussian graphical model. - - Parameters - ---------- - n_subjects : int, optional - number of subjects - - n_features : int, optional - number of signals per subject to generate - - density : float, optional - density of edges in graph topology - - min_n_samples, max_n_samples : int, optional - Each subject have a different number of samples, between these two - numbers. All signals for a given subject have the same number of - samples. - - random_state : int or numpy.random.RandomState instance, optional - random number generator, or seed. - - verbose: int, optional - verbosity level (0 means no message). - - Returns - ------- - subjects : list of numpy.ndarray, shape for each (n_samples, n_features) - subjects[n] is the signals for subject n. They are provided as a numpy - len(subjects) = n_subjects. n_samples varies according to the subject. - - precisions : list of numpy.ndarray - precision matrices. - - topology : numpy.ndarray - binary array giving the graph topology used for generating covariances - and signals. - """ - - random_state = check_random_state(random_state) - # Generate topology (upper triangular binary matrix, with zeros on the - # diagonal) - topology = np.empty((n_features, n_features)) - topology[:, :] = np.triu(( - random_state.randint(0, high=int(1. / density), - size=n_features * n_features) - ).reshape(n_features, n_features) == 0, k=1) - - # Generate edges weights on topology - precisions = [] - mask = topology > 0 - for _ in range(n_subjects): - - # See also sklearn.datasets.samples_generator.make_sparse_spd_matrix - prec = topology.copy() - prec[mask] = random_state.uniform(low=.1, high=.8, size=(mask.sum())) - prec += np.eye(prec.shape[0]) - prec = np.dot(prec.T, prec) - - # Assert precision matrix is spd - np.testing.assert_almost_equal(prec, prec.T) - eigenvalues = np.linalg.eigvalsh(prec) - if eigenvalues.min() < 0: - raise ValueError("Failed generating a positive definite precision " - "matrix. Decreasing n_features can help solving " - "this problem.") - precisions.append(prec) - - # Returns the topology matrix of precision matrices. - topology += np.eye(*topology.shape) - topology = np.dot(topology.T, topology) - topology = topology > 0 - assert(np.all(topology == topology.T)) - logger.log("Sparsity: {0:f}".format( - 1. 
* topology.sum() / (topology.shape[0] ** 2)), - verbose=verbose) - - # Generate temporal signals - signals = generate_signals_from_precisions(precisions, - min_n_samples=min_n_samples, - max_n_samples=max_n_samples, - random_state=random_state) - return signals, precisions, topology - def is_nose_running(): """Returns whether we are running the nose test loader diff --git a/nilearn/connectome/connectivity_matrices.py b/nilearn/connectome/connectivity_matrices.py index a2b9ad8b85..fce8fd064a 100644 --- a/nilearn/connectome/connectivity_matrices.py +++ b/nilearn/connectome/connectivity_matrices.py @@ -79,8 +79,8 @@ def _map_eigenvalues(function, symmetric): The new symmetric matrix obtained after transforming the eigenvalues, while keeping the same eigenvectors. - Note - ---- + Notes + ----- If input matrix is not real symmetric, no error is reported but result will be wrong. """ @@ -290,8 +290,8 @@ def vec_to_sym_matrix(vec, diagonal=None): sym : numpy.ndarray, shape (..., n_columns, n_columns). The output symmetric matrix. - Note - ---- + Notes + ----- This function is meant to be the inverse of sym_matrix_to_vec. If you have discarded the diagonal in sym_matrix_to_vec, you need to provide it separately to reconstruct the symmetric matrix. For instance this can be diff --git a/nilearn/connectome/group_sparse_cov.py b/nilearn/connectome/group_sparse_cov.py index 45b042d2ac..f43076b992 100644 --- a/nilearn/connectome/group_sparse_cov.py +++ b/nilearn/connectome/group_sparse_cov.py @@ -5,7 +5,6 @@ # Authors: Philippe Gervais # License: simplified BSD -from distutils.version import LooseVersion import warnings import collections import operator @@ -14,16 +13,15 @@ import numpy as np import scipy.linalg -import sklearn -from sklearn.utils.extmath import fast_logdet -from sklearn.covariance import empirical_covariance from sklearn.base import BaseEstimator +from sklearn.covariance import empirical_covariance from sklearn.externals.joblib import Memory, delayed, Parallel +from sklearn.model_selection import check_cv +from sklearn.utils.extmath import fast_logdet from .._utils import CacheMixin from .._utils import logger from .._utils.extmath import is_spd -from .._utils.fixes import check_cv from .._utils.compat import izip @@ -942,17 +940,12 @@ def fit(self, subjects, y=None): # One cv generator per subject must be created, because each subject # can have a different number of samples from the others. 
cv = [] - if LooseVersion(sklearn.__version__) >= LooseVersion('0.18'): - # scikit-learn >= 0.18 - for k in range(n_subjects): - cv.append(check_cv(self.cv, np.ones(subjects[k].shape[0]), - classifier=False).split(subjects[k])) - else: - # scikit-learn < 0.18 - for k in range(n_subjects): - cv.append(check_cv(self.cv, subjects[k], None, - classifier=False)) - + for k in range(n_subjects): + cv.append(check_cv( + self.cv, np.ones(subjects[k].shape[0]), + classifier=False + ).split(subjects[k]) + ) path = list() # List of (alpha, scores, covs) n_alphas = self.alphas diff --git a/nilearn/connectome/tests/test_group_sparse_cov.py b/nilearn/connectome/tests/test_group_sparse_cov.py index 7ec337f501..efae2e0c72 100644 --- a/nilearn/connectome/tests/test_group_sparse_cov.py +++ b/nilearn/connectome/tests/test_group_sparse_cov.py @@ -1,7 +1,7 @@ from nose.tools import assert_equal, assert_true, assert_raises import numpy as np -from nilearn._utils.testing import generate_group_sparse_gaussian_graphs +from nilearn._utils.data_gen import generate_group_sparse_gaussian_graphs from nilearn.connectome.group_sparse_cov import (group_sparse_covariance, group_sparse_scores) from nilearn.connectome import GroupSparseCovariance, GroupSparseCovarianceCV diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py index 34d4d01f54..91c8cf2f5e 100644 --- a/nilearn/datasets/__init__.py +++ b/nilearn/datasets/__init__.py @@ -5,7 +5,8 @@ from .struct import (fetch_icbm152_2009, load_mni152_template, load_mni152_brain_mask, fetch_oasis_vbm, fetch_icbm152_brain_gm_mask, - MNI152_FILE_PATH, fetch_surf_fsaverage5) + MNI152_FILE_PATH, fetch_surf_fsaverage5, + fetch_surf_fsaverage) from .func import (fetch_haxby_simple, fetch_haxby, fetch_nyu_rest, fetch_adhd, fetch_miyawaki2008, fetch_localizer_contrasts, fetch_abide_pcp, @@ -22,10 +23,15 @@ fetch_coords_dosenbach_2010, fetch_atlas_allen_2011, fetch_atlas_surf_destrieux, - fetch_atlas_talairach) + fetch_atlas_talairach, + fetch_atlas_pauli_2017) from .utils import get_data_dirs -from .neurovault import fetch_neurovault, fetch_neurovault_ids +from .neurovault import (fetch_neurovault, + fetch_neurovault_ids, + fetch_neurovault_motor_task, + fetch_neurovault_auditory_computation_task +) __all__ = ['MNI152_FILE_PATH', 'fetch_icbm152_2009', 'load_mni152_template', 'fetch_oasis_vbm', @@ -41,8 +47,11 @@ 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal', 'fetch_megatrawls_netmats', 'fetch_cobre', 'fetch_surf_nki_enhanced', 'fetch_surf_fsaverage5', + 'fetch_surf_fsaverage', 'fetch_atlas_basc_multiscale_2015', 'fetch_coords_dosenbach_2010', 'fetch_neurovault', 'fetch_neurovault_ids', + 'fetch_neurovault_motor_task', + 'fetch_neurovault_auditory_computation_task', 'load_mni152_brain_mask', 'fetch_icbm152_brain_gm_mask', 'fetch_atlas_surf_destrieux', 'fetch_atlas_talairach', 'get_data_dirs'] diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 74b3f9027c..325087b6f7 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -537,8 +537,8 @@ def fetch_atlas_yeo_2011(data_dir=None, url=None, resume=True, verbose=1): Licence: unknown. 
""" if url is None: - url = "ftp://surfer.nmr.mgh.harvard.edu/" \ - "pub/data/Yeo_JNeurophysiol11_MNI152.zip" + url = ('ftp://surfer.nmr.mgh.harvard.edu/pub/data/' + 'Yeo_JNeurophysiol11_MNI152.zip') opts = {'uncompress': True} dataset_name = "yeo_2011" @@ -1116,3 +1116,72 @@ def fetch_atlas_talairach(level_name, data_dir=None, verbose=1): description = _get_dataset_descr( 'talairach_atlas').decode('utf-8').format(level_name) return Bunch(maps=atlas_img, labels=labels, description=description) + + +def fetch_atlas_pauli_2017(version='prob', data_dir=None, verbose=1): + """Download the Pauli et al. (2017) atlas with in total + 12 subcortical nodes. + + Parameters + ---------- + + version: str, optional (default='prob') + Which version of the atlas should be download. This can be 'prob' + for the probabilistic atlas or 'det' for the deterministic atlas. + + data_dir : str, optional (default=None) + Path of the data directory. Used to force data storage in a specified + location. + + verbose : int + verbosity level (0 means no message). + + Returns + ------- + sklearn.datasets.base.Bunch + Dictionary-like object, contains: + + - maps: 3D Nifti image, values are indices in the list of labels. + - labels: list of strings. Starts with 'Background'. + - description: a short description of the atlas and some references. + + References + ---------- + https://osf.io/r2hvk/ + + `Pauli, W. M., Nili, A. N., & Tyszka, J. M. (2018). A high-resolution + probabilistic in vivo atlas of human subcortical brain nuclei. + Scientific Data, 5, 180063-13. http://doi.org/10.1038/sdata.2018.63`` + """ + + if version == 'prob': + url_maps = 'https://osf.io/w8zq2/download' + filename = 'pauli_2017_labels.nii.gz' + elif version == 'labels': + url_maps = 'https://osf.io/5mqfx/download' + filename = 'pauli_2017_prob.nii.gz' + else: + raise NotImplementedError('{} is no valid version for '.format(version) + \ + 'the Pauli atlas') + + url_labels = 'https://osf.io/6qrcb/download' + dataset_name = 'pauli_2017' + + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, + verbose=verbose) + + files = [(filename, + url_maps, + {'move':filename}), + ('labels.txt', + url_labels, + {'move':'labels.txt'})] + atlas_file, labels = _fetch_files(data_dir, files) + + labels = np.loadtxt(labels, dtype=str)[:, 1].tolist() + + fdescr = _get_dataset_descr(dataset_name) + + return Bunch(maps=atlas_file, + labels=labels, + description=fdescr) diff --git a/nilearn/datasets/data/fsaverage5/__init__.py b/nilearn/datasets/data/fsaverage5/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nilearn/datasets/data/fsaverage5/pial.left.gii.gz b/nilearn/datasets/data/fsaverage5/pial.left.gii.gz new file mode 100644 index 0000000000..b5db6f4085 Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/pial.left.gii.gz differ diff --git a/nilearn/datasets/data/fsaverage5/pial.right.gii.gz b/nilearn/datasets/data/fsaverage5/pial.right.gii.gz new file mode 100644 index 0000000000..16c0b350c7 Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/pial.right.gii.gz differ diff --git a/nilearn/datasets/data/fsaverage5/pial_inflated.left.gii.gz b/nilearn/datasets/data/fsaverage5/pial_inflated.left.gii.gz new file mode 100644 index 0000000000..0f2c47edb9 Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/pial_inflated.left.gii.gz differ diff --git a/nilearn/datasets/data/fsaverage5/pial_inflated.right.gii.gz b/nilearn/datasets/data/fsaverage5/pial_inflated.right.gii.gz new file mode 100644 index 
0000000000..5f6820a933 Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/pial_inflated.right.gii.gz differ diff --git a/nilearn/datasets/data/fsaverage5/sulc.left.gii.gz b/nilearn/datasets/data/fsaverage5/sulc.left.gii.gz new file mode 100644 index 0000000000..c8666e2a37 Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/sulc.left.gii.gz differ diff --git a/nilearn/datasets/data/fsaverage5/sulc.right.gii.gz b/nilearn/datasets/data/fsaverage5/sulc.right.gii.gz new file mode 100644 index 0000000000..2ef29aa34f Binary files /dev/null and b/nilearn/datasets/data/fsaverage5/sulc.right.gii.gz differ diff --git a/nilearn/datasets/description/fsaverage.rst b/nilearn/datasets/description/fsaverage.rst new file mode 100644 index 0000000000..db4eb1a91e --- /dev/null +++ b/nilearn/datasets/description/fsaverage.rst @@ -0,0 +1,21 @@ +fsaverage + + +Notes +----- +Fsaverage standard surface as distributed with Freesurfer (Fischl et al, 1999) + +Content +------- + :'pial_left': Gifti file, left hemisphere pial surface mesh + :'pial_right': Gifti file, right hemisphere pial surface mesh + :'infl_left': Gifti file, left hemisphere inflated pial surface mesh + :'infl_right': Gifti file, right hemisphere inflated pial + surface mesh + :'sulc_left': Gifti file, left hemisphere sulcal depth data + :'sulc_right': Gifti file, right hemisphere sulcal depth data + +References +---------- +Fischl et al, (1999). High-resolution intersubject averaging and a +coordinate system for the cortical surface. Hum Brain Mapp 8, 272-284. diff --git a/nilearn/datasets/description/pauli_2017.rst b/nilearn/datasets/description/pauli_2017.rst new file mode 100644 index 0000000000..eaaa8b12ac --- /dev/null +++ b/nilearn/datasets/description/pauli_2017.rst @@ -0,0 +1,21 @@ +In Vivo High Resolution Atlas of the Subcortical Human Brain + + +Notes +----- +The purpose of this project is to develop a crowd-sourced In Vivo High Resolution Atlas of the Subcortical Human Brain. +We invite contributions to this project, both to increase the precision of anatomical labels, and to increase the number of labeled subcortical nuclei. + +This resource can be used as a reference atlas for researchers and students alike. + +Content +------- + :'maps': Nifti images with the (probabilistic) region definitions + :'labels': text file containing the file names + +References +---------- +For more information about this dataset: +https://osf.io/r2hvk/ + +Licence: UCC-By Attribution 4.0 International diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index c5db149035..eef785c207 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -17,6 +17,7 @@ from .._utils import check_niimg from .._utils.compat import BytesIO, _basestring, _urllib from .._utils.numpy_conversions import csv_to_array +from .._utils.exceptions import VisibleDeprecationWarning @deprecated("fetch_haxby_simple will be removed in future releases. " @@ -39,7 +40,7 @@ def fetch_haxby_simple(data_dir=None, url=None, resume=True, verbose=1): target data. 'mask': string. Path to nifti mask file. 'session': list of string. Path to text file containing labels - (can be used for LeaveOneLabelOut cross validation for example). + (can be used for LeaveOneGroupOut cross validation for example). References ---------- @@ -151,7 +152,7 @@ def fetch_haxby(data_dir=None, n_subjects=None, subjects=(2,), warn_str = ("The parameter 'n_subjects' is deprecated from 0.2.6 and " "will be removed in nilearn next release. 
Use parameter " "'subjects' instead.") - warnings.warn(warn_str, np.VisibleDeprecationWarning, stacklevel=2) + warnings.warn(warn_str, VisibleDeprecationWarning, stacklevel=2) subjects = n_subjects if isinstance(subjects, numbers.Number) and subjects > 6: @@ -1062,16 +1063,14 @@ def fetch_localizer_calculation_task(n_subjects=1, data_dir=None, url=None, return data -def fetch_localizer_button_task(n_subjects=[2, ], data_dir=None, url=None, - get_anats=False, verbose=1): +def fetch_localizer_button_task(data_dir=None, url=None, verbose=1): """Fetch left vs right button press contrast maps from the localizer. + This function ships only 2nd subject (S02) specific tmap and + its normalized T1 image. + Parameters ---------- - n_subjects: int or list, optional - The number or list of subjects to load. If None is given, - all 94 subjects are used. - data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. @@ -1080,9 +1079,6 @@ def fetch_localizer_button_task(n_subjects=[2, ], data_dir=None, url=None, Override download URL. Used for test only (or if you setup a mirror of the data). - get_anats: boolean - Whether individual structural images should be fetched or not. - verbose: int, optional verbosity level (0 means no message). @@ -1090,7 +1086,8 @@ def fetch_localizer_button_task(n_subjects=[2, ], data_dir=None, url=None, ------- data: Bunch Dictionary-like object, the interest attributes are : - 'cmaps': string list, giving paths to nifti contrast maps + 'tmap': string, giving paths to nifti contrast maps + 'anat': string, giving paths to normalized anatomical image Notes ------ @@ -1105,12 +1102,30 @@ def fetch_localizer_button_task(n_subjects=[2, ], data_dir=None, url=None, nilearn.datasets.fetch_localizer_contrasts """ - data = fetch_localizer_contrasts(["left vs right button press"], - n_subjects=n_subjects, - get_tmaps=True, get_masks=False, - get_anats=get_anats, data_dir=data_dir, - url=url, resume=True, verbose=verbose) - return data + # The URL can be retrieved from the nilearn account on OSF (Open + # Science Framework). 
Uploaded files specific to S02 from
+    # fetch_localizer_contrasts ['left vs right button press']
+    if url is None:
+        url = 'https://osf.io/dx9jn/download'
+
+    tmap = "t_map_left_auditory_&_visual_click_vs_right_auditory&visual_click.nii.gz"
+    anat = "normalized_T1_anat_defaced.nii.gz"
+
+    opts = {'uncompress': True}
+
+    options = ('tmap', 'anat')
+    filenames = [(os.path.join('localizer_button_task', name), url, opts)
+                 for name in (tmap, anat)]
+
+    dataset_name = 'brainomics'
+    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
+                                verbose=verbose)
+    files = _fetch_files(data_dir, filenames, verbose=verbose)
+
+    fdescr = _get_dataset_descr('brainomics_localizer')
+
+    params = dict([('description', fdescr)] + list(zip(options, files)))
+    return Bunch(**params)


 def fetch_abide_pcp(data_dir=None, n_subjects=None, pipeline='cpac',
@@ -1845,3 +1860,4 @@ def fetch_surf_nki_enhanced(n_subjects=10, data_dir=None,

     return Bunch(func_left=func_left, func_right=func_right,
                  phenotypic=phenotypic, description=fdescr)
+
diff --git a/nilearn/datasets/neurovault.py b/nilearn/datasets/neurovault.py
index bffa6b0725..18dca08927 100644
--- a/nilearn/datasets/neurovault.py
+++ b/nilearn/datasets/neurovault.py
@@ -36,7 +36,7 @@
 _NEUROVAULT_BASE_URL = 'http://neurovault.org/api/'
 _NEUROVAULT_COLLECTIONS_URL = urljoin(_NEUROVAULT_BASE_URL, 'collections/')
 _NEUROVAULT_IMAGES_URL = urljoin(_NEUROVAULT_BASE_URL, 'images/')
-_NEUROSYNTH_FETCH_WORDS_URL = 'http://neurosynth.org/api/v2/decode/'
+_NEUROSYNTH_FETCH_WORDS_URL = 'http://neurosynth.org/api/decode/'
 _COL_FILTERS_AVAILABLE_ON_SERVER = ('DOI', 'name', 'owner', 'id')
 _IM_FILTERS_AVAILABLE_ON_SERVER = tuple()
@@ -2600,3 +2600,90 @@ def fetch_neurovault_ids(
         data_dir=data_dir, fetch_neurosynth_words=fetch_neurosynth_words,
         vectorize_words=vectorize_words, verbose=verbose)
+
+
+def fetch_neurovault_motor_task(data_dir=None, verbose=1):
+    """Fetch left vs right button press group contrast map from NeuroVault.
+
+    Parameters
+    ----------
+    data_dir: string, optional
+        Path of the data directory. Used to force data storage in a specified
+        location.
+
+    verbose: int, optional
+        verbosity level (0 means no message).
+
+    Returns
+    -------
+    data: Bunch
+        A dict-like object which exposes its items as attributes. It contains:
+            - 'images', the paths to downloaded files.
+            - 'images_meta', the metadata for the images in a list of
+              dictionaries.
+            - 'collections_meta', the metadata for the collections.
+            - 'description', a short description of the Neurovault dataset.
+
+    Notes
+    -----
+    This function is only a caller for fetch_neurovault_ids, provided to make
+    the examples easier to read and understand.
+    The 'left vs right button press' contrast is used:
+    https://neurovault.org/images/10426/
+
+    See Also
+    --------
+    nilearn.datasets.fetch_neurovault_ids
+    nilearn.datasets.fetch_neurovault
+    nilearn.datasets.fetch_neurovault_auditory_computation_task
+
+    """
+    data = fetch_neurovault_ids(image_ids=[10426], data_dir=data_dir,
+                                verbose=verbose)
+    return data
+
+
+def fetch_neurovault_auditory_computation_task(data_dir=None, verbose=1):
+    """Fetch a contrast map from NeuroVault showing
+    the effect of mental subtraction upon auditory instructions.
+
+    Parameters
+    ----------
+    data_dir: string, optional
+        Path of the data directory. Used to force data storage in a specified
+        location.
+
+    verbose: int, optional
+        verbosity level (0 means no message).
+
+    Returns
+    -------
+    data: Bunch
+        A dict-like object which exposes its items as attributes. It contains:
+            - 'images', the paths to downloaded files.
+            - 'images_meta', the metadata for the images in a list of
+              dictionaries.
+            - 'collections_meta', the metadata for the collections.
+            - 'description', a short description of the Neurovault dataset.
+
+    Notes
+    -----
+    This function is only a caller for fetch_neurovault_ids, provided to make
+    the examples easier to read and understand.
+    The 'auditory_calculation_vs_baseline' contrast is used:
+    https://neurovault.org/images/32980/
+
+    See Also
+    --------
+    nilearn.datasets.fetch_neurovault_ids
+    nilearn.datasets.fetch_neurovault
+    nilearn.datasets.fetch_neurovault_motor_task
+
+    """
+    data = fetch_neurovault_ids(image_ids=[32980], data_dir=data_dir,
+                                verbose=verbose)
+    return data
diff --git a/nilearn/datasets/struct.py b/nilearn/datasets/struct.py
index 7b143d135e..8393e44bd4 100644
--- a/nilearn/datasets/struct.py
+++ b/nilearn/datasets/struct.py
@@ -7,15 +7,18 @@
 from scipy import ndimage
 from sklearn.datasets.base import Bunch

-from .utils import _get_dataset_dir, _fetch_files, _get_dataset_descr
+from .utils import (_get_dataset_dir, _fetch_files,
+                    _get_dataset_descr, _uncompress_file)
 from .._utils import check_niimg, niimg
+from .._utils.exceptions import VisibleDeprecationWarning
 from ..image import new_img_like

 _package_directory = os.path.dirname(os.path.abspath(__file__))
 # Useful for the very simple examples
 MNI152_FILE_PATH = os.path.join(_package_directory, "data",
-                                "avg152T1_brain.nii.gz")
+                               "avg152T1_brain.nii.gz")
+FSAVERAGE5_PATH = os.path.join(_package_directory, "data", "fsaverage5")


 def fetch_icbm152_2009(data_dir=None, url=None, resume=True, verbose=1):
@@ -60,28 +63,34 @@ def fetch_icbm152_2009(data_dir=None, url=None, resume=True, verbose=1):
     -----
     For more information about this dataset's structure:
     http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009
+
+    The original download URL is
+    http://www.bic.mni.mcgill.ca/~vfonov/icbm/2009/mni_icbm152_nlin_sym_09a_nifti.zip
     """
     if url is None:
-        url = "http://www.bic.mni.mcgill.ca/~vfonov/icbm/2009/" \
-              "mni_icbm152_nlin_sym_09a_nifti.zip"
+        # The URL can be retrieved from the nilearn account on OSF (Open
+        # Science Framework), https://osf.io/4r3jt/quickfiles/
+        # Clicking on the "share" button gives the root of the URL.
+ url = "https://osf.io/7pj92/download" opts = {'uncompress': True} keys = ("csf", "gm", "wm", "pd", "t1", "t2", "t2_relax", "eye_mask", "face_mask", "mask") filenames = [(os.path.join("mni_icbm152_nlin_sym_09a", name), url, opts) - for name in ("mni_icbm152_csf_tal_nlin_sym_09a.nii", - "mni_icbm152_gm_tal_nlin_sym_09a.nii", - "mni_icbm152_wm_tal_nlin_sym_09a.nii", + for name in ( + "mni_icbm152_csf_tal_nlin_sym_09a.nii.gz", + "mni_icbm152_gm_tal_nlin_sym_09a.nii.gz", + "mni_icbm152_wm_tal_nlin_sym_09a.nii.gz", - "mni_icbm152_pd_tal_nlin_sym_09a.nii", - "mni_icbm152_t1_tal_nlin_sym_09a.nii", - "mni_icbm152_t2_tal_nlin_sym_09a.nii", - "mni_icbm152_t2_relx_tal_nlin_sym_09a.nii", + "mni_icbm152_pd_tal_nlin_sym_09a.nii.gz", + "mni_icbm152_t1_tal_nlin_sym_09a.nii.gz", + "mni_icbm152_t2_tal_nlin_sym_09a.nii.gz", + "mni_icbm152_t2_relx_tal_nlin_sym_09a.nii.gz", - "mni_icbm152_t1_tal_nlin_sym_09a_eye_mask.nii", - "mni_icbm152_t1_tal_nlin_sym_09a_face_mask.nii", - "mni_icbm152_t1_tal_nlin_sym_09a_mask.nii")] + "mni_icbm152_t1_tal_nlin_sym_09a_eye_mask.nii.gz", + "mni_icbm152_t1_tal_nlin_sym_09a_face_mask.nii.gz", + "mni_icbm152_t1_tal_nlin_sym_09a_mask.nii.gz")] dataset_name = 'icbm152_2009' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, @@ -423,25 +432,77 @@ def fetch_oasis_vbm(n_subjects=None, dartel_version=True, data_dir=None, description=fdescr) -def fetch_surf_fsaverage5(data_dir=None, url=None, resume=True, verbose=1): - - """ Download Freesurfer fsaverage5 surface +def fetch_surf_fsaverage(mesh='fsaverage5', data_dir=None): + """ Download a Freesurfer fsaverage surface Parameters ---------- - data_dir: str, optional + mesh: str, optional (default='fsaverage5') + Which mesh to fetch. + 'fsaverage5': the low-resolution fsaverage5 mesh (10242 nodes) + 'fsaverage': the high-resolution fsaverage mesh (163842 nodes) + (high-resolution fsaverage will result in + more computation time and memory usage) + + data_dir: str, optional (default=None) Path of the data directory. Used to force data storage in a specified - location. Default: None + location. - url: str, optional - Override download URL. Used for test only (or if you setup a mirror of - the data). Default: None + Returns + ------- + data: sklearn.datasets.base.Bunch + Dictionary-like object, the interest attributes are : + - 'pial_left': Gifti file, left hemisphere pial surface mesh + - 'pial_right': Gifti file, right hemisphere pial surface mesh + - 'infl_left': Gifti file, left hemisphere inflated pial surface mesh + - 'infl_right': Gifti file, right hemisphere inflated pial + surface mesh + - 'sulc_left': Gifti file, left hemisphere sulcal depth data + - 'sulc_right': Gifti file, right hemisphere sulcal depth data - resume: bool, optional (default True) - If True, try resuming download if possible. + References + ---------- + Fischl et al, (1999). High-resolution intersubject averaging and a + coordinate system for the cortical surface. Hum Brain Mapp 8, 272-284. - verbose: int, optional (default 1) - Defines the level of verbosity of the output. 
+ """ + meshes = {'fsaverage5': _fetch_surf_fsaverage5, + 'fsaverage': _fetch_surf_fsaverage} + if mesh not in meshes: + raise ValueError( + "'mesh' should be one of {}; {!r} was provided".format( + list(meshes.keys()), mesh)) + return meshes[mesh](data_dir=data_dir) + + +def _fetch_surf_fsaverage(data_dir=None): + dataset_dir = _get_dataset_dir('fsaverage', data_dir=data_dir) + url = 'https://www.nitrc.org/frs/download.php/10846/fsaverage.tar.gz' + if not os.path.isdir(os.path.join(dataset_dir, 'fsaverage')): + _fetch_files(dataset_dir, [('fsaverage.tar.gz', url, {})]) + _uncompress_file(os.path.join(dataset_dir, 'fsaverage.tar.gz')) + result = { + name: os.path.join(dataset_dir, 'fsaverage', '{}.gii'.format(name)) + for name in ['pial_right', 'sulc_right', 'sulc_left', 'pial_left']} + result['infl_left'] = os.path.join( + dataset_dir, 'fsaverage', 'inflated_left.gii') + result['infl_right'] = os.path.join( + dataset_dir, 'fsaverage', 'inflated_right.gii') + + result['description'] = str(_get_dataset_descr('fsaverage')) + return Bunch(**result) + + +def fetch_surf_fsaverage5(data_dir=None, url=None, resume=True, verbose=1): + """ Deprecated since version 0.4.3 + + Use fetch_surf_fsaverage instead. + + Parameters + ---------- + data_dir: str, optional (default=None) + Path of the data directory. Used to force data storage in a specified + location. Returns ------- @@ -459,58 +520,54 @@ def fetch_surf_fsaverage5(data_dir=None, url=None, resume=True, verbose=1): ---------- Fischl et al, (1999). High-resolution intersubject averaging and a coordinate system for the cortical surface. Hum Brain Mapp 8, 272-284. + + """ + warnings.warn("fetch_surf_fsaverage5 has been deprecated and will " + "be removed in a future release. " + "Use fetch_surf_fsaverage(mesh='fsaverage5')", + VisibleDeprecationWarning, stacklevel=2) + return fetch_surf_fsaverage(mesh='fsaverage5', data_dir=data_dir) + + +def _fetch_surf_fsaverage5(data_dir=None, url=None, resume=True, verbose=1): + """Helper function to ship fsaverage5 surfaces and sulcal information + with Nilearn. + + The source of the data is coming from nitrc based on this PR #1016. + Manually downloaded gzipped and shipped with this function. + + Shipping is done with Nilearn based on issue #1705. 
""" - if url is None: - url = 'https://www.nitrc.org/frs/download.php/' - # Preliminary checks and declarations dataset_name = 'fsaverage5' - data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, - verbose=verbose) # Dataset description fdescr = _get_dataset_descr(dataset_name) # Download fsaverage surfaces and sulcal information - surf_file = '%s.%s.gii' - surf_url = url + '%i/%s.%s.gii' - surf_nids = {'lh pial': 9344, 'rh pial': 9345, - 'lh infl': 9346, 'rh infl': 9347, - 'lh sulc': 9348, 'rh sulc': 9349} + surface_file = '%s.%s.gii.gz' + surface_path = os.path.join(FSAVERAGE5_PATH, surface_file) pials = [] infls = [] sulcs = [] - for hemi in [('lh', 'left'), ('rh', 'right')]: - - pial = _fetch_files(data_dir, - [(surf_file % ('pial', hemi[1]), - surf_url % (surf_nids['%s pial' % hemi[0]], - 'pial', hemi[1]), - {})], - resume=resume, verbose=verbose) - pials.append(pial) - - infl = _fetch_files(data_dir, - [(surf_file % ('pial_inflated', hemi[1]), - surf_url % (surf_nids['%s infl' % hemi[0]], - 'pial_inflated', hemi[1]), - {})], - resume=resume, verbose=verbose) - infls.append(infl) - - sulc = _fetch_files(data_dir, - [(surf_file % ('sulc', hemi[1]), - surf_url % (surf_nids['%s sulc' % hemi[0]], - 'sulc', hemi[1]), - {})], - resume=resume, verbose=verbose) + for hemi in ['left', 'right']: + # pial + pial_path = surface_path % ('pial', hemi) + pials.append(pial_path) + + # pial_inflated + pial_infl_path = surface_path % ('pial_inflated', hemi) + infls.append(pial_infl_path) + + # sulcal + sulc = surface_path % ('sulc', hemi) sulcs.append(sulc) - return Bunch(pial_left=pials[0][0], - pial_right=pials[1][0], - infl_left=infls[0][0], - infl_right=infls[1][0], - sulc_left=sulcs[0][0], - sulc_right=sulcs[1][0], + return Bunch(pial_left=pials[0], + pial_right=pials[1], + infl_left=infls[0], + infl_right=infls[1], + sulc_left=sulcs[0], + sulc_right=sulcs[1], description=fdescr) diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index 5cafe79492..b9c9673f9f 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -530,3 +530,16 @@ def test_fetch_atlas_talairach(data_dir=tst.tmpdir): assert_array_equal(talairach.maps.get_data().ravel(), level_values.ravel()) assert_raises(ValueError, atlas.fetch_atlas_talairach, 'bad_level') + +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_atlas_pauli_2017(): + data_dir = os.path.join(tst.tmpdir, 'pauli_2017') + + data = atlas.fetch_atlas_pauli_2017('labels', data_dir) + assert_equal(len(data.labels), 16) + + values = nibabel.load(data.maps).get_data() + assert_equal(len(np.unique(values)), 17) + + data = atlas.fetch_atlas_pauli_2017('prob', data_dir) + assert_equal(nibabel.load(data.maps).shape[-1], 16) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index f060ef42d6..74e242f8e5 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -325,30 +325,14 @@ def test_fetch_localizer_calculation_task(): @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_localizer_button_task(): local_url = "file://" + tst.datadir - ids = np.asarray(['S%2d' % i for i in range(94)]) - ids = ids.view(dtype=[('subject_id', 'S3')]) - tst.mock_fetch_files.add_csv('cubicwebexport.csv', ids) - tst.mock_fetch_files.add_csv('cubicwebexport2.csv', ids) # Disabled: cannot be tested without actually fetching covariates CSV file - # All subjects + # Only one subject dataset = 
func.fetch_localizer_button_task(data_dir=tst.tmpdir, url=local_url, verbose=0) - assert_true(isinstance(dataset.ext_vars, np.recarray)) - assert_true(isinstance(dataset.cmaps[0], _basestring)) - assert_equal(dataset.ext_vars.size, 1) - assert_equal(len(dataset.cmaps), 1) - - # 20 subjects - dataset = func.fetch_localizer_button_task(n_subjects=20, - data_dir=tst.tmpdir, - url=local_url, - verbose=0) - assert_true(isinstance(dataset.ext_vars, np.recarray)) - assert_true(isinstance(dataset.cmaps[0], _basestring)) - assert_equal(dataset.ext_vars.size, 20) - assert_equal(len(dataset.cmaps), 20) + assert_true(isinstance(dataset.tmap, _basestring)) + assert_true(isinstance(dataset.anat, _basestring)) assert_not_equal(dataset.description, '') diff --git a/nilearn/datasets/tests/test_struct.py b/nilearn/datasets/tests/test_struct.py index 595a849db0..f94a99833e 100644 --- a/nilearn/datasets/tests/test_struct.py +++ b/nilearn/datasets/tests/test_struct.py @@ -164,18 +164,14 @@ def test_fetch_icbm152_brain_gm_mask(): @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_surf_fsaverage(): + # for mesh in ['fsaverage5', 'fsaverage']: + for mesh in ['fsaverage']: - dataset = struct.fetch_surf_fsaverage5(data_dir=tst.tmpdir, verbose=0) + dataset = struct.fetch_surf_fsaverage( + mesh, data_dir=tst.tmpdir) - keys = ['pial_left', 'pial_right', 'infl_left', 'infl_right', - 'sulc_left', 'sulc_right'] + keys = {'pial_left', 'pial_right', 'infl_left', 'infl_right', + 'sulc_left', 'sulc_right'} - filenames = ['pial.left.gii', 'pial.right.gii', 'pial_inflated.left.gii', - 'pial_inflated.right.gii', 'sulc.left.gii', 'sulc.right.gii'] - - for key, filename in zip(keys, filenames): - assert_equal(dataset[key], os.path.join(tst.tmpdir, 'fsaverage5', - filename)) - - assert_not_equal(dataset.description, '') - assert_equal(len(tst.mock_url_request.urls), len(keys)) + assert keys.issubset(set(dataset.keys())) + assert_not_equal(dataset.description, '') diff --git a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index 14324f47ad..446cae359e 100644 --- a/nilearn/datasets/utils.py +++ b/nilearn/datasets/utils.py @@ -156,9 +156,9 @@ def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, bytes_so_far += len(chunk) time_last_read = time.time() if (report_hook and - # Refresh report every half second or when download is + # Refresh report every second or when download is # finished. - (time_last_read > time_last_display + 0.5 or not chunk)): + (time_last_read > time_last_display + 1. 
or not chunk)): _chunk_report_(bytes_so_far, total_size, initial_size, t0) time_last_display = time_last_read diff --git a/nilearn/decoding/objective_functions.py b/nilearn/decoding/objective_functions.py index f663a3c2a7..032eae1577 100644 --- a/nilearn/decoding/objective_functions.py +++ b/nilearn/decoding/objective_functions.py @@ -206,7 +206,7 @@ def _gradient_id(img, l1_ratio=.5): # with dimension i stop at -1 slice_all = [0, slice(None, -1)] for d in range(img.ndim): - gradient[slice_all] = np.diff(img, axis=d) + gradient[tuple(slice_all)] = np.diff(img, axis=d) slice_all[0] = d + 1 slice_all.insert(1, slice(None)) diff --git a/nilearn/decoding/searchlight.py b/nilearn/decoding/searchlight.py index 949156f28d..7bc7fd96a8 100644 --- a/nilearn/decoding/searchlight.py +++ b/nilearn/decoding/searchlight.py @@ -21,12 +21,14 @@ from sklearn.externals.joblib import Parallel, delayed, cpu_count from sklearn import svm from sklearn.base import BaseEstimator +from sklearn.exceptions import ConvergenceWarning from .. import masking from ..image.resampling import coord_transform from ..input_data.nifti_spheres_masker import _apply_mask_and_get_affinity from .._utils.compat import _basestring -from .._utils.fixes import cross_val_score +from .._utils import check_niimg_4d +from sklearn.model_selection import cross_val_score ESTIMATOR_CATALOG = dict(svc=svm.LinearSVC, svr=svm.SVR) @@ -79,12 +81,14 @@ def search_light(X, y, estimator, A, groups=None, scoring=None, search_light scores """ group_iter = GroupIterator(A.shape[0], n_jobs) - scores = Parallel(n_jobs=n_jobs, verbose=verbose)( - delayed(_group_iter_search_light)( - A.rows[list_i], - estimator, X, y, groups, scoring, cv, - thread_id + 1, A.shape[0], verbose) - for thread_id, list_i in enumerate(group_iter)) + with warnings.catch_warnings(): # might not converge + warnings.simplefilter('ignore', ConvergenceWarning) + scores = Parallel(n_jobs=n_jobs, verbose=verbose)( + delayed(_group_iter_search_light)( + A.rows[list_i], + estimator, X, y, groups, scoring, cv, + thread_id + 1, A.shape[0], verbose) + for thread_id, list_i in enumerate(group_iter)) return np.concatenate(scores) @@ -136,8 +140,6 @@ def _group_iter_search_light(list_rows, estimator, X, y, groups, groups : array-like, optional group label for each sample for cross validation. - NOTE: will have no effect for scikit learn < 0.18 - scoring : string or callable, optional Scoring strategy to use. See the scikit-learn documentation. If callable, takes as arguments the fitted estimator, the @@ -166,13 +168,8 @@ def _group_iter_search_light(list_rows, estimator, X, y, groups, t0 = time.time() for i, row in enumerate(list_rows): kwargs = dict() - if not LooseVersion(sklearn.__version__) < LooseVersion('0.15'): - kwargs['scoring'] = scoring - if LooseVersion(sklearn.__version__) >= LooseVersion('0.18'): - kwargs['groups'] = groups - elif scoring is not None: - warnings.warn('Scikit-learn version is too old. 
' - 'scoring argument ignored', stacklevel=2) + kwargs['scoring'] = scoring + kwargs['groups'] = groups par_scores[i] = np.mean(cross_val_score(estimator, X[:, row], y, cv=cv, n_jobs=1, **kwargs)) @@ -290,6 +287,9 @@ def fit(self, imgs, y, groups=None): """ + # check if image is 4D + imgs = check_niimg_4d(imgs) + # Get the seeds process_mask_img = self.process_mask_img if self.process_mask_img is None: diff --git a/nilearn/decoding/space_net.py b/nilearn/decoding/space_net.py index 293dd43d92..55fab8d730 100644 --- a/nilearn/decoding/space_net.py +++ b/nilearn/decoding/space_net.py @@ -22,10 +22,7 @@ from scipy import stats, ndimage from sklearn.base import RegressorMixin from sklearn.utils.extmath import safe_sparse_dot -try: - from sklearn.utils import atleast2d_or_csr -except ImportError: # sklearn 0.15 - from sklearn.utils import check_array as atleast2d_or_csr +from sklearn.utils import check_array from sklearn.linear_model.base import LinearModel from sklearn.feature_selection import (SelectPercentile, f_regression, f_classif) @@ -34,8 +31,9 @@ from sklearn.metrics import accuracy_score from ..input_data.masker_validation import check_embedded_nifti_masker from .._utils.param_validation import _adjust_screening_percentile -from .._utils.fixes import check_X_y -from .._utils.fixes import check_cv, center_data +from sklearn.utils import check_X_y +from sklearn.model_selection import check_cv +from sklearn.linear_model.base import _preprocess_data as center_data from .._utils.compat import _basestring from .._utils.cache_mixin import CacheMixin from .objective_functions import _unmask @@ -798,14 +796,8 @@ def fit(self, X, y): case1 = (None in [alphas, l1_ratios]) and self.n_alphas > 1 case2 = (alphas is not None) and min(len(l1_ratios), len(alphas)) > 1 if case1 or case2: - if LooseVersion(sklearn.__version__) >= LooseVersion('0.18'): - # scikit-learn >= 0.18 - self.cv_ = list(check_cv( - self.cv, y=y, classifier=self.is_classif).split(X, y)) - else: - # scikit-learn < 0.18 - self.cv_ = list(check_cv(self.cv, X=X, y=y, - classifier=self.is_classif)) + self.cv_ = list(check_cv( + self.cv, y=y, classifier=self.is_classif).split(X, y)) else: # no cross-validation needed, user supplied all params self.cv_ = [(np.arange(n_samples), [])] @@ -907,7 +899,7 @@ class would be predicted. if not self.is_classif: return LinearModel.decision_function(self, X) - X = atleast2d_or_csr(X) + X = check_array(X) n_features = self.coef_.shape[1] if X.shape[1] != n_features: raise ValueError("X has %d features per sample; expecting %d" diff --git a/nilearn/decoding/tests/test_searchlight.py b/nilearn/decoding/tests/test_searchlight.py index 551dc8e4cc..ccbac9d7b7 100644 --- a/nilearn/decoding/tests/test_searchlight.py +++ b/nilearn/decoding/tests/test_searchlight.py @@ -30,13 +30,8 @@ def test_searchlight(): data_img = nibabel.Nifti1Image(data, np.eye(4)) # Define cross validation - if LooseVersion(sklearn.__version__) >= LooseVersion('0.18'): - from sklearn.model_selection import KFold - cv = KFold(n_splits=4) - else: - from sklearn.cross_validation import KFold - # avoid using KFold for compatibility with sklearn < 0.18 - cv = KFold(len(cond), 4) + from sklearn.model_selection import KFold + cv = KFold(n_splits=4) n_jobs = 1 # Run Searchlight with different radii @@ -103,4 +98,16 @@ def test_searchlight(): assert_equal(np.where(sl.scores_ == 1)[0].size, 7) assert_equal(sl.scores_[2, 2, 2], 1.) 
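With scikit-learn >= 0.18 now the minimum supported version, the scoring and groups keyword arguments are always forwarded to cross_val_score, and SearchLight.fit validates its input through check_niimg_4d, so a plain list of 3D images is accepted (the test added just below exercises this). A minimal sketch with synthetic data, not part of the patch itself:

import numpy as np
import nibabel
from nilearn.decoding import SearchLight

rng = np.random.RandomState(0)
# Six single-volume (3D) images; fit() stacks them into one 4D image internally.
imgs = [nibabel.Nifti1Image(rng.rand(5, 5, 5), np.eye(4)) for _ in range(6)]
y = [0, 1, 0, 1, 0, 1]
mask_img = nibabel.Nifti1Image(np.ones((5, 5, 5), dtype=np.int8), np.eye(4))

sl = SearchLight(mask_img, radius=2., n_jobs=1)
sl.fit(imgs, y)  # previously required an explicit 4D image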
+ # Check whether searchlight works on list of 3D images + rand = np.random.RandomState(0) + data = rand.rand(5, 5, 5) + data_img = nibabel.Nifti1Image(data, affine=np.eye(4)) + imgs = [data_img, data_img, data_img, data_img, data_img, data_img] + + # labels + y = [0, 1, 0, 1, 0, 1] + + # run searchlight on list of 3D images + sl = searchlight.SearchLight(mask_img) + sl.fit(imgs, y) diff --git a/nilearn/decoding/tests/test_space_net.py b/nilearn/decoding/tests/test_space_net.py index 938ec1e648..53e93b543a 100644 --- a/nilearn/decoding/tests/test_space_net.py +++ b/nilearn/decoding/tests/test_space_net.py @@ -216,8 +216,12 @@ def test_log_reg_vs_graph_net_two_classes_iris(C=.01, tol=1e-10, mask=mask, alphas=1. / C / X.shape[0], l1_ratios=1., tol=tol, verbose=0, max_iter=1000, penalty="tv-l1", standardize=False, screening_percentile=100.).fit(X_, y) - sklogreg = LogisticRegression(penalty="l1", fit_intercept=True, - tol=tol, C=C).fit(X, y) + sklogreg = LogisticRegression(penalty="l1", + fit_intercept=True, + solver='liblinear', + tol=tol, + C=C, + ).fit(X, y) # compare supports np.testing.assert_array_equal((np.abs(tvl1.coef_) < zero_thr), diff --git a/nilearn/decomposition/base.py b/nilearn/decomposition/base.py index 3835e7f9f4..aef70a7219 100644 --- a/nilearn/decomposition/base.py +++ b/nilearn/decomposition/base.py @@ -75,10 +75,8 @@ def fast_svd(X, n_components, random_state=None): S = S[:n_components] V = V[:n_components].copy() else: - if LooseVersion(sklearn.__version__) >= LooseVersion('0.18'): - n_iter = 'auto' - else: - n_iter = 3 + n_iter = 'auto' + U, S, V = randomized_svd(X, n_components=n_components, n_iter=n_iter, flip_sign=True, @@ -282,12 +280,15 @@ class BaseDecomposition(BaseEstimator, CacheMixin, TransformerMixin): This parameter is passed to image.resample_img. Please see the related documentation for details. - mask_strategy: {'background', 'epi'}, optional + mask_strategy: {'background', 'epi' or 'template'}, optional The strategy used to compute the mask: use 'background' if your - images present a clear homogeneous background, and 'epi' if they - are raw EPI images. Depending on this value, the mask will be - computed from masking.compute_background_mask or - masking.compute_epi_mask. Default is 'epi'. + images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'epi'. mask_args: dict, optional If mask is None, these are additional parameters passed to diff --git a/nilearn/decomposition/canica.py b/nilearn/decomposition/canica.py index 098ab52399..ef6d77d63a 100644 --- a/nilearn/decomposition/canica.py +++ b/nilearn/decomposition/canica.py @@ -81,12 +81,15 @@ class CanICA(MultiPCA): This parameter is passed to signal.clean. Please see the related documentation for details - mask_strategy: {'background', 'epi'}, optional + mask_strategy: {'background', 'epi' or 'template'}, optional The strategy used to compute the mask: use 'background' if your - images present a clear homogeneous background, and 'epi' if they - are raw EPI images. Depending on this value, the mask will be - computed from masking.compute_background_mask or - masking.compute_epi_mask. Default is 'epi'. 
+ images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'epi'. mask_args: dict, optional If mask is None, these are additional parameters passed to diff --git a/nilearn/decomposition/dict_learning.py b/nilearn/decomposition/dict_learning.py index 2e53ffb2f8..e31b2e734f 100644 --- a/nilearn/decomposition/dict_learning.py +++ b/nilearn/decomposition/dict_learning.py @@ -21,10 +21,8 @@ from .base import BaseDecomposition from .canica import CanICA - -if LooseVersion(sklearn.__version__) >= LooseVersion('0.17'): - # check_input=False is an optimization available only in sklearn >=0.17 - sparse_encode_args = {'check_input': False} +# check_input=False is an optimization available in sklearn. +sparse_encode_args = {'check_input': False} def _compute_loadings(components, data): @@ -117,12 +115,15 @@ class DictLearning(BaseDecomposition): This parameter is passed to signal.clean. Please see the related documentation for details. - mask_strategy: {'background', 'epi'}, optional + mask_strategy: {'background', 'epi' or 'template'}, optional The strategy used to compute the mask: use 'background' if your - images present a clear homogeneous background, and 'epi' if they - are raw EPI images. Depending on this value, the mask will be - computed from masking.compute_background_mask or - masking.compute_epi_mask. Default is 'epi'. + images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'epi'. mask_args: dict, optional If mask is None, these are additional parameters passed to diff --git a/nilearn/decomposition/multi_pca.py b/nilearn/decomposition/multi_pca.py index 990ab9602a..1cc60f6229 100644 --- a/nilearn/decomposition/multi_pca.py +++ b/nilearn/decomposition/multi_pca.py @@ -38,12 +38,15 @@ class MultiPCA(BaseDecomposition): it will be computed automatically by a MultiNiftiMasker with default parameters. - mask_strategy: {'background', 'epi'}, optional + mask_strategy: {'background', 'epi' or 'template'}, optional The strategy used to compute the mask: use 'background' if your - images present a clear homogeneous background, and 'epi' if they - are raw EPI images. Depending on this value, the mask will be - computed from masking.compute_background_mask or - masking.compute_epi_mask. Default is 'epi'. + images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'epi'. 
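The new 'template' strategy resamples the MNI152 gray-matter mask onto the data's field of view instead of estimating a mask from the images themselves. A sketch of the intended usage with the public ADHD sample fetcher; this is an illustration of the option, not part of the patch:

from nilearn import datasets
from nilearn.decomposition import CanICA

# A single ADHD resting-state run, just to have real 4D data to decompose.
adhd = datasets.fetch_adhd(n_subjects=1)

# mask_strategy='template' calls masking.compute_gray_matter_mask (resampled
# MNI152 mask) rather than deriving an EPI or background mask from the data.
canica = CanICA(n_components=10, mask_strategy='template', random_state=0)
canica.fit(adhd.func)
components_img = canica.masker_.inverse_transform(canica.components_)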
mask_args: dict, optional If mask is None, these are additional parameters passed to diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 9f1f08be4e..ee8adc0162 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -110,8 +110,8 @@ def _fast_smooth_array(arr): smoothed_arr: numpy.ndarray Smoothed array. - Note - ---- + Notes + ----- Rather than calling this function directly, users are encouraged to call the high-level function :func:`smooth_img` with fwhm='fast'. diff --git a/nilearn/image/resampling.py b/nilearn/image/resampling.py index 8c8fa73f3c..95c94a0a1c 100644 --- a/nilearn/image/resampling.py +++ b/nilearn/image/resampling.py @@ -122,7 +122,7 @@ def coord_transform(x, y, z, affine): >>> from nilearn import datasets, image >>> niimg = datasets.load_mni152_template() - >>> # Find the MNI coordinates of the voxel (10, 10, 10) + >>> # Find the MNI coordinates of the voxel (50, 50, 50) >>> image.coord_transform(50, 50, 50, niimg.affine) (-10.0, -26.0, 28.0) @@ -260,12 +260,6 @@ def _resample_one_img(data, A, b, target_shape, data = _extrapolate_out_mask(data, np.logical_not(not_finite), iterations=2)[0] - # See https://github.com/nilearn/nilearn/issues/346 Copying the - # array makes it C continuous and as such the int32 index in the C - # code is a lot less likely to overflow - if (LooseVersion(scipy.__version__) < LooseVersion('0.14.1')): - data = data.copy() - # Suppresses warnings in https://github.com/nilearn/nilearn/issues/1363 with warnings.catch_warnings(): if LooseVersion(scipy.__version__) >= LooseVersion('0.18'): @@ -277,13 +271,6 @@ def _resample_one_img(data, A, b, target_shape, output=out, order=interpolation_order) - # Bug in ndimage.affine_transform when out does not have native endianness - # see https://github.com/nilearn/nilearn/issues/275 - # Bug was fixed in scipy 0.15 - if (LooseVersion(scipy.__version__) < LooseVersion('0.15') and - not out.dtype.isnative): - out.byteswap(True) - if has_not_finite: # Suppresses warnings in https://github.com/nilearn/nilearn/issues/1363 with warnings.catch_warnings(): @@ -520,19 +507,18 @@ def resample_img(img, target_affine=None, target_shape=None, else: resampled_data_dtype = data.dtype - if LooseVersion(scipy.__version__) >= LooseVersion('0.17'): - # Since the release of 0.17, resampling nifti images have some issues - # when affine is passed as 1D array and if data is of non-native - # endianess. - # See issue https://github.com/nilearn/nilearn/issues/1445. - # If affine is passed as 1D, scipy uses _nd_image.zoom_shift rather - # than _geometric_transform (2D) where _geometric_transform is able - # to swap byte order in scipy later than 0.15 for nonnative endianess. - - # We convert to 'native' order to not have any issues either with - # 'little' or 'big' endian data dtypes (non-native endians). - if len(A.shape) == 1 and not resampled_data_dtype.isnative: - resampled_data_dtype = resampled_data_dtype.newbyteorder('N') + # Since the release of 0.17, resampling nifti images have some issues + # when affine is passed as 1D array and if data is of non-native + # endianess. + # See issue https://github.com/nilearn/nilearn/issues/1445. + # If affine is passed as 1D, scipy uses _nd_image.zoom_shift rather + # than _geometric_transform (2D) where _geometric_transform is able + # to swap byte order in scipy later than 0.15 for nonnative endianess. + + # We convert to 'native' order to not have any issues either with + # 'little' or 'big' endian data dtypes (non-native endians). 
+ if len(A.shape) == 1 and not resampled_data_dtype.isnative: + resampled_data_dtype = resampled_data_dtype.newbyteorder('N') # Code is generic enough to work for both 3D and 4D images other_shape = data_shape[3:] diff --git a/nilearn/image/tests/test_image.py b/nilearn/image/tests/test_image.py index caaf95287a..e76399f204 100644 --- a/nilearn/image/tests/test_image.py +++ b/nilearn/image/tests/test_image.py @@ -18,7 +18,7 @@ from nilearn.image import image from nilearn.image import resampling from nilearn.image import concat_imgs -from nilearn._utils import testing, niimg_conversions +from nilearn._utils import testing, niimg_conversions, data_gen from nilearn.image import new_img_like from nilearn.image import threshold_img from nilearn.image import iter_img @@ -45,7 +45,7 @@ def test_high_variance_confounds(): length = 17 n_confounds = 10 - img, mask_img = testing.generate_fake_fmri(shape=shape, length=length) + img, mask_img = data_gen.generate_fake_fmri(shape=shape, length=length) confounds1 = image.high_variance_confounds(img, mask_img=mask_img, percentile=10., @@ -150,10 +150,10 @@ def test_smooth_img(): lengths = (17, 18) fwhm = (1., 2., 3.) - img1, mask1 = testing.generate_fake_fmri(shape=shapes[0], - length=lengths[0]) - img2, mask2 = testing.generate_fake_fmri(shape=shapes[1], - length=lengths[1]) + img1, mask1 = data_gen.generate_fake_fmri(shape=shapes[0], + length=lengths[0]) + img2, mask2 = data_gen.generate_fake_fmri(shape=shapes[1], + length=lengths[1]) for create_files in (False, True): with testing.write_tmp_imgs(img1, img2, @@ -347,7 +347,7 @@ def test_index_img(): [5., 6., 7., 8.], [9., 10., 11., 12.], [0., 0., 0., 1.]]) - img_4d, _ = testing.generate_fake_fmri(affine=affine) + img_4d, _ = data_gen.generate_fake_fmri(affine=affine) fourth_dim_size = img_4d.shape[3] tested_indices = (list(range(fourth_dim_size)) + @@ -378,7 +378,7 @@ def test_pd_index_img(): [5., 6., 7., 8.], [9., 10., 11., 12.], [0., 0., 0., 1.]]) - img_4d, _ = testing.generate_fake_fmri(affine=affine) + img_4d, _ = data_gen.generate_fake_fmri(affine=affine) fourth_dim_size = img_4d.shape[3] @@ -405,7 +405,7 @@ def test_iter_img(): [5., 6., 7., 8.], [9., 10., 11., 12.], [0., 0., 0., 1.]]) - img_4d, _ = testing.generate_fake_fmri(affine=affine) + img_4d, _ = data_gen.generate_fake_fmri(affine=affine) for i, img in enumerate(image.iter_img(img_4d)): expected_data_3d = img_4d.get_data()[..., i] @@ -468,7 +468,7 @@ def test_new_img_like(): def test_validity_threshold_value_in_threshold_img(): shape = (6, 8, 10) - maps, _ = testing.generate_maps(shape, n_regions=2) + maps, _ = data_gen.generate_maps(shape, n_regions=2) # testing to raise same error when threshold=None case testing.assert_raises_regex(ValueError, @@ -487,7 +487,7 @@ def test_validity_threshold_value_in_threshold_img(): def test_threshold_img(): # to check whether passes with valid threshold inputs shape = (10, 20, 30) - maps, _ = testing.generate_maps(shape, n_regions=4) + maps, _ = data_gen.generate_maps(shape, n_regions=4) affine = np.eye(4) mask_img = nibabel.Nifti1Image(np.ones((shape), dtype=np.int8), affine) @@ -502,7 +502,7 @@ def test_threshold_img(): def test_isnan_threshold_img_data(): shape = (10, 10, 10) - maps, _ = testing.generate_maps(shape, n_regions=2) + maps, _ = data_gen.generate_maps(shape, n_regions=2) data = maps.get_data() data[:, :, 0] = np.nan @@ -591,10 +591,10 @@ def test_largest_cc_img(): shapes = ((10, 11, 12), (13, 14, 15)) regions = [1, 3] - img1 = testing.generate_labeled_regions(shape=shapes[0], - 
n_regions=regions[0]) - img2 = testing.generate_labeled_regions(shape=shapes[1], - n_regions=regions[1]) + img1 = data_gen.generate_labeled_regions(shape=shapes[0], + n_regions=regions[0]) + img2 = data_gen.generate_labeled_regions(shape=shapes[1], + n_regions=regions[1]) for create_files in (False, True): with testing.write_tmp_imgs(img1, img2, @@ -612,7 +612,7 @@ def test_largest_cc_img(): assert_true(out.shape == (shapes[0])) # Test whether 4D Nifti throws the right error. - img_4D = testing.generate_fake_fmri(shapes[0], length=17) + img_4D = data_gen.generate_fake_fmri(shapes[0], length=17) assert_raises(DimensionError, largest_connected_component_img, img_4D) # tests adapted to non-native endian data dtype diff --git a/nilearn/input_data/base_masker.py b/nilearn/input_data/base_masker.py index 618a6ace46..f90b54785f 100644 --- a/nilearn/input_data/base_masker.py +++ b/nilearn/input_data/base_masker.py @@ -26,7 +26,8 @@ def filter_and_extract(imgs, extraction_function, memory_level=0, memory=Memory(cachedir=None), verbose=0, confounds=None, - copy=True): + copy=True, + dtype=None): """Extract representative time series using given function. Parameters @@ -63,7 +64,8 @@ def filter_and_extract(imgs, extraction_function, print("[%s] Loading data from %s" % ( class_name, _utils._repr_niimgs(imgs)[:200])) - imgs = _utils.check_niimg(imgs, atleast_4d=True, ensure_ndim=4) + imgs = _utils.check_niimg(imgs, atleast_4d=True, ensure_ndim=4, + dtype=dtype) sample_mask = parameters.get('sample_mask') if sample_mask is not None: @@ -103,7 +105,6 @@ def filter_and_extract(imgs, extraction_function, # Filtering # Confounds removing (from csv file or numpy array) # Normalizing - if verbose > 0: print("[%s] Cleaning extracted signals" % class_name) sessions = parameters.get('sessions') diff --git a/nilearn/input_data/multi_nifti_masker.py b/nilearn/input_data/multi_nifti_masker.py index 62d5f723f9..f0f5737684 100644 --- a/nilearn/input_data/multi_nifti_masker.py +++ b/nilearn/input_data/multi_nifti_masker.py @@ -67,12 +67,15 @@ class MultiNiftiMasker(NiftiMasker, CacheMixin): This parameter is passed to image.resample_img. Please see the related documentation for details. - mask_strategy: {'background' or 'epi'}, optional + mask_strategy: {'background', 'epi' or 'template'}, optional The strategy used to compute the mask: use 'background' if your - images present a clear homogeneous background, and 'epi' if they - are raw EPI images. Depending on this value, the mask will be - computed from masking.compute_background_mask or - masking.compute_epi_mask. Default is 'background'. + images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'background'. mask_args : dict, optional If mask is None, these are additional parameters passed to @@ -80,6 +83,11 @@ class MultiNiftiMasker(NiftiMasker, CacheMixin): to fine-tune mask computation. Please see the related documentation for details. + dtype: {dtype, "auto"} + Data type toward which the data should be converted. If "auto", the + data will be converted to int32 if dtype is discrete and float32 if it + is continuous. + memory: instance of joblib.Memory or string Used to cache the masking process. 
By default, no caching is done. If a string is given, it is the @@ -116,7 +124,7 @@ def __init__(self, mask_img=None, smoothing_fwhm=None, standardize=False, detrend=False, low_pass=None, high_pass=None, t_r=None, target_affine=None, target_shape=None, - mask_strategy='background', mask_args=None, + mask_strategy='background', mask_args=None, dtype=None, memory=Memory(cachedir=None), memory_level=0, n_jobs=1, verbose=0 ): @@ -133,6 +141,7 @@ def __init__(self, mask_img=None, smoothing_fwhm=None, self.target_shape = target_shape self.mask_strategy = mask_strategy self.mask_args = mask_args + self.dtype = dtype self.memory = memory self.memory_level = memory_level @@ -176,9 +185,12 @@ def fit(self, imgs=None, y=None): compute_mask = masking.compute_multi_background_mask elif self.mask_strategy == 'epi': compute_mask = masking.compute_multi_epi_mask + elif self.mask_strategy == 'template': + compute_mask = masking.compute_multi_gray_matter_mask else: raise ValueError("Unknown value of mask_strategy '%s'. " - "Acceptable values are 'background' and 'epi'.") + "Acceptable values are 'background', 'epi' " + "and 'template'.") self.mask_img_ = self._cache( compute_mask, ignore=['n_jobs', 'verbose', 'memory'])( @@ -280,6 +292,7 @@ def transform_imgs(self, imgs_list, confounds=None, copy=True, n_jobs=1): verbose=self.verbose, confounds=cfs, copy=copy, + dtype=self.dtype ) for imgs, cfs in izip(niimg_iter, confounds)) return data diff --git a/nilearn/input_data/nifti_labels_masker.py b/nilearn/input_data/nifti_labels_masker.py index 19d421a22e..77e104c841 100644 --- a/nilearn/input_data/nifti_labels_masker.py +++ b/nilearn/input_data/nifti_labels_masker.py @@ -76,6 +76,11 @@ class NiftiLabelsMasker(BaseMasker, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details + dtype: {dtype, "auto"} + Data type toward which the data should be converted. If "auto", the + data will be converted to int32 if dtype is discrete and float32 if it + is continuous. + resampling_target: {"data", "labels", None}, optional. Gives which image gives the final shape/size. 
For example, if `resampling_target` is "data", the atlas is resampled to the @@ -104,7 +109,7 @@ class NiftiLabelsMasker(BaseMasker, CacheMixin): def __init__(self, labels_img, background_label=0, mask_img=None, smoothing_fwhm=None, standardize=False, detrend=False, - low_pass=None, high_pass=None, t_r=None, + low_pass=None, high_pass=None, t_r=None, dtype=None, resampling_target="data", memory=Memory(cachedir=None, verbose=0), memory_level=1, verbose=0): @@ -121,6 +126,7 @@ def __init__(self, labels_img, background_label=0, mask_img=None, self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r + self.dtype = dtype # Parameters for resampling self.resampling_target = resampling_target @@ -253,6 +259,7 @@ def transform_single_imgs(self, imgs, confounds=None): # Pre-processing params, confounds=confounds, + dtype=self.dtype, # Caching memory=self.memory, memory_level=self.memory_level, @@ -285,5 +292,5 @@ def inverse_transform(self, signals): logger.log("computing image from signals", verbose=self.verbose) return signal_extraction.signals_to_img_labels( - signals, self.labels_img_, self.mask_img_, + signals, self._resampled_labels_img_, self.mask_img_, background_label=self.background_label) diff --git a/nilearn/input_data/nifti_maps_masker.py b/nilearn/input_data/nifti_maps_masker.py index 50f58c0211..0cb57567f9 100644 --- a/nilearn/input_data/nifti_maps_masker.py +++ b/nilearn/input_data/nifti_maps_masker.py @@ -78,6 +78,11 @@ class NiftiMapsMasker(BaseMasker, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details + dtype: {dtype, "auto"} + Data type toward which the data should be converted. If "auto", the + data will be converted to int32 if dtype is discrete and float32 if it + is continuous. + resampling_target: {"mask", "maps", "data", None} optional. Gives which image gives the final shape/size. 
For example, if `resampling_target` is "mask" then maps_img and images provided to @@ -113,7 +118,7 @@ class NiftiMapsMasker(BaseMasker, CacheMixin): def __init__(self, maps_img, mask_img=None, allow_overlap=True, smoothing_fwhm=None, standardize=False, detrend=False, - low_pass=None, high_pass=None, t_r=None, + low_pass=None, high_pass=None, t_r=None, dtype=None, resampling_target="data", memory=Memory(cachedir=None, verbose=0), memory_level=0, verbose=0): @@ -132,6 +137,7 @@ def __init__(self, maps_img, mask_img=None, self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r + self.dtype = dtype # Parameters for resampling self.resampling_target = resampling_target @@ -161,7 +167,7 @@ def fit(self, X=None, y=None): _utils._repr_niimgs(self.maps_img)[:200], verbose=self.verbose) - self.maps_img_ = _utils.check_niimg_4d(self.maps_img) + self.maps_img_ = _utils.check_niimg_4d(self.maps_img, dtype=self.dtype) self.maps_img_ = image.clean_img(self.maps_img_, detrend=False, standardize=False, ensure_finite=True) @@ -312,6 +318,7 @@ def transform_single_imgs(self, imgs, confounds=None): # Pre-treatments params, confounds=confounds, + dtype=self.dtype, # Caching memory=self.memory, memory_level=self.memory_level, diff --git a/nilearn/input_data/nifti_masker.py b/nilearn/input_data/nifti_masker.py index 18c40f018a..aa579e972a 100644 --- a/nilearn/input_data/nifti_masker.py +++ b/nilearn/input_data/nifti_masker.py @@ -24,14 +24,16 @@ def __init__(self, mask_img_): self.mask_img_ = mask_img_ def __call__(self, imgs): - return masking.apply_mask(imgs, self.mask_img_), imgs.affine + return(masking.apply_mask(imgs, self.mask_img_, + dtype=imgs.get_data_dtype()), imgs.affine) def filter_and_mask(imgs, mask_img_, parameters, memory_level=0, memory=Memory(cachedir=None), verbose=0, confounds=None, - copy=True): + copy=True, + dtype=None): imgs = _utils.check_niimg(imgs, atleast_4d=True, ensure_ndim=4) # Check whether resampling is truly necessary. If so, crop mask @@ -49,7 +51,8 @@ def filter_and_mask(imgs, mask_img_, parameters, memory_level=memory_level, memory=memory, verbose=verbose, - confounds=confounds, copy=copy) + confounds=confounds, copy=copy, + dtype=dtype) # For _later_: missing value removal or imputing of missing data # (i.e. we want to get rid of NaNs, if smoothing must be done @@ -110,12 +113,15 @@ class NiftiMasker(BaseMasker, CacheMixin): This parameter is passed to image.resample_img. Please see the related documentation for details. - mask_strategy: {'background' or 'epi'}, optional + mask_strategy: {'background', 'epi' or 'template'}, optional The strategy used to compute the mask: use 'background' if your - images present a clear homogeneous background, and 'epi' if they - are raw EPI images. Depending on this value, the mask will be - computed from masking.compute_background_mask or - masking.compute_epi_mask. Default is 'background'. + images present a clear homogeneous background, 'epi' if they + are raw EPI images, or you could use 'template' which will + extract the gray matter part of your data by resampling the MNI152 + brain mask for your data's field of view. + Depending on this value, the mask will be computed from + masking.compute_background_mask, masking.compute_epi_mask or + masking.compute_gray_matter_mask. Default is 'background'. mask_args : dict, optional If mask is None, these are additional parameters passed to @@ -130,6 +136,11 @@ class NiftiMasker(BaseMasker, CacheMixin): This is useful to perform data subselection as part of a scikit-learn pipeline. 
+ `dtype: {dtype, "auto"} + Data type toward which the data should be converted. If "auto", the + data will be converted to int32 if dtype is discrete and float32 if it + is continuous. + memory : instance of joblib.Memory or string Used to cache the masking process. By default, no caching is done. If a string is given, it is the @@ -164,7 +175,7 @@ def __init__(self, mask_img=None, sessions=None, smoothing_fwhm=None, low_pass=None, high_pass=None, t_r=None, target_affine=None, target_shape=None, mask_strategy='background', - mask_args=None, sample_mask=None, + mask_args=None, sample_mask=None, dtype=None, memory_level=1, memory=Memory(cachedir=None), verbose=0 ): @@ -183,6 +194,7 @@ def __init__(self, mask_img=None, sessions=None, smoothing_fwhm=None, self.mask_strategy = mask_strategy self.mask_args = mask_args self.sample_mask = sample_mask + self.dtype = dtype self.memory = memory self.memory_level = memory_level @@ -222,10 +234,12 @@ def fit(self, imgs=None, y=None): compute_mask = masking.compute_background_mask elif self.mask_strategy == 'epi': compute_mask = masking.compute_epi_mask + elif self.mask_strategy == 'template': + compute_mask = masking.compute_gray_matter_mask else: raise ValueError("Unknown value of mask_strategy '%s'. " - "Acceptable values are 'background' and " - "'epi'." % self.mask_strategy) + "Acceptable values are 'background', " + "'epi' and 'template'." % self.mask_strategy) if self.verbose > 0: print("[%s.fit] Computing the mask" % self.__class__.__name__) self.mask_img_ = self._cache(compute_mask, ignore=['verbose'])( @@ -289,7 +303,8 @@ def transform_single_imgs(self, imgs, confounds=None, copy=True): memory=self.memory, verbose=self.verbose, confounds=confounds, - copy=copy + copy=copy, + dtype=self.dtype ) return data diff --git a/nilearn/input_data/nifti_spheres_masker.py b/nilearn/input_data/nifti_spheres_masker.py index ff156b5632..96d163fa60 100644 --- a/nilearn/input_data/nifti_spheres_masker.py +++ b/nilearn/input_data/nifti_spheres_masker.py @@ -55,12 +55,6 @@ def _apply_mask_and_get_affinity(seeds, niimg, radius, allow_overlap, mask_coords[2], affine) mask_coords = np.asarray(mask_coords).T - if (radius is not None and - LooseVersion(sklearn.__version__) < LooseVersion('0.16')): - # Fix for scikit learn versions below 0.16. See - # https://github.com/scikit-learn/scikit-learn/issues/4072 - radius += 1e-6 - clf = neighbors.NearestNeighbors(radius=radius) A = clf.fit(mask_coords).radius_neighbors_graph(seeds) A = A.tolil() @@ -119,22 +113,23 @@ class _ExtractionFunctor(object): func_name = 'nifti_spheres_masker_extractor' - def __init__(self, seeds_, radius, mask_img, allow_overlap): + def __init__(self, seeds_, radius, mask_img, allow_overlap, dtype): self.seeds_ = seeds_ self.radius = radius self.mask_img = mask_img self.allow_overlap = allow_overlap + self.dtype = dtype def __call__(self, imgs): n_seeds = len(self.seeds_) - imgs = check_niimg_4d(imgs) + imgs = check_niimg_4d(imgs, dtype=self.dtype) - signals = np.empty((imgs.shape[3], n_seeds)) + signals = np.empty((imgs.shape[3], n_seeds), + dtype=imgs.get_data_dtype()) for i, sphere in enumerate(_iter_signals_from_spheres( self.seeds_, imgs, self.radius, self.allow_overlap, mask_img=self.mask_img)): signals[:, i] = np.mean(sphere, axis=1) - return signals, None @@ -187,6 +182,11 @@ class NiftiSpheresMasker(BaseMasker, CacheMixin): This parameter is passed to signal.clean. Please see the related documentation for details. 
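The dtype argument threaded through the maskers controls the precision of the extracted signals: 'auto' downcasts continuous data to float32 (and discrete data to int32), roughly halving memory use for float64 inputs. A minimal sketch mirroring the new dtype tests, not part of the patch:

import numpy as np
import nibabel
from nilearn.input_data import NiftiMasker

data = np.zeros((9, 9, 9), dtype=np.float64)
data[2:-2, 2:-2, 2:-2] = 10
img = nibabel.Nifti1Image(data, np.eye(4))

# 'auto' converts the float64 volume to float32 signals.
masker = NiftiMasker(dtype='auto')
signals = masker.fit_transform(img)
assert signals.dtype == np.float32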
+ dtype: {dtype, "auto"} + Data type toward which the data should be converted. If "auto", the + data will be converted to int32 if dtype is discrete and float32 if it + is continuous. + memory: joblib.Memory or str, optional Used to cache the region extraction process. By default, no caching is done. If a string is given, it is the @@ -207,7 +207,7 @@ class NiftiSpheresMasker(BaseMasker, CacheMixin): def __init__(self, seeds, radius=None, mask_img=None, allow_overlap=False, smoothing_fwhm=None, standardize=False, detrend=False, - low_pass=None, high_pass=None, t_r=None, + low_pass=None, high_pass=None, t_r=None, dtype=None, memory=Memory(cachedir=None, verbose=0), memory_level=1, verbose=0): self.seeds = seeds @@ -224,6 +224,7 @@ def __init__(self, seeds, radius=None, mask_img=None, allow_overlap=False, self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r + self.dtype = dtype # Parameters for joblib self.memory = memory @@ -310,10 +311,11 @@ def transform_single_imgs(self, imgs, confounds=None): ignore=['verbose', 'memory', 'memory_level'])( # Images imgs, _ExtractionFunctor(self.seeds_, self.radius, self.mask_img, - self.allow_overlap), + self.allow_overlap, self.dtype), # Pre-processing params, confounds=confounds, + dtype=self.dtype, # Caching memory=self.memory, memory_level=self.memory_level, diff --git a/nilearn/input_data/tests/test_multi_nifti_masker.py b/nilearn/input_data/tests/test_multi_nifti_masker.py index b0fba4a00e..620864d10c 100644 --- a/nilearn/input_data/tests/test_multi_nifti_masker.py +++ b/nilearn/input_data/tests/test_multi_nifti_masker.py @@ -162,3 +162,37 @@ def test_shelving(): # enables to delete "filename" on windows del masker shutil.rmtree(cachedir, ignore_errors=True) + + +def test_compute_multi_gray_matter_mask(): + # Check mask is correctly is correctly calculated + imgs = [Nifti1Image(np.random.rand(9, 9, 5), np.eye(4)), + Nifti1Image(np.random.rand(9, 9, 5), np.eye(4))] + + masker = MultiNiftiMasker(mask_strategy='template') + masker.fit(imgs) + + # Check that the order of the images does not change the output + masker2 = MultiNiftiMasker(mask_strategy='template') + masker2.fit(imgs[::-1]) + + mask = masker.mask_img_ + mask2 = masker2.mask_img_ + + mask_ref = np.zeros((9, 9, 5)) + mask_ref[2:7, 2:7, 2] = 1 + + np.testing.assert_array_equal(mask.get_data(), mask_ref) + np.testing.assert_array_equal(mask2.get_data(), mask_ref) + + +def test_dtype(): + data = np.zeros((9, 9, 9), dtype=np.float64) + data[2:-2, 2:-2, 2:-2] = 10 + img = Nifti1Image(data, np.eye(4)) + + masker = MultiNiftiMasker(dtype='auto') + masker.fit([[img]]) + + masked_img = masker.transform([[img]]) + assert(masked_img[0].dtype == np.float32) diff --git a/nilearn/input_data/tests/test_nifti_labels_masker.py b/nilearn/input_data/tests/test_nifti_labels_masker.py index 6a59b44276..18e7148e3b 100644 --- a/nilearn/input_data/tests/test_nifti_labels_masker.py +++ b/nilearn/input_data/tests/test_nifti_labels_masker.py @@ -11,7 +11,7 @@ import nibabel from nilearn.input_data.nifti_labels_masker import NiftiLabelsMasker -from nilearn._utils import testing, as_ndarray +from nilearn._utils import testing, as_ndarray, data_gen from nilearn._utils.exceptions import DimensionError from nilearn._utils.testing import assert_less @@ -41,8 +41,8 @@ def test_nifti_labels_masker(): fmri21_img, mask21_img = generate_random_img(shape2, affine=affine1, length=length) - labels11_img = testing.generate_labeled_regions(shape1, affine=affine1, - n_regions=n_regions) + labels11_img = 
data_gen.generate_labeled_regions(shape1, affine=affine1, + n_regions=n_regions) mask_img_4d = nibabel.Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8), affine=np.diag((4, 4, 4, 1))) @@ -109,9 +109,9 @@ def test_nifti_labels_masker_with_nans_and_infs(): n_regions = 9 fmri_img, mask_img = generate_random_img((13, 11, 12), affine=np.eye(4), length=length) - labels_img = testing.generate_labeled_regions((13, 11, 12), - affine=np.eye(4), - n_regions=n_regions) + labels_img = data_gen.generate_labeled_regions((13, 11, 12), + affine=np.eye(4), + n_regions=n_regions) # nans mask_data = mask_img.get_data() mask_data[:, :, 7] = np.nan @@ -144,8 +144,8 @@ def test_nifti_labels_masker_resampling(): _, mask22_img = generate_random_img(shape2, affine=affine, length=length) - labels33_img = testing.generate_labeled_regions(shape3, n_regions, - affine=affine) + labels33_img = data_gen.generate_labeled_regions(shape3, n_regions, + affine=affine) # Test error checking assert_raises(ValueError, NiftiLabelsMasker, labels33_img, @@ -191,8 +191,8 @@ def test_nifti_labels_masker_resampling(): length=length) # Target: labels - labels33_img = testing.generate_labeled_regions(shape3, n_regions, - affine=affine) + labels33_img = data_gen.generate_labeled_regions(shape3, n_regions, + affine=affine) masker = NiftiLabelsMasker(labels33_img, mask_img=mask22_img, resampling_target="labels") @@ -239,3 +239,30 @@ def test_nifti_labels_masker_resampling(): with testing.write_tmp_imgs(fmri22_img) as filename: masker = NiftiLabelsMasker(labels33_img, resampling_target='data') masker.fit_transform(filename) + + # test labels masker with resampling target in 'data', 'labels' to return + # resampled labels having number of labels equal with transformed shape of + # 2nd dimension. This tests are added based on issue #1673 in Nilearn + shape = (13, 11, 12) + affine = np.eye(4) * 2 + + fmri_img, _ = generate_random_img(shape, affine=affine, length=21) + labels_img = data_gen.generate_labeled_regions((9, 8, 6), affine=np.eye(4), + n_regions=10) + for resampling_target in ['data', 'labels']: + masker = NiftiLabelsMasker(labels_img=labels_img, + resampling_target=resampling_target) + transformed = masker.fit_transform(fmri_img) + resampled_labels_img = masker._resampled_labels_img_ + n_resampled_labels = len(np.unique(resampled_labels_img.get_data())) + assert_equal(n_resampled_labels - 1, transformed.shape[1]) + # inverse transform + compressed_img = masker.inverse_transform(transformed) + + # Test that compressing the image a second time should yield an image + # with the same data as compressed_img. 
+ transformed2 = masker.fit_transform(fmri_img) + # inverse transform again + compressed_img2 = masker.inverse_transform(transformed2) + np.testing.assert_array_equal(compressed_img.get_data(), + compressed_img2.get_data()) diff --git a/nilearn/input_data/tests/test_nifti_maps_masker.py b/nilearn/input_data/tests/test_nifti_maps_masker.py index aae44feeee..82c09b9b76 100644 --- a/nilearn/input_data/tests/test_nifti_maps_masker.py +++ b/nilearn/input_data/tests/test_nifti_maps_masker.py @@ -11,7 +11,7 @@ import nibabel from nilearn.input_data.nifti_maps_masker import NiftiMapsMasker -from nilearn._utils import testing, as_ndarray +from nilearn._utils import testing, as_ndarray, data_gen from nilearn._utils.exceptions import DimensionError from nilearn._utils.testing import assert_less, assert_raises_regex @@ -42,7 +42,7 @@ def test_nifti_maps_masker(): length=length) labels11_img, labels_mask_img = \ - testing.generate_maps(shape1, n_regions, affine=affine1) + data_gen.generate_maps(shape1, n_regions, affine=affine1) # No exception raised here for create_files in (True, False): @@ -124,8 +124,8 @@ def test_nifti_maps_masker_with_nans(): n_regions = 8 fmri_img, mask_img = generate_random_img((13, 11, 12), affine=np.eye(4), length=length) - maps_img, maps_mask_img = testing.generate_maps((13, 11, 12), n_regions, - affine=np.eye(4)) + maps_img, maps_mask_img = data_gen.generate_maps((13, 11, 12), n_regions, + affine=np.eye(4)) # nans maps_data = maps_img.get_data() @@ -162,7 +162,7 @@ def test_nifti_maps_masker_2(): length=length) maps33_img, _ = \ - testing.generate_maps(shape3, n_regions, affine=affine) + data_gen.generate_maps(shape3, n_regions, affine=affine) mask_img_4d = nibabel.Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8), affine=np.diag((4, 4, 4, 1))) @@ -235,11 +235,11 @@ def test_nifti_maps_masker_2(): length = 21 fmri11_img, _ = generate_random_img(shape1, affine=affine1, length=length) - _, mask22_img = testing.generate_fake_fmri(shape2, length=1, - affine=affine2) + _, mask22_img = data_gen.generate_fake_fmri(shape2, length=1, + affine=affine2) # Target: maps maps33_img, _ = \ - testing.generate_maps(shape3, n_regions, affine=affine1) + data_gen.generate_maps(shape3, n_regions, affine=affine1) masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img, resampling_target="maps") diff --git a/nilearn/input_data/tests/test_nifti_masker.py b/nilearn/input_data/tests/test_nifti_masker.py index 496f4bd951..fd223e4388 100644 --- a/nilearn/input_data/tests/test_nifti_masker.py +++ b/nilearn/input_data/tests/test_nifti_masker.py @@ -19,6 +19,7 @@ from numpy.testing import assert_array_equal, assert_equal from nilearn._utils import testing +from nilearn._utils import data_gen from nilearn._utils.class_inspect import get_params from nilearn._utils.exceptions import DimensionError from nilearn._utils.testing import assert_raises_regex @@ -100,7 +101,7 @@ def test_matrix_orientation(): # the "step" kind generate heavyside-like signals for each voxel. # all signals being identical, standardizing along the wrong axis # would leave a null signal. Along the correct axis, the step remains. 
- fmri, mask = testing.generate_fake_fmri(shape=(40, 41, 42), kind="step") + fmri, mask = data_gen.generate_fake_fmri(shape=(40, 41, 42), kind="step") masker = NiftiMasker(mask_img=mask, standardize=True, detrend=True) timeseries = masker.fit_transform(fmri) assert(timeseries.shape[0] == fmri.shape[3]) @@ -300,6 +301,30 @@ def test_compute_epi_mask(): mask4.get_data()[3:12, 3:12])) +def test_compute_gray_matter_mask(): + # Check masker for template masking strategy + + img = np.random.rand(9, 9, 5) + img = Nifti1Image(img, np.eye(4)) + + masker = NiftiMasker(mask_strategy='template') + + masker.fit(img) + mask1 = masker.mask_img_ + + masker2 = NiftiMasker(mask_strategy='template', + mask_args=dict(threshold=0.)) + + masker2.fit(img) + mask2 = masker2.mask_img_ + + mask_ref = np.zeros((9, 9, 5)) + mask_ref[2:7, 2:7, 2] = 1 + + np.testing.assert_array_equal(mask1.get_data(), mask_ref) + np.testing.assert_array_equal(mask2.get_data(), mask_ref) + + def test_filter_and_mask_error(): data = np.zeros([20, 30, 40, 5]) mask = np.zeros([20, 30, 40, 2]) @@ -332,3 +357,24 @@ def test_filter_and_mask(): # Test return_affine = False data = filter_and_mask(data_img, mask_img, params) assert_equal(data.shape, (5, 24000)) + + +def test_dtype(): + data_32 = np.zeros((9, 9, 9), dtype=np.float32) + data_64 = np.zeros((9, 9, 9), dtype=np.float64) + data_32[2:-2, 2:-2, 2:-2] = 10 + data_64[2:-2, 2:-2, 2:-2] = 10 + + affine_32 = np.eye(4, dtype=np.float32) + affine_64 = np.eye(4, dtype=np.float64) + + img_32 = Nifti1Image(data_32, affine_32) + img_64 = Nifti1Image(data_64, affine_64) + + masker_1 = NiftiMasker(dtype='auto') + assert(masker_1.fit_transform(img_32).dtype == np.float32) + assert(masker_1.fit_transform(img_64).dtype == np.float32) + + masker_2 = NiftiMasker(dtype='float64') + assert(masker_2.fit_transform(img_32).dtype == np.float64) + assert(masker_2.fit_transform(img_64).dtype == np.float64) diff --git a/nilearn/masking.py b/nilearn/masking.py index 92f4ff02d0..3a31557c57 100644 --- a/nilearn/masking.py +++ b/nilearn/masking.py @@ -82,8 +82,9 @@ def _extrapolate_out_mask(data, mask, iterations=1): outer_shell[1:-1, 1:-1, 1:-1] = np.logical_xor(new_mask, mask) outer_shell_x, outer_shell_y, outer_shell_z = np.where(outer_shell) extrapolation = list() - for i, j, k in [(0, 1, 0), (0, -1, 0), (1, 0, 0), (-1, 0, 0), - (1, 0, 0), (-1, 0, 0)]: + for i, j, k in [(1, 0, 0), (-1, 0, 0), + (0, 1, 0), (0, -1, 0), + (0, 0, 1), (0, 0, -1)]: this_x = outer_shell_x + i this_y = outer_shell_y + j this_z = outer_shell_z + k @@ -321,7 +322,7 @@ def compute_multi_epi_mask(epi_imgs, lower_cutoff=0.2, upper_cutoff=0.85, upper_cutoff: float, optional upper fraction of the histogram to be discarded. - connected: boolean, optional + connected: bool, optional if connected is True, only the largest connect component is kept. exclude_zeros: boolean, optional @@ -473,7 +474,7 @@ def compute_multi_background_mask(data_imgs, border_size=2, upper_cutoff=0.85, The size, in voxel of the border used on the side of the image to determine the value of the background. - connected: boolean, optional + connected: bool, optional if connected is True, only the largest connect component is kept. target_affine: 3x3 or 4x4 matrix, optional @@ -513,6 +514,147 @@ def compute_multi_background_mask(data_imgs, border_size=2, upper_cutoff=0.85, return mask +def compute_gray_matter_mask(target_img, threshold=.5, + connected=True, opening=2, memory=None, + verbose=0): + """ Compute a mask corresponding to the gray matter part of the brain. 
+ The gray matter part is calculated through the resampling of MNI152 + template gray matter mask onto the target image + + Parameters + ---------- + target_img: Niimg-like object + See http://nilearn.github.io/manipulating_images/input_output.html + Images used to compute the mask. 3D and 4D images are accepted. + Only the shape and affine of target_img will be used here. + + threshold: float, optional + The value under which the MNI template is cut off. + Default value is 0.5 + + connected: bool, optional + if connected is True, only the largest connected component is kept. + Default is True + + opening: bool or int, optional + if opening is True, a morphological opening is performed, to keep + only large structures. + If opening is an integer `n`, it is performed via `n` erosions. + After estimation of the largest connected constituent, 2`n` closing + operations are performed followed by `n` erosions. This corresponds + to 1 opening operation of order `n` followed by a closing operator + of order `n`. + + memory: instance of joblib.Memory or str + Used to cache the function call. + + verbose: int, optional + Controls the amount of verbosity: higher numbers give + more messages + + Returns + ------- + mask: nibabel.Nifti1Image + The brain mask (3D image) + """ + if verbose > 0: + print("Template mask computation") + + target_img = _utils.check_niimg(target_img) + + from .datasets import load_mni152_brain_mask + template = load_mni152_brain_mask() + dtype = target_img.get_data_dtype() + template = new_img_like(template, + template.get_data().astype(dtype)) + + from .image.resampling import resample_to_img + resampled_template = cache(resample_to_img, memory)(template, target_img) + + mask = resampled_template.get_data() >= threshold + + mask, affine = _post_process_mask(mask, target_img.affine, opening=opening, + connected=connected, + warning_msg="Gray matter mask is empty, " + "lower the threshold or " + "check your input FOV") + + return new_img_like(target_img, mask, affine) + + +def compute_multi_gray_matter_mask(target_imgs, threshold=.5, + connected=True, opening=2, + memory=None, verbose=0, n_jobs=1, **kwargs): + """ Compute a mask corresponding to the gray matter part of the brain for + a list of images. + The gray matter part is calculated through the resampling of MNI152 + template gray matter mask onto the target image + + Parameters + ---------- + target_imgs: list of Niimg-like object + See http://nilearn.github.io/manipulating_images/input_output.html + Images used to compute the mask. 3D and 4D images are accepted. + The images in this list must be of same shape and affine. The mask is + calculated with the first element of the list for only the shape/affine + of the image is used for this masking strategy + + threshold: float, optional + The value under which the MNI template is cut off. + Default value is 0.5 + + connected: bool, optional + if connected is True, only the largest connect component is kept. + Default is True + + opening: bool or int, optional + if opening is True, a morphological opening is performed, to keep + only large structures. + If opening is an integer `n`, it is performed via `n` erosions. + After estimation of the largest connected constituent, 2`n` closing + operations are performed followed by `n` erosions. This corresponds + to 1 opening operation of order `n` followed by a closing operator + of order `n`. + + memory: instance of joblib.Memory or str + Used to cache the function call. 
+ + n_jobs: integer, optional + Argument not used but kept to fit the API + + **kwargs: optional arguments + arguments such as 'target_affine' are used in the call of other + masking strategies, which then would raise an error for this function + which does not need such arguments. + + verbose: int, optional + Controls the amount of verbosity: higher numbers give + more messages + + Returns + ------- + mask: nibabel.Nifti1Image + The brain mask (3D image) + + See also + -------- + nilearn.masking.compute_gray_matter_mask + """ + if len(target_imgs) == 0: + raise TypeError('An empty object - %r - was passed instead of an ' + 'image or a list of images' % target_imgs) + + # Check images in the list have the same FOV without loading them in memory + imgs_generator = _utils.check_niimg(target_imgs, return_iterator=True) + for _ in imgs_generator: + pass + + mask = compute_gray_matter_mask(target_imgs[0], threshold=threshold, + connected=connected, opening=opening, + memory=memory, verbose=verbose) + return mask + + # # Time series extraction # diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py index f7b9fd3d58..bb238b47aa 100644 --- a/nilearn/plotting/__init__.py +++ b/nilearn/plotting/__init__.py @@ -34,25 +34,24 @@ def _set_mpl_backend(): _set_mpl_backend() ############################################################################### -import matplotlib -from distutils.version import LooseVersion - from . import cm from .img_plotting import plot_img, plot_anat, plot_epi, \ plot_roi, plot_stat_map, plot_glass_brain, plot_connectome, \ plot_prob_atlas, show -from .find_cuts import find_xyz_cut_coords, find_cut_slices +from .find_cuts import find_xyz_cut_coords, find_cut_slices, \ + find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords from .matrix_plotting import plot_matrix +from .html_surface import view_surf, view_img_on_surf +from .html_stat_map import view_stat_map +from .html_connectome import view_connectome, view_markers +from .surf_plotting import plot_surf, plot_surf_stat_map, plot_surf_roi + __all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi', 'plot_roi', 'plot_stat_map', 'plot_glass_brain', 'plot_connectome', 'plot_prob_atlas', 'find_xyz_cut_coords', 'find_cut_slices', - 'show', 'plot_matrix'] - -# matplotlib older versions 1.1.1 will not work to plot surface data using -# plotting functions from surf_plotting.py. Hence we check the version and -# import them only if we have recent versions. 
- -if LooseVersion(matplotlib.__version__) > LooseVersion('1.3.1'): - from .surf_plotting import plot_surf, plot_surf_stat_map, plot_surf_roi - __all__.extend(['plot_surf', 'plot_surf_stat_map', 'plot_surf_roi']) + 'show', 'plot_matrix', 'view_surf', 'view_img_on_surf', + 'view_stat_map', 'view_connectome', 'view_markers', + 'find_parcellation_cut_coords', 'find_probabilistic_atlas_cut_coords', + 'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi', + ] diff --git a/nilearn/plotting/cm.py b/nilearn/plotting/cm.py index 9c68ce6a96..1c989f4caa 100644 --- a/nilearn/plotting/cm.py +++ b/nilearn/plotting/cm.py @@ -174,6 +174,29 @@ def alpha_cmap(color, name='', alpha_min=0.5, alpha_max=1.): color, alpha_min=0, alpha_max=1, name=name) +############################################################################### +# HCP Connectome Workbench colormaps +# As seen in https://github.com/Washington-University/workbench src/Pallete +roy_big_bl = _np.array([(255, 255, 0), (255, 200, 0), + (255, 120, 0), (255, 0, 0), + (200, 0, 0), (150, 0, 0), + (100, 0, 0), (60, 0, 0), + (0, 0, 0), (0, 0, 80), + (0, 0, 170), (75, 0, 125), + (125, 0, 160), (75, 125, 0), + (0, 200, 0), (0, 255, 0), + (0, 255, 255), (0, 255, 255)][::-1]) / 255 + +videen_style = ['#000000', '#bbbbbb', '#dddddd', '#ffffff', + '#ff388d', '#e251e2', '#10b010', '#00ff00', + '#00ffff', '#000000', '#660033', '#33334c', + '#4c4c7f', '#7f7fcc', '#00ff00', '#10b010', + '#ffff00', '#ff9900', '#ff6900', '#ff0000'] + +_cmap_d['roy_big_bl'] = _colors.LinearSegmentedColormap.from_list( + 'roy_big_bl', roy_big_bl.tolist()) +_cmap_d['videen_style'] = _colors.LinearSegmentedColormap.from_list( + 'videen_style', videen_style) # Save colormaps in the scope of the module locals().update(_cmap_d) diff --git a/nilearn/plotting/data/README.txt b/nilearn/plotting/data/README.txt new file mode 100644 index 0000000000..ddad159b27 --- /dev/null +++ b/nilearn/plotting/data/README.txt @@ -0,0 +1,8 @@ +This directory contains files required for javascript plots. + +html/ : templates for HTML files + +js/ + surface-plot-utils.js : helpers for nilearn plots + plotly-gl3d-latest.min.js : plotly library (https://plot.ly/javascript/getting-started/) + jquery.min.js : jquery library (https://jquery.com/) diff --git a/nilearn/plotting/data/__init__.py b/nilearn/plotting/data/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nilearn/plotting/data/html/__init__.py b/nilearn/plotting/data/html/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nilearn/plotting/data/html/connectome_plot_template.html b/nilearn/plotting/data/html/connectome_plot_template.html new file mode 100644 index 0000000000..9b346219fe --- /dev/null +++ b/nilearn/plotting/data/html/connectome_plot_template.html @@ -0,0 +1,162 @@ + + + + + connectome plot + INSERT_JS_LIBRARIES_HERE + + + + + + +
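These HTML templates (connectome, stat map and surface plots) are the pages that the interactive viewers now exported from nilearn.plotting render into. A minimal sketch of calling one of them, with made-up node coordinates and a made-up adjacency matrix; the save_as_html / open_in_browser calls on the returned HTML-document object are assumed here:

    import numpy as np
    from nilearn import plotting

    # Toy 5-node network: symmetric random adjacency plus MNI coordinates.
    coords = np.array([[-40, -20, 45], [40, -20, 45], [0, -55, 10],
                       [-35, 20, 0], [35, 20, 0]])
    rng = np.random.RandomState(0)
    adjacency = rng.rand(5, 5)
    adjacency = (adjacency + adjacency.T) / 2
    np.fill_diagonal(adjacency, 0)

    view = plotting.view_connectome(adjacency, coords)
    view.save_as_html('connectome.html')  # standalone page built from the template above
    # view.open_in_browser()              # or display it directly

view_surf, view_img_on_surf, view_stat_map and view_markers follow the same save/open pattern.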
+    [remainder of the connectome viewer page: static markup and inline scripts omitted]
diff --git a/nilearn/plotting/data/html/stat_map_template.html b/nilearn/plotting/data/html/stat_map_template.html
new file mode 100644
index 0000000000..efa437951a
--- /dev/null
+++ b/nilearn/plotting/data/html/stat_map_template.html
@@ -0,0 +1,123 @@
+    ["Brain image" stat map viewer page: static markup and inline scripts omitted]
diff --git a/nilearn/plotting/data/html/surface_plot_template.html b/nilearn/plotting/data/html/surface_plot_template.html
new file mode 100644
index 0000000000..2544fd6e4f
--- /dev/null
+++ b/nilearn/plotting/data/html/surface_plot_template.html
@@ -0,0 +1,94 @@
+    [surface plot viewer page with the INSERT_JS_LIBRARIES_HERE placeholder: static markup and inline scripts omitted]
diff --git a/nilearn/plotting/data/js/__init__.py b/nilearn/plotting/data/js/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nilearn/plotting/data/js/jquery.min.js b/nilearn/plotting/data/js/jquery.min.js
new file mode 100644
index 0000000000..4d9b3a2587
--- /dev/null
+++ b/nilearn/plotting/data/js/jquery.min.js
@@ -0,0 +1,2 @@
+/*! jQuery v3.3.1 | (c) JS Foundation and other contributors | jquery.org/license */
+    [minified jQuery 3.3.1 source, vendored verbatim; omitted, and truncated in the source document]
t,n;return{send:function(i,o){t=w(" + + + """.format(js_utils) + else: + with open(os.path.join(js_dir, 'jquery.min.js')) as f: + jquery = f.read() + with open(os.path.join(js_dir, 'plotly-gl3d-latest.min.js')) as f: + plotly = f.read() + js_lib = """ + + + + """.format(jquery, plotly, js_utils) + return html.replace('INSERT_JS_LIBRARIES_HERE', js_lib) + + +def get_html_template(template_name): + """Get an HTML file from package data""" + template_path = os.path.join( + os.path.dirname(__file__), 'data', 'html', template_name) + with open(template_path, 'rb') as f: + return f.read().decode('utf-8') + + +def _remove_after_n_seconds(file_name, n_seconds): + script = os.path.join(os.path.dirname(__file__), 'rm_file.py') + subprocess.Popen(['python', script, file_name, str(n_seconds)]) + + +class HTMLDocument(object): + """ + Embeds a plot in a web page. + + If you are running a Jupyter notebook, the plot will be displayed + inline if this object is the output of a cell. + Otherwise, use open_in_browser() to open it in a web browser (or + save_as_html("filename.html") to save it as an html file). + + use str(document) or document.html to get the content of the web page, + and document.get_iframe() to have it wrapped in an iframe. + + """ + _all_open_html_repr = weakref.WeakSet() + + def __init__(self, html, width=600, height=400): + self.html = html + self.width = width + self.height = height + self._temp_file = None + self._check_n_open() + + def _check_n_open(self): + HTMLDocument._all_open_html_repr.add(self) + if len(HTMLDocument._all_open_html_repr) > 9: + warnings.warn('It seems you have created more than 10 ' + 'nilearn views. As each view uses dozens ' + 'of megabytes of RAM, you might want to ' + 'delete some of them.') + + def resize(self, width, height): + """Resize the plot displayed in a Jupyter notebook.""" + self.width, self.height = width, height + return self + + def get_iframe(self, width=None, height=None): + """ + Get the document wrapped in an inline frame. + + For inserting in another HTML page of for display in a Jupyter + notebook. + + """ + if width is None: + width = self.width + if height is None: + height = self.height + escaped = cgi.escape(self.html, quote=True) + wrapped = ''.format( + escaped, width, height) + return wrapped + + def get_standalone(self): + """ Get the plot in an HTML page.""" + return self.html + + def _repr_html_(self): + """ + Used by the Jupyter notebook. + + Users normally won't call this method explicitely. + """ + return self.get_iframe() + + def __str__(self): + return self.html + + def save_as_html(self, file_name): + """ + Save the plot in an HTML file, that can later be opened in a browser. + """ + with open(file_name, 'wb') as f: + f.write(self.html.encode('utf-8')) + + def open_in_browser(self, file_name=None, temp_file_lifetime=30): + """ + Save the plot to a temporary HTML file and open it in a browser. + + Parameters + ---------- + + file_name : str, optional + .html file to use as temporary file + + temp_file_lifetime : float, optional (default=30.) + Time, in seconds, after which the temporary file is removed. + If None, it is never removed. 
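        A minimal sketch of typical use (the HTML string below is only an
        illustration):

            doc = HTMLDocument('<p>hello</p>')
            doc.open_in_browser(temp_file_lifetime=None)  # keep the temporary file
            doc.remove_temp_file()                        # delete it when done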
+ + """ + if file_name is None: + fd, file_name = tempfile.mkstemp('.html', 'nilearn_surface_plot_') + os.close(fd) + self.save_as_html(file_name) + self._temp_file = file_name + file_size = os.path.getsize(file_name) / 1e6 + if temp_file_lifetime is None: + print(("Saved HTML in temporary file: {}\n" + "file size is {:.1f}M, delete it when you're done, " + "for example by calling this.remove_temp_file").format( + file_name, file_size)) + else: + _remove_after_n_seconds(self._temp_file, temp_file_lifetime) + webbrowser.open('file://{}'.format(file_name)) + + def remove_temp_file(self): + """ + Remove the temporary file created by `open_in_browser`, if necessary. + """ + if self._temp_file is None: + return + if not os.path.isfile(self._temp_file): + return + os.remove(self._temp_file) + print('removed {}'.format(self._temp_file)) + self._temp_file = None + + +def colorscale(cmap, values, threshold=None, symmetric_cmap=True, vmax=None): + """Normalize a cmap, put it in plotly format, get threshold and range""" + cmap = mpl_cm.get_cmap(cmap) + abs_values = np.abs(values) + if not symmetric_cmap and (values.min() < 0): + warnings.warn('you have specified symmetric_cmap=False' + 'but the map contains negative values; ' + 'setting symmetric_cmap to True') + symmetric_cmap = True + if vmax is None: + if symmetric_cmap: + vmax = abs_values.max() + vmin = - vmax + else: + vmin, vmax = values.min(), values.max() + else: + vmin = -vmax if symmetric_cmap else 0 + norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) + cmaplist = [cmap(i) for i in range(cmap.N)] + abs_threshold = None + if threshold is not None: + abs_threshold = check_threshold(threshold, values, fast_abs_percentile) + istart = int(norm(-abs_threshold, clip=True) * (cmap.N - 1)) + istop = int(norm(abs_threshold, clip=True) * (cmap.N - 1)) + for i in range(istart, istop): + cmaplist[i] = (0.5, 0.5, 0.5, 1.) # just an average gray color + our_cmap = mpl.colors.LinearSegmentedColormap.from_list( + 'Custom cmap', cmaplist, cmap.N) + x = np.linspace(0, 1, 100) + rgb = our_cmap(x, bytes=True)[:, :3] + rgb = np.array(rgb, dtype=int) + colors = [] + for i, col in zip(x, rgb): + colors.append([np.round(i, 3), "rgb({}, {}, {})".format(*col)]) + return { + 'colors': colors, 'vmin': vmin, 'vmax': vmax, 'cmap': our_cmap, + 'norm': norm, 'abs_threshold': abs_threshold, + 'symmetric_cmap': symmetric_cmap + } + + +def encode(a): + """Base64 encode a numpy array""" + try: + data = a.tobytes() + except AttributeError: + # np < 1.9 + data = a.tostring() + return base64.b64encode(data).decode('utf-8') + + +def decode(b, dtype): + """Decode a numpy array encoded as Base64""" + return np.frombuffer(base64.b64decode(b.encode('utf-8')), dtype) + + +def mesh_to_plotly(mesh): + mesh = surface.load_surf_mesh(mesh) + x, y, z = map(encode, np.asarray(mesh[0].T, dtype=' + + + """) in _normalize_ws(cdn) + inline = _normalize_ws(js_plotting_utils.add_js_lib(html, embed_js=True)) + assert _normalize_ws("""/*! jQuery v3.3.1 | (c) JS Foundation and other + contributors | jquery.org/license */""") in inline + assert _normalize_ws("""** + * plotly.js (gl3d - minified) v1.38.3 + * Copyright 2012-2018, Plotly, Inc. + * All rights reserved. 
+ * Licensed under the MIT license + */ """) in inline + assert "decodeBase64" in inline + + +def check_colors(colors): + assert len(colors) == 100 + val, cstring = zip(*colors) + assert np.allclose(np.linspace(0, 1, 100), val, atol=1e-3) + assert val[0] == 0 + assert val[-1] == 1 + for cs in cstring: + assert re.match(r'rgb\(\d+, \d+, \d+\)', cs) + return val, cstring + + +def test_colorscale_no_threshold(): + cmap = 'jet' + values = np.linspace(-13, -1.5, 20) + threshold = None + colors = js_plotting_utils.colorscale(cmap, values, threshold) + check_colors(colors['colors']) + assert (colors['vmin'], colors['vmax']) == (-13, 13) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13) + assert colors['abs_threshold'] is None + + +def test_colorscale_threshold_0(): + cmap = 'jet' + values = np.linspace(-13, -1.5, 20) + threshold = '0%' + colors = js_plotting_utils.colorscale(cmap, values, threshold) + check_colors(colors['colors']) + assert (colors['vmin'], colors['vmax']) == (-13, 13) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13) + assert colors['abs_threshold'] == 1.5 + assert colors['symmetric_cmap'] + + +def test_colorscale_threshold_99(): + cmap = 'jet' + values = np.linspace(-13, -1.5, 20) + threshold = '99%' + colors = js_plotting_utils.colorscale(cmap, values, threshold) + check_colors(colors['colors']) + assert (colors['vmin'], colors['vmax']) == (-13, 13) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13) + assert colors['abs_threshold'] == 13 + assert colors['symmetric_cmap'] + + +def test_colorscale_threshold_50(): + cmap = 'jet' + values = np.linspace(-13, -1.5, 20) + threshold = '50%' + colors = js_plotting_utils.colorscale(cmap, values, threshold) + val, cstring = check_colors(colors['colors']) + assert cstring[50] == 'rgb(127, 127, 127)' + assert (colors['vmin'], colors['vmax']) == (-13, 13) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13) + assert np.allclose(colors['abs_threshold'], 7.55, 2) + assert colors['symmetric_cmap'] + + +def test_colorscale_absolute_threshold(): + cmap = 'jet' + values = np.linspace(-13, -1.5, 20) + threshold = 7.25 + colors = js_plotting_utils.colorscale(cmap, values, threshold) + val, cstring = check_colors(colors['colors']) + assert cstring[50] == 'rgb(127, 127, 127)' + assert (colors['vmin'], colors['vmax']) == (-13, 13) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13) + assert np.allclose(colors['abs_threshold'], 7.25) + assert colors['symmetric_cmap'] + + +def test_colorscale_asymmetric_cmap(): + cmap = 'jet' + values = np.arange(15) + colors = js_plotting_utils.colorscale(cmap, values, symmetric_cmap=False) + assert (colors['vmin'], colors['vmax']) == (0, 14) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (14, 0) + assert not colors['symmetric_cmap'] + + +def test_colorscale_vmax(): + cmap = 'jet' + values = np.arange(15) + colors = js_plotting_utils.colorscale(cmap, values, vmax=7) + assert (colors['vmin'], colors['vmax']) == (-7, 7) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (7, -7) + assert colors['symmetric_cmap'] + + +def test_colorscale_asymmetric_cmap_vmax(): + cmap = 'jet' + values = np.arange(15) + colors = js_plotting_utils.colorscale(cmap, values, vmax=7, + symmetric_cmap=False) + assert (colors['vmin'], 
colors['vmax']) == (0, 7) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (7, 0) + assert not colors['symmetric_cmap'] + + +def test_colorscale_asymmetric_cmap_negative_values(): + cmap = 'jet' + values = np.linspace(-15, 4) + assert_warns(UserWarning, js_plotting_utils.colorscale, cmap, + values, symmetric_cmap=False) + + colors = js_plotting_utils.colorscale(cmap, values, vmax=7, + symmetric_cmap=False) + assert (colors['vmin'], colors['vmax']) == (-7, 7) + assert colors['cmap'].N == 256 + assert (colors['norm'].vmax, colors['norm'].vmin) == (7, -7) + assert colors['symmetric_cmap'] + + +def test_encode(): + for dtype in ['f4', '>i4']: + a = np.arange(10, dtype=dtype) + encoded = js_plotting_utils.encode(a) + decoded = base64.b64decode(encoded.encode('utf-8')) + b = np.frombuffer(decoded, dtype=dtype) + assert np.allclose(js_plotting_utils.decode(encoded, dtype=dtype), b) + assert np.allclose(a, b) + + +def test_mesh_to_plotly(): + fsaverage = fetch_surf_fsaverage() + coord, triangles = surface.load_surf_mesh(fsaverage['pial_left']) + plotly = js_plotting_utils.mesh_to_plotly(fsaverage['pial_left']) + for i, key in enumerate(['_x', '_y', '_z']): + assert np.allclose( + js_plotting_utils.decode(plotly[key], '' in str(html) + _check_open_in_browser(html) + resized = html.resize(3, 17) + assert resized is html + assert (html.width, html.height) == (3, 17) + assert "width=3 height=17" in html.get_iframe() + assert "width=33 height=37" in html.get_iframe(33, 37) + if not LXML_INSTALLED: + return + root = etree.HTML(html.html.encode('utf-8'), + parser=etree.HTMLParser(huge_tree=True)) + head = root.find('head') + assert len(head.findall('script')) == 5 + body = root.find('body') + div = body.find('div') + assert ('id', plot_div_id) in div.items() + if not check_selects: + return + selects = body.findall('select') + assert len(selects) == 3 + hemi = selects[0] + assert ('id', 'select-hemisphere') in hemi.items() + assert len(hemi.findall('option')) == 2 + kind = selects[1] + assert ('id', 'select-kind') in kind.items() + assert len(kind.findall('option')) == 2 + view = selects[2] + assert ('id', 'select-view') in view.items() + assert len(view.findall('option')) == 7 + + +def _open_mock(f): + print('opened {}'.format(f)) + + +def _check_open_in_browser(html): + wb_open = webbrowser.open + webbrowser.open = _open_mock + try: + html.open_in_browser(temp_file_lifetime=None) + temp_file = html._temp_file + assert html._temp_file is not None + assert os.path.isfile(temp_file) + html.remove_temp_file() + assert html._temp_file is None + assert not os.path.isfile(temp_file) + html.remove_temp_file() + html._temp_file = 'aaaaaaaaaaaaaaaaaaaaaa' + html.remove_temp_file() + finally: + webbrowser.open = wb_open + try: + os.remove(temp_file) + except Exception: + pass + + +def test_temp_file_removing(): + html = js_plotting_utils.HTMLDocument('hello') + wb_open = webbrowser.open + webbrowser.open = _open_mock + try: + html.open_in_browser(temp_file_lifetime=.5) + assert os.path.isfile(html._temp_file) + time.sleep(1.5) + assert not os.path.isfile(html._temp_file) + html.open_in_browser(temp_file_lifetime=None) + assert os.path.isfile(html._temp_file) + time.sleep(1.5) + assert os.path.isfile(html._temp_file) + finally: + webbrowser.open = wb_open + try: + os.remove(html._temp_file) + except Exception: + pass + + +def _open_views(): + return [js_plotting_utils.HTMLDocument('') for i in range(12)] + + +def _open_one_view(): + for i in range(12): + v = 
js_plotting_utils.HTMLDocument('') + return v + + +def test_open_view_warning(): + # opening many views (without deleting the SurfaceView objects) + # should raise a warning about memory usage + assert_warns(UserWarning, _open_views) + assert_no_warnings(_open_one_view) + + +def test_to_color_strings(): + colors = [[0, 0, 1], [1, 0, 0], [.5, .5, .5]] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#0000ff', '#ff0000', '#7f7f7f'] + + colors = [[0, 0, 1, 1], [1, 0, 0, 1], [.5, .5, .5, 0]] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#0000ff', '#ff0000', '#7f7f7f'] + + colors = ['#0000ff', '#ff0000', '#7f7f7f'] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#0000ff', '#ff0000', '#7f7f7f'] + + colors = [[0, 0, 1, 1], [1, 0, 0, 1], [.5, .5, .5, 0]] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#0000ff', '#ff0000', '#7f7f7f'] + + colors = ['r', 'green', 'black', 'white'] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#ff0000', '#008000', '#000000', '#ffffff'] + + if matplotlib.__version__ < '2': + return + + colors = ['#0000ffff', '#ff0000ab', '#7f7f7f00'] + as_str = js_plotting_utils.to_color_strings(colors) + assert as_str == ['#0000ff', '#ff0000', '#7f7f7f'] diff --git a/nilearn/plotting/tests/test_surf_plotting.py b/nilearn/plotting/tests/test_surf_plotting.py index ca41672aa3..55d10725a3 100644 --- a/nilearn/plotting/tests/test_surf_plotting.py +++ b/nilearn/plotting/tests/test_surf_plotting.py @@ -16,11 +16,6 @@ def test_plot_surf(): - # Axes3DSubplot has no attribute 'plot_trisurf' for older versions of - # matplotlib - if LooseVersion(matplotlib.__version__) <= LooseVersion('1.3.1'): - raise SkipTest - mesh = _generate_surf() rng = np.random.RandomState(0) bg = rng.randn(mesh[0].shape[0], ) @@ -38,15 +33,14 @@ def test_plot_surf(): plot_surf(mesh, bg_map=bg, view='medial') plot_surf(mesh, bg_map=bg, hemi='right', view='medial') + # Plot with colorbar + plot_surf(mesh, bg_map=bg, colorbar=True) + # Save execution time and memory plt.close() def test_plot_surf_error(): - # Axes3DSubplot has no attribute 'plot_trisurf' for older versions of - # matplotlib - if LooseVersion(matplotlib.__version__) <= LooseVersion('1.3.1'): - raise SkipTest mesh = _generate_surf() rng = np.random.RandomState(0) @@ -74,11 +68,6 @@ def test_plot_surf_error(): def test_plot_surf_stat_map(): - # Axes3DSubplot has no attribute 'plot_trisurf' for older versions of - # matplotlib - if LooseVersion(matplotlib.__version__) <= LooseVersion('1.3.1'): - raise SkipTest - mesh = _generate_surf() rng = np.random.RandomState(0) bg = rng.randn(mesh[0].shape[0], ) @@ -86,38 +75,61 @@ def test_plot_surf_stat_map(): # Plot mesh with stat map plot_surf_stat_map(mesh, stat_map=data) + plot_surf_stat_map(mesh, stat_map=data, colorbar=True) plot_surf_stat_map(mesh, stat_map=data, alpha=1) # Plot mesh with background and stat map plot_surf_stat_map(mesh, stat_map=data, bg_map=bg) plot_surf_stat_map(mesh, stat_map=data, bg_map=bg, - bg_on_stat=True, darkness=0.5) + bg_on_data=True, darkness=0.5) + plot_surf_stat_map(mesh, stat_map=data, bg_map=bg, colorbar=True, + bg_on_data=True, darkness=0.5) # Apply threshold plot_surf_stat_map(mesh, stat_map=data, bg_map=bg, - bg_on_stat=True, darkness=0.5, + bg_on_data=True, darkness=0.5, + threshold=0.3) + plot_surf_stat_map(mesh, stat_map=data, bg_map=bg, colorbar=True, + bg_on_data=True, darkness=0.5, threshold=0.3) # Change vmax plot_surf_stat_map(mesh, 
stat_map=data, vmax=5) + plot_surf_stat_map(mesh, stat_map=data, vmax=5, colorbar=True) # Change colormap plot_surf_stat_map(mesh, stat_map=data, cmap='cubehelix') + plot_surf_stat_map(mesh, stat_map=data, cmap='cubehelix', colorbar=True) # Plot to axes axes = plt.subplots(ncols=2, subplot_kw={'projection': '3d'})[1] for ax in axes.flatten(): plot_surf_stat_map(mesh, stat_map=data, ax=ax) - + axes = plt.subplots(ncols=2, subplot_kw={'projection': '3d'})[1] + for ax in axes.flatten(): + plot_surf_stat_map(mesh, stat_map=data, ax=ax, colorbar=True) + + fig = plot_surf_stat_map(mesh, stat_map=data, colorbar=False) + assert len(fig.axes) == 1 + # symmetric_cbar + fig = plot_surf_stat_map( + mesh, stat_map=data, colorbar=True, symmetric_cbar=True) + assert len(fig.axes) == 2 + yticklabels = fig.axes[1].get_yticklabels() + first, last = yticklabels[0].get_text(), yticklabels[-1].get_text() + assert float(first) == - float(last) + # no symmetric_cbar + fig = plot_surf_stat_map( + mesh, stat_map=data, colorbar=True, symmetric_cbar=False) + assert len(fig.axes) == 2 + yticklabels = fig.axes[1].get_yticklabels() + first, last = yticklabels[0].get_text(), yticklabels[-1].get_text() + assert float(first) != - float(last) # Save execution time and memory plt.close() def test_plot_surf_stat_map_error(): - # Axes3DSubplot has no attribute 'plot_trisurf' for older versions of - # matplotlib - if LooseVersion(matplotlib.__version__) <= LooseVersion('1.3.1'): - raise SkipTest mesh = _generate_surf() rng = np.random.RandomState(0) data = 10 * rng.randn(mesh[0].shape[0], ) @@ -140,10 +152,6 @@ def test_plot_surf_stat_map_error(): def test_plot_surf_roi(): - # Axes3DSubplot has no attribute 'plot_trisurf' for older versions of - # matplotlib - if LooseVersion(matplotlib.__version__) <= LooseVersion('1.3.1'): - raise SkipTest mesh = _generate_surf() rng = np.random.RandomState(0) roi1 = rng.randint(0, mesh[0].shape[0], size=5) @@ -152,12 +160,15 @@ def test_plot_surf_roi(): # plot roi plot_surf_roi(mesh, roi_map=roi1) + plot_surf_roi(mesh, roi_map=roi1, colorbar=True) # plot parcellation plot_surf_roi(mesh, roi_map=parcellation) + plot_surf_roi(mesh, roi_map=parcellation, colorbar=True) # plot roi list plot_surf_roi(mesh, roi_map=[roi1, roi2]) + plot_surf_roi(mesh, roi_map=[roi1, roi2], colorbar=True) # plot to axes plot_surf_roi(mesh, roi_map=roi1, ax=None, figure=plt.gcf()) @@ -166,6 +177,9 @@ def test_plot_surf_roi(): with tempfile.NamedTemporaryFile() as tmp_file: plot_surf_roi(mesh, roi_map=roi1, ax=plt.gca(), figure=None, output_file=tmp_file.name) + with tempfile.NamedTemporaryFile() as tmp_file: + plot_surf_roi(mesh, roi_map=roi1, ax=plt.gca(), figure=None, + output_file=tmp_file.name, colorbar=True) # Save execution time and memory plt.close() diff --git a/nilearn/regions/parcellations.py b/nilearn/regions/parcellations.py index 5f208e8916..47e1476aaf 100644 --- a/nilearn/regions/parcellations.py +++ b/nilearn/regions/parcellations.py @@ -277,7 +277,7 @@ def _raw_fit(self, data): kmeans = MiniBatchKMeans(n_clusters=self.n_parcels, init='k-means++', random_state=self.random_state, - verbose=self.verbose) + verbose=max(0, self.verbose - 1)) labels = self._cache(_estimator_fit, func_memory_level=1)(components.T, kmeans) else: diff --git a/nilearn/regions/signal_extraction.py b/nilearn/regions/signal_extraction.py index 46410e3a1d..e543407654 100644 --- a/nilearn/regions/signal_extraction.py +++ b/nilearn/regions/signal_extraction.py @@ -64,6 +64,8 @@ def img_to_signals_labels(imgs, labels_img, 
mask_img=None, -------- nilearn.regions.signals_to_img_labels nilearn.regions.img_to_signals_maps + nilearn.input_data.NiftiLabelsMasker : Signal extraction on labels images + e.g. clusters """ labels_img = _utils.check_niimg_3d(labels_img) @@ -99,7 +101,8 @@ def img_to_signals_labels(imgs, labels_img, mask_img=None, labels_data[np.logical_not(mask_data)] = background_label data = _safe_get_data(imgs) - signals = np.ndarray((data.shape[-1], len(labels)), order=order) + signals = np.ndarray((data.shape[-1], len(labels)), order=order, + dtype=data.dtype) for n, img in enumerate(np.rollaxis(data, -1)): signals[n] = np.asarray(ndimage.measurements.mean(img, labels=labels_data, @@ -150,6 +153,8 @@ def signals_to_img_labels(signals, labels_img, mask_img=None, -------- nilearn.regions.img_to_signals_labels nilearn.regions.signals_to_img_maps + nilearn.input_data.NiftiLabelsMasker : Signal extraction on labels + images e.g. clusters """ labels_img = _utils.check_niimg_3d(labels_img) @@ -235,6 +240,8 @@ def img_to_signals_maps(imgs, maps_img, mask_img=None): -------- nilearn.regions.img_to_signals_labels nilearn.regions.signals_to_img_maps + nilearn.input_data.NiftiMapsMasker : Signal extraction on probabilistic + maps e.g. ICA """ maps_img = _utils.check_niimg_4d(maps_img) @@ -302,6 +309,7 @@ def signals_to_img_maps(region_signals, maps_img, mask_img=None): -------- nilearn.regions.signals_to_img_labels nilearn.regions.img_to_signals_maps + nilearn.input_data.NiftiMapsMasker """ maps_img = _utils.check_niimg_4d(maps_img) diff --git a/nilearn/regions/tests/test_region_extractor.py b/nilearn/regions/tests/test_region_extractor.py index 5d4de657d3..4687b9a67b 100644 --- a/nilearn/regions/tests/test_region_extractor.py +++ b/nilearn/regions/tests/test_region_extractor.py @@ -11,8 +11,8 @@ from nilearn.regions.region_extractor import (_threshold_maps_ratio, _remove_small_regions) -from nilearn._utils import testing -from nilearn._utils.testing import assert_raises_regex, generate_maps +from nilearn._utils.testing import assert_raises_regex +from nilearn._utils.data_gen import generate_maps, generate_labeled_regions from nilearn._utils.exceptions import DimensionError @@ -202,8 +202,8 @@ def test_error_messages_connected_label_regions(): shape = (13, 11, 12) affine = np.eye(4) n_regions = 2 - labels_img = testing.generate_labeled_regions(shape, affine=affine, - n_regions=n_regions) + labels_img = generate_labeled_regions(shape, affine=affine, + n_regions=n_regions) assert_raises_regex(ValueError, "Expected 'min_size' to be specified as integer.", connected_label_regions, @@ -243,8 +243,8 @@ def test_connected_label_regions(): shape = (13, 11, 12) affine = np.eye(4) n_regions = 9 - labels_img = testing.generate_labeled_regions(shape, affine=affine, - n_regions=n_regions) + labels_img = generate_labeled_regions(shape, affine=affine, + n_regions=n_regions) labels_data = labels_img.get_data() n_labels_wo_reg_ext = len(np.unique(labels_data)) @@ -353,8 +353,8 @@ def test_connected_label_regions(): # Test if labels (or names to regions) given is a string without a list. # Then, we expect it to be split to regions extracted and returned as list. 
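    # Side note on the import changes in this file: after this patch the data
    # generation helpers are expected to come from nilearn._utils.data_gen, e.g.
    #     from nilearn._utils.data_gen import generate_labeled_regions
    # rather than from nilearn._utils.testing.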
labels_in_str = 'region_a' - labels_img_in_str = testing.generate_labeled_regions(shape, affine=affine, - n_regions=1) + labels_img_in_str = generate_labeled_regions(shape, affine=affine, + n_regions=1) extract_regions, new_labels = connected_label_regions(labels_img_in_str, labels=labels_in_str) assert_true(isinstance(new_labels, list)) diff --git a/nilearn/regions/tests/test_signal_extraction.py b/nilearn/regions/tests/test_signal_extraction.py index 171ae624dc..a8acf50a57 100644 --- a/nilearn/regions/tests/test_signal_extraction.py +++ b/nilearn/regions/tests/test_signal_extraction.py @@ -10,10 +10,10 @@ import nibabel from nilearn.regions import signal_extraction -from nilearn._utils.testing import generate_timeseries, generate_regions_ts -from nilearn._utils.testing import generate_labeled_regions, generate_maps -from nilearn._utils.testing import generate_fake_fmri from nilearn._utils.testing import write_tmp_imgs, assert_raises_regex +from nilearn._utils.data_gen import generate_timeseries, generate_regions_ts +from nilearn._utils.data_gen import generate_labeled_regions, generate_maps +from nilearn._utils.data_gen import generate_fake_fmri from nilearn._utils.exceptions import DimensionError _TEST_DIM_ERROR_MSG = ("Input data has incompatible dimensionality: " diff --git a/nilearn/surface/__init__.py b/nilearn/surface/__init__.py index 50e66acc53..5d0f188ddb 100644 --- a/nilearn/surface/__init__.py +++ b/nilearn/surface/__init__.py @@ -2,6 +2,8 @@ Functions for surface manipulation. """ -from .surface import vol_to_surf, load_surf_data, load_surf_mesh +from .surface import (vol_to_surf, load_surf_data, + load_surf_mesh, check_mesh_and_data) -__all__ = ['vol_to_surf', 'load_surf_data', 'load_surf_mesh'] +__all__ = ['vol_to_surf', 'load_surf_data', 'load_surf_mesh', + 'check_mesh_and_data'] diff --git a/nilearn/surface/surface.py b/nilearn/surface/surface.py index 73f7b51259..00c1f795d2 100644 --- a/nilearn/surface/surface.py +++ b/nilearn/surface/surface.py @@ -3,6 +3,8 @@ """ import os import warnings +import gzip +from distutils.version import LooseVersion import numpy as np from scipy import sparse, interpolate @@ -512,6 +514,44 @@ def vol_to_surf(img, surf_mesh, return texture.T +def _load_surf_files_gifti_gzip(surf_file): + """Load surface data Gifti files which are gzipped. This + function is used by load_surf_mesh and load_surf_data for + extracting gzipped files. + + Part of the code can be removed while bumping nibabel 2.0.2 + """ + with gzip.open(surf_file) as f: + as_bytes = f.read() + if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'): + parser = gifti.GiftiImage.parser() + parser.parse(as_bytes) + gifti_img = parser.img + else: + from nibabel.gifti.parse_gifti_fast import ParserCreate, Outputter + parser = ParserCreate() + parser.buffer_text = True + out = Outputter() + parser.StartElementHandler = out.StartElementHandler + parser.EndElementHandler = out.EndElementHandler + parser.CharacterDataHandler = out.CharacterDataHandler + parser.Parse(as_bytes) + gifti_img = out.img + return gifti_img + + +def _gifti_img_to_data(gifti_img): + """Load surface image e.g. 
sulcal depth or statistical map in + nibabel.gifti.GiftiImage to data + + Used by load_surf_data function in common to surface sulcal data + acceptable to .gii or .gii.gz + """ + if not gifti_img.darrays: + raise ValueError('Gifti must contain at least one data array') + return np.asarray([arr.data for arr in gifti_img.darrays]).T.squeeze() + + # function to figure out datatype and load data def load_surf_data(surf_data): """Loading data to be represented on a surface mesh. @@ -520,7 +560,7 @@ def load_surf_data(surf_data): ---------- surf_data : str or numpy.ndarray Either a file containing surface data (valid format are .gii, - .mgz, .nii, .nii.gz, or Freesurfer specific files such as + .gii.gz, .mgz, .nii, .nii.gz, or Freesurfer specific files such as .thickness, .curv, .sulc, .annot, .label) or a Numpy array containing surface data. Returns @@ -541,20 +581,20 @@ def load_surf_data(surf_data): elif surf_data.endswith('label'): data = nibabel.freesurfer.io.read_label(surf_data) elif surf_data.endswith('gii'): - gii = gifti.read(surf_data) - try: - data = np.zeros((len(gii.darrays[0].data), len(gii.darrays))) - for arr in range(len(gii.darrays)): - data[:, arr] = gii.darrays[arr].data - data = np.squeeze(data) - except IndexError: - raise ValueError('Gifti must contain at least one data array') + if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'): + gii = nibabel.load(surf_data) + else: + gii = gifti.read(surf_data) + data = _gifti_img_to_data(gii) + elif surf_data.endswith('gii.gz'): + gii = _load_surf_files_gifti_gzip(surf_data) + data = _gifti_img_to_data(gii) else: raise ValueError(('The input type is not recognized. %r was given ' 'while valid inputs are a Numpy array or one of ' - 'the following file formats: .gii, .mgz, .nii, ' - '.nii.gz, Freesurfer specific files such as ' - '.curv, .sulc, .thickness, .annot, ' + 'the following file formats: .gii, .gii.gz, ' + '.mgz, .nii, .nii.gz, Freesurfer specific files ' + 'such as .curv, .sulc, .thickness, .annot, ' '.label') % surf_data) # if the input is a numpy array elif isinstance(surf_data, np.ndarray): @@ -562,12 +602,48 @@ def load_surf_data(surf_data): else: raise ValueError('The input type is not recognized. 
' 'Valid inputs are a Numpy array or one of the ' - 'following file formats: .gii, .mgz, .nii, .nii.gz, ' - 'Freesurfer specific files such as .curv, .sulc, ' - '.thickness, .annot, .label') + 'following file formats: .gii, .gii.gz, .mgz, .nii, ' + '.nii.gz, Freesurfer specific files such as .curv, ' + '.sulc, .thickness, .annot, .label') return data +def _gifti_img_to_mesh(gifti_img): + """Load surface image in nibabel.gifti.GiftiImage to data + + Used by load_surf_mesh function in common to surface mesh + acceptable to .gii or .gii.gz + """ + if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'): + try: + coords = gifti_img.get_arrays_from_intent( + nibabel.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])[0].data + except IndexError: + raise ValueError('Gifti file needs to contain a data array ' + 'with intent NIFTI_INTENT_POINTSET') + try: + faces = gifti_img.get_arrays_from_intent( + nibabel.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])[0].data + except IndexError: + raise ValueError('Gifti file needs to contain a data array ' + 'with intent NIFTI_INTENT_TRIANGLE') + else: + try: + coords = gifti_img.getArraysFromIntent( + nibabel.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])[0].data + except IndexError: + raise ValueError('Gifti file needs to contain a data array ' + 'with intent NIFTI_INTENT_POINTSET') + try: + faces = gifti_img.getArraysFromIntent( + nibabel.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])[0].data + except IndexError: + raise ValueError('Gifti file needs to contain a data array ' + 'with intent NIFTI_INTENT_TRIANGLE') + + return coords, faces + + # function to figure out datatype and load data def load_surf_mesh(surf_mesh): """Loading a surface mesh geometry @@ -576,8 +652,8 @@ def load_surf_mesh(surf_mesh): ---------- surf_mesh : str or numpy.ndarray Either a file containing surface mesh geometry (valid formats - are .gii or Freesurfer specific files such as .orig, .pial, - .sphere, .white, .inflated) or a list of two Numpy arrays, + are .gii .gii.gz or Freesurfer specific files such as .orig, .pial, + .sphere, .white, .inflated) or a list or tuple of two Numpy arrays, the first containing the x-y-z coordinates of the mesh vertices, the second containing the indices (into coords) of the mesh faces. @@ -595,45 +671,51 @@ def load_surf_mesh(surf_mesh): surf_mesh.endswith('inflated')): coords, faces = nibabel.freesurfer.io.read_geometry(surf_mesh) elif surf_mesh.endswith('gii'): - try: - coords = gifti.read(surf_mesh).getArraysFromIntent( - nibabel.nifti1.intent_codes[ - 'NIFTI_INTENT_POINTSET'])[0].data - except IndexError: - raise ValueError('Gifti file needs to contain a data array ' - 'with intent NIFTI_INTENT_POINTSET') - try: - faces = gifti.read(surf_mesh).getArraysFromIntent( - nibabel.nifti1.intent_codes[ - 'NIFTI_INTENT_TRIANGLE'])[0].data - except IndexError: - raise ValueError('Gifti file needs to contain a data array ' - 'with intent NIFTI_INTENT_TRIANGLE') + if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'): + gifti_img = nibabel.load(surf_mesh) + else: + gifti_img = gifti.read(surf_mesh) + coords, faces = _gifti_img_to_mesh(gifti_img) + elif surf_mesh.endswith('.gii.gz'): + gifti_img = _load_surf_files_gifti_gzip(surf_mesh) + coords, faces = _gifti_img_to_mesh(gifti_img) else: raise ValueError(('The input type is not recognized. 
%r was given ' 'while valid inputs are one of the following ' - 'file formats: .gii, Freesurfer specific files ' - 'such as .orig, .pial, .sphere, .white, ' + 'file formats: .gii, .gii.gz, Freesurfer specific' + ' files such as .orig, .pial, .sphere, .white, ' '.inflated or a list containing two Numpy ' 'arrays [vertex coordinates, face indices]' ) % surf_mesh) - elif isinstance(surf_mesh, list): - if len(surf_mesh) == 2: - coords, faces = surf_mesh[0], surf_mesh[1] - else: - raise ValueError(('If a list is given as input, it must have ' - 'two elements, the first is a Numpy array ' - 'containing the x-y-z coordinates of the mesh ' - 'vertices, the second is a Numpy array ' - 'containing the indices (into coords) of the ' - 'mesh faces. The input was a list with ' + elif isinstance(surf_mesh, (list, tuple)): + try: + coords, faces = surf_mesh + except Exception: + raise ValueError(('If a list or tuple is given as input, ' + 'it must have two elements, the first is ' + 'a Numpy array containing the x-y-z coordinates ' + 'of the mesh vertices, the second is a Numpy ' + 'array containing the indices (into coords) of ' + 'the mesh faces. The input was a list with ' '%r elements.') % len(surf_mesh)) else: raise ValueError('The input type is not recognized. ' 'Valid inputs are one of the following file ' - 'formats: .gii, Freesurfer specific files such as ' - '.orig, .pial, .sphere, .white, .inflated ' + 'formats: .gii, .gii.gz, Freesurfer specific files ' + 'such as .orig, .pial, .sphere, .white, .inflated ' 'or a list containing two Numpy arrays ' '[vertex coordinates, face indices]') return [coords, faces] + + +def check_mesh_and_data(mesh, data): + """Load surface mesh and data, check that they have compatible shapes.""" + mesh = load_surf_mesh(mesh) + nodes, faces = mesh + data = load_surf_data(data) + if len(data) != len(nodes): + raise ValueError( + 'Mismatch between number of nodes in mesh ({}) and ' + 'size of surface data ({})'.format(len(nodes), len(data))) + return mesh, data diff --git a/nilearn/surface/tests/test_surface.py b/nilearn/surface/tests/test_surface.py index 402b298680..d8bf0ae57e 100644 --- a/nilearn/surface/tests/test_surface.py +++ b/nilearn/surface/tests/test_surface.py @@ -25,6 +25,8 @@ from nilearn.image.tests.test_resampling import rotation from nilearn.surface import surface from nilearn.surface import load_surf_data, load_surf_mesh, vol_to_surf +from nilearn.surface.surface import (_gifti_img_to_mesh, + _load_surf_files_gifti_gzip) currdir = os.path.dirname(os.path.abspath(__file__)) datadir = os.path.join(currdir, 'data') @@ -81,6 +83,23 @@ def test_load_surf_data_file_nii_gii(): os.remove(filename_niigz) +def test_load_surf_data_gii_gz(): + # Test the loader `load_surf_data` with gzipped fsaverage5 files + + # surface data + fsaverage = datasets.fetch_surf_fsaverage().sulc_left + gii = _load_surf_files_gifti_gzip(fsaverage) + assert_true(isinstance(gii, gifti.GiftiImage)) + + data = load_surf_data(fsaverage) + assert_true(isinstance(data, np.ndarray)) + + # surface mesh + fsaverage = datasets.fetch_surf_fsaverage().pial_left + gii = _load_surf_files_gifti_gzip(fsaverage) + assert_true(isinstance(gii, gifti.GiftiImage)) + + def test_load_surf_data_file_freesurfer(): # test loading of fake data from sulc and thickness files # using load_surf_data. 
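To make the surface-loading hunks above concrete, here is a small, hedged usage sketch of the new .gii.gz support together with the check_mesh_and_data helper; it relies only on names introduced or exercised in this patch (fetch_surf_fsaverage serves gzipped fsaverage5 GIFTI files):

    from nilearn import datasets, surface

    fsaverage = datasets.fetch_surf_fsaverage()
    # load_surf_mesh and load_surf_data now also accept gzipped GIFTI (.gii.gz)
    coords, faces = surface.load_surf_mesh(fsaverage['pial_left'])
    sulc = surface.load_surf_data(fsaverage['sulc_left'])
    # check_mesh_and_data raises a ValueError if the data length does not match
    # the number of mesh vertices
    mesh, data = surface.check_mesh_and_data(fsaverage['pial_left'],
                                             fsaverage['sulc_left'])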
@@ -150,6 +169,30 @@ def test_load_surf_mesh_list(): del mesh +def test_gifti_img_to_mesh(): + mesh = _generate_surf() + + coord_array = gifti.GiftiDataArray(data=mesh[0]) + coord_array.intent = nb.nifti1.intent_codes['NIFTI_INTENT_POINTSET'] + + face_array = gifti.GiftiDataArray(data=mesh[1]) + face_array.intent = nb.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'] + + gii = gifti.GiftiImage(darrays=[coord_array, face_array]) + coords, faces = _gifti_img_to_mesh(gii) + assert_array_equal(coords, mesh[0]) + assert_array_equal(faces, mesh[1]) + + +def test_load_surf_mesh_file_gii_gz(): + # Test the loader `load_surf_mesh` with gzipped fsaverage5 files + + fsaverage = datasets.fetch_surf_fsaverage().pial_left + coords, faces = load_surf_mesh(fsaverage) + assert_true(isinstance(coords, np.ndarray)) + assert_true(isinstance(faces, np.ndarray)) + + def test_load_surf_mesh_file_gii(): # Test the loader `load_surf_mesh` @@ -259,13 +302,10 @@ def test_load_uniform_ball_cloud(): assert_equal(len(w), 0) assert_warns(surface.EfficiencyWarning, surface._load_uniform_ball_cloud, n_points=3) - # before 0.18 k-means was computed differently, so the result - # would differ from the stored values, computed with version 0.2 - if LooseVersion(sklearn.__version__) >= LooseVersion('0.18'): - for n_points in [3, 10, 20]: - computed = surface._uniform_ball_cloud(n_points) - loaded = surface._load_uniform_ball_cloud(n_points) - assert_array_almost_equal(computed, loaded) + for n_points in [3, 10, 20]: + computed = surface._uniform_ball_cloud(n_points) + loaded = surface._load_uniform_ball_cloud(n_points) + assert_array_almost_equal(computed, loaded) def test_sample_locations(): @@ -406,3 +446,14 @@ def _check_vol_to_surf_results(img, mesh): nodes, _ = surface.load_surf_mesh(mesh) assert_array_equal(proj_4d.shape, [nodes.shape[0], 2]) assert_array_almost_equal(proj_4d[:, 0], proj_1, 3) + + +def test_check_mesh_and_data(): + mesh = _generate_surf() + data = mesh[0][:, 0] + m, d = surface.check_mesh_and_data(mesh, data) + assert (m[0] == mesh[0]).all() + assert (m[1] == mesh[1]).all() + assert (d == data).all() + data = mesh[0][::2, 0] + assert_raises(ValueError, surface.check_mesh_and_data, mesh, data) diff --git a/nilearn/tests/test_init.py b/nilearn/tests/test_init.py new file mode 100644 index 0000000000..bf3c5a825c --- /dev/null +++ b/nilearn/tests/test_init.py @@ -0,0 +1,12 @@ +import sys +import warnings + +from nose.tools import assert_true +from nilearn import _py2_deprecation_warning + + +def test_py2_deprecation_warning(): + if sys.version_info.major == 2: + with warnings.catch_warnings(record=True) as raised_warnings: + _py2_deprecation_warning() + assert_true(raised_warnings[0].category is DeprecationWarning) diff --git a/nilearn/tests/test_masking.py b/nilearn/tests/test_masking.py index d72fb4ccc5..0ef8ba05d0 100644 --- a/nilearn/tests/test_masking.py +++ b/nilearn/tests/test_masking.py @@ -6,16 +6,23 @@ import numpy as np from numpy.testing import assert_array_equal -from nose.tools import assert_true, assert_false, assert_equal, \ - assert_raises +from nose.tools import ( + assert_true, + assert_false, + assert_equal, + assert_raises, + ) from nibabel import Nifti1Image from nilearn import masking from nilearn.masking import (compute_epi_mask, compute_multi_epi_mask, - compute_background_mask, unmask, _unmask_3d, - _unmask_4d, intersect_masks, MaskWarning) + compute_background_mask, compute_gray_matter_mask, + compute_multi_gray_matter_mask, + unmask, _unmask_3d, _unmask_4d, intersect_masks, + 
MaskWarning, _extrapolate_out_mask) from nilearn._utils.testing import (write_tmp_imgs, assert_raises_regex) +from nilearn._utils.testing import assert_warns from nilearn._utils.exceptions import DimensionError from nilearn.input_data import NiftiMasker @@ -27,7 +34,7 @@ "Expected dimension is 3D and you provided " "a %s image") - + def test_compute_epi_mask(): mean_image = np.ones((9, 9, 3)) mean_image[3:-2, 3:-2, :] = 10 @@ -97,6 +104,28 @@ def test_compute_background_mask(): assert_true(isinstance(w[0].message, masking.MaskWarning)) +def test_compute_gray_matter_mask(): + image = Nifti1Image(np.ones((9, 9, 9)), np.eye(4)) + + mask = compute_gray_matter_mask(image, threshold=-1) + mask1 = np.zeros((9, 9, 9)) + mask1[2:-2, 2:-2, 2:-2] = 1 + + np.testing.assert_array_equal(mask1, mask.get_data()) + + # Check that we get a useful warning for empty masks + assert_warns(masking.MaskWarning, compute_gray_matter_mask, image, threshold=1) + + # Check that masks obtained from same FOV are the same + img1 = Nifti1Image(np.full((9, 9, 9), np.random.rand()), np.eye(4)) + img2 = Nifti1Image(np.full((9, 9, 9), np.random.rand()), np.eye(4)) + + mask_img1 = compute_gray_matter_mask(img1) + mask_img2 = compute_gray_matter_mask(img2) + np.testing.assert_array_equal(mask_img1.get_data(), + mask_img2.get_data()) + + def test_apply_mask(): """ Test smoothing of timeseries extraction """ @@ -384,6 +413,26 @@ def test_compute_multi_epi_mask(): assert_array_equal(mask_ab, mask_ab_.get_data()) +def test_compute_multi_gray_matter_mask(): + assert_raises(TypeError, compute_multi_gray_matter_mask, []) + + # Check error raised if images with different shapes are given as input + imgs = [Nifti1Image(np.ones((9, 9, 9)), np.eye(4)), + Nifti1Image(np.ones((9, 9, 8)), np.eye(4))] + assert_raises(ValueError, compute_multi_gray_matter_mask, imgs) + + # Check results are the same if affine is the same + imgs1 = [Nifti1Image(np.random.randn(9, 9, 9), np.eye(4)), + Nifti1Image(np.random.randn(9, 9, 9), np.eye(4))] + mask1 = compute_multi_gray_matter_mask(imgs1) + + imgs2 = [Nifti1Image(np.random.randn(9, 9, 9), np.eye(4)), + Nifti1Image(np.random.randn(9, 9, 9), np.eye(4))] + mask2 = compute_multi_gray_matter_mask(imgs2) + + assert_array_equal(mask1.get_data(), mask2.get_data()) + + def test_error_shape(random_state=42, shape=(3, 5, 7, 11)): # open-ended `if .. elif` in masking.unmask @@ -423,3 +472,83 @@ def test_unmask_list(random_state=42): a = unmask(mask_data[mask_data], mask_img) b = unmask(mask_data[mask_data].tolist(), mask_img) # shouldn't crash assert_array_equal(a.get_data(), b.get_data()) + + +def test__extrapolate_out_mask(): + # Input data: + initial_data = np.zeros((5,5,5)) + initial_data[1,2,2] = 1 + initial_data[2,1,2] = 2 + initial_data[2,2,1] = 3 + initial_data[3,2,2] = 4 + initial_data[2,3,2] = 5 + initial_data[2,2,3] = 6 + initial_mask = initial_data.copy() != 0 + + # Expected result + target_data = np.array([[[0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 1. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ]], + + [[0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 1.5, 0. , 0. ], + [0. , 2. , 1. , 3.5, 0. ], + [0. , 0. , 3. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ]], + + [[0. , 0. , 2. , 0. , 0. ], + [0. , 2.5, 2. , 4. , 0. ], + [3. , 3. , 3.5, 6. , 6. ], + [0. , 4. , 5. , 5.5, 0. ], + [0. , 0. , 5. , 0. , 0. ]], + + [[0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 3. , 0. , 0. ], + [0. , 3.5, 4. , 5. , 0. ], + [0. , 0. , 4.5, 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ]], + + [[0. , 0. 
, 0. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 4. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. ]]]) + target_mask = np.array([[[False, False, False, False, False], + [False, False, False, False, False], + [False, False, True, False, False], + [False, False, False, False, False], + [False, False, False, False, False]], + + [[False, False, False, False, False], + [False, False, True, False, False], + [False, True, True, True, False], + [False, False, True, False, False], + [False, False, False, False, False]], + + [[False, False, True, False, False], + [False, True, True, True, False], + [ True, True, True, True, True], + [False, True, True, True, False], + [False, False, True, False, False]], + + [[False, False, False, False, False], + [False, False, True, False, False], + [False, True, True, True, False], + [False, False, True, False, False], + [False, False, False, False, False]], + + [[False, False, False, False, False], + [False, False, False, False, False], + [False, False, True, False, False], + [False, False, False, False, False], + [False, False, False, False, False]]]) + + + # Test: + extrapolated_data, extrapolated_mask = _extrapolate_out_mask(initial_data, + initial_mask, + iterations=1) + assert_array_equal(extrapolated_data, target_data) + assert_array_equal(extrapolated_mask, target_mask) diff --git a/nilearn/tests/test_ndimage.py b/nilearn/tests/test_ndimage.py index 6f35b2f41c..ec40ac1346 100644 --- a/nilearn/tests/test_ndimage.py +++ b/nilearn/tests/test_ndimage.py @@ -8,7 +8,7 @@ from nilearn._utils.ndimage import (largest_connected_component, _peak_local_max) -from nilearn._utils import testing +from nilearn._utils import data_gen def test_largest_cc(): @@ -30,8 +30,8 @@ def test_largest_cc(): np.testing.assert_equal(a, largest_connected_component(b_change_type)) # Tests for correct errors, when an image or string are passed. - img = testing.generate_labeled_regions(shape=(10, 11, 12), - n_regions=2) + img = data_gen.generate_labeled_regions(shape=(10, 11, 12), + n_regions=2) assert_raises(ValueError, largest_connected_component, img) assert_raises(ValueError, largest_connected_component, "Test String") diff --git a/nilearn/tests/test_signal.py b/nilearn/tests/test_signal.py index ea915b1cb1..7bc140e2e1 100644 --- a/nilearn/tests/test_signal.py +++ b/nilearn/tests/test_signal.py @@ -5,6 +5,8 @@ # License: simplified BSD import os.path +import warnings +from distutils.version import LooseVersion import numpy as np from nose.tools import assert_true, assert_false, assert_raises @@ -123,7 +125,20 @@ def test_butterworth(): # single timeseries data = rand_gen.randn(n_samples) data_original = data.copy() - + ''' + May be only on py3.5: + Bug in scipy 1.1.0 generates an unavoidable FutureWarning. + (More info: https://github.com/scipy/scipy/issues/9086) + The number of warnings generated is overwhelming TravisCI's log limit, + causing it to fail tests. + This hack prevents that and will be removed in future. 
+ ''' + buggy_scipy = (LooseVersion(scipy.__version__) < LooseVersion('1.2') + and LooseVersion(scipy.__version__) > LooseVersion('1.0') + ) + if buggy_scipy: + warnings.simplefilter('ignore') + ''' END HACK ''' out_single = nisignal.butterworth(data, sampling, low_pass=low_pass, high_pass=high_pass, copy=True) diff --git a/nilearn/tests/test_testing.py b/nilearn/tests/test_testing.py index 446c8c62d3..1cf0f2dcfc 100644 --- a/nilearn/tests/test_testing.py +++ b/nilearn/tests/test_testing.py @@ -3,8 +3,9 @@ from nose.tools import assert_equal, assert_raises -from nilearn._utils.testing import generate_fake_fmri, with_memory_profiler +from nilearn._utils.testing import with_memory_profiler from nilearn._utils.testing import assert_memory_less_than, assert_raises_regex +from nilearn._utils.data_gen import generate_fake_fmri def create_object(size): diff --git a/nilearn/version.py b/nilearn/version.py index eca9e122a3..52f21101dd 100644 --- a/nilearn/version.py +++ b/nilearn/version.py @@ -21,7 +21,7 @@ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # -__version__ = '0.4.2' +__version__ = '0.5.0a' _NILEARN_INSTALL_MSG = 'See %s for installation information.' % ( 'http://nilearn.github.io/introduction.html#installation') @@ -30,22 +30,22 @@ # in some meaningful order (more => less 'core'). REQUIRED_MODULE_METADATA = ( ('numpy', { - 'min_version': '1.6.1', + 'min_version': '1.11', 'required_at_installation': True, 'install_info': _NILEARN_INSTALL_MSG}), ('scipy', { - 'min_version': '0.14', + 'min_version': '0.17', 'required_at_installation': True, 'install_info': _NILEARN_INSTALL_MSG}), ('sklearn', { - 'min_version': '0.15', + 'min_version': '0.18', 'required_at_installation': True, 'install_info': _NILEARN_INSTALL_MSG}), ('nibabel', { 'min_version': '2.0.2', 'required_at_installation': False})) -OPTIONAL_MATPLOTLIB_MIN_VERSION = '1.1.1' +OPTIONAL_MATPLOTLIB_MIN_VERSION = '1.5.1' def _import_module_with_version_check( diff --git a/setup.py b/setup.py index 51336b92fd..878e089ddf 100755 --- a/setup.py +++ b/setup.py @@ -80,13 +80,17 @@ def is_installing(): 'Operating System :: MacOS', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', ], packages=find_packages(), package_data={'nilearn.datasets.data': ['*.nii.gz', '*.csv'], + 'nilearn.datasets.data.fsaverage5': ['*.gz'], 'nilearn.surface.data': ['*.csv'], + 'nilearn.plotting.data.js': ['*.js'], + 'nilearn.plotting.data.html': ['*.html'], 'nilearn.plotting.glass_brain_files': ['*.json'], 'nilearn.tests.data': ['*'], 'nilearn.image.tests.data': ['*.mgz'],