diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 8e851b54..00000000 --- a/.flake8 +++ /dev/null @@ -1,4 +0,0 @@ -[flake8] -max-line-length = 88 -extend-ignore = E501 -; extend-ignore = E203, E501, E231, E731, E741 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes deleted file mode 100644 index 07764a78..00000000 --- a/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -* text eol=lf \ No newline at end of file diff --git a/.github/workflows/CD.yml b/.github/workflows/CD.yml deleted file mode 100644 index 37f6d101..00000000 --- a/.github/workflows/CD.yml +++ /dev/null @@ -1,120 +0,0 @@ -on: - push: -name: release-please -jobs: - continuous-integration: - name: Continuous integration ${{ matrix.os }} python ${{ matrix.python-version }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: ${{ fromJSON(vars.BUILD_OS)}} - python-version: ${{ fromJSON(vars.PYTHON_VERSIONS)}} - steps: - - uses: actions/checkout@v2 - - uses: conda-incubator/setup-miniconda@v2 - with: - python-version: ${{ matrix.python }} - miniforge-variant: Mambaforge - miniforge-version: latest - use-mamba: true - - name: Installing dependencies - shell: bash -l {0} - run: | - conda install -c loop3d --file dependencies.txt -y - - - name: Checking formatting of code - shell: bash -l {0} - run: | - pip install flake8 - # stop the build if there are Python syntax errors or undefined names - flake8 map2loop --count --select=E9,F63,F7,F82 --show-source --statistics - # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - flake8 map2loop --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Building and install - shell: bash -l {0} - run: | - pip install . - documentation-test: - runs-on: ubuntu-latest - #needs: continuous-integration - steps: - - uses: actions/checkout@v2 - - run: | - cp CHANGELOG.md docs/source/CHANGELOG.md - docker build . 
-t=docs -f docs/Dockerfile - docker run -v $(pwd):/map2loop docs bash map2loop/docs/build_docs.sh - - name: upload artifacts - uses: actions/upload-artifact@v3 - with: - path: docs/build/html - release-please: - runs-on: ubuntu-latest - if: github.ref == 'refs/heads/master' - steps: - - uses: GoogleCloudPlatform/release-please-action@v3 - id: release - with: - release-type: python - package-name: map2loop - include-v-in-tag: false - - outputs: - release_created: ${{ steps.release.outputs.release_created }} - - conda-deploy: - name: Uploading to Loop3d for python ${{ matrix.os }}) - needs: release-please - if: ${{ needs.release-please.outputs.release_created }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: ${{ fromJSON(vars.BUILD_OS)}} - python-version: ${{ fromJSON(vars.PYTHON_VERSIONS)}} - steps: - - uses: conda-incubator/setup-miniconda@v2 - with: - auto-update-conda: true - python-version: ${{ matrix.python-version }} - miniforge-variant: Mambaforge - miniforge-version: latest - use-mamba: true - - - uses: actions/checkout@v2 - - name: update submodules - run: | - git submodule update --init --recursive - - name: Add msbuild to PATH - if: matrix.os == 'windows-latest' - uses: microsoft/setup-msbuild@v1.0.2 - - name: Conda build' - env: - ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_TOKEN }} - shell: bash -l {0} - run: | - conda install conda-build - conda install -c loop3d --file dependencies.txt -y - conda build -c loop3d --no-test --python ${{ matrix.python-version }} --output-folder conda conda - conda install anaconda-client -y - - name: upload windows - env: - ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_TOKEN }} - if: matrix.os == 'windows-latest' - shell: bash -l {0} - run: | - anaconda upload --label main conda/win-64/*.tar.bz2 - - name: upload linux - env: - ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_TOKEN }} - if: matrix.os == 'ubuntu-latest' - shell: bash -l {0} - run: | - anaconda upload --label main conda/linux-64/*.tar.bz2 - - name: upload macosx - env: - ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_TOKEN }} - if: matrix.os == 'macos-latest' - shell: bash -l {0} - run: | - anaconda upload --label main conda/osx-64/*.tar.bz2 diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 64b7b6e0..00000000 --- a/.gitignore +++ /dev/null @@ -1,219 +0,0 @@ - -### M2L stuff ### -/model-test* -.ipynb_checkpoints -notebooks/Testing.ipynb -.vim/ -.vscode/ -_version.py -3/ - -# Created by https://www.toptal.com/developers/gitignore/api/python,visualstudiocode,c++,cmake -# Edit at https://www.toptal.com/developers/gitignore?templates=python,visualstudiocode,c++,cmake - -### C++ ### -# Prerequisites -*.d - -# Compiled Object files -*.slo -*.lo -*.o -*.obj - -# Precompiled Headers -*.gch -*.pch - -# Compiled Dynamic libraries -*.so -*.dylib -*.dll - -# Fortran module files -*.mod -*.smod - -# Compiled Static libraries -*.lai -*.la -*.a -*.lib - -# Executables -*.exe -*.out -*.app - -### CMake ### -CMakeLists.txt.user -CMakeCache.txt -CMakeFiles -CMakeScripts -Testing -cmake_install.cmake -install_manifest.txt -compile_commands.json -CTestTestfile.cmake -_deps - -### CMake Patch ### -# External projects -*-prefix/ - -### Python ### -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -pip-wheel-metadata/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# 
PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -.python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -### VisualStudioCode ### -.vscode/* -!.vscode/settings.json -!.vscode/tasks.json -!.vscode/launch.json -!.vscode/extensions.json -*.code-workspace - -### VisualStudioCode Patch ### -# Ignore all local history of files -.history -.vscode/ - -docs/build -docs/source/_autosummary/* -# End of https://www.toptal.com/developers/gitignore/api/python,visualstudiocode,c++,cmake -#ignore automatically generated doc files -docs/source/_autosummary -docs/source/_auto_examples/* -docs/source/_auto_examples -examples/*_tmp -*.loop3d -m2l_data_tmp/* \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index e37fb7df..00000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,22 +0,0 @@ -# Changelog - -## [3.0.4](https://github.com/Loop3D/map2loop-3/compare/3.0.3...3.0.4) (2024-01-11) - - -### Bug Fixes - -* Issue with conda build test env not working, skipping ([2fffb69](https://github.com/Loop3D/map2loop-3/commit/2fffb696556f48f0620dfbac356971301fb15e89)) - -## [3.0.3](https://github.com/Loop3D/map2loop-3/compare/3.0.2...3.0.3) (2024-01-11) - - -### Bug Fixes - -* Add owslib as dependency for testing ([0439da8](https://github.com/Loop3D/map2loop-3/commit/0439da869856b187defbed631c592a848b3684a1)) - -## [3.0.2](https://github.com/Loop3D/map2loop-3/compare/3.0.1...3.0.2) (2024-01-11) - - -### Bug Fixes - -* Still forcing versioning for release-please ([08e647b](https://github.com/Loop3D/map2loop-3/commit/08e647bac1dded0c807bbb9a3571a741505bd488)) diff --git a/docs/Dockerfile b/Dockerfile similarity index 100% rename from docs/Dockerfile rename to Dockerfile diff --git a/LICENSE b/LICENSE deleted file mode 100644 index af374940..00000000 --- a/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020-2021 The Loop Project - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the 
"Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/docs/Makefile b/Makefile similarity index 100% rename from docs/Makefile rename to Makefile diff --git a/README.md b/README.md deleted file mode 100644 index 67130b65..00000000 --- a/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# Map2Loop 3.0 - -Generate 3D geological model inputs from geographical maps — a high-level implementation and extension of the original map2loop code developed by Prof. Mark Jessell at UWA. To see an example interactive model built with map2loop and LoopStructural, follow this link: - -3D Model from the Hamersley region, Western Australia - -## Install - -You will need some flavour of conda (a python package manager, [see here](https://docs.anaconda.com/anaconda/install/index.html)), as well as Python ≥ 3.8 - -### Run - -To just use map2loop, issue the following - -```bash -conda install -c conda-forge -c loop3d map2loop -y -``` - -### Documentation - -If you can call it that, is available here - -### Development - -If you want to tinker yourself/contribute, clone the source code with - -```bash -git clone https://github.com/Loop3D/map2loop.git -``` - -Or get the source + example notebooks with - -```bash -git clone https://github.com/Loop3D/map2loop.git -git clone --single-branch --branch yohan https://github.com/Loop3D/map2loop-3-notebooks -``` - -Navigate into map2loop, and issue the following to install map2loop and its dependencies. _Note_: The 'develop' flag makes your source changes take effect on saving, so you only need to run this once - -```bash -conda install -c loop3d --file dependencies.txt -pip install . -``` - -## Building with Docker - -Fair warning, we recommend conda to almost everyone. With great software development power comes great environment setup inconvenience. You'll need to download and install the [docker containerisation software](https://docs.docker.com/get-docker/), and the docker and docker-compose CLI. - -### Development - -1. Clone this repo and navigate inside as per above -2. Run the following and click on the Jupyter server forwarded link to access and edit the notebooks - - ```bash - docker-compose up --build - ``` - -3. To hop into a bash shell in a running container, open a terminal and issue - - ```bash - docker ps - ``` - - Find the container name or ID and then run - - ```bash - docker exec -it bash - # Probably -> docker exec -it map2loop_dev_1 bash - ``` - -## Usage - -Our notebooks cover use cases in more detail, but here is an example of processing Loop's South Australia remote geospatial data in just 20 lines of Python. - -First, lets import map2loop and define a bounding box. 
You can use GIS software to find one or use [Loop's Graphical User Interface](https://loop3d.github.io/downloads.html) for the best experience and complete toolset. Remember what projection your coordinates are in! - -```python -from map2loop.project import Project -from map2loop.m2l_enums import VerboseLevel - -# Note that this region is defined in the EPSG 28354 projection and -# x and y represent easting and northing respectively -bbox_3d = { - 'minx': 250805.1529856466, - 'miny': 6405084.328058686, - 'maxx': 336682.921539395, - 'maxy': 6458336.085975628, - 'base': -3200, - 'top': 1200 -} -``` - -![sa example](docs/Untitled.png?raw=true) - -Then specify the state, the output filename, and the bounding box and projection from above, and hit go! That's it. - -```python -proj = Project(use_australian_state_data = "SA", - working_projection = 'EPSG:28354', - bounding_box = bbox_3d, - loop_project_filename = "output.loop3d" - ) - -proj.run_all() -``` - -This is a minimal example and a small part of Loop. - -_Our documentation and other resources outline how to extend map2loop and port to the LoopStructural modelling engine. We are working to incorporate geophysical tools and to provide better visualisation and workflow consolidation in the GUI._ - -_Loop is led by Laurent Ailleres (Monash University) with a team of Work Package leaders from:_ - -- _Monash University: Roy Thomson, Lachlan Grose and Robin Armit_ -- _University of Western Australia: Mark Jessell, Jeremie Giraud, Mark Lindsay and Guillaume Pirot_ -- _Geological Survey of Canada: Boyan Brodaric and Eric de Kemp_ - ---- - -### Known Issues and FAQs - -- Developing with docker on Windows means you won't have GPU passthrough and can’t use a discrete graphics card in the container even if you have one. -- If Jupyter links require a token or password, it may mean port 8888 is already in use. To fix, either make docker map to another port on the host, i.e. `-p 8889:8888`, or stop any other instances on 8888. 
- -### Links - -[https://loop3d.github.io/](https://loop3d.github.io/) - -[https://github.com/Loop3D/LoopStructural](https://github.com/Loop3D/LoopStructural) diff --git a/docs/build_docs.sh b/build_docs.sh similarity index 100% rename from docs/build_docs.sh rename to build_docs.sh diff --git a/conda/conda-build.py b/conda/conda-build.py deleted file mode 100644 index d351259c..00000000 --- a/conda/conda-build.py +++ /dev/null @@ -1,85 +0,0 @@ -from subprocess import Popen, PIPE -from concurrent.futures import ThreadPoolExecutor -import tqdm -import sys -import os - - -def process(python_version): - try: - - command = 'conda build --py {} .'.format( - python_version) - p = Popen( - command.split(), stdin=PIPE, stdout=PIPE, stderr=PIPE) - except Exception as e: - return e - - output, err = p.communicate() - return str(err.decode('ascii')) - - -def build(packages): - for dirname in packages: - if not os.path.exists(dirname): - sys.exit( - "Make sure each package directory is at the same level as this script.") - - python_versions = [3.8, 3.9, 3.10, 3.11] - print("Building " + " ".join(packages), - "for python versions {}".format(python_versions)) - - with ThreadPoolExecutor(15) as executor: - output = list( - tqdm.tqdm(executor.map(process, python_versions), total=len(packages))) - - for message in output: - lines = message.split('\\n') - for line in lines: - print(line) - - -def upload(root_buildpath): - - # Create OSX packages from linux versions - output = '' - command = "conda convert -p osx-64 {}*tar.bz2 -o {} -f".format( - root_buildpath + 'linux-64/', root_buildpath) - p = Popen( - command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE) - output += p.communicate()[0].decode('ascii') - output += p.communicate()[1].decode('ascii') - command = "conda convert -p win-64 {}*tar.bz2 -o {} -f".format( - root_buildpath + 'linux-64/', root_buildpath) - p = Popen( - command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE) - output += p.communicate()[0].decode('ascii') - output += p.communicate()[1].decode('ascii') - - command = "anaconda upload {} {} {} --force".format( - root_buildpath + 'linux-64/*.tar.bz2', - root_buildpath + 'osx-64/*.tar.bz2', - root_buildpath + 'win-64/*tar.bz2', - ) - p = Popen( - command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE) - output += p.communicate()[0].decode('ascii') - output += p.communicate()[1].decode('ascii') - print(output) - - -if __name__ == "__main__": - - packages = [ - '.' - ] - - command = 'which conda' - p = Popen( - command.split(), stdin=PIPE, stdout=PIPE, stderr=PIPE) - std, err = p.communicate() - root_buildpath = "/".join(std.decode('ascii').split('/') - [:-2]) + '/conda-bld/' - - build(packages) - # upload(root_buildpath) diff --git a/conda/conda_build_config.yaml b/conda/conda_build_config.yaml deleted file mode 100644 index a75aff37..00000000 --- a/conda/conda_build_config.yaml +++ /dev/null @@ -1,5 +0,0 @@ -python: - - 3.8 - - 3.9 - - 3.10 - - 3.11 diff --git a/conda/meta.yaml b/conda/meta.yaml deleted file mode 100644 index 684c1fd9..00000000 --- a/conda/meta.yaml +++ /dev/null @@ -1,58 +0,0 @@ -{% set name = "map2loop" %} - - -package: - name: "{{ name|lower }}" -# version: "{{ environ.get('GIT_DESCRIBE_TAG', '') }}" - version: "3.0.1" - -source: - git_url: https://github.com/Loop3D/map2loop - -build: - number: 0 - script: "{{ PYTHON }} -m pip install ." 
- -requirements: - host: - - pip - - python - - setuptools - - numpy - - geopandas - - map2model - - hjson - - loopprojectfile - - beartype - - owslib - run: - - python - - numpy - - pandas - - geopandas - - map2model - - hjson - - loopprojectfile - - beartype - - owslib - -test: - imports: - - map2loop - - geopandas - - map2model - - hjson - - loopprojectfile - - beartype - - owslib - -about: - home: "https://github.com/Loop3D/map2loop" - license: MIT - license_family: MIT - license_file: ../LICENSE - summary: "Generate 3D model data using 2D maps." - -extra: - recipe-maintainers: - - roythomson diff --git a/dependencies.txt b/dependencies.txt deleted file mode 100644 index 2aeb97cc..00000000 --- a/dependencies.txt +++ /dev/null @@ -1,11 +0,0 @@ -numpy -pandas -geopandas=0.12.2 -shapely -tqdm -networkx -owslib -map2model -hjson -loopprojectfile -beartype diff --git a/docs/docker-compose.yml b/docker-compose.yml similarity index 100% rename from docs/docker-compose.yml rename to docker-compose.yml diff --git a/examples/README.rst b/examples/README.rst deleted file mode 100644 index 7d4d0aae..00000000 --- a/examples/README.rst +++ /dev/null @@ -1,2 +0,0 @@ -Examples -======== \ No newline at end of file diff --git a/examples/plot_hamersley.py b/examples/plot_hamersley.py deleted file mode 100644 index 619fb844..00000000 --- a/examples/plot_hamersley.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -============================ -Hamersley, Western Australia -============================ -""" -import time -import os -from map2loop.project import Project -from map2loop.m2l_enums import VerboseLevel, Datatype -from map2loop.sorter import ( - SorterAlpha, - SorterAgeBased, - SorterUseHint, - SorterUseNetworkX, - SorterMaximiseContacts, - SorterObservationProjections, -) -from map2loop.sampler import SamplerSpacing -from datetime import datetime - -#################################################################### -# Set the region of interest for the project -# ------------------------------------------- -# Define the bounding box for the ROI - -bbox_3d = { - "minx": 515687.31005864, - "miny": 7493446.76593407, - "maxx": 562666.860106543, - "maxy": 7521273.57407786, - "base": -3200, - "top": 3000, -} - -# Specify minimum details (which Australian state, projection and bounding box -# and output file) -loop_project_filename = "wa_output.loop3d" -proj = Project( - use_australian_state_data="WA", - working_projection="EPSG:28350", - bounding_box=bbox_3d, - verbose_level=VerboseLevel.NONE, - loop_project_filename=loop_project_filename, -) - -# Set the distance between sample points for arial and linestring geometry -proj.set_sampler(Datatype.GEOLOGY, SamplerSpacing(200.0)) -proj.set_sampler(Datatype.FAULT, SamplerSpacing(200.0)) - -# Choose which stratigraphic sorter to use or run_all with "take_best" flag to run them all -proj.set_sorter(SorterAlpha()) -# proj.set_sorter(SorterAgeBased()) -# proj.set_sorter(SorterUseHint()) -# proj.set_sorter(SorterUseNetworkx()) -# proj.set_sorter(SorterMaximiseContacts()) -# proj.set_sorter(SorterObservationProjections()) -proj.run_all(take_best=True) - -#################################################################### -# Visualise the map2loop results -# ------------------------------------------- - -proj.map_data.basal_contacts.plot() diff --git a/docs/make.bat b/make.bat similarity index 100% rename from docs/make.bat rename to make.bat diff --git a/map2loop/__init__.py b/map2loop/__init__.py deleted file mode 100644 index 4fd825cc..00000000 --- 
a/map2loop/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .project import Project - -__all__ = ["Project"] -from .version import __version__ diff --git a/map2loop/aus_state_urls.py b/map2loop/aus_state_urls.py deleted file mode 100644 index 3c3b10f4..00000000 --- a/map2loop/aus_state_urls.py +++ /dev/null @@ -1,72 +0,0 @@ - -class AustraliaStateUrls: - aus_geology_urls = { - "WA": "http://13.211.217.129:8080/geoserver/loop/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=loop:500k_geol_28350&bbox={BBOX_STR}&srs=EPSG:28350&outputFormat=shape-zip", - "QLD": "http://13.211.217.129:8080/geoserver/QLD/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=QLD:qld_geol_asud&bbox={BBOX_STR}&srsName=EPSG:28355&outputFormat=shape-zip", - "SA": "http://13.211.217.129:8080/geoserver/SA/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=SA:2m_surface_geology_28354_relage&bbox={BBOX_STR}&srs=EPSG:28354&outputFormat=shape-zip", - "VIC": "http://13.211.217.129:8080/geoserver/VIC/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=VIC:geolunit_250k_py_28355&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "NSW": "http://13.211.217.129:8080/geoserver/NSW/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=NSW:ge_rockunit_lao2&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "ACT": "", - "TAS": "http://13.211.217.129:8080/geoserver/TAS/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=TAS:geol_poly_250_28355&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "NT": "" - } - aus_structure_urls = { - "WA": "http://13.211.217.129:8080/geoserver/loop/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=loop:waroxi_wa_28350&bbox={BBOX_STR}&srs=EPSG:28350&outputFormat=shape-zip", - "QLD": "http://13.211.217.129:8080/geoserver/QLD/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=QLD:outcrops_28355&bbox={BBOX_STR}&srsName=EPSG:28355&outputFormat=shape-zip", - "SA": "http://13.211.217.129:8080/geoserver/SA/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=SA:sth_flinders_28354&bbox={BBOX_STR}&srs=EPSG:28354&outputFormat=shape-zip", - "VIC": "http://13.211.217.129:8080/geoserver/VIC/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=VIC:struc_28355&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "NSW": "http://13.211.217.129:8080/geoserver/NSW/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=NSW:lao_struct_pt&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "ACT": "", - "TAS": "http://13.211.217.129:8080/geoserver/TAS/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=TAS:geol_struc_250_28355&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "NT": "" - } - aus_fault_urls = { - "WA": "http://13.211.217.129:8080/geoserver/loop/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=loop:500k_faults_28350&bbox={BBOX_STR}&srs=EPSG:28350&outputFormat=shape-zip", - "QLD": "http://13.211.217.129:8080/geoserver/QLD/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=QLD:qld_faults_folds_28355&bbox={BBOX_STR}&srsName=EPSG:28355&outputFormat=shape-zip", - "SA": "http://13.211.217.129:8080/geoserver/SA/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=SA:2m_linear_structures&bbox={BBOX_STR}&srs=EPSG:28354&outputFormat=shape-zip", - "VIC": "http://13.211.217.129:8080/geoserver/VIC/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=VIC:geolstructure_250k_ln_28355&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "NSW": 
"http://13.211.217.129:8080/geoserver/NSW/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=NSW:faults_joined_left_contains_drop_dups&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "ACT": "", - "TAS": "http://13.211.217.129:8080/geoserver/TAS/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=TAS:geol_line_250_28355&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "NT": "" - } - aus_fold_urls = { - "WA": "http://13.211.217.129:8080/geoserver/loop/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=loop:500k_faults_28350&bbox={BBOX_STR}&srs=EPSG:28350&outputFormat=shape-zip", - "QLD": "http://13.211.217.129:8080/geoserver/QLD/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=QLD:qld_faults_folds_28355&bbox={BBOX_STR}&srsName=EPSG:28355&outputFormat=shape-zip", - "SA": "http://13.211.217.129:8080/geoserver/SA/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=SA:2m_linear_structures&bbox={BBOX_STR}&srs=EPSG:28354&outputFormat=shape-zip", - "VIC": "http://13.211.217.129:8080/geoserver/VIC/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=VIC:geolstructure_250k_ln_28355&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "NSW": "http://13.211.217.129:8080/geoserver/NSW/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=NSW:folds_lao&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "ACT": "", - "TAS": "http://13.211.217.129:8080/geoserver/TAS/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=TAS:geol_line_250_28355&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "NT": "" - } - aus_mindep_loopdata = { - "WA": "http://13.211.217.129:8080/geoserver/loop/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=loop:mindeps_2018_28350&bbox={BBOX_STR}&srs=EPSG:28350&outputFormat=shape-zip", - "QLD": "http://13.211.217.129:8080/geoserver/QLD/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=qld_mindeps_28355&bbox={BBOX_STR}&srsName=EPSG:28355&outputFormat=shape-zip", - "SA": "", - "VIC": "http://13.211.217.129:8080/geoserver/VIC/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=VIC:mindeps_2018_28350&bbox={BBOX_STR}&srs=EPSG:28350&outputFormat=shape-zip", - "NSW": "http://13.211.217.129:8080/geoserver/NSW/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName=NSW:nsw_mindeps&bbox={BBOX_STR}&srs=EPSG:28355&outputFormat=shape-zip", - "ACT": "", - "TAS": "http://13.211.217.129:8080/geoserver/VIC/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=VIC:mindeps_2018_28350&bbox={BBOX_STR}&srs=EPSG:28350&outputFormat=shape-zip", - "NT": "" - } - aus_config_urls = { - "WA": "https://gist.githubusercontent.com/yohanderose/8f843de0dde531f009a3973cbdadcc9f/raw/cad97e91a71b4e4791629a6d384e2700095eb3b6/WA_meta.hjson", - "QLD": "https://gist.githubusercontent.com/yohanderose/7ad2ae1b36e4e0b3f27dff17eeae0cc2/raw/ec8d33e52d913e2df4658ca94e7eb467e35adccd/QLD_meta.hjson", - "SA": "https://gist.githubusercontent.com/yohanderose/9401ce1ac72969e2e7fe78e64ca57212/raw/bb7afca4436b6a844053b0ae06978bf59495ed87/SA_meta.hjson", - "VIC": "https://gist.githubusercontent.com/markjessell/17acdbc9061530af76109b0afe519673/raw/80816c511ebad0152a826edcd51f5bea32c2b3db/VIC.hjson", - "NSW": "https://gist.githubusercontent.com/markjessell/0bcb02e6f3ae6da9f00f299b5f0a7f77/raw/09c8018e2d92c7f0051a45be4f9e19dc7cd9a30c/NSW.hjson", - "ACT": "", - "TAS": "https://gist.githubusercontent.com/markjessell/e6521f0d0b7a29014deb04996033554f/raw/296c364b618a49654e18eba10196fa3cc2aabd2f/TAS.hjson", - "NT": "" - } - 
aus_clut_urls = { - "WA": "https://gist.githubusercontent.com/yohanderose/8f7e2d57db9086fbe1a7c651b9e25996/raw/9144994d162662ec015321699c3658a8dbf57def/WA_clut.csv", - "QLD": "", - "SA": "https://gist.githubusercontent.com/yohanderose/c7927fa6f9a2b9e983eeec6941ad9f56/raw/253b33afee74fcb1d487ef1c12093134f31093f4/SA_clut.csv", - "VIC": "", - "NSW": "", - "ACT": "", - "TAS": "", - "NT": "" - } diff --git a/map2loop/config.py b/map2loop/config.py deleted file mode 100644 index 72941c78..00000000 --- a/map2loop/config.py +++ /dev/null @@ -1,185 +0,0 @@ -import beartype -import hjson -import urllib - - -class Config: - """ - A data structure containing column name mappings for files and keywords - - Attributes - ---------- - structure_config: dict - column names and keywords for structure mappings - geology_config: dict - column names and keywords for geology mappings - fault_config: dict - column names and keywords for fault mappings - fold_config: dict - column names and keywords for fold mappings - """ - def __init__(self): - self.structure_config = { - "orientation_type": "dip direction", - "dipdir_column": "DIPDIR", - "dip_column": "DIP", - "description_column": "DESCRIPTION", - "bedding_text": "bedding", - "overturned_column": "DESCRIPTION", - "overturned_text": "overturned", - "objectid_column": "ID" - } - self.geology_config = { - "unitname_column": "UNITNAME", - "alt_unitname_column": "CODE", - "group_column": "GROUP", - "supergroup_column": "SUPERGROUP", - "description_column": "DESCRIPTION", - "minage_column": "MIN_AGE", - "maxage_column": "MAX_AGE", - "rocktype_column": "ROCKTYPE1", - "alt_rocktype_column": "ROCKTYPE2", - "sill_text": "sill", - "intrusive_text": "intrusive", - "volcanic_text": "volcanic", - "objectid_column": "ID", - "ignore_codes": ["cover"] - } - self.fault_config = { - "structtype_column": "FEATURE", - "fault_text": "fault", - "dip_null_value": "-999", - "dipdir_flag": "num", - "dipdir_column": "DIPDIR", - "dip_column": "DIP", - "dipestimate_column": "DIP_ESTIMATE", - "dipestimate_text": "'NORTH_EAST','NORTH',,'NOT ACCESSED'", - "name_column": "NAME", - "objectid_column": "ID" - } - self.fold_config = { - "structtype_column": "FEATURE", - "fold_text": "fold", - "description_column": "DESCRIPTION", - "synform_text": "syncline", - "foldname_column": "NAME", - "objectid_column": "ID" - } - - @beartype.beartype - def update_from_dictionary(self, dictionary: dict, lower: bool = False): - """ - Update the config dictionary from a provided dict - - Args: - dictionary (dict): The dictionary to update from - """ - if "structure" in dictionary: - self.structure_config.update(dictionary["structure"]) - for key in dictionary["structure"].keys(): - if key not in self.structure_config: - print(f"Config dictionary structure segment contained {key} which is not used") - dictionary.pop("structure") - if "geology" in dictionary: - self.geology_config.update(dictionary["geology"]) - for key in dictionary["geology"].keys(): - if key not in self.geology_config: - print(f"Config dictionary geology segment contained {key} which is not used") - dictionary.pop("geology") - if "fault" in dictionary: - self.fault_config.update(dictionary["fault"]) - for key in dictionary["fault"].keys(): - if key not in self.fault_config: - print(f"Config dictionary fault segment contained {key} which is not used") - dictionary.pop("fault") - if "fold" in dictionary: - self.fold_config.update(dictionary["fold"]) - for key in dictionary["fold"].keys(): - if key not in self.fold_config: - print(f"Config 
dictionary fold segment contained {key} which is not used") - dictionary.pop("fold") - if len(dictionary): - print(f"Unused keys from config format {list(dictionary.keys())}") - - @beartype.beartype - def update_from_legacy_file(self, file_map: dict, lower: bool = False): - """ - Update the config dictionary from the provided old version dictionary - - Args: - file_map (dict): The old version dictionary to update from - """ - - code_mapping = { - "otype": (self.structure_config, "orientation_type"), - "dd": (self.structure_config, "dipdir_column"), - "d": (self.structure_config, "dip_column"), - "sf": (self.structure_config, "desciption_column"), - "bedding": (self.structure_config, "bedding_text"), - "bo": (self.structure_config, "overturned_column"), - "btype": (self.structure_config, "overturned_text"), - "gi": (self.structure_config, "objectid_column"), - "c": (self.geology_config, "unitname_column"), - "u": (self.geology_config, "alt_unitname_column"), - "g": (self.geology_config, "group_column"), - "g2": (self.geology_config, "supergroup_column"), - "ds": (self.geology_config, "description_column"), - "min": (self.geology_config, "minage_column"), - "max": (self.geology_config, "maxage_column"), - "r1": (self.geology_config, "rocktype_column"), - "r2": (self.geology_config, "alt_rocktype_column"), - "sill": (self.geology_config, "sill_text"), - "intrusive": (self.geology_config, "intrusive_text"), - "volcanic": (self.geology_config, "volcanic_text"), - "f": (self.fault_config, "structtype_column"), - "fault": (self.fault_config, "fault_text"), - "fdipnull": (self.fault_config, "dip_null_value"), - "fdipdip_flag": (self.fault_config, "dipdir_flag"), - "fdipdir": (self.fault_config, "dipdir_column"), - "fdip": (self.fault_config, "dip_column"), - "fdipest": (self.fault_config, "dipestimate_column"), - "fdipest_vals": (self.fault_config, "dipestimate_text"), - "n": (self.fault_config, "name_column"), - "ff": (self.fold_config, "structtype_column"), - "fold": (self.fold_config, "fold_text"), - "t": (self.fold_config, "description_column"), - "syn": (self.fold_config, "synform_text"), - } - for code in code_mapping: - if code in file_map: - if lower is True: - file_map[code] = str(file_map[code]).lower() - code_mapping[code][0][code_mapping[code][1]] = file_map[code] - file_map.pop(code) - - if "o" in file_map: - self.structure_config["objectid_column"] = file_map["o"] - self.fault_config["objectid_column"] = file_map["o"] - self.fold_config["objectid_column"] = file_map["o"] - file_map.pop("o") - - if len(file_map) > 0: - print(f"Unused keys from legacy format {list(file_map.keys())}") - - @beartype.beartype - def update_from_file(self, filename: str, legacy_format: bool = False, lower: bool = False): - """ - Update the config dictionary from the provided json filename or url - - Args: - filename (str): Filename or URL of the JSON config file - legacy_format (bool, optional): Whether the JSON is an old version. Defaults to False. 
- """ - if legacy_format: - func = self.update_from_legacy_file - else: - func = self.update_from_dictionary - - if filename.startswith("http") or filename.startswith("ftp"): - with urllib.request.urlopen(filename) as url_data: - data = hjson.load(url_data) - func(data, lower) - else: - with open(filename) as url_data: - data = hjson.load(url_data) - func(data, lower) diff --git a/map2loop/deformation_history.py b/map2loop/deformation_history.py deleted file mode 100644 index b36dc9f2..00000000 --- a/map2loop/deformation_history.py +++ /dev/null @@ -1,295 +0,0 @@ -import pandas -import numpy -import beartype -import geopandas -import math - - -class DeformationHistory: - """ - A class containing all the fault and fold summaries and relationships - - Attributes - ---------- - minimum_fault_length_to_export: float - The cutoff for ignoring faults. Any fault shorter than this is not exported - history: list - The time ordered list of deformation events - faultColumns: numpy.dtype - Column names and types for fault summary - foldColumns: numpy.dtype - Column names and types for fold summary - faults: pandas.DataFrame - The fault summary - folds: pandas.DataFrame - The fold summary - - """ - def __init__(self): - """ - The initialiser for the deformation history. All attributes are defaulted - """ - self.minimum_fault_length_to_export = 500.0 - self.history = [] - self.fault_fault_relationships = [] - - # Create empty fault and fold dataframes - self.faultColumns = numpy.dtype( - [ - ("eventId", int), - ("name", str), - ("minAge", float), - ("maxAge", float), - ("group", str), - ("supergroup", str), - ("avgDisplacement", float), - ("avgDownthrowDir", float), - ("influenceDistance", float), - ("verticalRadius", float), - ("horizontalRadius", float), - ("colour", str), - ("centreX", float), - ("centreY", float), - ("centreZ", float), - ("avgSlipDirX", float), - ("avgSlipDirY", float), - ("avgSlipDirZ", float), - ("avgNormalX", float), - ("avgNormalY", float), - ("avgNormalZ", float), - ("length", float), - ] - ) - self.faults = pandas.DataFrame(numpy.empty(0, dtype=self.faultColumns)) - # self.faults = self.faults.set_index("name") - - self.foldColumns = numpy.dtype( - [ - ("eventId", int), - ("name", str), - ("minAge", float), - ("maxAge", float), - ("periodic", bool), - ("wavelength", float), - ("amplitude", float), - ("asymmetry", bool), - ("asymmetryShift", float), - ("secondaryWavelength", float), - ("secondaryAmplitude", float), - ] - ) - self.folds = pandas.DataFrame(numpy.empty(0, dtype=self.foldColumns)) - # self.folds = self.folds.set_index("name") - - def set_minimum_fault_length(self, length): - """ - Sets the minimum fault length to export - - Args: - length (float or int): - The fault length cutoff - """ - self.minimum_fault_length_to_export = length - - def get_minimum_fault_length(self): - """ - Getter for the fault length cutoff - - Returns: - float: The fault length cutoff - """ - return self.minimum_fault_length_to_export - - def findfault(self, id): - """ - Find the fault in the summary based on its eventId - - Args: - id (int or str): - The eventId or name to look for - - Returns: - pandas.DataFrame: The sliced data frame containing the requested fault - """ - if type(id) is int: - return self.faults[self.faults["eventId"] == id] - elif type(id) is str: - return self.faults[self.faults["name"] == id] - else: - print("ERROR: Unknown identifier type used to find fault") - - def findfold(self, id): - """ - Find the fold in the summary based on its eventId - - Args: - id (int or 
str): - The eventId or name to look for - - Returns: - pandas.DataFrame: The sliced data frame containing the requested fold - """ - if type(id) is int: - return self.folds[self.folds["foldId"] == id] - elif type(id) is str: - return self.folds[self.folds["name"] == id] - else: - print("ERROR: Unknown identifier type used to find fold") - - def addFault(self, fault): - """ - Add fault to the fault summary - - Args: - fault (pandas.DataFrame or dict): - The fault information to add - """ - if type(fault) is pandas.DataFrame or type(fault) is dict: - if "name" in fault.keys(): - if fault["name"] in self.faults.index: - print("Replacing fault", fault["name"]) - self.faults[fault["name"]] = fault - else: - print("No name field in fault", fault) - else: - print("Cannot add fault to dataframe with type", type(fault)) - - def removeFaultByName(self, name: str): - """ - Remove the fault from the summary by name - - Args: - name (str): - The name of the fault(s) to remove - """ - self.faults = self.faults[self.faults["name"] != name].copy() - - def removeFaultByEventId(self, eventId: int): - """ - Remove the fault from the summary by eventId - - Args: - eventId (int): - The eventId of the fault to remove - """ - self.faults = self.faults[self.faults["eventId"] != eventId].copy() - - def addFold(self, fold): - """ - Add fold to the fold summary - - Args: - fold (pandas.DataFrame or dict): - The fold information to add - """ - if type(fold) is pandas.DataFrame or type(fold) is dict: - if "name" in fold.keys(): - if fold["name"] in self.folds.index: - print("Replacing fold", fold["name"]) - self.folds[fold["name"]] = fold - else: - print("No name field in fold", fold) - else: - print("Cannot add fold to dataframe with type", type(fold)) - - @beartype.beartype - def populate(self, faults_map_data: geopandas.GeoDataFrame): - """ - Populate the fault (and fold) summaries from a geodataframe - - Args: - faults_map_data (geopandas.GeoDataFrame): - The parsed data frame from the map - """ - if faults_map_data.shape[0] == 0: - return - faults_data = faults_map_data.copy() - faults_data = faults_data.dissolve(by="NAME", as_index=False) - faults_data = faults_data.reset_index(drop=True) - - self.stratigraphicUnits = pandas.DataFrame( - numpy.empty(faults_data.shape[0], dtype=self.faultColumns) - ) - self.faults["eventId"] = faults_data["ID"] - self.faults["name"] = faults_data["NAME"] - self.faults["minAge"] = -1 - self.faults["maxAge"] = -1 - self.faults["group"] = "" - self.faults["supergroup"] = "" - self.faults["avgDisplacement"] = -1 - self.faults["avgDownthrowDir"] = 0 - self.faults["influenceDistance"] = 0 - self.faults["verticalRadius"] = 0 - self.faults["horizontalRadius"] = 0 - self.faults["colour"] = "#000000" - self.faults["centreX"] = 0 - self.faults["centreY"] = 0 - self.faults["centreZ"] = 0 - self.faults["avgSlipDirX"] = 0 - self.faults["avgSlipDirY"] = 0 - self.faults["avgSlipDirZ"] = 0 - self.faults["avgNormalX"] = 0 - self.faults["avgNormalY"] = 0 - self.faults["avgNormalZ"] = 0 - self.faults["length"] = faults_data.geometry.length - for index, fault in self.faults.iterrows(): - bounds = faults_map_data[faults_map_data["ID"] == fault["eventId"]].geometry.bounds - xdist = bounds.maxx - bounds.minx - ydist = bounds.maxy - bounds.miny - length = math.sqrt(xdist*xdist + ydist*ydist) - self.faults.at[index, "verticalRadius"] = length - self.faults.at[index, "horizontalRadius"] = length / 2.0 - self.faults.at[index, "influenceDistance"] = length / 4.0 - - @beartype.beartype - def 
summarise_data(self, fault_observations: pandas.DataFrame): - """ - Use fault observations data to add summary data for each fault - - Args: - fault_observations (pandas.DataFrame): - The fault observations data - """ - id_list = self.faults["eventId"].unique() - for id in id_list: - observations = fault_observations[fault_observations["ID"] == id] - if len(observations) < 2: - self.removeFaultByEventId(id) - - # id_list = self.faults["eventId"].unique() - for index, fault in self.faults.iterrows(): - observations = fault_observations[fault_observations["ID"] == fault["eventId"]] - # calculate centre point - self.faults.at[index, "centreX"] = numpy.mean(observations["X"]) - self.faults.at[index, "centreY"] = numpy.mean(observations["Y"]) - self.faults.at[index, "centreZ"] = numpy.mean(observations["Z"]) - - def get_faults_for_export(self): - """ - Get the faults for export (removes any fault that is shorter than the cutoff) - - Returns: - pandas.DataFrame: The filtered fault summary - """ - return self.faults[self.faults["length"] >= self.minimum_fault_length_to_export].copy() - - @beartype.beartype - def get_fault_relationships_with_ids(self, fault_fault_relationships: pandas.DataFrame): - """ - Ammend the fault relationships DataFrame with the fault eventIds - - Args: - fault_fault_relationships (pandas.DataFrame): The fault_fault_relationships - - Returns: - pandas.DataFrame: The fault_relationships with the correct eventIds - """ - faultIds = self.get_faults_for_export()[["eventId", "name"]].copy() - rel = fault_fault_relationships.copy() - rel = rel.merge(faultIds, left_on="Fault1", right_on="name") - rel.rename(columns={"eventId": "eventId1"}, inplace=True) - rel.drop(columns=["name"], inplace=True) - rel = rel.merge(faultIds, left_on="Fault2", right_on="name") - rel.rename(columns={"eventId": "eventId2"}, inplace=True) - rel.drop(columns=["name"], inplace=True) - return rel diff --git a/map2loop/m2l_enums.py b/map2loop/m2l_enums.py deleted file mode 100644 index 3ce274a3..00000000 --- a/map2loop/m2l_enums.py +++ /dev/null @@ -1,31 +0,0 @@ -from enum import IntEnum - - -class Datatype(IntEnum): - GEOLOGY = 0 - STRUCTURE = 1 - FAULT = 2 - FOLD = 3 - DTM = 4 - - -class Datastate(IntEnum): - UNNAMED = 0 - UNLOADED = 1 - LOADED = 2 - REPROJECTED = 3 - CLIPPED = 4 - COMPLETE = 5 - ERRORED = 9 - - -class ErrorState(IntEnum): - NONE = 0 - URLERROR = 1 - CONFIGERROR = 2 - - -class VerboseLevel(IntEnum): - NONE = 0 - TEXTONLY = 1 - ALL = 2 diff --git a/map2loop/map2model_wrapper.py b/map2loop/map2model_wrapper.py deleted file mode 100644 index 8f9f443a..00000000 --- a/map2loop/map2model_wrapper.py +++ /dev/null @@ -1,217 +0,0 @@ -import map2model -import pandas -import numpy -import os -from .m2l_enums import VerboseLevel -import re - - -class Map2ModelWrapper(): - """ - A wrapper around map2model functionality - - Attributes - ---------- - sorted_units: None or list - map2model's estimate of the stratigraphic column - fault_fault_relationships: None or pandas.DataFrame - data frame of fault to fault relationships with columns ["Fault1", "Fault2", "Type", "Angle"] - unit_fault_relationships: None or pandas.DataFrame - data frame of unit fault relationships with columns ["Unit", "Fault"] - unit_unit_relationships: None or pandas.DataFrame - data frame of unit unit relationships with columns ["Index1", "UnitName1", "Index2", "UnitName2"] - map_data: MapData - A pointer to the map data structure in project - verbose_level: m2l_enum.VerboseLevel - A selection that defines how much console logging 
is output - """ - def __init__(self, map_data, verbose_level: VerboseLevel = VerboseLevel.NONE): - """ - The initialiser for the map2model wrapper - - Args: - map_data (MapData): - The project map data structure to reference - verbose_level (VerboseLevel, optional): - How much console output is sent. Defaults to VerboseLevel.ALL. - """ - self.sorted_units = None - self.fault_fault_relationships = None - self.unit_fault_relationships = None - self.unit_unit_relationships = None - self.map_data = map_data - self.verbose_level = verbose_level - - def reset(self): - """ - Reset the wrapper to before the map2model process - """ - self.sorted_units = None - self.fault_fault_relationships = None - self.unit_fault_relationships = None - self.unit_unit_relationships = None - - def get_sorted_units(self): - """ - Getter for the map2model sorted units - - Returns: - list: The map2model stratigraphic column estimate - """ - if self.sorted_units is None: - self.run() - return self.sorted_units - - def get_fault_fault_relationships(self): - """ - Getter for the fault fault relationships - - Returns: - pandas.DataFrame: The fault fault relationships - """ - if self.fault_fault_relationships is None: - self.run() - return self.fault_fault_relationships - - def get_unit_fault_relationships(self): - """ - Getter for the unit fault relationships - - Returns: - pandas.DataFrame: The unit fault relationships - """ - if self.unit_fault_relationships is None: - self.run() - return self.unit_fault_relationships - - def get_unit_unit_relationships(self): - """ - Getter for the unit unit relationships - - Returns: - pandas.DataFrame: The unit unit relationships - """ - if self.unit_unit_relationships is None: - self.run() - return self.unit_unit_relationships - - def run(self, verbose_level: VerboseLevel = None): - """ - The main execute function that prepares, runs and parse the output of the map2model process - - Args: - verbose_level (VerboseLevel, optional): - How much console output is sent. Defaults to None (which uses the wrapper attribute). - """ - if verbose_level is None: - verbose_level = self.verbose_level - if verbose_level != VerboseLevel.NONE: - print("Exporting map data for map2model") - self.map_data.export_wkt_format_files() - if verbose_level != VerboseLevel.NONE: - print("Running map2model...") - map2model_code_map = { - "o": "ID", # FIELD_COORDINATES - "f": "FEATURE", # FIELD_FAULT_ID - "u": "CODE", # FIELD_POLYGON_LEVEL1_NAME - "g": "GROUP", # FIELD_POLYGON_LEVEL2_NAME - "min": "MIN_AGE", # FIELD_POLYGON_MIN_AGE - "max": "MAX_AGE", # FIELD_POLYGON_MAX_AGE - "c": "UNITNAME", # FIELD_POLYGON_CODE - "ds": "DESCRIPTION", # FIELD_POLYGON_DESCRIPTION - "r1": "ROCKTYPE1", # FIELD_POLYGON_ROCKTYPE1 - "r2": "ROCKTYPE2", # FIELD_POLYGON_ROCKTYPE2 - "msc": "", # FIELD_SITE_CODE - "mst": "", # FIELD_SITE_TYPE - "mscm": "", # FIELD_SITE_COMMO - "fold": self.map_data.config.fold_config["fold_text"], # FAULT_AXIAL_FEATURE_NAME - "sill": self.map_data.config.geology_config["sill_text"], # SILL_STRING - "intrusive": self.map_data.config.geology_config["intrusive_text"], # IGNEOUS_STRING - "volcanic": self.map_data.config.geology_config["volcanic_text"], # VOLCANIC_STRING - "deposit_dist": 100, # deposit_dist - } - # TODO: Simplify. 
Note: this is external so have to match fix to map2model module - run_log = map2model.run( - os.path.join(self.map_data.tmp_path, "map2model_data"), - os.path.join(self.map_data.tmp_path, "map2model_data", "geology_wkt.csv"), - os.path.join(self.map_data.tmp_path, "map2model_data", "faults_wkt.csv"), - "", - self.map_data.get_bounding_box(), - map2model_code_map, - verbose_level == VerboseLevel.NONE, - "None" - ) - if verbose_level == VerboseLevel.ALL: - print("map2model log:") - print(run_log) - if verbose_level != VerboseLevel.NONE: - print("map2model complete") - - # Parse units sorted - units_sorted = pandas.read_csv(os.path.join(self.map_data.tmp_path, "map2model_data", "units_sorted.txt"), header=None, sep=' ') - if units_sorted.shape == 0: - self.sorted_units = [] - else: - self.sorted_units = list(units_sorted[5]) - - # Parse fault intersections - out = [] - fault_fault_intersection_filename = os.path.join(self.map_data.tmp_path, "map2model_data", "fault-fault-intersection.txt") - if os.path.isfile(fault_fault_intersection_filename) and os.path.getsize(fault_fault_intersection_filename) > 0: - df = pandas.read_csv( - fault_fault_intersection_filename, - delimiter="{", - header=None, - ) - df[1] = list(df[1].str.replace("}", "", regex=False)) - df[1] = [re.findall("\(.*?\)", i) for i in df[1]] # noqa: W605 Valid escape for regex - df[0] = list(df[0].str.replace("^[0-9]*, ", "", regex=True)) - df[0] = list(df[0].str.replace(", ", "", regex=False)) - df[0] = "Fault_" + df[0] - for j in range(len(df)): - df[1][j] = [i.strip("()").replace(" ", "").split(",") for i in df[1][j]] - - for _, row in df.iterrows(): - for i in numpy.arange(len(row[1])): - out += [[row[0], "Fault_"+row[1][i][0], row[1][i][1], float(row[1][i][2])]] - - df_out = pandas.DataFrame(columns=["Fault1", "Fault2", "Type", "Angle"], data=out) - self.fault_fault_relationships = df_out - - # Parse unit fault relationships - out = [] - unit_fault_intersection_filename = os.path.join(self.map_data.tmp_path, "map2model_data", "unit-fault-intersection.txt") - if os.path.isfile(unit_fault_intersection_filename) and os.path.getsize(unit_fault_intersection_filename) > 0: - df = pandas.read_csv(unit_fault_intersection_filename, header=None, sep='{') - df[1] = list(df[1].str.replace("}", "", regex=False)) - df[1] = df[1].astype(str).str.split(", ") - df[0] = list(df[0].str.replace("^[0-9]*, ", "", regex=True)) - df[0] = list(df[0].str.replace(", ", "", regex=False)) - - for _, row in df.iterrows(): - for i in numpy.arange(len(row[1])): - out += [[row[0], "Fault_"+row[1][i]]] - - df_out = pandas.DataFrame(columns=["Unit", "Fault"], data=out) - self.unit_fault_relationships = df_out - - # Parse unit unit relationships - units = [] - links = [] - graph_filename = os.path.join(self.map_data.tmp_path, "map2model_data", "graph_all_None.gml.txt") - if os.path.isfile(graph_filename) and os.path.getsize(graph_filename) > 0: - with open(os.path.join(self.map_data.tmp_path, "map2model_data", "graph_all_None.gml.txt")) as file: - contents = file.read() - segments = contents.split("\n\n") - for line in segments[0].split("\n"): - units += [line.split(" ")] - for line in segments[1].split("\n")[:-1]: - links += [line.split(" ")] - - df = pandas.DataFrame(columns=["index", "unit"], data=units) - df.set_index("index", inplace=True) - out = [] - for row in links: - out += [[int(row[0]), df["unit"][row[0]], int(row[1]), df["unit"][row[1]]]] - df_out = pandas.DataFrame(columns=["Index1", "UnitName1", "Index2", "UnitName2"], data=out) - 
self.unit_unit_relationships = df_out diff --git a/map2loop/mapdata.py b/map2loop/mapdata.py deleted file mode 100644 index 71d61ce3..00000000 --- a/map2loop/mapdata.py +++ /dev/null @@ -1,1491 +0,0 @@ -import geopandas -import pandas -import numpy -import shapely -from osgeo import gdal, osr -from owslib.wcs import WebCoverageService -import urllib -from gzip import GzipFile -from uuid import uuid4 -import beartype -import os -from io import BytesIO -from .m2l_enums import Datatype, Datastate -from .m2l_enums import VerboseLevel -from .config import Config -from .aus_state_urls import AustraliaStateUrls -from .random_colour import random_colours_hex - - -class MapData: - """ - A data structure containing all the map data loaded from map files - - Attributes - ---------- - raw_data: list of geopandas.DataFrames - A list containing the raw geopanda data frames before any modification - data: list of geopandas.DataFrames - A list containing the geopanda data frames or raster image of all the different map data - contacts: geopandas.DataFrame - A dataframe containing the contacts between units in polylines - basal_contacts: geopandas.DataFrame - A dataframe containing the contacts between units labelled with whether it is basal or not - filenames: list of data source filenames - A list of the filenames/urls of where to find the data to be loaded - dirtyflags: list of booleans - A list of flags indicating whether the data has been loaded or dirtied - data_states: intEnum - Enums representing the state of each of the data sets - working_projection: str - A string containing the projection e.g. "EPSG:28350" - bounding_box: dict - The bounding box in cartesian coordinates with 6 elements - bounding_box_polygon: shapely.Polygon - The bounding box in polygonal form - bounding_box_str: str - The bounding box in string form (used for url requests) - config_filename: str - The filename of the json config file - colour_filename: str - The filename of the csv colour table file (columns are unit name and colour in #000000 form) - tmp_path: str - The path to the directory holding the temporary files - verbose_level: m2l_enums.VerboseLevel - A selection that defines how much console logging is output - config: Config - A link to the config structure which is defined in config.py - """ - - def __init__( - self, tmp_path: str = "", verbose_level: VerboseLevel = VerboseLevel.ALL - ): - """ - The initialiser for the map data - - Args: - tmp_path (str, optional): - The directory for storing temporary files. Defaults to "". - verbose_level (VerboseLevel, optional): - How much console output is sent. Defaults to VerboseLevel.ALL. 
- """ - self.raw_data = [None] * len(Datatype) - self.data = [None] * len(Datatype) - self.contacts = None - self.basal_contacts = None - self.filenames = [None] * len(Datatype) - # self.output_filenames = [None] * len(Datatype) - self.dirtyflags = [True] * len(Datatype) - self.data_states = [Datastate.UNNAMED] * len(Datatype) - self.working_projection = None - self.bounding_box = None - self.bounding_box_polygon = None - self.bounding_box_str = None - self.config_filename = None - self.colour_filename = None - self.tmp_path = tmp_path - self.verbose_level = verbose_level - - self.config = Config() - - def set_working_projection(self, projection): - """ - Set the working projection for the map data - - Args: - projection (int or str): - The projection to use for map reprojection - """ - if type(projection) is int: - projection = "EPSG:" + str(projection) - self.working_projection = projection - elif type(projection) is str: - self.working_projection = projection - else: - print( - f"Warning: Unknown projection set {projection}. Leaving all map data in original projection\n" - ) - if self.bounding_box is not None: - self.recreate_bounding_box_str() - - def get_working_projection(self): - """ - Get the working projection - - Returns: - str: The working projection in "EPSG:" form - """ - return self.working_projection - - def set_bounding_box(self, bounding_box): - """ - Set the bounding box of the map data - - Args: - bounding_box (tuple or dict): - The bounding box to use for maps - """ - # Convert tuple bounding_box to dict else assign directly - if type(bounding_box) is tuple: - self.bounding_box = { - "minx": bounding_box[0], - "maxx": bounding_box[1], - "miny": bounding_box[2], - "maxy": bounding_box[3], - } - if len(bounding_box) == 6: - self.bounding_box["top"] = bounding_box[4] - self.bounding_box["base"] = bounding_box[5] - elif type(bounding_box) is dict: - self.bounding_box = bounding_box - else: - raise TypeError(f"Invalid type for bounding_box {type(bounding_box)}") - - # Check for map based bounding_box and add depth boundaries - if len(self.bounding_box) == 4: - self.bounding_box["top"] = 0 - self.bounding_box["base"] = 2000 - - # Check that bounding_box has all the right keys - for i in ["minx", "maxx", "miny", "maxy", "top", "base"]: - if i not in self.bounding_box: - raise KeyError(f"bounding_box dictionary does not contain {i} key") - - # Create geodataframe boundary for clipping - minx = self.bounding_box["minx"] - miny = self.bounding_box["miny"] - maxx = self.bounding_box["maxx"] - maxy = self.bounding_box["maxy"] - lat_point_list = [miny, miny, maxy, maxy, miny] - lon_point_list = [minx, maxx, maxx, minx, minx] - self.bounding_box_polygon = geopandas.GeoDataFrame( - index=[0], - crs=self.working_projection, - geometry=[shapely.Polygon(zip(lon_point_list, lat_point_list))], - ) - self.recreate_bounding_box_str() - - def recreate_bounding_box_str(self): - """ - Creates the bounding box string from the bounding box dict - """ - minx = self.bounding_box["minx"] - miny = self.bounding_box["miny"] - maxx = self.bounding_box["maxx"] - maxy = self.bounding_box["maxy"] - self.bounding_box_str = f"{minx},{miny},{maxx},{maxy},{self.working_projection}" - - @beartype.beartype - def get_bounding_box(self, polygon: bool = False): - """ - Get the bounding box in dict or polygon form - - Args: - polygon (bool, optional): Flag to get the bounding box in polygon form. Defaults to False. 
- - Returns: - dict or shapely.Polygon: The bounding box in the requested form - """ - if polygon: - return self.bounding_box_polygon - else: - return self.bounding_box - - @beartype.beartype - def set_filename(self, datatype: Datatype, filename: str): - """ - Set the filename for a specific datatype - - Args: - datatype (Datatype): - The datatype for the filename specified - filename (str): - The filename to store - """ - if self.filenames[datatype] != filename: - self.filenames[datatype] = filename - self.data_states[datatype] = Datastate.UNLOADED - if filename == "": - self.dirtyflags[datatype] = False - else: - self.dirtyflags[datatype] = True - - @beartype.beartype - def get_filename(self, datatype: Datatype): - """ - Get a filename of a specified datatype - - Args: - datatype (Datatype): - The datatype of the filename wanted - - Returns: - string: The filename of the datatype specified - """ - if self.data_states[datatype] != Datastate.UNNAMED: - return self.filenames[datatype] - else: - print(f"Requested filename for {datatype.name} is not set\n") - return None - - @beartype.beartype - def set_config_filename( - self, filename: str, legacy_format: bool = False, lower: bool = False - ): - """ - Set the config filename and update the config structure - - Args: - filename (str): - The filename of the config file - legacy_format (bool, optional): - Whether the file is in m2lv2 form. Defaults to False. - """ - self.config_filename = filename - self.config.update_from_file(filename, legacy_format=legacy_format, lower=lower) - - def get_config_filename(self): - """ - Get the config filename - - Returns: - str: The config filename - """ - return self.config_filename - - @beartype.beartype - def set_colour_filename(self, filename: str): - """ - Set the filename of the colour csv file - - Args: - filename (str): - The csv colour look up table filename - """ - self.colour_filename = filename - - def get_colour_filename(self): - """ - Get the colour lookup table filename - - Returns: - str: The colour lookup table filename - """ - return self.colour_filename - - @beartype.beartype - def set_ignore_codes(self, codes: list): - """ - Set the codes to ignore in the geology shapefile - - Args: - codes (list): - The list of codes to ignore - """ - self.config.geology_config["ignore_codes"] = codes - self.data_states[Datatype.GEOLOGY] = Datastate.CLIPPED - self.dirtyflags[Datatype.GEOLOGY] = True - - @beartype.beartype - def get_ignore_codes(self) -> list: - """ - Get the list of codes to ignore - - Returns: - list: The list of strings to ignore - """ - return self.config.geology_config["ignore_codes"] - - @beartype.beartype - def set_filenames_from_australian_state(self, state: str): - """ - Set the shape/dtm filenames appropriate to the Australian state - - Args: - state (str): - The abbreviated Australian state name - - Raises: - ValueError: state string not in state list ['WA', 'SA', 'QLD', 'NSW', 'TAS', 'VIC', 'ACT', 'NT'] - """ - if state in ["WA", "SA", "QLD", "NSW", "TAS", "VIC", "ACT", "NT"]: - self.set_filename( - Datatype.GEOLOGY, AustraliaStateUrls.aus_geology_urls[state] - ) - self.set_filename( - Datatype.STRUCTURE, AustraliaStateUrls.aus_structure_urls[state] - ) - self.set_filename(Datatype.FAULT, AustraliaStateUrls.aus_fault_urls[state]) - self.set_filename(Datatype.FOLD, AustraliaStateUrls.aus_fold_urls[state]) - self.set_filename(Datatype.DTM, "au") - lower = state == "SA" - self.set_config_filename( - AustraliaStateUrls.aus_config_urls[state], - legacy_format=True, - lower=lower, - )
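For example (a sketch continuing the md instance above; the colour file is hypothetical), the state presets can be applied and then selectively overridden:

```python
md.set_filenames_from_australian_state("WA")  # geology/structure/fault/fold urls plus "au" DTM
md.set_colour_filename("my_colours.csv")      # hypothetical local file overriding the state CLUT
```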
- self.set_colour_filename(AustraliaStateUrls.aus_clut_urls[state]) - else: - raise ValueError(f"Australian state {state} not in state url database") - - @beartype.beartype - def check_filename(self, datatype: Datatype) -> bool: - """ - Check the filename for datatype is set - - Args: - datatype (Datatype): - The datatype of the filename to check - - Returns: - bool: true if the filename is set, false otherwise - """ - if self.filenames[datatype] is None or self.filenames[datatype] == "": - print(f"Warning: Filename for {str(datatype)} is not set") - return False - return True - - def check_filenames(self) -> bool: - """ - Check all filenames to see if they are valid - - Returns: - bool: true if the filenames are set, false otherwise - """ - ret: bool = True - for datatype in Datatype: - ret = ret and self.check_filename(datatype) - return ret - - @beartype.beartype - def load_all_map_data(self): - """ - Load all the map data for each datatype. Cycles through each type and loads it - """ - for i in [ - Datatype.GEOLOGY, - Datatype.STRUCTURE, - Datatype.FAULT, - Datatype.FOLD, - ]: - self.load_map_data(i) - self.load_raster_map_data(Datatype.DTM) - - @beartype.beartype - def load_map_data(self, datatype: Datatype): - """ - Load map data from file, reproject and clip it and then check data is valid - - Args: - datatype (Datatype): - The datatype to load - """ - if ( - self.filenames[datatype] is None - or self.data_states[datatype] == Datastate.UNNAMED - ): - print(f"Datatype {datatype.name} is not set and so cannot be loaded\n") - self.data[datatype] = self.get_empty_dataframe(datatype) - self.dirtyflags[datatype] = False - self.data_states[datatype] = Datastate.COMPLETE - elif self.dirtyflags[datatype] is True: - if self.data_states[datatype] == Datastate.UNLOADED: - # Load data from file - try: - map_filename = self.filenames[datatype] - map_filename = self.update_filename_with_bounding_box(map_filename) - map_filename = self.update_filename_with_projection(map_filename) - self.raw_data[datatype] = geopandas.read_file(map_filename) - self.data_states[datatype] = Datastate.LOADED - except Exception: - print( - f"Failed to open {datatype.name} file called '{self.filenames[datatype]}'\n" - ) - print(f"Cannot continue as {datatype.name} was not loaded\n") - return - if self.data_states[datatype] == Datastate.LOADED: - # Reproject geopanda to required CRS - self.set_working_projection_on_map_data(datatype) - self.data_states[datatype] = Datastate.REPROJECTED - if self.data_states[datatype] == Datastate.REPROJECTED: - # Clip geopanda to bounding polygon - self.raw_data[datatype] = geopandas.clip( - self.raw_data[datatype], self.bounding_box_polygon - ) - self.data_states[datatype] = Datastate.CLIPPED - if self.data_states[datatype] == Datastate.CLIPPED: - # Convert column names using codes_and_labels dictionary - self.check_map(datatype) - self.data_states[datatype] = Datastate.COMPLETE - self.dirtyflags[datatype] = False - - @beartype.beartype - def get_empty_dataframe(self, datatype: Datatype): - """ - Create a basic empty geodataframe for a specified datatype - - Args: - datatype (Datatype): - The datatype of the empty dataset - - Returns: - geopandas.GeoDataFrame or None: The created geodataframe - """ - data = None - if datatype == Datatype.FAULT: - data = geopandas.GeoDataFrame( - columns=["geometry", "ID", "NAME", "DIPDIR", "DIP"], - crs=self.working_projection, - ) - elif datatype == Datatype.FOLD: - data = geopandas.GeoDataFrame( - columns=["geometry", "ID", "NAME", "SYNCLINE"], - 
crs=self.working_projection, - ) - return data - - @beartype.beartype - def open_http_query(self, url: str): - """ - Attempt to open the http url and unzip the file if required - Note: This is specific to opening remotely hosted dtm data in geotiff format - - Args: - url (str): - The url to open - - Returns: - _type_: The geotiff file - """ - try: - request = urllib.request.Request(url, headers={"Accept-Encoding": "gzip"}) - response = urllib.request.urlopen(request, timeout=30) - if response.info().get("Content-Encoding") == "gzip": - return GzipFile(fileobj=BytesIO(response.read())) - else: - return response - except urllib.error.URLError: - return None - - @beartype.beartype - def __check_and_create_tmp_path(self): - """ - Create the temporary files directory if it is not valid - """ - if not os.path.isdir(self.tmp_path): - os.mkdir(self.tmp_path) - - @beartype.beartype - def __retrieve_tif(self, filename: str): - """ - Load geoTIFF files from Geoscience Australia or NOAA hawaii or https sources - - Args: - filename (str): - The filename or url or "au"/"hawaii" source to load from - - Returns: - _type_: The open geotiff in a gdal handler - """ - self.__check_and_create_tmp_path() - # For gdal debugging use exceptions - gdal.UseExceptions() - bb_ll = tuple( - self.bounding_box_polygon.to_crs("EPSG:4326").geometry.total_bounds - ) - # try: - if filename.lower() == "aus" or filename.lower() == "au": - url = "http://services.ga.gov.au/gis/services/DEM_SRTM_1Second_over_Bathymetry_Topography/MapServer/WCSServer?" - wcs = WebCoverageService(url, version="1.0.0") - coverage = wcs.getCoverage( - identifier="1", - bbox=bb_ll, - format="GeoTIFF", - crs=4326, - width=2048, - height=2048, - ) - # gdal cannot read a byte stream, so we need a file on the local - # system to open, or otherwise create a gdal file from scratch - # with Create - tmp_file = os.path.join(self.tmp_path, "StupidGDALLocalFile.tif") - with open(tmp_file, "wb") as fh: - fh.write(coverage.read()) - tif = gdal.Open(tmp_file) - elif filename == "hawaii": - import netCDF4 - - bbox_str = f"[({str(bb_ll[1])}):1:({str(bb_ll[3])})][({str(bb_ll[0])}):1:({str(bb_ll[2])})]" - filename = f"https://pae-paha.pacioos.hawaii.edu/erddap/griddap/srtm30plus_v11_land.nc?elev{bbox_str}" - f = urllib.request.urlopen(filename) - ds = netCDF4.Dataset("in-mem-file", mode="r", memory=f.read()) - spatial = [ - ds.geospatial_lon_min, - ds.geospatial_lon_resolution, - 0, - ds.geospatial_lat_min, - 0, - ds.geospatial_lat_resolution, - ] - srs = osr.SpatialReference() - srs.ImportFromEPSG(4326) - driver = gdal.GetDriverByName("GTiff") - tif = driver.Create( - f"/vsimem/{str(uuid4())}", - xsize=ds.dimensions["longitude"].size, - ysize=ds.dimensions["latitude"].size, - bands=1, - eType=gdal.GDT_Float32, - ) - tif.SetGeoTransform(spatial) - tif.SetProjection(srs.ExportToWkt()) - tif.GetRasterBand(1).WriteArray(numpy.flipud(ds.variables["elev"][:][:])) - elif filename.startswith("http"): - image_data = self.open_http_query(filename) - mmap_name = f"/vsimem/{str(uuid4())}" - gdal.FileFromMemBuffer(mmap_name, image_data.read()) - tif = gdal.Open(mmap_name) - else: - tif = gdal.Open(filename, gdal.GA_ReadOnly) - # except Exception: - # print( - # f"Failed to open geoTIFF file from '{filename}'\n" - # ) - return tif - - @beartype.beartype - def load_raster_map_data(self, datatype: Datatype): - """ - Load raster map data from file, reproject and clip it and then check data is valid - - Args: - datatype (Datatype): - The raster datatype to load - """ - if ( -
self.filenames[datatype] is None - or self.data_states[datatype] == Datastate.UNNAMED - ): - print(f"Datatype {datatype.name} is not set and so cannot be loaded\n") - elif self.dirtyflags[datatype] is True: - if self.data_states[datatype] == Datastate.UNLOADED: - # Load data from file - self.data[datatype] = self.__retrieve_tif(self.filenames[datatype]) - self.data_states[datatype] = Datastate.LOADED - if self.data_states[datatype] == Datastate.LOADED: - # Reproject raster to required CRS - try: - self.data[datatype] = gdal.Warp( - "", - self.data[datatype], - srcSRS=self.data[datatype].GetProjection(), - dstSRS=self.working_projection, - format="VRT", - outputType=gdal.GDT_Float32, - ) - except Exception: - print(f"Warp failed for {datatype.name}\n") - self.data_states[datatype] = Datastate.REPROJECTED - if self.data_states[datatype] == Datastate.REPROJECTED: - # Clip raster image to bounding polygon - bounds = [ - self.bounding_box["minx"], - self.bounding_box["maxy"], - self.bounding_box["maxx"], - self.bounding_box["miny"], - ] - self.data[datatype] = gdal.Translate( - "", - self.data[datatype], - format="VRT", - outputType=gdal.GDT_Float32, - outputSRS=self.working_projection, - projWin=bounds, - projWinSRS=self.working_projection, - ) - self.data_states[datatype] = Datastate.COMPLETE - self.dirtyflags[datatype] = False - - @beartype.beartype - def check_map(self, datatype: Datatype): - """ - Check the validity of map data loaded from file - - Args: - datatype (Datatype): - The datatype to check - """ - func = None - if datatype == Datatype.GEOLOGY: - func = self.parse_geology_map - elif datatype == Datatype.STRUCTURE: - func = self.parse_structure_map - elif datatype == Datatype.FAULT: - func = self.parse_fault_map - elif datatype == Datatype.FOLD: - func = self.parse_fold_map - - if func: - error, message = func() - if error: - print(message) - - @beartype.beartype - def parse_structure_map(self) -> tuple: - """ - Parse the structure shapefile data into a consistent format - - Returns: - tuple: A tuple of (bool: success/fail, str: failure message) - """ - # Check type and size of loaded structure map - if ( - self.raw_data[Datatype.STRUCTURE] is None - or type(self.raw_data[Datatype.STRUCTURE]) is not geopandas.GeoDataFrame - ): - return (True, "Structure map is not loaded or valid") - - if len(self.raw_data[Datatype.STRUCTURE]) < 2: - print( - "Structure map does not have enough orientations to complete calculations (need at least 2); projection may be inconsistent" - ) - - # Create new geodataframe - structure = geopandas.GeoDataFrame( - self.raw_data[Datatype.STRUCTURE]["geometry"] - ) - config = self.config.structure_config - - # Parse dip direction and dip columns - if config["dipdir_column"] in self.raw_data[Datatype.STRUCTURE]: - if config["orientation_type"] == "strike": - structure["DIPDIR"] = self.raw_data[Datatype.STRUCTURE].apply( - lambda row: (row[config["dipdir_column"]] + 90.0) % 360.0, axis=1 - ) - else: - structure["DIPDIR"] = self.raw_data[Datatype.STRUCTURE][ - config["dipdir_column"] - ] - else: - print( - f"Structure map does not contain dipdir_column '{config['dipdir_column']}'" - ) - - if config["dip_column"] in self.raw_data[Datatype.STRUCTURE]: - structure["DIP"] = self.raw_data[Datatype.STRUCTURE][config["dip_column"]] - else: - print(f"Structure map does not contain dip_column '{config['dip_column']}'") - - # Add bedding and overturned booleans - if config["overturned_column"] in self.raw_data[Datatype.STRUCTURE]: - structure["OVERTURNED"] = ( -
self.raw_data[Datatype.STRUCTURE][config["overturned_column"]] - .astype(str) - .str.contains(config["overturned_text"]) - ) - else: - structure["OVERTURNED"] = False - - if config["description_column"] in self.raw_data[Datatype.STRUCTURE]: - structure["BEDDING"] = ( - self.raw_data[Datatype.STRUCTURE][config["description_column"]] - .astype(str) - .str.contains(config["bedding_text"]) - ) - else: - structure["BEDDING"] = False - - # Add object id - if config["objectid_column"] in self.raw_data[Datatype.STRUCTURE]: - structure["ID"] = self.raw_data[Datatype.STRUCTURE][ - config["objectid_column"] - ] - else: - structure["ID"] = numpy.arange(len(structure)) - - self.data[Datatype.STRUCTURE] = structure - return (False, "") - - @beartype.beartype - def parse_geology_map(self) -> tuple: - """ - Parse the geology shapefile data into a consistent format - - Returns: - tuple: A tuple of (bool: success/fail, str: failure message) - """ - # Check type of loaded geology map - if ( - self.raw_data[Datatype.GEOLOGY] is None - or type(self.raw_data[Datatype.GEOLOGY]) is not geopandas.GeoDataFrame - ): - return (True, "Geology map is not loaded or valid") - - # Create new geodataframe - geology = geopandas.GeoDataFrame(self.raw_data[Datatype.GEOLOGY]["geometry"]) - config = self.config.geology_config - - # Parse unit names and codes - if config["unitname_column"] in self.raw_data[Datatype.GEOLOGY]: - geology["UNITNAME"] = self.raw_data[Datatype.GEOLOGY][ - config["unitname_column"] - ].astype(str) - else: - msg = f"Geology map does not contain unitname_column {config['unitname_column']}" - print(msg) - return (True, msg) - if config["alt_unitname_column"] in self.raw_data[Datatype.GEOLOGY]: - geology["CODE"] = self.raw_data[Datatype.GEOLOGY][ - config["alt_unitname_column"] - ].astype(str) - else: - msg = f"Geology map does not contain alt_unitname_column {config['alt_unitname_column']}" - print(msg) - return (True, msg) - - # Parse group and supergroup columns - if config["group_column"] in self.raw_data[Datatype.GEOLOGY]: - geology["GROUP"] = self.raw_data[Datatype.GEOLOGY][ - config["group_column"] - ].astype(str) - else: - geology["GROUP"] = "" - if config["supergroup_column"] in self.raw_data[Datatype.GEOLOGY]: - geology["SUPERGROUP"] = self.raw_data[Datatype.GEOLOGY][ - config["supergroup_column"] - ].astype(str) - else: - geology["SUPERGROUP"] = "" - - # Parse description and rocktype columns for sill and intrusive flags - if config["description_column"] in self.raw_data[Datatype.GEOLOGY]: - geology["SILL"] = ( - self.raw_data[Datatype.GEOLOGY][config["description_column"]] - .astype(str) - .str.contains(config["sill_text"]) - ) - geology["DESCRIPTION"] = self.raw_data[Datatype.GEOLOGY][ - config["description_column"] - ].astype(str) - else: - geology["SILL"] = False - geology["DESCRIPTION"] = "" - - if config["rocktype_column"] in self.raw_data[Datatype.GEOLOGY]: - geology["INTRUSIVE"] = ( - self.raw_data[Datatype.GEOLOGY][config["rocktype_column"]] - .astype(str) - .str.contains(config["intrusive_text"]) - ) - geology["ROCKTYPE1"] = self.raw_data[Datatype.GEOLOGY][ - config["rocktype_column"] - ].astype(str) - else: - geology["INTRUSIVE"] = False - geology["ROCKTYPE1"] = "" - - if config["alt_rocktype_column"] in self.raw_data[Datatype.GEOLOGY]: - geology["ROCKTYPE2"] = self.raw_data[Datatype.GEOLOGY][ - config["alt_rocktype_column"] - ].astype(str) - else: - geology["ROCKTYPE2"] = "" - - # TODO: Explode intrusion multipart geology - - # Parse age columns - if config["minage_column"] in 
self.raw_data[Datatype.GEOLOGY]: - geology["MIN_AGE"] = self.raw_data[Datatype.GEOLOGY][ - config["minage_column"] - ].astype(numpy.float64) - else: - geology["MIN_AGE"] = 0.0 - if config["maxage_column"] in self.raw_data[Datatype.GEOLOGY]: - geology["MAX_AGE"] = self.raw_data[Datatype.GEOLOGY][ - config["maxage_column"] - ].astype(numpy.float64) - else: - geology["MAX_AGE"] = 100000.0 - - # Add object id - if config["objectid_column"] in self.raw_data[Datatype.GEOLOGY]: - geology["ID"] = self.raw_data[Datatype.GEOLOGY][config["objectid_column"]] - else: - geology["ID"] = numpy.arange(len(geology)) - - # TODO: Check for duplicates in "ID" - # TODO: Check that the exploded geology has more than 1 unit - # Do we need to explode the geometry at this stage for geology/faults/folds??? - # If not subsequent classes will need to be able to deal with them - # TODO: Check for Nans or blanks in "UNITNAME", "GROUP", "SUPERGROUP", "DESCRIPTION", "CODE", "ROCKTYPE" - # Strip out whitespace (\n \t) and '-', ',', '?' from "UNITNAME", "CODE" "GROUP" "SUPERGROUP" - geology["UNITNAME"] = geology["UNITNAME"].str.replace("[ -/?]", "_", regex=True) - geology["CODE"] = geology["CODE"].str.replace("[ -/?]", "_", regex=True) - geology["GROUP"] = geology["GROUP"].str.replace("[ -/?]", "_", regex=True) - geology["SUPERGROUP"] = geology["SUPERGROUP"].str.replace( - "[ -/?]", "_", regex=True - ) - - # Mask out ignored unit_names/codes (i.e. for cover) - for code in self.config.geology_config["ignore_codes"]: - geology = geology[~geology["CODE"].astype(str).str.contains(code)] - geology = geology[~geology["UNITNAME"].astype(str).str.contains(code)] - - geology = geology.dissolve(by="UNITNAME", as_index=False) - - # Note: alt_rocktype_column and volcanic_text columns not used - self.data[Datatype.GEOLOGY] = geology - return (False, "") - - @beartype.beartype - def parse_fault_map(self) -> tuple: - """ - Parse the fault shapefile data into a consistent format - - Returns: - tuple: A tuple of (bool: success/fail, str: failure message) - """ - # Check type of loaded fault map - if ( - self.raw_data[Datatype.FAULT] is None - or type(self.raw_data[Datatype.FAULT]) is not geopandas.GeoDataFrame - ): - return (True, "Fault map is not loaded or valid") - - # Create new geodataframe - faults = geopandas.GeoDataFrame(self.raw_data[Datatype.FAULT]["geometry"]) - config = self.config.fault_config - - if config["structtype_column"] in self.raw_data[Datatype.FAULT]: - faults["FEATURE"] = self.raw_data[Datatype.FAULT][ - config["structtype_column"] - ] - faults = faults[ - faults["FEATURE"].astype(str).str.contains(config["fault_text"]) - ] - if self.verbose_level > VerboseLevel.NONE: - if ( - len(faults) < len(self.raw_data[Datatype.FAULT]) - and len(faults) == 0 - ): - msg = f"Fault map reduced to 0 faults as structtype_column ({config['structtype_column']}) does not contain any row with fault_text \"{config['fault_text']}\"" - print(msg) - - if config["name_column"] in self.raw_data[Datatype.FAULT]: - faults["NAME"] = self.raw_data[Datatype.FAULT][ - config["name_column"] - ].astype(str) - else: - faults["NAME"] = "Fault_" + faults.index.astype(str) - - if config["dip_column"] in self.raw_data[Datatype.FAULT]: - faults["DIP"] = self.raw_data[Datatype.FAULT][config["dip_column"]].astype( - numpy.float64 - ) - else: - faults["DIP"] = numpy.nan # config["dip_null_value"] - - # Replace dip 0 with nan as dip 0 means unknown - faults["DIP"] = [numpy.nan if dip == 0 else dip for dip in faults["DIP"]] - - # Parse the dip direction for
the fault - if config["dipdir_flag"] != "alpha": - if config["dipdir_column"] in self.raw_data[Datatype.FAULT]: - faults["DIPDIR"] = self.raw_data[Datatype.FAULT][ - config["dipdir_column"] - ].astype(numpy.float64) - else: - faults["DIPDIR"] = numpy.nan - else: - # Take the geoDataSeries of the dipdir estimates (assume it's a string description) - if config["dip_estimate_column"] in self.raw_data[Datatype.FAULT]: - dipdir_text_estimates = self.raw_data[Datatype.FAULT][ - config["dip_estimate_column"] - ].astype(str) - elif config["dipdir_column"] in self.raw_data[Datatype.FAULT]: - dipdir_text_estimates = self.raw_data[Datatype.FAULT][ - config["dipdir_column"] - ].astype(str) - else: - dipdir_text_estimates = None - faults["DIPDIR"] = numpy.nan - - # Map dipdir_estimates in text form to cardinal direction - if dipdir_text_estimates is not None: - direction_map = { - "north_east": 45.0, - "south_east": 135.0, - "south_west": 225.0, - "north_west": 315.0, - "north": 0.0, - "east": 90.0, - "south": 180.0, - "west": 270.0, - } - for direction in direction_map: - dipdir_text_estimates = dipdir_text_estimates.astype( - str - ).str.replace( - f".*{direction}.*", str(direction_map[direction]), regex=True - ) - # Catch all for any field that still contains anything that isn't a number - dipdir_text_estimates = dipdir_text_estimates.astype(str).str.replace( - ".*[^0-9.].*", "nan", regex=True - ) - faults["DIPDIR"] = dipdir_text_estimates.astype(numpy.float64) - - # Add object id - if config["objectid_column"] in self.raw_data[Datatype.FAULT]: - faults["ID"] = self.raw_data[Datatype.FAULT][ - config["objectid_column"] - ].astype(int) - else: - faults["ID"] = faults.index - - if len(faults): - faults["NAME"] = faults.apply( - lambda fault: "Fault_" + str(fault["ID"]) - if fault["NAME"].lower() == "nan" - else fault["NAME"], - axis=1, - ) - faults["NAME"] = faults.apply( - lambda fault: "Fault_" + str(fault["ID"]) - if fault["NAME"].lower() == "none" - else fault["NAME"], - axis=1, - ) - faults["NAME"] = faults["NAME"].str.replace("[ -/?]", "_", regex=True) - - self.data[Datatype.FAULT] = faults - return (False, "") - - @beartype.beartype - def parse_fold_map(self) -> tuple: - """ - Parse the fold shapefile data into a consistent format - - Returns: - tuple: A tuple of (bool: success/fail, str: failure message) - """ - # Check type of loaded fold map - if ( - self.raw_data[Datatype.FOLD] is None - or type(self.raw_data[Datatype.FOLD]) is not geopandas.GeoDataFrame - ): - return (True, "Fold map is not loaded or valid") - - # Create new geodataframe - folds = geopandas.GeoDataFrame(self.raw_data[Datatype.FOLD]["geometry"]) - config = self.config.fold_config - - if config["structtype_column"] in self.raw_data[Datatype.FOLD]: - folds[config["structtype_column"]] = self.raw_data[Datatype.FOLD][ - config["structtype_column"] - ] - folds = folds[ - folds[config["structtype_column"]] - .astype(str) - .str.contains(config["fold_text"]) - ] - if self.verbose_level > VerboseLevel.NONE: - if ( - len(folds) < len(self.raw_data[Datatype.FOLD]) - and len(folds) == 0 - ): - msg = f"Fold map reduced to 0 folds as structtype_column ({config['structtype_column']}) does not contain any row with fold_text \"{config['fold_text']}\"" - print(msg) - - if config["foldname_column"] in self.raw_data[Datatype.FOLD]: - folds["NAME"] = self.raw_data[Datatype.FOLD][ - config["foldname_column"] - ].astype(str) - else: - folds["NAME"] = numpy.arange(len(folds)) - folds["NAME"] = "Fold_" + folds["NAME"].astype(str) - - # Extract syncline from
description field - if config["description_column"] in self.raw_data[Datatype.FOLD]: - folds["SYNCLINE"] = ( - self.raw_data[Datatype.FOLD][config["description_column"]] - .astype(str) - .str.contains(config["synform_text"]) - ) - else: - folds["SYNCLINE"] = False - - # Add object id - if config["objectid_column"] in self.raw_data[Datatype.FOLD]: - folds["ID"] = self.raw_data[Datatype.FOLD][config["objectid_column"]] - else: - folds["ID"] = numpy.arange(len(folds)) - - self.data[Datatype.FOLD] = folds - return (False, "") - - @beartype.beartype - def set_working_projection_on_map_data(self, datatype: Datatype): - """ - Set the working projection on the GeoDataFrame structure or if already set reproject the data - - Args: - datatype (Datatype): - The datatype to set or reproject - """ - if self.working_projection is None: - print("No working projection set leaving map data in original projection") - elif type(self.raw_data[datatype]) is geopandas.GeoDataFrame: - if self.data_states[datatype] >= Datastate.LOADED: - if self.raw_data[datatype].crs is None: - print( - f"No projection on original map data, assigning to working_projection {self.working_projection}" - ) - self.raw_data[datatype].crs = self.working_projection - else: - self.raw_data[datatype].to_crs( - crs=self.working_projection, inplace=True - ) - else: - print( - f"Type of {datatype.name} map not a GeoDataFrame so cannot change map crs projection" - ) - - @beartype.beartype - def save_all_map_data(self, output_dir: str, extension: str = ".csv"): - """ - Save all the map data to file - - Args: - output_dir (str): - The directory to save to - extension (str, optional): - The extension to use for the data. Defaults to ".csv". - """ - for i in [ - Datatype.GEOLOGY, - Datatype.STRUCTURE, - Datatype.FAULT, - Datatype.FOLD, - ]: - self.save_raw_map_data(output_dir, i, extension) - - @beartype.beartype - def save_raw_map_data( - self, output_dir: str, datatype: Datatype, extension: str = ".shp.zip" - ): - """ - Save the map data from datatype to file - - Args: - output_dir (str): - The directory to save to - datatype (Datatype): - The datatype of the geopanda to save - extension (str, optional): - The extension to use for the data. Defaults to ".csv". 
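The assign-or-reproject branch in set_working_projection_on_map_data above is the usual geopandas idiom; a standalone sketch with a placeholder file and EPSG code:

```python
import geopandas

gdf = geopandas.read_file("geology.shp")  # placeholder input
if gdf.crs is None:
    gdf.crs = "EPSG:28350"          # no CRS recorded: assume it, don't transform
else:
    gdf = gdf.to_crs("EPSG:28350")  # CRS known: transform the coordinates
```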
- """ - try: - filename = os.path.join(output_dir, datatype.name + extension) - if extension == ".csv": - # TODO: Add geopandas to pandas converter and then write csv - # self.raw_data[datatype].write_csv(filename) - print("GeoDataFrame to CSV conversion not implimented") - else: - self.raw_data[datatype].to_file(filename) - except Exception: - print(f"Failed to save {datatype.name} to file named {filename}\n") - - @beartype.beartype - def get_raw_map_data(self, datatype: Datatype): - """ - Get the raw data geopanda of the specified datatype - - Args: - datatype (Datatype): - The datatype to retrieve - - Returns: - geopandas.GeoDataFrame: The raw data - """ - if ( - self.data_states[datatype] != Datastate.COMPLETE - or self.dirtyflags[datatype] is True - ): - self.load_map_data(datatype) - return self.raw_data[datatype] - - @beartype.beartype - def get_map_data(self, datatype: Datatype): - """ - Get the data geopanda of the specified datatype - - Args: - datatype (Datatype): - The datatype to retrieve - - Returns: - geopandas.GeoDataFrame: The dataframe - """ - if ( - self.data_states[datatype] != Datastate.COMPLETE - or self.dirtyflags[datatype] is True - ): - self.load_map_data(datatype) - return self.data[datatype] - - @beartype.beartype - def update_filename_with_bounding_box(self, filename: str): - """ - Update the filename/url with the bounding box - This replace all instances of {BBOX_STR} with the bounding_box_str - - Args: - filename (str): - The original filename - - Raises: - ValueError: Filename is blank - - Returns: - str: The modified filename - """ - if filename is None: - raise ValueError(f"Filename {filename} is invalid") - return filename.replace("{BBOX_STR}", self.bounding_box_str) - - @beartype.beartype - def update_filename_with_projection(self, filename: str): - """ - Update the filename/url with the projection - This replace all instances of {PROJ_STR} with the projection string - - Args: - filename (str): - The original filename - - Raises: - ValueError: Filename is blank - - Returns: - str: The modified filename - """ - if filename is None: - raise ValueError(f"Filename {filename} is invalid") - return filename.replace("{PROJ_STR}", self.working_projection) - - def calculate_bounding_box_and_projection(self): - """ - Calculate the bounding box and projection from the geology file - - Returns: - dict, str: The bounding box and projection of the geology shapefile - """ - if self.filenames[Datatype.GEOLOGY] is None: - print( - "Could not open geology file as none set, no bounding box or projection available" - ) - return None, None - temp_geology_filename = self.filenames[Datatype.GEOLOGY] - temp_geology_filename = temp_geology_filename.replace("{BBOX_STR}", "") - temp_geology_filename = temp_geology_filename.replace("{PROJ_STR}", "") - try: - geology_data = geopandas.read_file(temp_geology_filename) - bounds = geology_data.total_bounds - return { - "minx": bounds[0], - "maxx": bounds[1], - "miny": bounds[2], - "maxy": bounds[3], - }, geology_data.crs - except Exception: - print( - f"Could not open geology file {temp_geology_filename} so no bounding box or projection found" - ) - return None, None - - @beartype.beartype - def export_wkt_format_files(self): - """ - Save out the geology and fault GeoDataFrames in WKT format - This is used by map2model - """ - # TODO: - Move away from tab seperators entirely (topology and map2model) - - self.__check_and_create_tmp_path() - if not os.path.isdir(os.path.join(self.tmp_path, "map2model_data")): - 
os.mkdir(os.path.join(self.tmp_path, "map2model_data")) - - # Check geology data status and export to a WKT format file - self.load_map_data(Datatype.GEOLOGY) - if type(self.data[Datatype.GEOLOGY]) is not geopandas.GeoDataFrame: - print("Cannot export geology data as it is not a GeoDataFrame") - elif self.data_states[Datatype.GEOLOGY] != Datastate.COMPLETE: - print( - f"Cannot export geology data as it is only loaded to {self.data_states[Datatype.GEOLOGY].name} status" - ) - else: - columns = [ - "geometry", - "ID", - "UNITNAME", - "GROUP", - "MIN_AGE", - "MAX_AGE", - "CODE", - "ROCKTYPE1", - "ROCKTYPE2", - "DESCRIPTION", - ] - geology = self.get_map_data(Datatype.GEOLOGY)[columns].copy() - geology.reset_index(inplace=True, drop=True) - geology.rename( - columns={"geometry": "WKT", "CODE": "UNITNAME", "UNITNAME": "CODE"}, - inplace=True, - ) - geology["MIN_AGE"] = geology["MIN_AGE"].replace("None", 0) - geology["MAX_AGE"] = geology["MAX_AGE"].replace("None", 4500000000) - geology["GROUP"] = geology["GROUP"].replace("None", "") - geology["ROCKTYPE1"] = geology["ROCKTYPE1"].replace("", "None") - geology["ROCKTYPE2"] = geology["ROCKTYPE2"].replace("", "None") - geology.to_csv( - os.path.join(self.tmp_path, "map2model_data", "geology_wkt.csv"), - sep="\t", - index=False, - ) - - # Check fault data status and export to a WKT format file - self.load_map_data(Datatype.FAULT) - if type(self.data[Datatype.FAULT]) is not geopandas.GeoDataFrame: - print("Cannot export fault data as it is not a GeoDataFrame") - elif self.data_states[Datatype.FAULT] != Datastate.COMPLETE: - print( - f"Cannot export fault data as it is only loaded to {self.data_states[Datatype.FAULT].name} status" - ) - else: - faults = self.get_map_data(Datatype.FAULT).copy() - faults.rename(columns={"geometry": "WKT"}, inplace=True) - faults.to_csv( - os.path.join(self.tmp_path, "map2model_data", "faults_wkt.csv"), - sep="\t", - index=False, - ) - - @beartype.beartype - def get_value_from_raster(self, datatype: Datatype, x, y): - """ - Get the value from a raster map at the specified point - - Args: - datatype (Datatype): - The datatype of the raster map to retrieve from - x (float or int): - The easting coordinate of the value - y (float or int): - The northing coordinate of the value - - Returns: - float or int: The value at the point specified - """ - data = self.get_map_data(datatype) - if data is None: - print(f"Cannot get value from {datatype.name} data as data is not loaded") - return None - inv_geotransform = gdal.InvGeoTransform(data.GetGeoTransform()) - - px = int( - inv_geotransform[0] + inv_geotransform[1] * x + inv_geotransform[2] * y - ) - py = int( - inv_geotransform[3] + inv_geotransform[4] * x + inv_geotransform[5] * y - ) - # Clamp values to the edges of raster if past boundary, similar to GL_CLIP - px = max(px, 0) - px = min(px, data.RasterXSize - 1) - py = max(py, 0) - py = min(py, data.RasterYSize - 1) - val = data.ReadAsArray(px, py, 1, 1)[0][0] - return val - - @beartype.beartype - def __value_from_raster(self, inv_geotransform, data, x: float, y: float): - """ - Get the value from a raster dataset at the specified point - - Args: - inv_geotransform (gdal.GeoTransform): - The inverse of the data's geotransform - data (numpy.array): - The raster data - x (float): - The easting coordinate of the value - y (float): - The northing coordinate of the value - - Returns: - float or int: The value at the point specified - """ - px = int( - inv_geotransform[0] + inv_geotransform[1] * x + inv_geotransform[2] * y - ) - py = 
int( - inv_geotransform[3] + inv_geotransform[4] * x + inv_geotransform[5] * y - ) - # Clamp values to the edges of raster if past boundary, similar to GL_CLIP - px = max(px, 0) - px = min(px, data.shape[0] - 1) - py = max(py, 0) - py = min(py, data.shape[1] - 1) - return data[px][py] - - @beartype.beartype - def get_value_from_raster_df(self, datatype: Datatype, df: pandas.DataFrame): - """ - Add a 'Z' column to a dataframe with the heights from the 'X' and 'Y' coordinates - - Args: - datatype (Datatype): - The datatype of the raster map to retrieve from - df (pandas.DataFrame): - The original dataframe with 'X' and 'Y' columns - - Returns: - pandas.DataFrame: The modified dataframe - """ - if len(df) <= 0: - df["Z"] = [] - return df - data = self.get_map_data(datatype) - if data is None: - print("Cannot get value from data as data is not loaded") - return None - - inv_geotransform = gdal.InvGeoTransform(data.GetGeoTransform()) - data_array = numpy.array(data.GetRasterBand(1).ReadAsArray().T) - - df["Z"] = df.apply( - lambda row: self.__value_from_raster( - inv_geotransform, data_array, row["X"], row["Y"] - ), - axis=1, - ) - return df - - @beartype.beartype - def extract_all_contacts(self, save_contacts=True): - """ - Extract the contacts between units in the geology GeoDataFrame - """ - geology = self.get_map_data(Datatype.GEOLOGY).copy() - geology = geology.dissolve(by="UNITNAME", as_index=False) - # Remove intrusions - geology = geology[~geology["INTRUSIVE"]] - geology = geology[~geology["SILL"]] - # Remove faults from contact geometry - if self.get_map_data(Datatype.FAULT) is not None: - faults = self.get_map_data(Datatype.FAULT).copy() - faults["geometry"] = faults.buffer(50) - geology = geopandas.overlay( - geology, faults, how="difference", keep_geom_type=False - ) - units = geology["UNITNAME"].unique() - column_names = ["UNITNAME_1", "UNITNAME_2", "geometry"] - contacts = geopandas.GeoDataFrame( - crs=geology.crs, columns=column_names, data=None - ) - while len(units) > 1: - unit1 = units[0] - units = units[1:] - for unit2 in units: - if unit1 != unit2: - join = geopandas.overlay( - geology[geology["UNITNAME"] == unit1], - geology[geology["UNITNAME"] == unit2], - keep_geom_type=False, - )[column_names] - join["geometry"] = join.buffer(1) - buffered = geology[geology["UNITNAME"] == unit2][ - ["geometry"] - ].copy() - buffered["geometry"] = buffered.boundary - end = geopandas.overlay(buffered, join, keep_geom_type=False) - if len(end): - contacts = pandas.concat([contacts, end], ignore_index=True) - # contacts["TYPE"] = "UNKNOWN" - contacts["length"] = [row.length for row in contacts["geometry"]] - if save_contacts: - self.contacts = contacts - return contacts - - @beartype.beartype - def extract_basal_contacts(self, stratigraphic_column: list, save_contacts=True): - """ - Identify the basal unit of the contacts based on the stratigraphic column - - Args: - stratigraphic_column (list): - The stratigraphic column to use - """ - units = stratigraphic_column - basal_contacts = self.contacts.copy() - basal_contacts["ID"] = basal_contacts.apply( - lambda row: min( - units.index(row["UNITNAME_1"]), units.index(row["UNITNAME_2"]) - ), - axis=1, - ) - basal_contacts["basal_unit"] = basal_contacts.apply( - lambda row: units[row["ID"]], axis=1 - ) - basal_contacts["distance"] = basal_contacts.apply( - lambda row: abs( - units.index(row["UNITNAME_1"]) - units.index(row["UNITNAME_2"]) - ), - axis=1, - ) - basal_contacts["type"] = basal_contacts.apply( - lambda row: "ABNORMAL" if
abs(row["distance"]) > 1 else "BASAL", axis=1 - ) - basal_contacts = basal_contacts[["ID", "basal_unit", "type", "geometry"]] - if save_contacts: - self.basal_contacts = basal_contacts - return basal_contacts - - @beartype.beartype - def colour_units(self, stratigraphic_units: pandas.DataFrame, random: bool = False): - """ - Add a colour column to the units in the stratigraphic units structure - - Args: - stratigraphic_units (pandas.DataFrame): - The stratigraphic units data - random (bool, optional): - Flag of whether to add random colours to missing entries in the colour lookup table. Defaults to False. - - Returns: - pandas.DataFrame: The modified units - """ - - colour_lookup = pandas.DataFrame(columns=["UNITNAME", "colour"]) - try: - colour_lookup = pandas.read_csv(self.colour_filename, sep=",") - except FileNotFoundError: - print(f"Colour Lookup file {self.colour_filename} not found") - - colour_lookup["colour"] = colour_lookup["colour"].str.upper() - if "UNITNAME" in colour_lookup.columns and "colour" in colour_lookup.columns: - stratigraphic_units = stratigraphic_units.merge( - colour_lookup, - left_on="name", - right_on="UNITNAME", - suffixes=("_old", ""), - how="left", - ) - stratigraphic_units.loc[ - stratigraphic_units["colour"].isna(), ["colour"] - ] = random_colours_hex(stratigraphic_units["colour"].isna().sum()) - stratigraphic_units.drop(columns=["UNITNAME", "colour_old"], inplace=True) - else: - print( - f"Colour Lookup file {self.colour_filename} does not contain 'UNITNAME' or 'colour' field" - ) - return stratigraphic_units diff --git a/map2loop/project.py b/map2loop/project.py deleted file mode 100644 index 0a24dd32..00000000 --- a/map2loop/project.py +++ /dev/null @@ -1,683 +0,0 @@ -import beartype -from .m2l_enums import VerboseLevel, ErrorState, Datatype -from .mapdata import MapData -from .sampler import Sampler, SamplerDecimator, SamplerSpacing -from .thickness_calculator import ThicknessCalculator, ThicknessCalculatorAlpha -from .throw_calculator import ThrowCalculator, ThrowCalculatorAlpha -from .sorter import Sorter, SorterAgeBased, SorterAlpha, SorterUseNetworkX, SorterUseHint, SorterMaximiseContacts, SorterObservationProjections -from .stratigraphic_column import StratigraphicColumn -from .deformation_history import DeformationHistory -from .map2model_wrapper import Map2ModelWrapper -import LoopProjectFile as LPF - -import numpy -import pandas -import geopandas -import os -import re -from matplotlib.colors import to_rgba -from osgeo import gdal - -# TODO: When geopandas gets updated check that this FutureWarning supression -# is still needed for GeoDataFrame.clip methods -import warnings -warnings.simplefilter(action="ignore", category=FutureWarning) - - -class Project(object): - """ - The main entry point into using map2loop - - Attiributes - ----------- - verbose_level: m2l_enums.VerboseLevel - A selection that defines how much console logging is output - samplers: Sampler - A list of samplers used to extract point samples from polyonal or line segments. 
Indexed by m2l_enums.Datatype - sorter: Sorter - The sorting algorithm to use for calculating the stratigraphic column - thickness_calculator: ThicknessCalculator - The algorithm to use for making unit thickness estimations - loop_filename: str - The name of the loop project file used in this project - map_data: MapData - The structure that holds all map and dtm data - map2model: Map2ModelWrapper - A wrapper around the map2model module that extracts unit and fault adjacency - stratigraphic_column: StratigraphicColumn - The structure that holds the unit information and ordering - deformation_history: DeformationHistory - The structure that holds the fault and fold information and interactions - """ - - @beartype.beartype - def __init__( - self, - verbose_level: VerboseLevel = VerboseLevel.ALL, - tmp_path: str = "m2l_data_tmp", - working_projection=None, - bounding_box=None, - use_australian_state_data: str = "", - geology_filename: str = "", - structure_filename: str = "", - fault_filename: str = "", - fold_filename: str = "", - dtm_filename: str = "", - config_filename: str = "", - config_dictionary: dict = {}, - clut_filename: str = "", - clut_file_legacy: bool = False, - save_pre_checked_map_data: bool = False, - loop_project_filename: str = "", - **kwargs - ): - """ - The initialiser for the map2loop project - - Args: - verbose_level (VerboseLevel, optional): - How much console output is sent. Defaults to VerboseLevel.ALL. - tmp_path (str, optional): - The directory for storing temporary files. Defaults to "m2l_data_tmp". - working_projection (str or int, optional): - The EPSG projection to use. Defaults to None. - bounding_box (list or dict, optional): - The boundary extents of the project (m). Defaults to None. - use_australian_state_data (str, optional): - Whether to use default state data and which state to use. Defaults to "". - geology_filename (str, optional): - The filename of the geology shapefile. Defaults to "". - structure_filename (str, optional): - The filename of the structure shapefile. Defaults to "". - fault_filename (str, optional): - The filename of the fault shapefile. Defaults to "". - fold_filename (str, optional): - The filename of the fold shapefile. Defaults to "". - dtm_filename (str, optional): - The filename of the digital terrain map to use. Defaults to "". - config_filename (str, optional): - The filename of the configuration json file to use (if not using config_dictionary). Defaults to "". - config_dictionary (dict, optional): - A dictionary version of the configuration file. Defaults to {}. - clut_filename (str, optional): - The filename of the colour look up table to use. Defaults to "". - clut_file_legacy (bool, optional): - A flag to indicate if the clut file is in the legacy format. Defaults to False. - save_pre_checked_map_data (bool, optional): - A flag to save all map data to file before use. Defaults to False. - loop_project_filename (str, optional): - The filename of the loop project file. Defaults to "".
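A minimal end-to-end sketch of constructing and running a project with these arguments (the extents are placeholders and the output filename is hypothetical):

```python
from map2loop.project import Project
from map2loop.m2l_enums import VerboseLevel

proj = Project(
    use_australian_state_data="WA",
    working_projection="EPSG:28350",
    # minx, maxx, miny, maxy, top, base
    bounding_box=(500000, 600000, 7400000, 7500000, 0, 2000),
    loop_project_filename="wa_model.loop3d",  # hypothetical output name
    verbose_level=VerboseLevel.NONE,
)
proj.run_all(take_best=True)
```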
- - Raises: - TypeError: Type of working_projection not a str or int - ValueError: bounding_box not length 4 or 6 - TypeError: Type of bounding_box not a dict or tuple - ValueError: use_australian_state_data not in state list ['WA', 'SA', 'QLD', 'NSW', 'TAS', 'VIC', 'ACT', 'NT'] - """ - self._error_state = ErrorState.NONE - self._error_state_msg = "" - self.verbose_level = verbose_level - self.samplers = [SamplerDecimator()] * len(Datatype) - self.set_default_samplers() - self.sorter = SorterUseHint() - self.thickness_calculator = ThicknessCalculatorAlpha() - self.throw_calculator = ThrowCalculatorAlpha() - self.loop_filename = loop_project_filename - - self.map_data = MapData(tmp_path=tmp_path, verbose_level=verbose_level) - self.map2model = Map2ModelWrapper(self.map_data) - self.stratigraphic_column = StratigraphicColumn() - self.deformation_history = DeformationHistory() - - # Check for alternate config filenames in kwargs - if "metadata_filename" in kwargs and config_filename == "": - config_filename = kwargs["metadata_filename"] - - # Sanity check on working projection parameter - if type(working_projection) is str or type(working_projection) is int: - self.map_data.set_working_projection(working_projection) - elif working_projection is None: - if verbose_level != VerboseLevel.NONE: - print("No working projection set, will attempt to use the projection of the geology map") - else: - raise TypeError(f"Invalid type for working_projection {type(working_projection)}") - - # Sanity check bounding box - if type(bounding_box) is dict or type(bounding_box) is tuple: - if len(bounding_box) == 4 or len(bounding_box) == 6: - self.map_data.set_bounding_box(bounding_box) - else: - raise ValueError(f"Length of bounding_box {len(bounding_box)} is neither 4 (map boundary) nor 6 (volumetric boundary)") - else: - raise TypeError(f"Invalid type for bounding_box {type(bounding_box)}") - - # Assign filenames - if use_australian_state_data != "": - # Sanity check on state string - if use_australian_state_data in ['WA', 'SA', 'QLD', 'NSW', 'TAS', 'VIC', 'ACT', 'NT']: - self.map_data.set_filenames_from_australian_state(use_australian_state_data) - else: - raise ValueError(f"Australian state {use_australian_state_data} not in state url database") - if geology_filename != "": - self.map_data.set_filename(Datatype.GEOLOGY, geology_filename) - if structure_filename != "": - self.map_data.set_filename(Datatype.STRUCTURE, structure_filename) - if fault_filename != "": - self.map_data.set_filename(Datatype.FAULT, fault_filename) - if fold_filename != "": - self.map_data.set_filename(Datatype.FOLD, fold_filename) - if dtm_filename != "": - self.map_data.set_filename(Datatype.DTM, dtm_filename) - if config_filename != "": - self.map_data.set_config_filename(config_filename, legacy_format=clut_file_legacy) - if config_dictionary != {}: - self.map_data.config.update_from_dictionary(config_dictionary) - if clut_filename != "": - self.map_data.set_colour_filename(clut_filename) - - # Load all data (both shape and raster) - self.map_data.load_all_map_data() - - # If the flag to save out pre-checked data is set, do so - if save_pre_checked_map_data: - self.map_data.save_all_map_data(tmp_path) - - # Populate the stratigraphic column and deformation history from map data - self.stratigraphic_column.populate(self.map_data.get_map_data(Datatype.GEOLOGY)) - self.deformation_history.populate(self.map_data.get_map_data(Datatype.FAULT)) - - # Set default minimum fault length to 5% of the longest bounding box dimension - bounding_box = 
self.map_data.get_bounding_box() - largest_dimension = max(bounding_box["maxx"] - bounding_box["minx"], bounding_box["maxy"] - bounding_box["miny"]) - self.deformation_history.set_minimum_fault_length(largest_dimension * 0.05) - - if len(kwargs): - print(f"These keywords were not used in initialising the Loop project ({kwargs})") - - # Getters and Setters - @beartype.beartype - def set_ignore_codes(self, codes: list): - """ - Set the ignore codes (a list of unit names to ignore in the geology shapefile) - - Args: - codes (list): The list of strings to ignore - """ - self.map_data.set_ignore_codes(codes) - # Re-populate the units in the column with the new set of ignored geographical units - self.stratigraphic_column.populate(self.map_data.get_map_data(Datatype.GEOLOGY)) - - @beartype.beartype - def set_sorter(self, sorter: Sorter): - """ - Set the sorter for determining stratigraphic order of units - - Args: - sorter (Sorter): - The sorter to use. Must be of base class Sorter - """ - self.sorter = sorter - - def get_sorter(self): - """ - Get the name of the sorter being used - - Returns: - str: The name of the sorter used - """ - return self.sorter.sorter_label - - @beartype.beartype - def set_thickness_calculator(self, thickness_calculator: ThicknessCalculator): - """ - Set the thickness calculator that estimates unit thickness of all units - - Args: - thickness_calculator (ThicknessCalculator): - The calculator to use. Must be of base class ThicknessCalculator - """ - self.thickness_calculator = thickness_calculator - - def get_thickness_calculator(self): - """ - Get the name of the thickness calculator being used - - Returns: - str: The name of the thickness calculator used - """ - return self.thickness_calculator.thickness_calculator_label - - @beartype.beartype - def set_throw_calculator(self, throw_calculator: ThrowCalculator): - """ - Set the throw calculator that estimates fault throw values for all faults - - Args: - throw_calculator (ThrowCalculator): - The calculator to use. 
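Since the sorter and the thickness/throw calculators are pluggable instances of their base classes, they can be swapped before running; a sketch using classes imported at the top of this file:

```python
from map2loop.sorter import SorterAgeBased
from map2loop.thickness_calculator import ThicknessCalculatorAlpha

proj.set_sorter(SorterAgeBased())                        # age-based stratigraphic ordering
proj.set_thickness_calculator(ThicknessCalculatorAlpha())
print(proj.get_sorter(), proj.get_thickness_calculator())
```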
Must be of base class ThrowCalculator - """ - self.throw_calculator = throw_calculator - - def get_throw_calculator(self): - """ - Get the name of the throw calculator being used - - Returns: - str: The name of the throw calculator used - """ - return self.throw_calculator.throw_calculator_label - - def set_default_samplers(self): - """ - Initialisation function to set or reset the point samplers - """ - self.samplers[Datatype.STRUCTURE] = SamplerDecimator(1) - self.samplers[Datatype.GEOLOGY] = SamplerSpacing(50.0) - self.samplers[Datatype.FAULT] = SamplerSpacing(50.0) - self.samplers[Datatype.FOLD] = SamplerSpacing(50.0) - self.samplers[Datatype.DTM] = SamplerSpacing(50.0) - - @beartype.beartype - def set_sampler(self, datatype: Datatype, sampler: Sampler): - """ - Set the point sampler for a specific datatype - - Args: - datatype (Datatype): - The datatype to use this sampler on - sampler (Sampler): - The sampler to use - """ - self.samplers[datatype] = sampler - - @beartype.beartype - def get_sampler(self, datatype: Datatype): - """ - Get the sampler name being used for a datatype - - Args: - datatype (Datatype): The datatype of the sampler - - Returns: - str: The name of the sampler being used on the specified datatype - """ - return self.samplers[datatype].sampler_label - - @beartype.beartype - def set_minimum_fault_length(self, length: float): - """ - Set the cutoff length for faults to ignore - - Args: - length (float): - The cutoff length - """ - self.deformation_history.set_minimum_fault_length(length) - - @beartype.beartype - def get_minimum_fault_length(self) -> float: - """ - Get the cutoff length for faults - - Returns: - float: The cutoff length - """ - return self.deformation_history.get_minimum_fault_length() - - # Processing functions - def sample_map_data(self): - """ - Use the samplers to extract points along polylines or unit boundaries - """ - self.geology_samples = self.samplers[Datatype.GEOLOGY].sample(self.map_data.get_map_data(Datatype.GEOLOGY)) - self.structure_samples = self.samplers[Datatype.STRUCTURE].sample(self.map_data.get_map_data(Datatype.STRUCTURE)) - self.fault_samples = self.samplers[Datatype.FAULT].sample(self.map_data.get_map_data(Datatype.FAULT)) - self.fold_samples = self.samplers[Datatype.FOLD].sample(self.map_data.get_map_data(Datatype.FOLD)) - - def extract_geology_contacts(self): - """ - Use the stratigraphic column, and fault and geology data to extract points along contacts - """ - # Use stratigraphic column to determine basal contacts - self.map_data.extract_basal_contacts(self.stratigraphic_column.column) - self.sampled_contacts = self.samplers[Datatype.GEOLOGY].sample(self.map_data.basal_contacts) - self.map_data.get_value_from_raster_df(Datatype.DTM, self.sampled_contacts) - - def calculate_stratigraphic_order(self, take_best=False): - """ - Use unit relationships, unit ages and the sorter to create a stratigraphic column - """ - if take_best: - sorters = [SorterUseHint(), SorterAgeBased(), SorterAlpha(), SorterUseNetworkX()] - columns = [sorter.sort(self.stratigraphic_column.stratigraphicUnits, - self.map2model.get_unit_unit_relationships(), - self.map2model.get_sorted_units(), - self.map_data.contacts, - self.map_data, - ) for sorter in sorters] - basal_contacts = [self.map_data.extract_basal_contacts(column, save_contacts=False) for column in columns] - basal_lengths = [sum(list(contacts[contacts["type"] == "BASAL"]["geometry"].length)) for contacts in basal_contacts] - max_length = -1 - column = columns[0] - best_sorter = sorters[0] - 
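The per-datatype samplers configured by set_default_samplers above can be overridden the same way (spacing values here are placeholders, in map units):

```python
from map2loop.sampler import SamplerDecimator, SamplerSpacing
from map2loop.m2l_enums import Datatype

proj.set_sampler(Datatype.GEOLOGY, SamplerSpacing(100.0))  # boundary sample every 100 m
proj.set_sampler(Datatype.STRUCTURE, SamplerDecimator(2))  # keep every 2nd observation
print(proj.get_sampler(Datatype.GEOLOGY))
```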
for i in range(len(sorters)): - if basal_lengths[i] > max_length: - max_length = basal_lengths[i] - column = columns[i] - best_sorter = sorters[i] - print(f"Best sorter {best_sorter.sorter_label} calculated contact length of {max_length}") - self.stratigraphic_column.column = column - else: - self.stratigraphic_column.column = \ - self.sorter.sort(self.stratigraphic_column.stratigraphicUnits, - self.map2model.get_unit_unit_relationships(), - self.map2model.get_sorted_units(), - self.map_data.contacts, - self.map_data - ) - - def calculate_unit_thicknesses(self): - """ - Use the stratigraphic column, and fault and contact data to estimate unit thicknesses - """ - self.stratigraphic_column.stratigraphicUnits = \ - self.thickness_calculator.compute(self.stratigraphic_column.stratigraphicUnits, - self.stratigraphic_column.column, - self.map_data.basal_contacts, - self.map_data) - - def apply_colour_to_units(self): - """ - Apply the clut file to the units in the stratigraphic column - """ - self.stratigraphic_column.stratigraphicUnits = \ - self.map_data.colour_units(self.stratigraphic_column.stratigraphicUnits) - - def sort_stratigraphic_column(self): - """ - Sort the units in the stratigraphic column data structure to match the column order - """ - self.stratigraphic_column.sort_from_relationship_list(self.stratigraphic_column.column) - - def summarise_fault_data(self): - """ - Use the fault shapefile to make a summary of each fault by name - """ - self.map_data.get_value_from_raster_df(Datatype.DTM, self.fault_samples) - self.fault_samples = self.fault_samples.merge(self.map_data.get_map_data(Datatype.FAULT)[["ID", "DIPDIR", "DIP"]], on="ID", how="left") - self.fault_samples["DIPDIR"] = self.fault_samples["DIPDIR"].replace(numpy.nan, 0) - self.fault_samples["DIP"] = self.fault_samples["DIP"].replace(numpy.nan, 90) - self.deformation_history.summarise_data(self.fault_samples) - self.deformation_history.faults = \ - self.throw_calculator.compute(self.deformation_history.faults, - self.stratigraphic_column.column, - self.map_data.basal_contacts, - self.map_data) - - def run_all(self, user_defined_stratigraphic_column=None, take_best=False): - """ - Runs the full map2loop process - - Args: - user_defined_stratigraphic_column (None or list, optional): - A user fed list that overrides the stratigraphic column sorter. Defaults to None. - """ - # Calculate contacts before stratigraphic column - self.map_data.extract_all_contacts() - - # Calculate the stratigraphic column - if type(user_defined_stratigraphic_column) is list: - self.stratigraphic_column.column = user_defined_stratigraphic_column - else: - if user_defined_stratigraphic_column is not None: - print("user_defined_stratigraphic_column is not of type list. 
Attempting to calculate column")
-            self.calculate_stratigraphic_order(take_best)
-        self.sort_stratigraphic_column()
-
-        # Calculate basal contacts based on stratigraphic column
-        self.extract_geology_contacts()
-        self.calculate_unit_thicknesses()
-        self.sample_map_data()
-        self.summarise_fault_data()
-        self.apply_colour_to_units()
-        self.save_into_projectfile()
-
-    def save_into_projectfile(self):
-        """
-        Creates or updates a loop project file with all the data extracted from the map2loop process
-        """
-        # Open project file
-        if self.loop_filename is None or self.loop_filename == "":
-            self.loop_filename = os.path.join(self.map_data.tmp_path, os.path.basename(self.map_data.tmp_path) + ".loop3d")
-
-        # Check for an existing file and for a version mismatch
-        file_exists = os.path.isfile(self.loop_filename)
-        version_mismatch = False
-        existing_extents = None
-        if file_exists:
-            file_version = LPF.Get(self.loop_filename, "version", verbose=False)
-            if file_version["errorFlag"] is True:
-                print(f"Error: {file_version['errorString']}")
-                print(f"  Cannot export loop project file as current file of name {self.loop_filename} is not a loop project file")
-                return
-            else:
-                version_mismatch = file_version["value"] != LPF.LoopVersion()
-                if version_mismatch:
-                    print(f"Mismatched loop project file versions {LPF.LoopVersion()} and {file_version['value']}, old version will be replaced")
-            resp = LPF.Get(self.loop_filename, "extents")
-            if not resp["errorFlag"]:
-                existing_extents = resp["value"]
-
-        if not file_exists or version_mismatch:
-            LPF.CreateBasic(self.loop_filename)
-
-        # Save extents
-        if existing_extents is None:
-            LPF.Set(self.loop_filename, "extents", geodesic=[-180, -179, 0, 1],
-                    utm=[1,
-                         1,
-                         self.map_data.bounding_box["minx"],
-                         self.map_data.bounding_box["maxx"],
-                         self.map_data.bounding_box["miny"],
-                         self.map_data.bounding_box["maxy"]
-                         ],
-                    depth=[
-                        self.map_data.bounding_box["top"],
-                        self.map_data.bounding_box["base"]
-                    ],
-                    spacing=[1000, 1000, 500],
-                    preference="utm"
-                    )
-        else:
-            # TODO: Check loopfile extents match project extents before continuing
-            # if mismatch on extents warn the user and create new file
-            LPF.Set(self.loop_filename, "extents", **existing_extents)
-
-        # Save unit information
-        stratigraphic_data = numpy.zeros(len(self.stratigraphic_column.stratigraphicUnits), LPF.stratigraphicLayerType)
-        stratigraphic_data["layerId"] = self.stratigraphic_column.stratigraphicUnits["layerId"]
-        stratigraphic_data["minAge"] = self.stratigraphic_column.stratigraphicUnits["minAge"]
-        stratigraphic_data["maxAge"] = self.stratigraphic_column.stratigraphicUnits["maxAge"]
-        stratigraphic_data["name"] = self.stratigraphic_column.stratigraphicUnits["name"]
-        stratigraphic_data["group"] = self.stratigraphic_column.stratigraphicUnits["group"]
-        stratigraphic_data["enabled"] = 1
-        stratigraphic_data["rank"] = 0
-        stratigraphic_data["thickness"] = self.stratigraphic_column.stratigraphicUnits["thickness"]
-
-        # Split "#rrggbb" hex colour strings into 8-bit channels; the secondary
-        # colour is a slightly darkened (95%) copy of the primary colour
-        stratigraphic_data["colour1Red"] = [int(a[1:3], 16) for a in self.stratigraphic_column.stratigraphicUnits["colour"]]
-        stratigraphic_data["colour1Green"] = [int(a[3:5], 16) for a in self.stratigraphic_column.stratigraphicUnits["colour"]]
-        stratigraphic_data["colour1Blue"] = [int(a[5:7], 16) for a in self.stratigraphic_column.stratigraphicUnits["colour"]]
-        stratigraphic_data["colour2Red"] = [int(a * 0.95) for a in stratigraphic_data["colour1Red"]]
-        stratigraphic_data["colour2Green"] = [int(a * 0.95) for a in stratigraphic_data["colour1Green"]]
-        stratigraphic_data["colour2Blue"] = [int(a * 0.95) for a in stratigraphic_data["colour1Blue"]]
-        LPF.Set(self.loop_filename, "stratigraphicLog", data=stratigraphic_data, verbose=True)
-
-        # Save contacts
-        contacts_data = numpy.zeros(len(self.sampled_contacts), LPF.contactObservationType)
-        contacts_data["layerId"] = self.sampled_contacts["ID"]
-        contacts_data["easting"] = self.sampled_contacts["X"]
-        contacts_data["northing"] = self.sampled_contacts["Y"]
-        contacts_data["altitude"] = self.sampled_contacts["Z"]
-        LPF.Set(self.loop_filename, "contacts", data=contacts_data, verbose=True)
-
-        # Save fault information
-        faults_obs_data = numpy.zeros(len(self.fault_samples), LPF.faultObservationType)
-        faults_obs_data["eventId"] = self.fault_samples["ID"]
-        faults_obs_data["easting"] = self.fault_samples["X"]
-        faults_obs_data["northing"] = self.fault_samples["Y"]
-        faults_obs_data["altitude"] = self.fault_samples["Z"]
-        faults_obs_data["type"] = 0
-        faults_obs_data["dipDir"] = self.fault_samples["DIPDIR"]
-        faults_obs_data["dip"] = self.fault_samples["DIP"]
-        faults_obs_data["dipPolarity"] = 0  # self.fault_samples["DIPPOLARITY"]
-        # faults_obs_data["val"] = self.fault_samples["???"]
-        faults_obs_data["displacement"] = 100  # self.fault_samples["DISPLACEMENT"]
-
-        # TODO: Find a better way to assign posOnly for fault observations
-        from itertools import cycle, islice
-        faults_obs_data["posOnly"] = list(islice(cycle([0, 1]), len(faults_obs_data)))
-        LPF.Set(self.loop_filename, "faultObservations", data=faults_obs_data, verbose=True)
-
-        faults = self.deformation_history.get_faults_for_export()
-        faults_data = numpy.zeros(len(faults), LPF.faultEventType)
-        faults_data["eventId"] = faults["eventId"]
-        faults_data["name"] = faults["name"]
-        faults_data["minAge"] = faults["minAge"]
-        faults_data["maxAge"] = faults["maxAge"]
-        faults_data["group"] = faults["group"]
-        faults_data["supergroup"] = faults["supergroup"]
-        faults_data["avgDisplacement"] = faults["avgDisplacement"]
-        faults_data["avgDownthrowDir"] = faults["avgDownthrowDir"]
-        faults_data["influenceDistance"] = faults["influenceDistance"]
-        faults_data["verticalRadius"] = faults["verticalRadius"]
-        faults_data["horizontalRadius"] = faults["horizontalRadius"]
-        faults_data["colour"] = faults["colour"]
-        faults_data["centreEasting"] = faults["centreX"]
-        faults_data["centreNorthing"] = faults["centreY"]
-        faults_data["centreAltitude"] = faults["centreZ"]
-        faults_data["avgSlipDirEasting"] = faults["avgSlipDirX"]
-        faults_data["avgSlipDirNorthing"] = faults["avgSlipDirY"]
-        faults_data["avgSlipDirAltitude"] = faults["avgSlipDirZ"]
-        faults_data["avgNormalEasting"] = faults["avgNormalX"]
-        faults_data["avgNormalNorthing"] = faults["avgNormalY"]
-        faults_data["avgNormalAltitude"] = faults["avgNormalZ"]
-        LPF.Set(self.loop_filename, "faultLog", data=faults_data, verbose=True)
-
-        # Save structural information
-        observations = numpy.zeros(len(self.structure_samples), LPF.stratigraphicObservationType)
-        observations["layer"] = "s0"
-        observations["layerId"] = self.structure_samples["ID"]
-        observations["easting"] = self.structure_samples["X"]
-        observations["northing"] = self.structure_samples["Y"]
-        # observations["altitude"] = self.structure_samples["Z"]
-        observations["dipDir"] = self.structure_samples["DIPDIR"]
-        observations["dip"] = self.structure_samples["DIP"]
-        observations["dipPolarity"] = self.structure_samples["OVERTURNED"]
-        LPF.Set(self.loop_filename, "stratigraphicObservations", data=observations, verbose=True)
-
-        if self.map2model.fault_fault_relationships is not None:
-            ff_relationships = self.deformation_history.get_fault_relationships_with_ids(self.map2model.fault_fault_relationships)
-            relationships = numpy.zeros(len(ff_relationships), LPF.eventRelationshipType)
-            relationships["eventId1"] = ff_relationships["eventId1"]
-            relationships["eventId2"] = ff_relationships["eventId2"]
-            relationships["bidirectional"] = True
-            relationships["angle"] = ff_relationships["Angle"]
-            relationships["type"] = LPF.EventRelationshipType.FAULT_FAULT_ABUT
-            LPF.Set(self.loop_filename, "eventRelationships", data=relationships, verbose=True)
-
-    @beartype.beartype
-    def draw_geology_map(self, points: pandas.DataFrame = None, overlay: str = ""):
-        """
-        Plots the geology map with optional points or specific data
-
-        Args:
-            points (pandas.DataFrame, optional):
-                A dataframe to overlay on the geology map (must contain "X" and "Y" columns). Defaults to None.
-            overlay (str, optional):
-                Layer of points to overlay (options are "contacts", "basal_contacts", "orientations", "faults"). Defaults to "".
-        """
-        colour_lookup = self.stratigraphic_column.stratigraphicUnits[["name", "colour"]].set_index("name").to_dict()["colour"]
-        geol = self.map_data.get_map_data(Datatype.GEOLOGY).copy()
-        geol['colour'] = geol.apply(lambda row: colour_lookup[row.UNITNAME], axis=1)
-        geol['colour_rgba'] = geol.apply(lambda row: to_rgba(row['colour'], 1.0), axis=1)
-        if points is None and overlay == "":
-            geol.plot(color=geol['colour_rgba'])
-            return
-        else:
-            base = geol.plot(color=geol['colour_rgba'])
-            if overlay != "":
-                if overlay == "basal_contacts":
-                    self.map_data.basal_contacts[self.map_data.basal_contacts["type"] == "BASAL"].plot(ax=base)
-                    return
-                elif overlay == "contacts":
-                    points = self.sampled_contacts
-                elif overlay == "orientations":
-                    points = self.structure_samples
-                elif overlay == "faults":
-                    points = self.fault_samples
-                else:
-                    print(f"Invalid overlay option {overlay}")
-                    return
-            gdf = geopandas.GeoDataFrame(points, geometry=geopandas.points_from_xy(points["X"], points["Y"], crs=geol.crs))
-            gdf.plot(ax=base, marker="o", color="red", markersize=5)
-
-    @beartype.beartype
-    def save_mapdata_to_files(self, save_path: str = ".", extension: str = ".shp.zip"):
-        """
-        Saves the map data frames to files
-
-        Args:
-            save_path (str, optional):
-                The path to save the file to. Defaults to ".".
-            extension (str, optional):
-                An alternate extension to save the GeoDataFrame in. Defaults to ".shp.zip".
-        """
-        if not os.path.exists(save_path):
-            os.mkdir(save_path)
-        self.map_data.save_all_map_data(save_path, extension)
-
-    @beartype.beartype
-    def save_geotiff_raster(self, filename: str = "test.tif", projection: str = "", pixel_size: int = 25):
-        """
-        Saves the geology map to a geotiff
-
-        Args:
-            filename (str, optional):
-                The filename of the geotiff file to save to. Defaults to "test.tif".
-            projection (str, optional):
-                A string of the format "EPSG:3857" that is the projection to output. Defaults to the project working projection.
-            pixel_size (int, optional):
-                The size of a pixel in metres for the geotiff.
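-                Smaller values give a finer raster at the cost of a larger file.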
-                Defaults to 25.
-        """
-        colour_lookup = self.stratigraphic_column.stratigraphicUnits[["name", "colour"]].set_index("name").to_dict()["colour"]
-        geol = self.map_data.get_map_data(Datatype.GEOLOGY).copy()
-        geol['colour'] = geol.apply(lambda row: colour_lookup[row.UNITNAME], axis=1)
-        # Split the "#rrggbb" hex colour string into 8-bit channel values
-        geol["colour_red"] = geol.apply(lambda row: int(row['colour'][1:3], 16), axis=1)
-        geol["colour_green"] = geol.apply(lambda row: int(row['colour'][3:5], 16), axis=1)
-        geol["colour_blue"] = geol.apply(lambda row: int(row['colour'][5:7], 16), axis=1)
-        source_ds = gdal.OpenEx(geol.to_json())
-        source_layer = source_ds.GetLayer()
-        x_min, x_max, y_min, y_max = source_layer.GetExtent()
-
-        # Create the destination data source
-        x_res = int((x_max - x_min) / pixel_size)
-        y_res = int((y_max - y_min) / pixel_size)
-        target_ds = gdal.GetDriverByName('GTiff').Create(filename, x_res, y_res, 4, gdal.GDT_Byte)
-        target_ds.SetGeoTransform((x_min, pixel_size, 0, y_max, 0, -pixel_size))
-        band = target_ds.GetRasterBand(1)
-        band.SetNoDataValue(0)
-
-        # Rasterize the RGB bands and a fully opaque alpha band
-        gdal.RasterizeLayer(target_ds, [1], source_layer, options=["ATTRIBUTE=colour_red"])
-        gdal.RasterizeLayer(target_ds, [2], source_layer, options=["ATTRIBUTE=colour_green"])
-        gdal.RasterizeLayer(target_ds, [3], source_layer, options=["ATTRIBUTE=colour_blue"])
-        gdal.RasterizeLayer(target_ds, [4], source_layer, burn_values=[255])
-        if re.search("^epsg:[0-9]+$", projection.lower()):
-            print("Projection is:", projection)
-            target_ds.SetProjection(projection)
-        else:
-            print("CRS is:", geol.crs.to_string())
-            target_ds.SetProjection(geol.crs.to_string())
-        target_ds = None
-        source_layer = None
-        source_ds = None
diff --git a/map2loop/random_colour.py b/map2loop/random_colour.py
deleted file mode 100644
index 9cb89873..00000000
--- a/map2loop/random_colour.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import numpy as np
-
-
-def random_colours_hex(n):
-    """
-    Generate n random colours in hex format.
-    """
-    rgb = np.random.rand(n, 3)
-    return rgb_to_hex(rgb)
-
-
-def rgb_to_hex(rgb):
-    """
-    Convert a list of rgb values in the range [0,1] to hex format.
-    """
-    return [rgb_to_hex_single(r, g, b) for r, g, b in rgb]
-
-
-def rgb_to_hex_single(r, g, b):
-    """
-    Convert rgb values in the range [0,1] to hex format.
-    """
-    return "#%02x%02x%02x" % (int(r * 255), int(g * 255), int(b * 255))
diff --git a/map2loop/sampler.py b/map2loop/sampler.py
deleted file mode 100644
index 8d9d284f..00000000
--- a/map2loop/sampler.py
+++ /dev/null
@@ -1,139 +0,0 @@
-from abc import ABC, abstractmethod
-import beartype
-import geopandas
-import pandas
-import shapely
-import numpy
-
-
-class Sampler(ABC):
-    """
-    Base Class of Sampler used to force structure of Sampler
-
-    Args:
-        ABC (ABC): Derived from Abstract Base Class
-    """
-    def __init__(self):
-        """
-        Initialiser for Sampler
-        """
-        self.sampler_label = "SamplerBaseClass"
-
-    def type(self):
-        """
-        Getter for subclass type label
-
-        Returns:
-            str: Name of subclass
-        """
-        return self.sampler_label
-
-    @beartype.beartype
-    @abstractmethod
-    def sample(self, map_data: geopandas.GeoDataFrame) -> pandas.DataFrame:
-        """
-        Execute sampling method (abstract method)
-
-        Args:
-            map_data (geopandas.GeoDataFrame): data frame to sample
-
-        Returns:
-            pandas.DataFrame: data frame containing samples
-        """
-        pass
-
-
-class SamplerDecimator(Sampler):
-    """
-    Decimator sampler class which decimates the geo data frame based on the decimation value,
-    i.e. decimation = 10 means take every tenth point
-    Note: This only works on data frames with lists of points with columns "X" and "Y"
-    """
-    @beartype.beartype
-    def __init__(self, decimation: int = 1):
-        """
-        Initialiser for decimator sampler
-
-        Args:
-            decimation (int, optional): stride of the points to sample. Defaults to 1.
-        """
-        self.sampler_label = "SamplerDecimator"
-        decimation = max(decimation, 1)
-        self.decimation = decimation
-
-    @beartype.beartype
-    def sample(self, map_data: geopandas.GeoDataFrame) -> pandas.DataFrame:
-        """
-        Execute sample method: takes full point data, decimates it and returns the sampled points
-
-        Args:
-            map_data (geopandas.GeoDataFrame): the data frame to sample
-
-        Returns:
-            pandas.DataFrame: the sampled data points
-        """
-        data = map_data.copy()
-        data["X"] = data.geometry.x
-        data["Y"] = data.geometry.y
-        data.reset_index(drop=True, inplace=True)
-        return pandas.DataFrame(data[::self.decimation].drop(columns="geometry"))
-
-
-class SamplerSpacing(Sampler):
-    """
-    Spacing based sampler which decimates the geo data frame based on the distance between points along a line or,
-    in the case of a polygon, along the boundary of that polygon,
-    i.e. spacing = 500 means take a sample every 500 metres
-    Note: This only works on data frames that contain MultiPolygon, Polygon, MultiLineString and LineString geometry
-    """
-    @beartype.beartype
-    def __init__(self, spacing: float = 50.0):
-        """
-        Initialiser for spacing sampler
-
-        Args:
-            spacing (float, optional): The distance between samples. Defaults to 50.0.
-        """
-        self.sampler_label = "SamplerSpacing"
-        spacing = max(spacing, 1.0)
-        self.spacing = spacing
-
-    @beartype.beartype
-    def sample(self, map_data: geopandas.GeoDataFrame) -> pandas.DataFrame:
-        """
-        Execute sample method: takes full point data, samples it at the given spacing and returns the sampled points
-
-        Args:
-            map_data (geopandas.GeoDataFrame): the data frame to sample (optionally contains column ["ID"]; defaults to 0 when absent)
-
-        Returns:
-            pandas.DataFrame: the sampled data points
-        """
-        schema = {"ID": str, "X": float, "Y": float}
-        df = pandas.DataFrame(columns=schema.keys()).astype(schema)
-        for _, row in map_data.iterrows():
-            if type(row.geometry) is shapely.geometry.multipolygon.MultiPolygon:
-                targets = row.geometry.boundary.geoms
-            elif type(row.geometry) is shapely.geometry.polygon.Polygon:
-                targets = [row.geometry.boundary]
-            elif type(row.geometry) is shapely.geometry.multilinestring.MultiLineString:
-                targets = row.geometry.geoms
-            elif type(row.geometry) is shapely.geometry.linestring.LineString:
-                targets = [row.geometry]
-            else:
-                targets = []
-
-            # For the main cases Polygon and LineString the list 'targets' has one element
-            for target in targets:
-                df2 = pandas.DataFrame(columns=schema.keys()).astype(schema)
-                distances = numpy.arange(0, target.length, self.spacing)[:-1]
-                points = [target.interpolate(distance) for distance in distances]
-                df2["X"] = [point.x for point in points]
-                df2["Y"] = [point.y for point in points]
-                if "ID" in map_data.columns:
-                    df2["ID"] = row["ID"]
-                else:
-                    df2["ID"] = 0
-                df = pandas.concat([df, df2])
-        df.reset_index(drop=True, inplace=True)
-        return df
diff --git a/map2loop/sorter.py b/map2loop/sorter.py
deleted file mode 100644
index 3c30da69..00000000
--- a/map2loop/sorter.py
+++ /dev/null
@@ -1,438 +0,0 @@
-from abc import ABC, abstractmethod
-import beartype
-import pandas
-import math
-from .mapdata import MapData
-
-
-class Sorter(ABC):
-    """
-    Base Class of Sorter used to force structure of Sorter
-
-    Args:
-        ABC (ABC): Derived from Abstract Base Class
-    """
-    def __init__(self):
-        """
-        Initialiser for Sorter
-        """
-        self.sorter_label = "SorterBaseClass"
-
-    def type(self):
-        """
-        Getter for subclass type label
-
-        Returns:
-            str: Name of subclass
-        """
-        return self.sorter_label
-
-    @beartype.beartype
-    @abstractmethod
-    def sort(self, units: pandas.DataFrame, unit_relationships: pandas.DataFrame, stratigraphic_order_hint: list, contacts: pandas.DataFrame, map_data: MapData) -> list:
-        """
-        Execute sorter method (abstract method)
-
-        Args:
-            units (pandas.DataFrame): the data frame to sort (columns must contain ["layerId", "name", "minAge", "maxAge", "group"])
-            unit_relationships (pandas.DataFrame): the relationships between units (columns must contain ["Index1", "Unitname1", "Index2", "Unitname2"])
-            stratigraphic_order_hint (list): a list of unit names to be used as a hint to sorting the units
-            contacts (pandas.DataFrame): unit contacts with length of the contacts in metres
-            map_data (map2loop.MapData): a catchall so that access to all map data is available
-
-        Returns:
-            list: sorted list of unit names
-        """
-        pass
-
-
-class SorterUseHint(Sorter):
-    """
-    Sorter class which only returns the hint (no algorithm for sorting is done in this class)
-    """
-    def __init__(self):
-        """
-        Initialiser for use hint sorter
-        """
-        self.sorter_label = "SorterUseHint"
-
-    @beartype.beartype
-    def sort(self, units: pandas.DataFrame, unit_relationships: pandas.DataFrame, stratigraphic_order_hint: list, contacts: pandas.DataFrame, map_data: MapData) -> list:
-        """
-        Execute sorter method: takes unit data, relationships and a hint, and returns the sorted unit names based on this algorithm.
-        In this case it purely returns the hint list
-
-        Args:
-            units (pandas.DataFrame): the data frame to sort
-            unit_relationships (pandas.DataFrame): the relationships between units
-            stratigraphic_order_hint (list): a list of unit names to use as a hint to sorting the units
-            contacts (pandas.DataFrame): unit contacts with length of the contacts in metres
-            map_data (map2loop.MapData): a catchall so that access to all map data is available
-
-        Returns:
-            list: the sorted unit names
-        """
-        return stratigraphic_order_hint
-
-
-class SorterUseNetworkX(Sorter):
-    """
-    Sorter class which returns a sorted list of units based on the unit relationships using a topological graph sorting algorithm
-    """
-    def __init__(self):
-        """
-        Initialiser for networkx graph sorter
-        """
-        self.sorter_label = "SorterUseNetworkX"
-
-    @beartype.beartype
-    def sort(self, units: pandas.DataFrame, unit_relationships: pandas.DataFrame, stratigraphic_order_hint: list, contacts: pandas.DataFrame, map_data: MapData) -> list:
-        """
-        Execute sorter method: takes unit data, relationships and a hint, and returns the sorted unit names based on this algorithm.
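-        In this case the unit relationships are used as the edges of a directed
-        graph which is then topologically sorted; any cycles found are broken by
-        removing one of the offending contact edges before sorting.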
-
-        Args:
-            units (pandas.DataFrame): the data frame to sort
-            unit_relationships (pandas.DataFrame): the relationships between units
-            stratigraphic_order_hint (list): a list of unit names to use as a hint to sorting the units
-            contacts (pandas.DataFrame): unit contacts with length of the contacts in metres
-            map_data (map2loop.MapData): a catchall so that access to all map data is available
-
-        Returns:
-            list: the sorted unit names
-        """
-        try:
-            import networkx as nx
-        except Exception:
-            print("Cannot import networkx module, defaulting to SorterUseHint")
-            return stratigraphic_order_hint
-
-        graph = nx.DiGraph()
-        for _, row in units.iterrows():
-            graph.add_node(int(row["layerId"]), name=row["name"])
-        for _, row in unit_relationships.iterrows():
-            graph.add_edge(row["Index1"], row["Index2"])
-
-        # Break any cycles so that a topological sort is possible
-        cycles = list(nx.simple_cycles(graph))
-        for i in range(0, len(cycles)):
-            if graph.has_edge(cycles[i][0], cycles[i][1]):
-                graph.remove_edge(cycles[i][0], cycles[i][1])
-                print("  SorterUseNetworkX Warning: Cycle found and contact edge removed:", units["name"][cycles[i][0]], units["name"][cycles[i][1]])
-
-        indexes = list(nx.topological_sort(graph))
-        order = [units["name"][i] for i in indexes]
-        return order
-
-
-class SorterAgeBased(Sorter):
-    """
-    Sorter class which returns a sorted list of units based on the min and max ages of the units
-    """
-    def __init__(self):
-        """
-        Initialiser for age based sorter
-        """
-        self.sorter_label = "SorterAgeBased"
-
-    def sort(self, units: pandas.DataFrame, unit_relationships: pandas.DataFrame, stratigraphic_order_hint: list, contacts: pandas.DataFrame, map_data: MapData) -> list:
-        """
-        Execute sorter method: takes unit data, relationships and a hint, and returns the sorted unit names based on this algorithm.
-        In this case units are sorted by the mean of their min and max ages, grouped by group name where available
-
-        Args:
-            units (pandas.DataFrame): the data frame to sort
-            unit_relationships (pandas.DataFrame): the relationships between units
-            stratigraphic_order_hint (list): a list of unit names to use as a hint to sorting the units
-            contacts (pandas.DataFrame): unit contacts with length of the contacts in metres
-            map_data (map2loop.MapData): a catchall so that access to all map data is available
-
-        Returns:
-            list: the sorted unit names
-        """
-        sorted_units = units.copy()
-        if "minAge" in units.columns and "maxAge" in units.columns:
-            sorted_units["meanAge"] = sorted_units.apply(lambda row: (row["minAge"] + row["maxAge"]) / 2.0, axis=1)
-        else:
-            sorted_units["meanAge"] = 0
-        if "group" in units.columns:
-            sorted_units = sorted_units.sort_values(by=["group", "meanAge"])
-        else:
-            sorted_units = sorted_units.sort_values(by=["meanAge"])
-
-        return list(sorted_units["name"])
-
-
-class SorterAlpha(Sorter):
-    """
-    Sorter class which returns a sorted list of units based on the adjacency of units,
-    prioritising the units with the lowest number of contacting units
-    """
-    def __init__(self):
-        """
-        Initialiser for adjacency based sorter
-        """
-        self.sorter_label = "SorterAlpha"
-
-    def sort(self, units: pandas.DataFrame, unit_relationships: pandas.DataFrame, stratigraphic_order_hint: list, contacts: pandas.DataFrame, map_data: MapData) -> list:
-        """
-        Execute sorter method: takes unit data, relationships and a hint, and returns the sorted unit names based on this algorithm.
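-        In this case a contact-length weighted adjacency graph is traversed,
-        always stepping to the neighbour with the fewest remaining contacts,
-        to build up a directed ordering of the units.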
-
-        Args:
-            units (pandas.DataFrame): the data frame to sort
-            unit_relationships (pandas.DataFrame): the relationships between units
-            stratigraphic_order_hint (list): a list of unit names to use as a hint to sorting the units
-            contacts (pandas.DataFrame): unit contacts with length of the contacts in metres
-            map_data (map2loop.MapData): a catchall so that access to all map data is available
-
-        Returns:
-            list: the sorted unit names
-        """
-        try:
-            import networkx as nx
-        except Exception:
-            print("Cannot import networkx module, defaulting to SorterUseHint")
-            return stratigraphic_order_hint
-
-        contacts = contacts.sort_values(by="length", ascending=False)[["UNITNAME_1", "UNITNAME_2", "length"]]
-        units = list(units["name"].unique())
-        graph = nx.Graph()
-        for unit in units:
-            graph.add_node(unit, name=unit)
-        # Invert contact lengths so that longer contacts give smaller edge weights
-        max_weight = max(list(contacts["length"])) + 1
-        for _, row in contacts.iterrows():
-            graph.add_edge(row["UNITNAME_1"], row["UNITNAME_2"], weight=int(max_weight - row["length"]))
-
-        cnode = None
-        new_graph = nx.DiGraph()
-        while graph.number_of_nodes() > 0:
-            if cnode is None:
-                # Restart from the unit with the fewest neighbours
-                df = pandas.DataFrame(columns=["unit", "num_neighbours"])
-                df["unit"] = list(graph.nodes)
-                df["num_neighbours"] = df.apply(lambda row: len(list(graph.neighbors(row["unit"]))), axis=1)
-                df.sort_values(by=["num_neighbours"], inplace=True)
-                df.reset_index(inplace=True, drop=True)
-                cnode = df["unit"][0]
-                new_graph.add_node(cnode)
-            neighbour_edge_count = {}
-            for neighbour in list(graph.neighbors(cnode)):
-                neighbour_edge_count[neighbour] = len(list(graph.neighbors(neighbour)))
-            if len(neighbour_edge_count) == 0:
-                graph.remove_node(cnode)
-                cnode = None
-            else:
-                node_with_min_edges = min(neighbour_edge_count, key=neighbour_edge_count.get)
-                if neighbour_edge_count[node_with_min_edges] < 2:
-                    new_graph.add_node(node_with_min_edges)
-                    new_graph.add_edge(cnode, node_with_min_edges)
-                    graph.remove_node(node_with_min_edges)
-                else:
-                    new_graph.add_node(node_with_min_edges)
-                    new_graph.add_edge(cnode, node_with_min_edges)
-                    graph.remove_node(cnode)
-                    cnode = node_with_min_edges
-        order = list(nx.topological_sort(new_graph))
-        return order
-
-
-class SorterMaximiseContacts(Sorter):
-    """
-    Sorter class which returns a sorted list of units based on the adjacency of units,
-    prioritising the maximum length of each contact
-    """
-    def __init__(self):
-        """
-        Initialiser for adjacency based sorter
-        """
-        self.sorter_label = "SorterMaximiseContacts"
-
-    def sort(self, units: pandas.DataFrame, unit_relationships: pandas.DataFrame, stratigraphic_order_hint: list, contacts: pandas.DataFrame, map_data: MapData) -> list:
-        """
-        Execute sorter method: takes unit data, relationships and a hint, and returns the sorted unit names based on this algorithm.
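-        In this case an approximate travelling salesman route is found through
-        the contact graph, weighted so that units sharing longer contacts are
-        kept adjacent in the resulting order.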
-
-        Args:
-            units (pandas.DataFrame): the data frame to sort
-            unit_relationships (pandas.DataFrame): the relationships between units
-            stratigraphic_order_hint (list): a list of unit names to use as a hint to sorting the units
-            contacts (pandas.DataFrame): unit contacts with length of the contacts in metres
-            map_data (map2loop.MapData): a catchall so that access to all map data is available
-
-        Returns:
-            list: the sorted unit names
-        """
-        try:
-            import networkx as nx
-            import networkx.algorithms.approximation as nx_app
-        except Exception:
-            print("Cannot import networkx module, defaulting to SorterUseHint")
-            return stratigraphic_order_hint
-
-        sorted_contacts = contacts.sort_values(by="length", ascending=False)
-        graph = nx.Graph()
-        units = list(units["name"].unique())
-        for unit in units:
-            graph.add_node(unit, name=unit)
-
-        # Invert contact lengths so that longer contacts give smaller edge weights
-        max_weight = max(list(sorted_contacts["length"])) + 1
-        for _, row in sorted_contacts.iterrows():
-            graph.add_edge(row["UNITNAME_1"], row["UNITNAME_2"], weight=int(max_weight - row["length"]))
-
-        # Convert the travelling salesman route into a directed graph and walk it
-        route = nx_app.traveling_salesman_problem(graph)
-        edge_list = list(nx.utils.pairwise(route))
-        dg = nx.DiGraph()
-        dg.add_node(edge_list[0][0])
-        for edge in edge_list:
-            if edge[1] not in dg.nodes():
-                dg.add_node(edge[1])
-                dg.add_edge(edge[0], edge[1])
-        return list(nx.dfs_preorder_nodes(dg, source=list(dg.nodes())[0]))
-
-
-class SorterObservationProjections(Sorter):
-    """
-    Sorter class which returns a sorted list of units based on the adjacency of units,
-    using the direction of observations to predict which unit is adjacent to the current one
-    """
-    def __init__(self):
-        """
-        Initialiser for adjacency based sorter
-        """
-        self.sorter_label = "SorterObservationProjections"
-
-    def sort(self, units: pandas.DataFrame, unit_relationships: pandas.DataFrame, stratigraphic_order_hint: list, contacts: pandas.DataFrame, map_data: MapData) -> list:
-        """
-        Execute sorter method: takes unit data, relationships and a hint, and returns the sorted unit names based on this algorithm.
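-        In this case each structural observation is projected down-dip until it
-        crosses a neighbouring unit, accumulating younger/older unit pairs which
-        are then resolved into an ordering via a weighted travelling salesman route.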
-
-        Args:
-            units (pandas.DataFrame): the data frame to sort
-            unit_relationships (pandas.DataFrame): the relationships between units
-            stratigraphic_order_hint (list): a list of unit names to use as a hint to sorting the units
-            contacts (pandas.DataFrame): unit contacts with length of the contacts in metres
-            map_data (map2loop.MapData): a catchall so that access to all map data is available
-
-        Returns:
-            list: the sorted unit names
-        """
-        try:
-            import networkx as nx
-            import networkx.algorithms.approximation as nx_app
-            from shapely.geometry import LineString, Point
-            from map2loop.m2l_enums import Datatype
-        except Exception:
-            print("Cannot import networkx module, defaulting to SorterUseHint")
-            return stratigraphic_order_hint
-
-        geol = map_data.get_map_data(Datatype.GEOLOGY).copy()
-        geol = geol[geol["INTRUSIVE"] == False]
-        geol = geol[geol["SILL"] == False]
-        orientations = map_data.get_map_data(Datatype.STRUCTURE).copy()
-
-        verbose = False
-
-        # Collect ordered (younger, older) unit pairs from the observations
-        ordered_unit_observations = []
-        for _, row in orientations.iterrows():
-            # Get the unit containing this observation
-            containing_unit = geol[geol.contains(row.geometry)]
-            if len(containing_unit) > 1:
-                if verbose:
-                    print(f"Orientation {row.ID} is within multiple units")
-                    print(f"Check geology map around coordinates {row.geometry}")
-            if len(containing_unit) < 1:
-                if verbose:
-                    print(f"Orientation {row.ID} is not in a unit")
-                    print(f"Check geology map around coordinates {row.geometry}")
-            else:
-                starting_unit_name = containing_unit.iloc[0]['UNITNAME']
-
-                # Get units that a projected line passes through
-                # TODO: question (how far should the projection go)
-                # 1km, 10km ???
-                length = 10000
-                dipDirRadians = row.DIPDIR * math.pi / 180.0
-                dipRadians = row.DIP * math.pi / 180.0
-                # Project along the dip direction azimuth (east offset = sin, north offset = cos)
-                start = row.geometry
-                end = Point(start.x + math.sin(dipDirRadians) * length,
-                            start.y + math.cos(dipDirRadians) * length)
-                line = LineString([start, end])
-
-                inter = geol[line.intersects(geol.geometry)]
-                if len(inter) > 1:
-                    intersect = line.intersection(inter.geometry.boundary)
-
-                    # Remove containing unit
-                    intersect.drop(containing_unit.index, inplace=True)
-
-                    # Sort by distance from start point
-                    sub = geol.iloc[list(intersect.index)].copy()
-                    sub["distance"] = geol.distance(start)
-                    sub.sort_values(by="distance", inplace=True)
-
-                    # Get first unit it hits and the point of intersection
-                    closest_unit_name = sub.iloc[0].UNITNAME
-
-                    # Get intersection point
-                    if intersect[sub.index[0]].geom_type == "MultiPoint":
-                        intersect_point = intersect[sub.index[0]].geoms[0]
-                    elif intersect[sub.index[0]].geom_type == "Point":
-                        intersect_point = intersect[sub.index[0]]
-                    else:
-                        continue
-
-                    # Get heights for intersection point and start of ray
-                    height = map_data.get_value_from_raster(Datatype.DTM, intersect_point.x, intersect_point.y)
-                    intersect_point = Point(intersect_point.x, intersect_point.y, height)
-                    height = map_data.get_value_from_raster(Datatype.DTM, start.x, start.y)
-                    start = Point(start.x, start.y, height)
-
-                    # Compare the height of the intersection point with the height of
-                    # the plane projected down-dip over that horizontal distance
-                    horizontal_dist = (intersect_point.x - start.x, intersect_point.y - start.y)
-                    horizontal_dist = math.sqrt(horizontal_dist[0]**2 + horizontal_dist[1]**2)
-                    projected_height = start.z - horizontal_dist * math.tan(dipRadians)
-
-                    if intersect_point.z < projected_height:
-                        ordered_unit_observations += [(starting_unit_name, closest_unit_name)]
-                    else:
-                        ordered_unit_observations += [(closest_unit_name, starting_unit_name)]
-
-        # Create a matrix of older versus younger frequency from observations
-        unit_names = geol.UNITNAME.unique()
-        df = pandas.DataFrame(0, index=unit_names, columns=unit_names)
-        for (younger, older) in ordered_unit_observations:
-            df.loc[younger, older] += 1
-        max_value = max(df.max())
-
-        # Using the older/younger matrix create a directed graph
-        g = nx.DiGraph()
-        remaining_units = unit_names
-        for unit1 in unit_names:
-            g.add_node(unit1)
-        for unit1 in unit_names:
-            remaining_units = remaining_units[1:]
-            for unit2 in remaining_units:
-                if unit1 != unit2:
-                    weight = df.loc[unit1, unit2] - df.loc[unit2, unit1]
-                    if weight < 0:
-                        g.add_edge(unit1, unit2, weight=max_value + weight)
-                    elif weight > 0:
-                        g.add_edge(unit2, unit1, weight=max_value - weight)
-
-        # Link in unlinked units from contacts with max weight
-        g_undirected = g.to_undirected()
-        for unit in unit_names:
-            if len(list(g_undirected.neighbors(unit))) < 1:
-                mask1 = contacts["UNITNAME_1"] == unit
-                mask2 = contacts["UNITNAME_2"] == unit
-                for _, row in contacts[mask1 | mask2].iterrows():
-                    if unit == row["UNITNAME_1"]:
-                        g.add_edge(row["UNITNAME_2"], unit, weight=max_value * 10)
-                    else:
-                        g.add_edge(row["UNITNAME_1"], unit, weight=max_value * 10)
-
-        # Run travelling salesman using the observation evidence as weighting
-        route = nx_app.traveling_salesman_problem(g.to_undirected())
-        edge_list = list(nx.utils.pairwise(route))
-        dd = nx.DiGraph()
-        dd.add_node(edge_list[0][0])
-        for edge in edge_list:
-            if edge[1] not in dd.nodes():
-                dd.add_node(edge[1])
-                dd.add_edge(edge[0], edge[1])
-        return list(nx.dfs_preorder_nodes(dd, source=list(dd.nodes())[0]))
diff --git a/map2loop/stratigraphic_column.py b/map2loop/stratigraphic_column.py
deleted file mode 100644
index 7c1733a7..00000000
--- a/map2loop/stratigraphic_column.py
+++ /dev/null
@@ -1,195 +0,0 @@
-import pandas
-import numpy
-import geopandas
-
-
-class StratigraphicColumn:
-    """
-    A class containing the stratigraphic and lithology unit summaries and their relationships
-
-    Attributes
-    ----------
-    column: list
-        List of stratigraphic units in time order
-    groups: list
-        List of stratigraphic groups in time order
-    stratigraphicUnitColumns: numpy.dtype
-        Column names and types for stratigraphic unit summary
-    stratigraphicUnits: pandas.DataFrame
-        The stratigraphic units
-    lithologyUnitColumns: numpy.dtype
-        Column names and types for lithology layer summary
-    lithologyUnits: pandas.DataFrame
-        The lithology units
-
-    """
-    def __init__(self):
-        """
-        The initialiser for the stratigraphic units. All attributes are defaulted
-        """
-        self.column = []
-        self.groups = []
-
-        # Create empty dataframes for units
-        self.stratigraphicUnitColumns = numpy.dtype(
-            [
-                ("layerId", int),
-                ("name", str),
-                ("minAge", float),
-                ("maxAge", float),
-                ("group", str),
-                ("supergroup", str),
-                ("thickness", float),
-                ("colour", str),
-                # ("indexInGroup", int),
-                # ("groupNum", int),
-                # ("numInGroup", int),
-            ]
-        )
-        self.stratigraphicUnits = pandas.DataFrame(
-            numpy.empty(0, dtype=self.stratigraphicUnitColumns)
-        )
-        self.stratigraphicUnits = self.stratigraphicUnits.set_index("name")
-
-        self.lithologyUnitColumns = numpy.dtype(
-            [
-                ("layerId", int),
-                ("name", str),
-                ("minAge", float),
-                ("maxAge", float),
-                ("group", str),
-                ("thickness", float),
-                ("colour", str),
-            ]
-        )
-        self.lithologyUnits = pandas.DataFrame(numpy.empty(0, dtype=self.lithologyUnitColumns))
-        self.lithologyUnits = self.lithologyUnits.set_index("name")
-
-    def findStratigraphicUnit(self, id):
-        """
-        Find the unit in the units list based on its layerId or name
-
-        Args:
-            id (int or str):
-                The layerId or name to look for
-
-        Returns:
-            pandas.DataFrame: The sliced data frame containing the requested unit
-        """
-        if type(id) is int:
-            return self.stratigraphicUnits[self.stratigraphicUnits["layerId"] == id]
-        elif type(id) is str:
-            return self.stratigraphicUnits[self.stratigraphicUnits["name"] == id]
-        else:
-            print("ERROR: Unknown identifier type used to find stratigraphic unit")
-
-    def findLithologyUnit(self, id):
-        """
-        Find the lithology unit in the units list based on its layerId or name
-
-        Args:
-            id (int or str):
-                The layerId or name to look for
-
-        Returns:
-            pandas.DataFrame: The sliced data frame containing the requested unit
-        """
-        if type(id) is int:
-            return self.lithologyUnits[self.lithologyUnits["layerId"] == id]
-        elif type(id) is str:
-            return self.lithologyUnits[self.lithologyUnits["name"] == id]
-        else:
-            print("ERROR: Unknown identifier type used to find lithology unit")
-
-    def addStratigraphicUnit(self, unit):
-        """
-        Add stratigraphic unit to the units list
-
-        Args:
-            unit (pandas.DataFrame or dict):
-                The unit information to add
-        """
-        if type(unit) is pandas.DataFrame or type(unit) is dict:
-            if "name" in unit.keys():
-                if unit["name"] in self.stratigraphicUnits.index:
-                    print("Replacing stratigraphic unit", unit["name"])
-                self.stratigraphicUnits.loc[unit["name"]] = unit
-            else:
-                print("No name field in stratigraphic unit", unit)
-        else:
-            print("Cannot add unit to dataframe with type", type(unit))
-
-    def addLithologyUnit(self, unit):
-        """
-        Add lithology unit to the units list
-
-        Args:
-            unit (pandas.DataFrame or dict):
-                The unit information to add
-        """
-        if type(unit) is pandas.DataFrame or type(unit) is dict:
-            if "name" in unit.keys():
-                if unit["name"] in self.lithologyUnits.index:
-                    print("Replacing lithology unit", unit["name"])
-                self.lithologyUnits.loc[unit["name"]] = unit
-            else:
-                print("No name field in lithology unit", unit)
-        else:
-            print("Cannot add unit to dataframe with type", type(unit))
-
-    def populate(self, geology_map_data: geopandas.GeoDataFrame):
-        """
-        Parse the geodataframe data into the stratigraphic units list
-
-        Args:
-            geology_map_data (geopandas.GeoDataFrame):
-                The geodataframe with the unit data
-        """
-        if geology_map_data.shape[0] == 0:
-            return
-        geology_data = geology_map_data.copy()
-        geology_data = geology_data.drop_duplicates(subset=["UNITNAME"])
-        geology_data = geology_data.reset_index(drop=True)
-        # geology_data = geology_data.dropna(subset=["UNITNAME"])
-
-        self.stratigraphicUnits = pandas.DataFrame(
-            numpy.empty(geology_data.shape[0], dtype=self.stratigraphicUnitColumns)
-        )
-        self.stratigraphicUnits["layerId"] = numpy.arange(geology_data.shape[0])
-        self.stratigraphicUnits["name"] = geology_data["UNITNAME"]
-        self.stratigraphicUnits["minAge"] = geology_data["MIN_AGE"]
-        self.stratigraphicUnits["maxAge"] = geology_data["MAX_AGE"]
-        self.stratigraphicUnits["group"] = geology_data["GROUP"]
-        self.stratigraphicUnits["supergroup"] = geology_data["SUPERGROUP"]
-        self.stratigraphicUnits["thickness"] = -1
-        self.stratigraphicUnits["colour"] = "#000000"
-        # self.stratigraphicUnits["indexInGroup"] = -1
-
-        self.groups = list(self.stratigraphicUnits['group'].unique())
-
-    def set_stratigraphic_unit_parameter_by_name(
-        self, name: str, parameter: str, value
-    ):
-        """
-        Set a specific parameter on a specific stratigraphic unit
-
-        Args:
-            name (str): The name of the stratigraphic unit
-            parameter (str): The column name of the parameter
-            value (str or int or float): The value to set
-        """
-        self.stratigraphicUnits.loc[self.stratigraphicUnits["name"] == name, parameter] = value
-
-    def sort_from_relationship_list(self, relationshipList: list):
-        """
-        Sort the stratigraphic column based on the ordered list of unit names
-
-        Args:
-            relationshipList (list):
-                The order of the units by name
-        """
-        sorter = dict(zip(relationshipList, range(len(relationshipList))))
-        self.stratigraphicUnits["Order"] = self.stratigraphicUnits["name"].map(sorter)
-        self.stratigraphicUnits.sort_values(["Order"], inplace=True)
diff --git a/map2loop/thickness_calculator.py b/map2loop/thickness_calculator.py
deleted file mode 100644
index b9a75ec1..00000000
--- a/map2loop/thickness_calculator.py
+++ /dev/null
@@ -1,150 +0,0 @@
-from abc import ABC, abstractmethod
-import beartype
-import pandas
-import geopandas
-from statistics import mean
-from .mapdata import MapData
-
-
-class ThicknessCalculator(ABC):
-    """
-    Base Class of Thickness Calculator used to force structure of ThicknessCalculator
-
-    Args:
-        ABC (ABC): Derived from Abstract Base Class
-    """
-
-    def __init__(self):
-        """
-        Initialiser for ThicknessCalculator
-        """
-        self.thickness_calculator_label = "ThicknessCalculatorBaseClass"
-
-    def type(self):
-        """
-        Getter for subclass type label
-
-        Returns:
-            str: Name of subclass
-        """
-        return self.thickness_calculator_label
-
-    @beartype.beartype
-    @abstractmethod
-    def compute(
-        self,
-        units: pandas.DataFrame,
-        stratigraphic_order: list,
-        basal_contacts: geopandas.GeoDataFrame,
-        map_data: MapData,
-    ) -> pandas.DataFrame:
-        """
-        Execute thickness calculator method (abstract method)
-
-        Args:
-            units (pandas.DataFrame): the data frame of units to add thicknesses to
-            stratigraphic_order (list): a list of unit names sorted from youngest to oldest
-            basal_contacts (geopandas.GeoDataFrame): basal contact geo data with locations and unit names of the contacts (columns must contain ["ID","basal_unit","type","geometry"])
-            map_data (map2loop.MapData): a catchall so that access to all map data is available
-
-        Returns:
-            pandas.DataFrame: units dataframe with added thickness column for calculated thickness values
-        """
-        pass
-
-
-class ThicknessCalculatorAlpha(ThicknessCalculator):
-    """
-    ThicknessCalculator class which estimates unit thickness based on units, basal_contacts and stratigraphic order
-    """
-
-    def __init__(self):
-        """
-        Initialiser for alpha version of the thickness calculator
-        """
-        self.thickness_calculator_label = "ThicknessCalculatorAlpha"
-
-    @beartype.beartype
-    def compute(
-        self,
-        units: pandas.DataFrame,
-        stratigraphic_order: list,
-        basal_contacts: pandas.DataFrame,
-        map_data: MapData,
-    ) -> pandas.DataFrame:
-        """
-        Execute thickness calculator method: takes unit data, basal_contacts and stratigraphic order and attempts to estimate unit thickness.
-        Note: Thicknesses of the top and bottom units are not possible with this data and so are assigned the average of all other calculated unit thicknesses.
-
-        Args:
-            units (pandas.DataFrame): the data frame of units to add thicknesses to
-            stratigraphic_order (list): a list of unit names sorted from youngest to oldest
-            basal_contacts (geopandas.GeoDataFrame): basal contact geo data with locations and unit names of the contacts (columns must contain ["ID","basal_unit","type","geometry"])
-            map_data (map2loop.MapData): a catchall so that access to all map data is available
-
-        Returns:
-            pandas.DataFrame: units dataframe with added thickness column for calculated thickness values
-        """
-        # TODO: If we have orientation data near basal contact points we can estimate
-        # the actual distance between contacts rather than just using the horizontal distance
-        no_distance = -1
-        basal_contacts = basal_contacts[basal_contacts["type"] == "BASAL"]
-        thicknesses = units.copy()
-        # Set default value
-        thicknesses["thickness"] = no_distance
-        basal_unit_list = basal_contacts["basal_unit"].to_list()
-        if len(stratigraphic_order) < 3:
-            print(
-                f"Cannot make any thickness calculations with only {len(stratigraphic_order)} units"
-            )
-            return thicknesses
-        for i in range(1, len(stratigraphic_order) - 1):
-            # Compare basal contacts of adjacent units
-            if (
-                stratigraphic_order[i] in basal_unit_list
-                and stratigraphic_order[i + 1] in basal_unit_list
-            ):
-                contact1 = basal_contacts[
-                    basal_contacts["basal_unit"] == stratigraphic_order[i]
-                ]["geometry"].to_list()[0]
-                contact2 = basal_contacts[
-                    basal_contacts["basal_unit"] == stratigraphic_order[i + 1]
-                ]["geometry"].to_list()[0]
-                if contact1 is not None and contact2 is not None:
-                    distance = contact1.distance(contact2)
-                else:
-                    print(
-                        f"Cannot calculate thickness between {stratigraphic_order[i]} and {stratigraphic_order[i+1]}"
-                    )
-                    distance = no_distance
-            else:
-                print(
-                    f"Cannot calculate thickness between {stratigraphic_order[i]} and {stratigraphic_order[i+1]}"
-                )
-                distance = no_distance
-
-            # The thickness estimate is the minimum horizontal distance found between
-            # the two basal contacts
-            # Find row in unit_dataframe corresponding to unit and replace thickness value if it is -1 or larger than distance
-            idx = thicknesses.index[
-                thicknesses["name"] == stratigraphic_order[i]
-            ].tolist()[0]
-            if thicknesses.loc[idx, "thickness"] == -1:
-                val = distance
-            else:
-                val = min(distance, thicknesses.at[idx, "thickness"])
-            thicknesses.loc[idx, "thickness"] = val
-
-        # If no thickness calculations can be made with current stratigraphic column set all units
-        # to a uniform thickness value
-        if len(thicknesses[thicknesses["thickness"] > 0]) < 1:
-            thicknesses["thickness"] = 100.0
-        mean_thickness = mean(thicknesses[thicknesses["thickness"] > 0]["thickness"])
-
-        # For any unit thickness that still hasn't been calculated (i.e. still at -1) set to
-        # the mean thickness of the other units
-        thicknesses["thickness"] = thicknesses.apply(
-            lambda row: mean_thickness if row["thickness"] == -1 else row["thickness"],
-            axis=1,
-        )
-        return thicknesses
diff --git a/map2loop/throw_calculator.py b/map2loop/throw_calculator.py
deleted file mode 100644
index 5dd00856..00000000
--- a/map2loop/throw_calculator.py
+++ /dev/null
@@ -1,83 +0,0 @@
-from abc import ABC, abstractmethod
-import beartype
-import pandas
-import geopandas
-from .mapdata import MapData
-
-
-class ThrowCalculator(ABC):
-    """
-    Base Class of Throw Calculator used to force structure of ThrowCalculator
-
-    Args:
-        ABC (ABC): Derived from Abstract Base Class
-    """
-    def __init__(self):
-        """
-        Initialiser for ThrowCalculator
-        """
-        self.throw_calculator_label = "ThrowCalculatorBaseClass"
-
-    def type(self):
-        """
-        Getter for subclass type label
-
-        Returns:
-            str: Name of subclass
-        """
-        return self.throw_calculator_label
-
-    @beartype.beartype
-    @abstractmethod
-    def compute(self, faults: pandas.DataFrame, stratigraphic_order: list, basal_contacts: geopandas.GeoDataFrame, map_data: MapData) -> pandas.DataFrame:
-        """
-        Execute throw calculator method (abstract method)
-
-        Args:
-            faults (pandas.DataFrame): the data frame of the faults to add throw values to
-            stratigraphic_order (list): a list of unit names sorted from youngest to oldest
-            basal_contacts (geopandas.GeoDataFrame): basal contact geo data with locations and unit names of the contacts (columns must contain ["ID","basal_unit","type","geometry"])
-            map_data (map2loop.MapData): a catchall so that access to all map data is available
-
-        Returns:
-            pandas.DataFrame: fault data frame with throw values (avgDisplacement and avgDownthrowDir) filled in
-        """
-        pass
-
-
-class ThrowCalculatorAlpha(ThrowCalculator):
-    """
-    ThrowCalculator class which estimates fault throw values based on units, basal_contacts and stratigraphic order
-    """
-    def __init__(self):
-        """
-        Initialiser for alpha version of the throw calculator
-        """
-        self.throw_calculator_label = "ThrowCalculatorAlpha"
-
-    @beartype.beartype
-    def compute(self, faults: pandas.DataFrame, stratigraphic_order: list, basal_contacts: pandas.DataFrame, map_data: MapData) -> pandas.DataFrame:
-        """
-        Execute throw calculator method: takes fault data, basal_contacts and stratigraphic order and attempts to estimate fault throw.
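-        In this alpha version the geometric estimate is not yet implemented; any
-        fault still carrying the default displacement of -1 is assigned a nominal
-        value of 100.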
-
-        Args:
-            faults (pandas.DataFrame): the data frame of the faults to add throw values to
-            stratigraphic_order (list): a list of unit names sorted from youngest to oldest
-            basal_contacts (geopandas.GeoDataFrame): basal contact geo data with locations and unit names of the contacts (columns must contain ["ID","basal_unit","type","geometry"])
-            map_data (map2loop.MapData): a catchall so that access to all map data is available
-
-        Returns:
-            pandas.DataFrame: fault data frame with throw values (avgDisplacement and avgDownthrowDir) filled in
-        """
-        # For each fault take the geometric join of all contact lines and that fault line
-
-        # For each contact join take the length of that join as an approximation of the minimum throw of the fault
-
-        # Take all the min throw approximations and set the largest one as the avgDisplacement
-
-        # If a fault has no contact lines the maximum throw should be less than the thickness of the containing
-        # unit (if we exclude map height changes and fault angle)
-
-        # Set any remaining displacement values to default value
-        faults["avgDisplacement"] = faults.apply(lambda row: 100 if row["avgDisplacement"] == -1 else row["avgDisplacement"], axis=1)
-        return faults
diff --git a/map2loop/version.py b/map2loop/version.py
deleted file mode 100644
index 8e10cb46..00000000
--- a/map2loop/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "3.0.4"
diff --git a/docs/requirements.txt b/requirements.txt
similarity index 100%
rename from docs/requirements.txt
rename to requirements.txt
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 096cd875..00000000
--- a/setup.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import os
-import sys
-import setuptools
-
-version = {}
-package_root = os.path.abspath(os.path.dirname(__file__))
-
-with open(os.path.join(package_root, "map2loop/version.py")) as fp:
-    exec(fp.read(), version)
-version = version["__version__"]
-
-head, tail = os.path.split(sys.argv[0])
-
-
-def get_description():
-    long_description = ""
-    readme_file = os.path.join(head, "README.md")
-    with open(readme_file, "r") as fh:
-        long_description = fh.read()
-    return long_description
-
-
-setuptools.setup(
-    name="map2loop",
-    version=version,
-    author="The Loop Organisation",
-    author_email="roy.thomson@monash.edu",
-    description="Generate 3D model data from 2D maps.",
-    long_description=get_description(),
-    long_description_content_type="text/markdown",
-    install_requires=[
-        "numpy",
-        # "gdal",
-        "pandas",
-        "geopandas",
-        "shapely",
-        "tqdm",
-        "networkx",
-        "owslib",
-        "hjson",
-        "loopprojectfile",
-        "map2model",
-        "beartype",
-    ],
-    url="https://github.com/Loop3D/map2loop",
-    packages=setuptools.find_packages(),
-    classifiers=[
-        "Development Status :: 4 - Beta",
-        "Intended Audience :: Science/Research",
-        "Natural Language :: English",
-        "Programming Language :: Python",
-        "Programming Language :: Python :: 3",
-        "License :: OSI Approved :: MIT License",
-        "Operating System :: OS Independent",
-        "Topic :: Scientific/Engineering",
-        "Topic :: Scientific/Engineering :: GIS",
-    ],
-    python_requires=">=3.8",
-)
diff --git a/docs/source/API.rst b/source/API.rst
similarity index 100%
rename from docs/source/API.rst
rename to source/API.rst
diff --git a/docs/source/_templates/custom-class-template.rst b/source/_templates/custom-class-template.rst
similarity index 100%
rename from docs/source/_templates/custom-class-template.rst
rename to source/_templates/custom-class-template.rst
diff --git a/docs/source/_templates/custom-module-template.rst b/source/_templates/custom-module-template.rst
similarity index 100%
rename from docs/source/_templates/custom-module-template.rst
rename to source/_templates/custom-module-template.rst
diff --git a/docs/source/_templates/custom.css b/source/_templates/custom.css
similarity index 100%
rename from docs/source/_templates/custom.css
rename to source/_templates/custom.css
diff --git a/docs/source/_templates/page.html b/source/_templates/page.html
similarity index 100%
rename from docs/source/_templates/page.html
rename to source/_templates/page.html
diff --git a/docs/source/conf.py b/source/conf.py
similarity index 100%
rename from docs/source/conf.py
rename to source/conf.py
diff --git a/docs/source/index.rst b/source/index.rst
similarity index 100%
rename from docs/source/index.rst
rename to source/index.rst
diff --git a/docs/source/sg_execution_times.rst b/source/sg_execution_times.rst
similarity index 100%
rename from docs/source/sg_execution_times.rst
rename to source/sg_execution_times.rst
diff --git a/docs/source/user_guide/explanation.rst b/source/user_guide/explanation.rst
similarity index 100%
rename from docs/source/user_guide/explanation.rst
rename to source/user_guide/explanation.rst
diff --git a/docs/source/user_guide/exporting.rst b/source/user_guide/exporting.rst
similarity index 100%
rename from docs/source/user_guide/exporting.rst
rename to source/user_guide/exporting.rst
diff --git a/docs/source/user_guide/fault_offset.rst b/source/user_guide/fault_offset.rst
similarity index 100%
rename from docs/source/user_guide/fault_offset.rst
rename to source/user_guide/fault_offset.rst
diff --git a/docs/source/user_guide/geomodelling.rst b/source/user_guide/geomodelling.rst
similarity index 100%
rename from docs/source/user_guide/geomodelling.rst
rename to source/user_guide/geomodelling.rst
diff --git a/docs/source/user_guide/getting_started.rst b/source/user_guide/getting_started.rst
similarity index 100%
rename from docs/source/user_guide/getting_started.rst
rename to source/user_guide/getting_started.rst
diff --git a/docs/source/user_guide/index.rst b/source/user_guide/index.rst
similarity index 100%
rename from docs/source/user_guide/index.rst
rename to source/user_guide/index.rst
diff --git a/docs/source/user_guide/installation.rst b/source/user_guide/installation.rst
similarity index 100%
rename from docs/source/user_guide/installation.rst
rename to source/user_guide/installation.rst
diff --git a/docs/source/user_guide/stratigraphic_order.rst b/source/user_guide/stratigraphic_order.rst
similarity index 100%
rename from docs/source/user_guide/stratigraphic_order.rst
rename to source/user_guide/stratigraphic_order.rst
diff --git a/docs/source/user_guide/stratigraphic_thickness.rst b/source/user_guide/stratigraphic_thickness.rst
similarity index 100%
rename from docs/source/user_guide/stratigraphic_thickness.rst
rename to source/user_guide/stratigraphic_thickness.rst
diff --git a/tests/__init__.py b/tests/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/test_import.py b/tests/test_import.py
deleted file mode 100644
index 240e38ee..00000000
--- a/tests/test_import.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import pytest
-
-
-def test_import_map2loop():
-    try:
-        import map2loop
-    except ImportError:
-        pytest.fail("Failed to import map2loop module")
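For reference, a minimal sketch of how the deleted sampler classes were typically driven; the shapefile names and columns here are hypothetical, not taken from the diff:

    import geopandas
    from map2loop.sampler import SamplerDecimator, SamplerSpacing

    # Hypothetical input layers
    orientations = geopandas.read_file("orientations.shp")  # point geometry
    geology = geopandas.read_file("geology.shp")            # polygon geometry with an "ID" column

    # Keep every 10th orientation point
    structure_samples = SamplerDecimator(decimation=10).sample(orientations)

    # Sample polygon boundaries every 200 metres along their length
    contact_samples = SamplerSpacing(spacing=200.0).sample(geology)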